From c0bf1d7081e1a9d51ca525bad094e75890ada1f1 Mon Sep 17 00:00:00 2001 From: Jason Sexauer Date: Sat, 5 Apr 2014 12:07:30 -0400 Subject: [PATCH] Remove a number of deprecated parameters/functions/classes [fix #6641] --- doc/source/release.rst | 22 +++ doc/source/v0.14.0.txt | 23 +++ pandas/core/format.py | 23 +-- pandas/core/frame.py | 34 +--- pandas/core/series.py | 6 +- pandas/io/data.py | 14 +- pandas/io/pytables.py | 7 - pandas/stats/moments.py | 154 ++++++++---------- pandas/stats/tests/test_moments.py | 21 --- pandas/tseries/frequencies.py | 15 -- pandas/tseries/index.py | 32 ++-- .../tseries/tests/test_timeseries_legacy.py | 22 --- 12 files changed, 134 insertions(+), 239 deletions(-) diff --git a/doc/source/release.rst b/doc/source/release.rst index 8bf6a8d7b9488..b541ef4d2dd4b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -180,6 +180,28 @@ Prior Version Deprecations/Changes - Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) +- Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`6641`) + +- Remove ``force_unicode`` keyword from :meth:`DataFrame.to_string`, + :meth:`DataFrame.to_latex`, and :meth:`DataFrame.to_html`; these functions + encode in unicode by default (:issue:`6641`) + +- Remove ``nanRep`` keyword from :meth:`DataFrame.to_csv` and + :meth:`DataFrame.to_string` (:issue:`6641`) + +- Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`6641`) + +- Remove ``inferTimeRule`` function from ``pandas.tseries.frequencies`` (:issue:`6641`) + +- Remove ``name`` keyword from :func:`get_data_yahoo` and + :func:`get_data_google` (:issue:`6641`) + +- Remove ``offset`` keyword from :class:`DatetimeIndex` constructor + (:issue:`6641`) + +- Remove ``time_rule`` from several rolling-moment statistical functions, such + as :func:`rolling_sum` (:issue:`6641`) + Experimental Features ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 23ab8f10116c1..335546a983e6f 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -328,6 +328,29 @@ Therse are prior version deprecations that are taking effect as of 0.14.0. - Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) +- Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`6641`) + +- Remove ``force_unicode`` keyword from :meth:`DataFrame.to_string`, + :meth:`DataFrame.to_latex`, and :meth:`DataFrame.to_html`; these functions + encode in unicode by default (:issue:`6641`) + +- Remove ``nanRep`` keyword from :meth:`DataFrame.to_csv` and + :meth:`DataFrame.to_string` (:issue:`6641`) + +- Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`6641`) + +- Remove ``inferTimeRule`` function from ``pandas.tseries.frequencies`` (:issue:`6641`) + +- Remove ``name`` keyword from :func:`get_data_yahoo` and + :func:`get_data_google` (:issue:`6641`) + +- Remove ``offset`` keyword from :class:`DatetimeIndex` constructor + (:issue:`6641`) + +- Remove ``time_rule`` from several rolling-moment statistical functions, such + as :func:`rolling_sum` (:issue:`6641`) + + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/format.py b/pandas/core/format.py index 636b3f452a20c..a7cbf2c70a5d3 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -358,15 +358,10 @@ def _to_str_columns(self): return strcols - def to_string(self, force_unicode=None): + def to_string(self): """ Render a DataFrame to a console-friendly tabular output.
""" - import warnings - if force_unicode is not None: # pragma: no cover - warnings.warn( - "force_unicode is deprecated, it will have no effect", - FutureWarning) frame = self.frame @@ -423,8 +418,7 @@ def _join_multiline(self, *strcols): st = ed return '\n\n'.join(str_lst) - def to_latex(self, force_unicode=None, column_format=None, - longtable=False): + def to_latex(self, column_format=None, longtable=False): """ Render a DataFrame to a LaTeX tabular/longtable environment output. """ @@ -435,12 +429,6 @@ def get_col_type(dtype): else: return 'l' - import warnings - if force_unicode is not None: # pragma: no cover - warnings.warn( - "force_unicode is deprecated, it will have no effect", - FutureWarning) - frame = self.frame if len(frame.columns) == 0 or len(frame.index) == 0: @@ -2139,7 +2127,7 @@ def __call__(self, num): return formatted # .strip() -def set_eng_float_format(precision=None, accuracy=3, use_eng_prefix=False): +def set_eng_float_format(accuracy=3, use_eng_prefix=False): """ Alter default behavior on how float is formatted in DataFrame. Format float in engineering format. By accuracy, we mean the number of @@ -2147,11 +2135,6 @@ def set_eng_float_format(precision=None, accuracy=3, use_eng_prefix=False): See also EngFormatter. """ - if precision is not None: # pragma: no cover - import warnings - warnings.warn("'precision' parameter in set_eng_float_format is " - "being renamed to 'accuracy'", FutureWarning) - accuracy = precision set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix)) set_option("display.column_space", max(12, accuracy + 9)) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8875d2fdfb39a..a5d93f09c9e07 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1071,7 +1071,7 @@ def to_panel(self): @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, - mode='w', nanRep=None, encoding=None, quoting=None, + mode='w', encoding=None, quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=False, date_format=None, doublequote=True, escapechar=None, **kwds): @@ -1128,10 +1128,6 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, Format string for datetime objects cols : kwarg only alias of columns [deprecated] """ - if nanRep is not None: # pragma: no cover - warnings.warn("nanRep is deprecated, use na_rep", - FutureWarning) - na_rep = nanRep formatter = fmt.CSVFormatter(self, path_or_buf, line_terminator=line_terminator, @@ -1275,21 +1271,12 @@ def to_stata( @Appender(fmt.docstring_to_string, indents=1) def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, - float_format=None, sparsify=None, nanRep=None, - index_names=True, justify=None, force_unicode=None, - line_width=None, max_rows=None, max_cols=None, + float_format=None, sparsify=None, index_names=True, + justify=None, line_width=None, max_rows=None, max_cols=None, show_dimensions=False): """ Render a DataFrame to a console-friendly tabular output. 
""" - if force_unicode is not None: # pragma: no cover - warnings.warn("force_unicode is deprecated, it will have no " - "effect", FutureWarning) - - if nanRep is not None: # pragma: no cover - warnings.warn("nanRep is deprecated, use na_rep", - FutureWarning) - na_rep = nanRep if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", @@ -1318,9 +1305,8 @@ def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, - justify=None, force_unicode=None, bold_rows=True, - classes=None, escape=True, max_rows=None, max_cols=None, - show_dimensions=False): + justify=None, bold_rows=True, classes=None, escape=True, + max_rows=None, max_cols=None, show_dimensions=False): """ Render a DataFrame as an HTML table. @@ -1341,10 +1327,6 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, """ - if force_unicode is not None: # pragma: no cover - warnings.warn("force_unicode is deprecated, it will have no " - "effect", FutureWarning) - if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) @@ -1372,7 +1354,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, - bold_rows=True, force_unicode=None, longtable=False): + bold_rows=True, longtable=False): """ Render a DataFrame to a tabular environment table. You can splice this into a LaTeX document. Requires \\usepackage(booktabs}. 
@@ -1387,10 +1369,6 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, """ - if force_unicode is not None: # pragma: no cover - warnings.warn("force_unicode is deprecated, it will have no " - "effect", FutureWarning) - if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) diff --git a/pandas/core/series.py b/pandas/core/series.py index 4ab7855ec2f84..bf6d96848b41b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -881,7 +881,7 @@ def _repr_footer(self): str(self.dtype.name)) def to_string(self, buf=None, na_rep='NaN', float_format=None, - nanRep=None, length=False, dtype=False, name=False): + length=False, dtype=False, name=False): """ Render a string representation of the Series @@ -906,10 +906,6 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, formatted : string (if not buffer passed) """ - if nanRep is not None: # pragma: no cover - warnings.warn("nanRep is deprecated, use na_rep", FutureWarning) - na_rep = nanRep - the_repr = self._get_repr(float_format=float_format, na_rep=na_rep, length=length, dtype=dtype, name=name) diff --git a/pandas/io/data.py b/pandas/io/data.py index dc5dd2b4b7d80..e875e8aa3c6db 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -338,11 +338,7 @@ def _dl_mult_symbols(symbols, start, end, chunksize, retry_count, pause, def _get_data_from(symbols, start, end, retry_count, pause, adjust_price, - ret_index, chunksize, source, name): - if name is not None: - warnings.warn("Arg 'name' is deprecated, please use 'symbols' " - "instead.", FutureWarning) - symbols = name + ret_index, chunksize, source): src_fn = _source_functions[source] @@ -367,7 +363,7 @@ def _get_data_from(symbols, start, end, retry_count, pause, adjust_price, def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, pause=0.001, adjust_price=False, ret_index=False, - chunksize=25, name=None): + chunksize=25): """ Returns DataFrame/Panel of historical stock prices from symbols, over date range, start to end. To avoid being penalized by Yahoo! Finance servers, @@ -402,12 +398,12 @@ def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, hist_data : DataFrame (str) or Panel (array-like object, DataFrame) """ return _get_data_from(symbols, start, end, retry_count, pause, - adjust_price, ret_index, chunksize, 'yahoo', name) + adjust_price, ret_index, chunksize, 'yahoo') def get_data_google(symbols=None, start=None, end=None, retry_count=3, pause=0.001, adjust_price=False, ret_index=False, - chunksize=25, name=None): + chunksize=25): """ Returns DataFrame/Panel of historical stock prices from symbols, over date range, start to end. 
To avoid being penalized by Google Finance servers, @@ -436,7 +432,7 @@ def get_data_google(symbols=None, start=None, end=None, retry_count=3, hist_data : DataFrame (str) or Panel (array-like object, DataFrame) """ return _get_data_from(symbols, start, end, retry_count, pause, - adjust_price, ret_index, chunksize, 'google', name) + adjust_price, ret_index, chunksize, 'google') _FRED_URL = "http://research.stlouisfed.org/fred2/series/" diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 76f630082aa15..8d2ca794be6b8 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -685,13 +685,6 @@ def select_as_coordinates( return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs) - def unique(self, key, column, **kwargs): - warnings.warn("unique(key,column) is deprecated\n" - "use select_column(key,column).unique() instead", - FutureWarning) - return self.get_storer(key).read_column(column=column, - **kwargs).unique() - def select_column(self, key, column, **kwargs): """ return a single column from the table. This is generally only useful to diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index 523f055eaf605..f98c06a4d63a1 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -54,8 +54,7 @@ (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. center : boolean, default False Set the labels at the center of the window. """ @@ -83,7 +82,6 @@ beginning) freq : None or string alias / date offset object, default=None Frequency to conform to before computing statistic - time_rule is a legacy alias for freq adjust : boolean, default True Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings (viewing EWMA as a moving average) @@ -109,8 +107,7 @@ (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. """ @@ -151,7 +148,7 @@ """ -def rolling_count(arg, window, freq=None, center=False, time_rule=None): +def rolling_count(arg, window, freq=None, center=False): """ Rolling count of number of non-NaN observations inside provided window. @@ -163,8 +160,7 @@ def rolling_count(arg, window, freq=None, center=False, time_rule=None): calculating the statistic. freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq` + as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window @@ -178,7 +174,7 @@ def rolling_count(arg, window, freq=None, center=False, time_rule=None): frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). 
""" - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) window = min(window, len(arg)) return_hook, values = _process_data_structure(arg, kill_inf=False) @@ -197,7 +193,7 @@ def rolling_count(arg, window, freq=None, center=False, time_rule=None): _roll_kw+_pairwise_kw, _flex_retval, _roll_notes) @Appender(_doc_template) def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None, - center=False, time_rule=None, pairwise=None): + center=False, pairwise=None): if window is None and isinstance(arg2, (int, float)): window = arg2 arg2 = arg1 @@ -205,8 +201,8 @@ def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None, elif arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise # only default unset - arg1 = _conv_timerule(arg1, freq, time_rule) - arg2 = _conv_timerule(arg2, freq, time_rule) + arg1 = _conv_timerule(arg1, freq) + arg2 = _conv_timerule(arg2, freq) window = min(window, len(arg1), len(arg2)) def _get_cov(X, Y): @@ -222,7 +218,7 @@ def _get_cov(X, Y): _roll_kw+_pairwise_kw, _flex_retval, _roll_notes) @Appender(_doc_template) def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None, - center=False, time_rule=None, pairwise=None): + center=False, pairwise=None): if window is None and isinstance(arg2, (int, float)): window = arg2 arg2 = arg1 @@ -230,17 +226,17 @@ def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None, elif arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise # only default unset - arg1 = _conv_timerule(arg1, freq, time_rule) - arg2 = _conv_timerule(arg2, freq, time_rule) + arg1 = _conv_timerule(arg1, freq) + arg2 = _conv_timerule(arg2, freq) window = min(window, len(arg1), len(arg2)) def _get_corr(a, b): num = rolling_cov(a, b, window, min_periods, freq=freq, - center=center, time_rule=time_rule) + center=center) den = (rolling_std(a, window, min_periods, freq=freq, - center=center, time_rule=time_rule) * + center=center) * rolling_std(b, window, min_periods, freq=freq, - center=center, time_rule=time_rule)) + center=center)) return num / den return _flex_binary_moment(arg1, arg2, _get_corr, pairwise=bool(pairwise)) @@ -296,16 +292,16 @@ def _flex_binary_moment(arg1, arg2, f, pairwise=False): _roll_kw, _pairwise_retval, _roll_notes) @Appender(_doc_template) def rolling_corr_pairwise(df1, df2=None, window=None, min_periods=None, - freq=None, center=False, time_rule=None): + freq=None, center=False): import warnings warnings.warn("rolling_corr_pairwise is deprecated, use rolling_corr(..., pairwise=True)", FutureWarning) return rolling_corr(df1, df2, window=window, min_periods=min_periods, - freq=freq, center=center, time_rule=time_rule, + freq=freq, center=center, pairwise=True) def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False, - time_rule=None, args=(), kwargs={}, **kwds): + args=(), kwargs={}, **kwds): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. 
@@ -322,7 +318,6 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False, Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window - time_rule : Legacy alias for freq args : tuple Passed on to func kwargs : dict @@ -332,7 +327,7 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False, ------- y : type of input """ - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) calc = lambda x: func(x, window, minp=minp, args=args, kwargs=kwargs, **kwds) return_hook, values = _process_data_structure(arg) @@ -417,10 +412,10 @@ def _get_center_of_mass(com, span, halflife): @Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) -def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None, time_rule=None, +def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None, adjust=True): com = _get_center_of_mass(com, span, halflife) - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) def _ewma(v): result = algos.ewma(v, com, int(adjust)) @@ -442,9 +437,9 @@ def _first_valid_index(arr): _ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) def ewmvar(arg, com=None, span=None, halflife=None, min_periods=0, bias=False, - freq=None, time_rule=None): + freq=None): com = _get_center_of_mass(com, span, halflife) - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) moment2nd = ewma(arg * arg, com=com, min_periods=min_periods) moment1st = ewma(arg, com=com, min_periods=min_periods) @@ -458,9 +453,8 @@ def ewmvar(arg, com=None, span=None, halflife=None, min_periods=0, bias=False, @Substitution("Exponentially-weighted moving std", _unary_arg, _ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) -def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False, - time_rule=None): - result = ewmvar(arg, com=com, span=span, halflife=halflife, time_rule=time_rule, +def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False): + result = ewmvar(arg, com=com, span=span, halflife=halflife, min_periods=min_periods, bias=bias) return _zsqrt(result) @@ -470,8 +464,8 @@ def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False, @Substitution("Exponentially-weighted moving covariance", _binary_arg_flex, _ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) -def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, bias=False, - freq=None, time_rule=None, pairwise=None): +def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, + bias=False, freq=None, pairwise=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -479,8 +473,8 @@ def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, b com = arg2 arg2 = arg1 pairwise = True if pairwise is None else pairwise - arg1 = _conv_timerule(arg1, freq, time_rule) - arg2 = _conv_timerule(arg2, freq, time_rule) + arg1 = _conv_timerule(arg1, freq) + arg2 = _conv_timerule(arg2, freq) def _get_ewmcov(X, Y): mean = lambda x: ewma(x, com=com, span=span, halflife=halflife, min_periods=min_periods) @@ -498,7 +492,7 @@ def _get_ewmcov(X, Y): _ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) def ewmcorr(arg1, arg2=None, com=None, 
span=None, halflife=None, min_periods=0, - freq=None, time_rule=None, pairwise=None): + freq=None, pairwise=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -506,8 +500,8 @@ def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, com = arg2 arg2 = arg1 pairwise = True if pairwise is None else pairwise - arg1 = _conv_timerule(arg1, freq, time_rule) - arg2 = _conv_timerule(arg2, freq, time_rule) + arg1 = _conv_timerule(arg1, freq) + arg2 = _conv_timerule(arg2, freq) def _get_ewmcorr(X, Y): mean = lambda x: ewma(x, com=com, span=span, halflife=halflife, min_periods=min_periods) @@ -547,13 +541,7 @@ def _prep_binary(arg1, arg2): # Python interface to Cython functions -def _conv_timerule(arg, freq, time_rule): - if time_rule is not None: - import warnings - warnings.warn("time_rule argument is deprecated, replace with freq", - FutureWarning) - - freq = time_rule +def _conv_timerule(arg, freq): types = (DataFrame, Series) if freq is not None and isinstance(arg, types): @@ -584,13 +572,12 @@ def _rolling_func(func, desc, check_minp=_use_window): @Appender(_doc_template) @wraps(func) def f(arg, window, min_periods=None, freq=None, center=False, - time_rule=None, **kwargs): + **kwargs): def call_cython(arg, window, minp, args=(), kwargs={}, **kwds): minp = check_minp(minp, window) return func(arg, window, minp, **kwds) - return _rolling_moment(arg, window, call_cython, min_periods, - freq=freq, center=center, - time_rule=time_rule, **kwargs) + return _rolling_moment(arg, window, call_cython, min_periods, freq=freq, + center=center, **kwargs) return f @@ -612,7 +599,7 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds): def rolling_quantile(arg, window, quantile, min_periods=None, freq=None, - center=False, time_rule=None): + center=False): """Moving quantile. Parameters @@ -628,8 +615,7 @@ def rolling_quantile(arg, window, quantile, min_periods=None, freq=None, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq` + as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window @@ -650,12 +636,12 @@ def rolling_quantile(arg, window, quantile, min_periods=None, freq=None, def call_cython(arg, window, minp, args=(), kwargs={}): minp = _use_window(minp, window) return algos.roll_quantile(arg, window, minp, quantile) - return _rolling_moment(arg, window, call_cython, min_periods, - freq=freq, center=center, time_rule=time_rule) + return _rolling_moment(arg, window, call_cython, min_periods, freq=freq, + center=center) def rolling_apply(arg, window, func, min_periods=None, freq=None, - center=False, time_rule=None, args=(), kwargs={}): + center=False, args=(), kwargs={}): """Generic moving function application. Parameters @@ -671,8 +657,7 @@ def rolling_apply(arg, window, func, min_periods=None, freq=None, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. 
center : boolean, default False Whether the label should correspond with center of window args : tuple @@ -696,13 +681,12 @@ def rolling_apply(arg, window, func, min_periods=None, freq=None, def call_cython(arg, window, minp, args, kwargs): minp = _use_window(minp, window) return algos.roll_generic(arg, window, minp, func, args, kwargs) - return _rolling_moment(arg, window, call_cython, min_periods, - freq=freq, center=center, time_rule=time_rule, - args=args, kwargs=kwargs) + return _rolling_moment(arg, window, call_cython, min_periods, freq=freq, + center=center, args=args, kwargs=kwargs) def rolling_window(arg, window=None, win_type=None, min_periods=None, - freq=None, center=False, mean=True, time_rule=None, + freq=None, center=False, mean=True, axis=0, **kwargs): """ Applies a moving window of type ``window_type`` and size ``window`` @@ -721,8 +705,7 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window mean : boolean, default True @@ -778,7 +761,7 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None, minp = _use_window(min_periods, len(window)) - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) return_hook, values = _process_data_structure(arg) f = lambda x: algos.roll_window(x, window, minp, avg=mean) @@ -816,16 +799,14 @@ def _expanding_func(func, desc, check_minp=_use_window): @Substitution(desc, _unary_arg, _expanding_kw, _type_of_input_retval, "") @Appender(_doc_template) @wraps(func) - def f(arg, min_periods=1, freq=None, center=False, time_rule=None, - **kwargs): + def f(arg, min_periods=1, freq=None, center=False, **kwargs): window = len(arg) def call_cython(arg, window, minp, args=(), kwargs={}, **kwds): minp = check_minp(minp, window) return func(arg, window, minp, **kwds) - return _rolling_moment(arg, window, call_cython, min_periods, - freq=freq, center=center, - time_rule=time_rule, **kwargs) + return _rolling_moment(arg, window, call_cython, min_periods, freq=freq, + center=center, **kwargs) return f @@ -849,7 +830,7 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds): check_minp=_require_min_periods(4)) -def expanding_count(arg, freq=None, center=False, time_rule=None): +def expanding_count(arg, freq=None, center=False): """ Expanding count of number of non-NaN observations. @@ -858,8 +839,7 @@ def expanding_count(arg, freq=None, center=False, time_rule=None): arg : DataFrame or numpy ndarray-like freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window. @@ -873,12 +853,11 @@ def expanding_count(arg, freq=None, center=False, time_rule=None): frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). 
""" - return rolling_count(arg, len(arg), freq=freq, center=center, - time_rule=time_rule) + return rolling_count(arg, len(arg), freq=freq, center=center) def expanding_quantile(arg, quantile, min_periods=1, freq=None, - center=False, time_rule=None): + center=False): """Expanding quantile. Parameters @@ -891,8 +870,7 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window. @@ -907,14 +885,14 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None, of :meth:`~pandas.Series.resample` (i.e. using the `mean`). """ return rolling_quantile(arg, len(arg), quantile, min_periods=min_periods, - freq=freq, center=center, time_rule=time_rule) + freq=freq, center=center) @Substitution("Unbiased expanding covariance.", _binary_arg_flex, _expanding_kw+_pairwise_kw, _flex_retval, "") @Appender(_doc_template) def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, center=False, - time_rule=None, pairwise=None): + pairwise=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -925,14 +903,14 @@ def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, center=False, window = max(len(arg1), len(arg2)) return rolling_cov(arg1, arg2, window, min_periods=min_periods, freq=freq, - center=center, time_rule=time_rule, pairwise=pairwise) + center=center, pairwise=pairwise) @Substitution("Expanding sample correlation.", _binary_arg_flex, _expanding_kw+_pairwise_kw, _flex_retval, "") @Appender(_doc_template) def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, center=False, - time_rule=None, pairwise=None): + pairwise=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -943,8 +921,7 @@ def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, center=False, window = max(len(arg1), len(arg2)) return rolling_corr(arg1, arg2, window, min_periods=min_periods, - freq=freq, center=center, time_rule=time_rule, - pairwise=pairwise) + freq=freq, center=center, pairwise=pairwise) @Substitution("Deprecated. Use expanding_corr(..., pairwise=True) instead.\n\n" @@ -952,16 +929,15 @@ def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, center=False, _expanding_kw, _pairwise_retval, "") @Appender(_doc_template) def expanding_corr_pairwise(df1, df2=None, min_periods=1, freq=None, - center=False, time_rule=None): + center=False): import warnings warnings.warn("expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)", FutureWarning) return expanding_corr(df1, df2, min_periods=min_periods, - freq=freq, center=center, time_rule=time_rule, - pairwise=True) + freq=freq, center=center, pairwise=True) def expanding_apply(arg, func, min_periods=1, freq=None, center=False, - time_rule=None, args=(), kwargs={}): + args=(), kwargs={}): """Generic expanding function application. Parameters @@ -974,8 +950,7 @@ def expanding_apply(arg, func, min_periods=1, freq=None, center=False, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. 
+ as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window. args : tuple @@ -995,5 +970,4 @@ def expanding_apply(arg, func, min_periods=1, freq=None, center=False, """ window = len(arg) return rolling_apply(arg, window, func, min_periods=min_periods, freq=freq, - center=center, time_rule=time_rule, args=args, - kwargs=kwargs) + center=center, args=args, kwargs=kwargs) diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index 97f08e7052c87..22661ea7cacda 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -485,27 +485,6 @@ def _check_structures(self, func, static_comp, assert_series_equal(series_xp, series_rs) assert_frame_equal(frame_xp, frame_rs) - def test_legacy_time_rule_arg(self): - # suppress deprecation warnings - sys.stderr = StringIO() - - rng = bdate_range('1/1/2000', periods=20) - ts = Series(np.random.randn(20), index=rng) - ts = ts.take(np.random.permutation(len(ts))[:12]).sort_index() - - try: - result = mom.rolling_mean(ts, 1, min_periods=1, freq='B') - expected = mom.rolling_mean(ts, 1, min_periods=1, - time_rule='WEEKDAY') - tm.assert_series_equal(result, expected) - - result = mom.ewma(ts, span=5, freq='B') - expected = mom.ewma(ts, span=5, time_rule='WEEKDAY') - tm.assert_series_equal(result, expected) - - finally: - sys.stderr = sys.__stderr__ - def test_ewma(self): self._check_ew(mom.ewma) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 7988b01af8c48..a75d30c3323d6 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -239,21 +239,6 @@ def get_period_alias(offset_str): _legacy_reverse_map = dict((v, k) for k, v in reversed(sorted(compat.iteritems(_rule_aliases)))) - -def inferTimeRule(index): - from pandas.tseries.index import DatetimeIndex - import warnings - warnings.warn("This method is deprecated, use infer_freq or inferred_freq" - " attribute of DatetimeIndex", FutureWarning) - - freq = DatetimeIndex(index).inferred_freq - if freq is None: - raise Exception('Unable to infer time rule') - - offset = to_offset(freq) - return get_legacy_offset_name(offset) - - def to_offset(freqstr): """ Return DateOffset object from string representation diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index c58447acec621..353f5f1c472ba 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -158,10 +158,6 @@ def __new__(cls, data=None, dayfirst = kwds.pop('dayfirst', None) yearfirst = kwds.pop('yearfirst', None) infer_dst = kwds.pop('infer_dst', False) - warn = False - if 'offset' in kwds and kwds['offset']: - freq = kwds['offset'] - warn = True freq_infer = False if not isinstance(freq, DateOffset): @@ -173,14 +169,6 @@ def __new__(cls, data=None, freq_infer = True freq = None - if warn: - import warnings - warnings.warn("parameter 'offset' is deprecated, " - "please use 'freq' instead", - FutureWarning) - - offset = freq - if periods is not None: if com.is_float(periods): periods = int(periods) @@ -188,12 +176,12 @@ def __new__(cls, data=None, raise ValueError('Periods must be a number, got %s' % str(periods)) - if data is None and offset is None: + if data is None and freq is None: raise ValueError("Must provide freq argument if no data is " "supplied") if data is None: - return cls._generate(start, end, periods, name, offset, + return cls._generate(start, end, periods, name, freq, tz=tz, normalize=normalize, closed=closed, infer_dst=infer_dst) 
@@ -211,11 +199,11 @@ def __new__(cls, data=None, # try a few ways to make it datetime64 if lib.is_string_array(data): - data = _str_to_dt_array(data, offset, dayfirst=dayfirst, + data = _str_to_dt_array(data, freq, dayfirst=dayfirst, yearfirst=yearfirst) else: data = tools.to_datetime(data, errors='raise') - data.offset = offset + data.offset = freq if isinstance(data, DatetimeIndex): if name is not None: data.name = name @@ -226,7 +214,7 @@ def __new__(cls, data=None, return data if issubclass(data.dtype.type, compat.string_types): - data = _str_to_dt_array(data, offset, dayfirst=dayfirst, + data = _str_to_dt_array(data, freq, dayfirst=dayfirst, yearfirst=yearfirst) if issubclass(data.dtype.type, np.datetime64): @@ -238,8 +226,8 @@ def __new__(cls, data=None, subarr = data.values - if offset is None: - offset = data.offset + if freq is None: + freq = data.offset verify_integrity = False else: if data.dtype != _NS_DTYPE: @@ -287,13 +275,13 @@ def __new__(cls, data=None, subarr = subarr.view(cls) subarr.name = name - subarr.offset = offset + subarr.offset = freq subarr.tz = tz if verify_integrity and len(subarr) > 0: - if offset is not None and not freq_infer: + if freq is not None and not freq_infer: inferred = subarr.inferred_freq - if inferred != offset.freqstr: + if inferred != freq.freqstr: raise ValueError('Dates do not conform to passed ' 'frequency') diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py index 3155f0f6e1a80..0315cb598b88a 100644 --- a/pandas/tseries/tests/test_timeseries_legacy.py +++ b/pandas/tseries/tests/test_timeseries_legacy.py @@ -262,28 +262,6 @@ def setUp(self): # suppress deprecation warnings sys.stderr = StringIO() - def test_inferTimeRule(self): - from pandas.tseries.frequencies import inferTimeRule - - index1 = [datetime(2010, 1, 29, 0, 0), - datetime(2010, 2, 26, 0, 0), - datetime(2010, 3, 31, 0, 0)] - - index2 = [datetime(2010, 3, 26, 0, 0), - datetime(2010, 3, 29, 0, 0), - datetime(2010, 3, 30, 0, 0)] - - index3 = [datetime(2010, 3, 26, 0, 0), - datetime(2010, 3, 27, 0, 0), - datetime(2010, 3, 29, 0, 0)] - - # LEGACY - assert inferTimeRule(index1) == 'EOM' - assert inferTimeRule(index2) == 'WEEKDAY' - - self.assertRaises(Exception, inferTimeRule, index1[:2]) - self.assertRaises(Exception, inferTimeRule, index3) - def test_time_rule(self): result = DateRange('1/1/2000', '1/30/2000', time_rule='WEEKDAY') result2 = DateRange('1/1/2000', '1/30/2000', timeRule='WEEKDAY')
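For anyone updating code that still passes the removed keywords, the sketch below shows the surviving spellings of the calls described in the release notes above. It is an illustrative example rather than part of the patch: the small DataFrame and Series it builds, and the commented-out HDFStore lookup, are assumptions chosen only to exercise the remaining signatures.

import numpy as np
import pandas as pd

# precision= is gone from set_eng_float_format(); pass accuracy= instead
pd.set_eng_float_format(accuracy=3)

# nanRep= and force_unicode= are gone from the to_* formatters; use na_rep=
# and rely on the (now default) unicode output
df = pd.DataFrame({'a': [1.5, np.nan, 3.0]})
text = df.to_string(na_rep='missing')

# time_rule= is gone from the rolling/expanding/ewm moment functions; pass freq=
ts = pd.Series(np.random.randn(20), index=pd.bdate_range('1/1/2000', periods=20))
smoothed = pd.rolling_mean(ts, 5, min_periods=1, freq='B')

# offset= is gone from the DatetimeIndex constructor; pass freq=
idx = pd.DatetimeIndex(start='1/1/2000', periods=10, freq='B')

# HDFStore.unique(key, column) is gone; chain .unique() onto select_column()
# values = store.select_column('df', 'index').unique()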