From d3b2deb83becfc10a13989e37a09afcd45052374 Mon Sep 17 00:00:00 2001
From: Bart Doekemeijer
Date: Mon, 3 Jul 2023 09:16:48 +0200
Subject: [PATCH] Sync: Merge v1.3.1 bug fixes into develop (#96)

* Merge in changes from `main` branch (v1.3 -> v1.3.1) into `develop`, notably:
  - Feature: Add PyPI integration by @Bartdoekemeijer in #87
  - Bug fix: various small corrections by @Bartdoekemeijer in #86

---------

Co-authored-by: paulf81
Co-authored-by: Rafael M Mudafort
---
 .github/workflows/python-publish.yml       |  30 ++++
 README.rst                                 |   4 +-
 .../dataframe_filtering.py                 |   1 -
 .../dataframe_manipulations.py             |  35 ++--
 .../energy_ratio_visualization.py          |  31 +++-
 .../energy_ratio_wd_bias_estimation.py     |   2 +-
 flasc/optimization.py                      |   6 +-
 flasc/turbine_analysis/ws_pow_filtering.py | 158 ++++++++++-------
 flasc/utilities.py                         |   4 +-
 flasc/version.py                           |   2 +-
 pyproject.toml                             | 161 ++++++++++++++++++
 requirements.txt                           |   1 -
 setup.py                                   |   6 +-
 13 files changed, 346 insertions(+), 95 deletions(-)
 create mode 100644 .github/workflows/python-publish.yml
 create mode 100644 pyproject.toml

diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml
new file mode 100644
index 00000000..ead77afc
--- /dev/null
+++ b/.github/workflows/python-publish.yml
@@ -0,0 +1,30 @@
+# This workflow will upload a Python Package using Twine when a release is created
+# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+name: Upload Python Package
+
+on:
+  release:
+    types: [published]
+
+jobs:
+  deploy:
+    if: github.repository_owner == 'NREL'
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python
+      uses: actions/setup-python@v4
+      with:
+        python-version: '3.x'
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install setuptools wheel twine
+    - name: Build and publish
+      env:
+        TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
+        TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
+      run: |
+        python setup.py sdist bdist_wheel
+        twine upload dist/*
diff --git a/README.rst b/README.rst
index 8ae31480..528b53b2 100644
--- a/README.rst
+++ b/README.rst
@@ -1,6 +1,6 @@
-==========
+=============================================
 FLORIS-based Analysis for SCADA data (FLASC)
-==========
+=============================================
 
 **Further documentation is available at http://flasc.readthedocs.io/.**
 
diff --git a/flasc/dataframe_operations/dataframe_filtering.py b/flasc/dataframe_operations/dataframe_filtering.py
index 41cffdad..da4a7cd0 100644
--- a/flasc/dataframe_operations/dataframe_filtering.py
+++ b/flasc/dataframe_operations/dataframe_filtering.py
@@ -133,7 +133,6 @@ def df_mark_turbdata_as_faulty(df, cond, turbine_list, exclude_columns=[]):
         turbine_list = [turbine_list]
 
     for ti in turbine_list:
-        N_init = df_get_no_faulty_measurements(df, ti)
         cols = [s for s in df.columns if s[-4::] == ('_%03d' % ti)
                 and s not in exclude_columns]
         df.loc[cond, cols] = None  # Delete measurements
diff --git a/flasc/dataframe_operations/dataframe_manipulations.py b/flasc/dataframe_operations/dataframe_manipulations.py
index b74f91c1..52618167 100644
--- a/flasc/dataframe_operations/dataframe_manipulations.py
+++ b/flasc/dataframe_operations/dataframe_manipulations.py
@@ -698,7 +698,7 @@ def df_reduce_precision(df_in, verbose=False):
     Returns:
         df_out ([pd.DataFrame]): Reduced dataframe
     """
-    df_out = pd.DataFrame()
+    list_out = []
     dtypes = df_in.dtypes
     for ii, c in enumerate(df_in.columns):
         datatype = str(dtypes[c])
@@ -711,42 +711,45 @@ def df_reduce_precision(df_in, verbose=False):
                             df_in[c], equal_nan=True))):
                 unique_values = np.unique(df_in[c])
                 if np.array_equal(unique_values, [0, 1]):
-                    df_out[c] = df_in[c].astype(bool)
+                    var_downsampled = df_in[c].astype(bool)
                 elif np.max(df_in[c]) < np.iinfo(np.int8).max:
-                    df_out[c] = df_in[c].astype(np.int8)
+                    var_downsampled = df_in[c].astype(np.int8)
                 elif np.max(df_in[c]) < np.iinfo(np.int16).max:
-                    df_out[c] = df_in[c].astype(np.int16)
+                    var_downsampled = df_in[c].astype(np.int16)
                 elif np.max(df_in[c]) < np.iinfo(np.int32).max:
-                    df_out[c] = df_in[c].astype(np.int32)
+                    var_downsampled = df_in[c].astype(np.int32)
                 else:
-                    df_out[c] = df_in[c].astype(np.int64)
+                    var_downsampled = df_in[c].astype(np.int64)
             else:
                 # If not, just simplify as float32
-                df_out[c] = df_in[c].astype(np.float32)
-                max_error = np.max(np.abs(df_out[c]-df_in[c]))
+                var_downsampled = df_in[c].astype(np.float32)
+                max_error = np.max(np.abs(var_downsampled-df_in[c]))
                 if verbose:
                     print("Column %s ['%s'] was downsampled to %s."
-                          % (c, datatype, df_out.dtypes[ii]))
+                          % (c, datatype, var_downsampled.dtypes))
                     print( "Max error: ", max_error)
         elif ((datatype == 'int64') or (datatype == 'int32')
               or (datatype == 'int')):
-            if all(np.unique(df_in[c]) == [0, 1]):
-                df_out[c] = df_in[c].astype(bool)
+            if np.array_equal(np.unique(df_in[c]), [0, 1]):
+                var_downsampled = df_in[c].astype(bool)
             elif len(np.unique(df_in[c])) < 100:
-                df_out[c] = df_in[c].astype(np.int16)
+                var_downsampled = df_in[c].astype(np.int16)
             else:
-                df_out[c] = df_in[c].astype(np.int32)
-            max_error = np.max(np.abs(df_out[c]-df_in[c]))
+                var_downsampled = df_in[c].astype(np.int32)
+            max_error = np.max(np.abs(var_downsampled-df_in[c]))
             if verbose:
                 print("Column %s ['%s'] was downsampled to %s."
-                      % (c, datatype, df_out.dtypes[ii]))
+                      % (c, datatype, var_downsampled.dtypes))
                 print( "Max error: ", max_error)
         else:
            if verbose:
                print("Datatype '%s' not recognized. Not downsampling."
                      % datatype)
-            df_out[c] = df_in[c]
+            var_downsampled = df_in[c]
+        list_out.append(var_downsampled)
+
+    df_out = pd.concat(list_out, axis=1, ignore_index=False)
     return df_out
diff --git a/flasc/energy_ratio/energy_ratio_visualization.py b/flasc/energy_ratio/energy_ratio_visualization.py
index 99776803..9bb4c747 100644
--- a/flasc/energy_ratio/energy_ratio_visualization.py
+++ b/flasc/energy_ratio/energy_ratio_visualization.py
@@ -134,6 +134,10 @@ def plot(
     for ii, df in enumerate(energy_ratios):
         df = df.copy()
 
+        if df.shape[0] < 2:
+            # Do not plot single values
+            continue
+
         # Get x-axis values
         x = np.array(df["wd_bin"], dtype=float)
 
@@ -141,13 +145,18 @@ def plot(
         dwd = np.min(x[1::] - x[0:-1])
         jumps = np.where(np.diff(x) > dwd * 1.50)[0]
         if len(jumps) > 0:
-            df = df.append(
-                pd.DataFrame(
-                    {
-                        "wd_bin": x[jumps] + dwd / 2.0,
-                        "N_bin": [0] * len(jumps),
-                    }
-                )
+            df = pd.concat(
+                [
+                    df,
+                    pd.DataFrame(
+                        {
+                            "wd_bin": x[jumps] + dwd / 2.0,
+                            "N_bin": [0] * len(jumps),
+                        }
+                    )
+                ],
+                axis=0,
+                ignore_index=False,
             )
             df = df.iloc[np.argsort(df["wd_bin"])].reset_index(drop=True)
             x = np.array(df["wd_bin"], dtype=float)
@@ -176,7 +185,13 @@ def plot(
     )
 
     # Plot the bin count
-    if df_freqs is not None:
+    is_none = False
+    if df_freqs is None:
+        is_none = True
+    elif isinstance(df_freqs, list):
+        is_none = np.any([c is None for c in df_freqs])
+
+    if not is_none:
         for ii, df_freq in enumerate(df_freqs):
             wd_bins = df_freq["wd_bin"].unique()
             n_ws_bins = len(df_freq["ws_bin_edges"].unique())
diff --git a/flasc/energy_ratio/energy_ratio_wd_bias_estimation.py b/flasc/energy_ratio/energy_ratio_wd_bias_estimation.py
index b03af56a..80a98af2 100644
--- a/flasc/energy_ratio/energy_ratio_wd_bias_estimation.py
+++ b/flasc/energy_ratio/energy_ratio_wd_bias_estimation.py
@@ -408,7 +408,7 @@ def cost_fun(wd_bias):
                 xtol=0.1,
                 disp=True)
             )
-        dran = opt_search_range[1]-opt_search_range[0]
+        dran = opt_search_range[1] - opt_search_range[0]
         x_opt, J_opt, x, J = opt.brute(
             func=cost_fun,
             ranges=[opt_search_range],
diff --git a/flasc/optimization.py b/flasc/optimization.py
index 7c7fdb84..cef6988d 100644
--- a/flasc/optimization.py
+++ b/flasc/optimization.py
@@ -259,7 +259,11 @@ def match_y_curves_by_offset(yref, ytest, dy_eval=None, angle_wrapping=True):
             ytest_cor = ytest - dy
             y_error = np.abs(yref-ytest_cor)
 
-        J = np.nanmean(y_error**2.0)
+        if np.all(np.isnan(y_error)) | (len(y_error) < 1):
+            J = np.nan
+        else:
+            J = np.nanmean(y_error**2.0)
+
         if np.isnan(J_opt):
             if not np.isnan(J):
                 J_opt = J
diff --git a/flasc/turbine_analysis/ws_pow_filtering.py b/flasc/turbine_analysis/ws_pow_filtering.py
index 8a18c102..5a8c0de6 100644
--- a/flasc/turbine_analysis/ws_pow_filtering.py
+++ b/flasc/turbine_analysis/ws_pow_filtering.py
@@ -71,7 +71,23 @@ def _get_all_unique_flags(self):
 
         return all_flags
 
-    def _get_mean_power_curves(self, ws_bins=np.arange(0.0, 25.5, 0.5), df=None):
+    def _reset_mean_power_curves(self, ws_bins=np.arange(0.0, 25.5, 0.5)):
+        # If uninitialized, create an empty dataframe with NaNs
+        pw_curve_dict = dict(
+            zip(
+                [f"pow_{ti:03d}" for ti in range(self.n_turbines)],
+                [np.ones(len(ws_bins) - 1) * np.nan] * self.n_turbines
+            )
+        )
+        pw_curve_dict["ws"] = (ws_bins[1::] + ws_bins[0:-1]) / 2
+        pw_curve_dict["ws_min"] = ws_bins[0:-1]
+        pw_curve_dict["ws_max"] = ws_bins[1::]
+        pw_curve_df = pd.DataFrame(pw_curve_dict)
+
+        self._pw_curve_ws_bins = ws_bins
+        self.pw_curve_df = pw_curve_df
+
+    def _get_mean_power_curves(self, df=None, turbine_subset=None):
         """Calculates the mean power production in bins of the
         wind speed, for all turbines in the wind farm.
 
@@ -84,7 +100,9 @@ def _get_mean_power_curves(self, ws_bins=np.arange(0.0, 25.5, 0.5), df=None):
                 * Time of each measurement: time
                 * Wind speed of each turbine: ws_000, ws_001, ...
                 * Power production of each turbine: pow_000, pow_001, ...
-
+            turbine_subset (list, optional): List of turbine indices to
+                calculate the mean power curve for. If None is specified,
+                defaults to calculating it for all turbines.
         Returns:
             pw_curve_df ([pd.DataFrame]): Dataframe containing the wind
                 speed bins and the mean power production value for every
@@ -95,59 +113,52 @@ def _get_mean_power_curves(self, ws_bins=np.arange(0.0, 25.5, 0.5), df=None):
         if df is None:
             df = self.df
 
-        # Create a dataframe to contain the averaged power curves
-        ws_max = np.max(ws_bins)
-        ws_min = np.min(ws_bins)
-        pw_curve_df = pd.DataFrame(
-            {
-                "ws": (ws_bins[1::] + ws_bins[0:-1]) / 2,
-                "ws_min": ws_bins[0:-1],
-                "ws_max": ws_bins[1::],
-            }
-        )
+        # Get existing power curve
+        pw_curve_df = self.pw_curve_df
 
-        # Loop through every turbine
-        for ti in range(self.n_turbines):
-            # Extract the measurements and calculate the bin average
-            ws = df["ws_%03d" % ti]
-            pw = df["pow_%03d" % ti]
-            bin_ids = (ws > ws_min) & (ws < ws_max)
-            ws_clean = ws[bin_ids]
-            pw_clean = pw[bin_ids]
-
-            bin_array = np.searchsorted(ws_bins, ws_clean, side="left")
-            bin_array = bin_array - 1  # 0 -> 1st bin, rather than before bin
-            pow_bins = [
-                np.median(pw_clean[bin_array == i])
-                for i in range(pw_curve_df.shape[0])
-            ]
-
-            # Write outputs to the dataframe
-            pw_curve_df["pow_%03d" % ti] = pow_bins
+        # By default, if unspecified, calculate the power curve for all turbines
+        if turbine_subset is None:
+            turbine_subset = list(range(self.n_turbines))
 
-        # Save the finalized power curve to self and return it to the user
-        self.pw_curve_df = pw_curve_df
-        return pw_curve_df
+        # Apply binning to the wind speeds of the turbine(s)
+        ws_bin_cuts_subset = [
+            pd.cut(df[f"ws_{ti:03d}"], bins=self._pw_curve_ws_bins)
+            for ti in turbine_subset
+        ]
 
-    def _reset_df(self):
-        """Reset the 'filtered' dataframe, self.df, to its original form,
-        before any measurements were marked as faulty.
""" + # Now add the binned wind speeds to the power measurements dataframe + df_pow_and_ws_bins_subset = pd.concat( + [ + df[["pow_%03d" % ti for ti in turbine_subset]], + *ws_bin_cuts_subset + ], + axis=1 + ) - # Copy the original dataframe from self - df = self._df_initial - self.df = df.reset_index(drop=("time" in df.columns)) + # Now group power measurements by their wind speed bin and calculate the median + pw_curve_df_subset = pd.concat( + [ + df_pow_and_ws_bins_subset.groupby(by=f"ws_{ti:03d}")[f"pow_{ti:03d}"].median() + for ti in turbine_subset + ], + axis=1 + ).sort_index().reset_index(drop=True) - # Derive the total number of turbines in the dataframe - self.n_turbines = flascutils.get_num_turbines(df) + # Update the median power curve for the turbines in turbine_subset + pw_curve_df[[f"pow_{ti:03d}" for ti in turbine_subset]] = pw_curve_df_subset - # Get mean power curve from data to start with - self._get_mean_power_curves() + # Save the finalized power curve to self and return it to the user + self.pw_curve_df = pw_curve_df + return pw_curve_df # Public methods def reset_filters(self): """Reset all filter variables and assume all data is clean.""" - # Reset the filtered dataframe to the original, unfiltered one - self._reset_df() + + # Copy the original, unfiltered dataframe from self + df = self._df_initial + self.df = df.reset_index(drop=("time" in df.columns)) + self.n_turbines = flascutils.get_num_turbines(df) # Reset the dataframe with filter flags to mark all data as clean, initially all_clean_array = [ @@ -160,6 +171,9 @@ def reset_filters(self): columns=["WTG_{:03d}".format(ti) for ti in range(self.n_turbines)] ) + # Reset the mean power curves of the turbines + self._reset_mean_power_curves() + def filter_by_condition( self, condition, @@ -241,8 +255,9 @@ def filter_by_condition( for tii in ti: self.df_filters.loc[condition, "WTG_{:03d}".format(tii)] = label - # Recalculate mean power curves - self._get_mean_power_curves() + # Clear the mean power curves. Namely, with this new filtering application + # the mean power curves must be recalculated. + self.pw_curve_df[f"pow_{tii:03d}"] = None # Set as Nones return df_out @@ -377,9 +392,10 @@ def filter_by_power_curve( # the estimated power curve) changes every iteration, and hence so # do the estimated mean power curves again. This explains the # iterative nature of the problem. 
-        df_initial_filtered = self.df.copy()
+        df_initial_filtered = self.df[[f"ws_{ti:03d}", f"pow_{ti:03d}"]].copy()
 
         # Iteratively filter data and recalculate the mean power curves
+        self._get_mean_power_curves(turbine_subset=[ti])
         for ii in range(no_iterations):
             # Only print final iteration
             is_final_iteration = (ii == no_iterations - 1)
@@ -479,7 +495,7 @@ def filter_by_power_curve(
             )
 
             # Recalculate the mean power curve based on current iteration's filtered dataframe
-            self._get_mean_power_curves(df=df_iteration)
+            self._get_mean_power_curves(df=df_iteration, turbine_subset=[ti])
 
         self.pw_curve_df_bounds["pow_%03d_lb" % ti] = np.interp(
             x=x,
             xp=lb_ws,
@@ -530,6 +546,11 @@ def filter_by_floris_power_curve(
         print("Filtering data by deviations from the floris power curve...")
 
         # Create upper and lower bounds around floris curve
+
+        # Get mean power curves first, if not yet calculated
+        if self.pw_curve_df[f"pow_{ti:03d}"].isna().all():
+            self._get_mean_power_curves(turbine_subset=[ti])
+
         df_xy = self.pw_curve_df.copy()
         rho = fi.floris.flow_field.air_density
         for ti in range(len(fi.layout_x)):
@@ -633,6 +654,7 @@ def filter_by_floris_power_curve(
             ti=ti,
             apply_filters_to_df=True,
         )
+        self._get_mean_power_curves(turbine_subset=[ti])  # Recalculate mean curve
 
         # Write left and right bound to own curve
         self.pw_curve_df_bounds["pow_%03d_lb" % ti] = np.interp(
@@ -663,14 +685,23 @@ def get_df(self):
         """
         return self.df
 
-    def get_power_curve(self):
+    def get_power_curve(self, calculate_missing=True):
         """Return the turbine estimated mean power curves to the
         user.
 
+        Args:
+            calculate_missing (bool, optional): Calculate the median power
+                curves for any turbines whose curves are not yet calculated.
         Returns:
             pw_curve_df ([pd.DataFrame]): Dataframe containing the wind
                 speed bins and the mean power production value for every
                 turbine.
         """
+        if calculate_missing and (self.pw_curve_df.isna().all(axis=0).any()):
+            turbine_subset = np.where(
+                self.pw_curve_df[[f"pow_{ti:03d}" for ti in range(self.n_turbines)]].isna().all(axis=0)
+            )[0]
+            self._get_mean_power_curves(turbine_subset=turbine_subset)
+
         return self.pw_curve_df
 
     def plot_farm_mean_power_curve(self, fi=None):
@@ -683,6 +714,14 @@ def plot_farm_mean_power_curve(self, fi=None):
             from FLORIS will be plotted on top of the SCADA-based
             power curves.
         """
+        # Get mean power curves for the turbines that are not yet calculated
+        if self.pw_curve_df.isna().all(axis=0).any():
+            turbine_subset = np.where(
+                self.pw_curve_df[[f"pow_{ti:03d}" for ti in range(self.n_turbines)]].isna().all(axis=0)
+            )[0]
+            self._get_mean_power_curves(turbine_subset=turbine_subset)
+
+        # Create the figure
         fig, ax = plt.subplots()
         x = np.array(self.pw_curve_df["ws"], dtype=float)
         for ti in range(self.n_turbines):
@@ -692,11 +731,10 @@ def plot_farm_mean_power_curve(self, fi=None):
         pow_mean_array = self.pw_curve_df[pow_cols].mean(axis=1)
         pow_std_array = self.pw_curve_df[pow_cols].std(axis=1)
 
-        yl = np.array(pow_mean_array - 2 * pow_std_array)
-        yu = np.array(pow_mean_array + 2 * pow_std_array)
         ax.fill_between(
-            np.hstack([x, x[::-1]]),
-            np.hstack([yl, yu[::-1]]),
+            x,
+            np.array(pow_mean_array - 2 * pow_std_array),
+            np.array(pow_mean_array + 2 * pow_std_array),
             color="tab:red",
             label="Uncertainty bounds (2 std. dev.)",
             alpha=0.30,
@@ -718,7 +756,7 @@ def plot_farm_mean_power_curve(self, fi=None):
         ax.set_title("Mean of all turbine power curves with UQ")
         return fig, ax
 
-    def plot_filters_custom_scatter(self, ti, x_col, y_col, ax=None):
+    def plot_filters_custom_scatter(self, ti, x_col, y_col, xlabel="Wind speed (m/s)", ylabel="Power (kW)", ax=None):
         """Plot the filtered data in a scatter plot, categorized by
         the source of their filter/fault. This is a generic function
         that allows the user to plot various numeric
@@ -752,7 +790,11 @@ def plot_filters_custom_scatter(self, ti, x_col, y_col, ax=None):
             ids = (df_f == flag)
             df_subset = self._df_initial.loc[ids]
             percentage = 100.0 * np.sum(ids) / N
-            if any(ids):
+            if (
+                any(ids) and
+                (not df_subset[x_col].isna().all()) and
+                (not df_subset[y_col].isna().all())
+            ):
                 ax.plot(
                     df_subset[x_col],
                     df_subset[y_col],
@@ -768,8 +810,8 @@ def plot_filters_custom_scatter(self, ti, x_col, y_col, ax=None):
                 l.set_alpha(1)  # Force alpha in legend to 1.0
 
         ax.set_title("WTG {:03d}: Filters".format(ti))
-        ax.set_xlabel("Wind speed (m/s)")
-        ax.set_ylabel("Power (kW)")
+        ax.set_xlabel(xlabel)
+        ax.set_ylabel(ylabel)
         ax.grid(True)
         return ax
diff --git a/flasc/utilities.py b/flasc/utilities.py
index a458899c..80e48046 100644
--- a/flasc/utilities.py
+++ b/flasc/utilities.py
@@ -13,7 +13,7 @@
 
 import datetime
 
-import numba
+# import numba
 import numpy as np
 
 # import scipy.interpolate as interp
@@ -79,7 +79,7 @@ def interp_with_max_gap(x, xp, fp, max_gap, kind, wrap_around_360=False):
 
 # Credits to 'np8', from https://stackoverflow.com/questions/64045034/interpolate-values-and-replace-with-nans-within-a-long-gap
 # Adapted to include nearest-neighbor interpolation
-@numba.njit()
+# @numba.njit()
 def _interpolate_with_max_gap(
     x, xp, fp, max_gap, assume_sorted=False, kind="linear", extrapolate=True,
 ):
diff --git a/flasc/version.py b/flasc/version.py
index a58941b0..6261a05b 100644
--- a/flasc/version.py
+++ b/flasc/version.py
@@ -1 +1 @@
-1.3
\ No newline at end of file
+1.3.1
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..c43eec34
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,161 @@
+[build-system]
+requires = ["setuptools >= 40.6.0", "wheel"]
+build-backend = "setuptools.build_meta"
+
+
+[coverage.run]
+# Coverage.py configuration file
+# https://coverage.readthedocs.io/en/latest/config.html
+branch = true
+source = "flasc/*"
+omit = [
+    "setup.py",
+    "tests/*"
+]
+
+
+[tool.pytest.ini_options]
+testpaths = "tests"
+filterwarnings = [
+    "ignore::DeprecationWarning:pandas.*:"
+]
+
+
+
+## Pyflakes (F)
+## pycodestyle (E, W)
+# mccabe (C90)
+# isort (I)  # Use isort directly until more isort features are included in ruff
+# pep8-naming (N)
+# pydocstyle (D)
+# pyupgrade (UP)
+# flake8-2020 (YTT)
+# flake8-annotations (ANN)
+# flake8-bandit (S)
+# flake8-blind-except (BLE)
+# flake8-boolean-trap (FBT)
+# flake8-bugbear (B)
+# flake8-builtins (A)
+# flake8-commas (COM)
+# flake8-comprehensions (C4)
+# flake8-datetimez (DTZ)
+# flake8-debugger (T10)
+# flake8-errmsg (EM)
+# flake8-executable (EXE)
+# flake8-implicit-str-concat (ISC)
+# flake8-import-conventions (ICN)
+# flake8-logging-format (G)
+# flake8-no-pep420 (INP)
+# flake8-pie (PIE)
+# flake8-print (T20)
+# flake8-pytest-style (PT)
+# flake8-quotes (Q)
+# flake8-return (RET)
+# flake8-simplify (SIM)
+# flake8-tidy-imports (TID)
+# flake8-type-checking (TCH)
+# flake8-unused-arguments (ARG)
+# flake8-use-pathlib (PTH)
+# eradicate (ERA)
+# pandas-vet (PD)
+# pygrep-hooks (PGH)
+# Pylint (PL)
+#  - Convention (PLC)
+#  - Error (PLE)
+#  - Refactor (PLR)
+#  - Warning (PLW)
+# tryceratops (TRY)
+# flake8-raise (RSE)
+# flake8-self (SLF)
+# Ruff-specific rules (RUF)
+
+[tool.ruff]
+src = ["flasc", "tests"]
+line-length = 100
+target-version = "py310"
+
+# See https://github.com/charliermarsh/ruff#supported-rules
+# for rules included and matching to prefix.
+select = ["F", "E", "W", "C4", ]  # "T20", "I"
+# I - isort is not fully implemented in ruff, so there is not parity. Consider disabling I.
+
+# F401 unused-import: Ignore until all used isort flags are adopted in ruff
+ignore = ["F401"]
+
+# Allow autofix for all enabled rules (when `--fix` is provided).
+# fixable = ["A", "B", "C", "D", "E", "F"]
+fixable = ["F", "E", "W", "C4"]  # "T20", "I"
+unfixable = []
+
+# Exclude a variety of commonly ignored directories.
+exclude = [
+    "flasc/version.py",
+    ".bzr",
+    ".direnv",
+    ".eggs",
+    ".git",
+    ".hg",
+    ".mypy_cache",
+    ".nox",
+    ".pants.d",
+    ".ruff_cache",
+    ".svn",
+    ".tox",
+    ".venv",
+    "__pypackages__",
+    "_build",
+    "buck-out",
+    "build",
+    "dist",
+    "node_modules",
+    "venv",
+]
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+
+[tool.ruff.per-file-ignores]
+# F841 unused-variable: ignore since this file uses numexpr and many variables look unused
+# ...
+
+# I001 unsorted-imports: ignore because the import order is meaningful to navigate
+# import dependencies
+# ...
+
+[tool.ruff.isort]
+combine-as-imports = true
+known-first-party = ["flasc"]
+order-by-type = false
+# lines-after-imports = 2
+
+# [tool.ruff.mccabe]
+# # Unlike Flake8, default to a complexity level of 10.
+# max-complexity = 10
+
+
+[tool.isort]
+sections = [
+    "FUTURE",
+    "STDLIB",
+    "THIRDPARTY",
+    "FIRSTPARTY",
+    "LOCALFOLDER"
+]
+known_first_party = [
+    "flasc"
+]
+multi_line_output = 3
+combine_as_imports = true
+force_grid_wrap = 3
+include_trailing_comma = true
+use_parentheses = true
+lines_after_imports = 2
+line_length = 100
+order_by_type = false
+split_on_trailing_comma = true
+
+# length_sort = true
+# case_sensitive: False
+# force_sort_within_sections: True
+# reverse_relative: True
+# sort_relative_in_force_sorted_sections: True
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index bbea878f..7236cce5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,6 @@ floris>=3.4
 feather-format
 matplotlib>=3.6.3
 numpy
-numba
 pandas>=1.5
 pyproj
 pytest
diff --git a/setup.py b/setup.py
index 643e7bec..628f688e 100644
--- a/setup.py
+++ b/setup.py
@@ -16,8 +16,6 @@
     'feather-format',
     'matplotlib>=3.6.3',
     'numpy',
-    'numba',
-    'openoa',
     'pandas>=1.5',
     'pyproj',
     'pytest',
@@ -52,7 +50,7 @@
     author=AUTHOR,
     author_email=EMAIL,
     url=URL,
-    packages=find_packages(include=['flasc']),
+    packages=find_packages(include=['flasc*']),
     entry_points={
         'console_scripts': [
             'flasc=flasc.cli:main'
@@ -64,7 +62,7 @@
     zip_safe=False,
     keywords='flasc',
     classifiers=[
-        'Development Status :: Release',
+        'Development Status :: 5 - Production/Stable',
         'Intended Audience :: Developers',
         'License :: OSI Approved :: Apache Software License',
         'Natural Language :: English',
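
Note on the df_reduce_precision change above: the refactor replaces per-column insertion into an initially empty DataFrame with a collect-then-concat pattern. Below is a minimal, self-contained sketch of that pattern; the column names, data, and simplified dtype rules are illustrative only and are not the FLASC implementation.

    import numpy as np
    import pandas as pd

    # Illustrative input frame: a float column holding only 0/1 values,
    # and a regular float column
    df_in = pd.DataFrame({
        "is_operating": [0.0, 1.0, 1.0, 0.0],
        "pow_000": [1500.2, 1498.7, 0.0, 1502.4],
    })

    # Downcast each column into a standalone Series and collect them
    list_out = []
    for c in df_in.columns:
        col = df_in[c]
        if np.array_equal(np.unique(col), [0, 1]):
            col = col.astype(bool)        # pure 0/1 columns become booleans
        else:
            col = col.astype(np.float32)  # remaining floats become float32
        list_out.append(col)

    # Concatenate once along axis=1; the shared index and column names survive
    df_out = pd.concat(list_out, axis=1)
    print(df_out.dtypes)  # is_operating -> bool, pow_000 -> float32

A single concat at the end also sidesteps the repeated column-insertion pattern that newer pandas versions flag for performance (DataFrame fragmentation).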
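Note on the _get_mean_power_curves change above: the manual np.searchsorted loop is replaced by pd.cut plus a groupby-median per wind speed bin. A standalone sketch with synthetic data follows; the logistic power response and the single-turbine column names are assumptions made purely for illustration.

    import numpy as np
    import pandas as pd

    # Synthetic SCADA-like samples for one turbine
    rng = np.random.default_rng(0)
    ws = rng.uniform(0.0, 25.0, 1000)           # wind speeds [m/s]
    pw = 5000.0 / (1.0 + np.exp(-(ws - 10.0)))  # toy power response [kW]
    df = pd.DataFrame({"ws_000": ws, "pow_000": pw})

    # Same default bin edges as the patch: 0 to 25 m/s in 0.5 m/s steps
    ws_bins = np.arange(0.0, 25.5, 0.5)

    # pd.cut assigns each sample to an interval; grouping by that interval
    # and taking the median yields one power value per wind speed bin.
    # observed=False keeps empty bins, which come out as NaN, mirroring how
    # the patch leaves uncomputed power curves as NaN until recalculated.
    ws_binned = pd.cut(df["ws_000"], bins=ws_bins)
    pw_curve = df.groupby(ws_binned, observed=False)["pow_000"].median()
    print(pw_curve.head())

This per-bin median is what the patch now computes lazily: filters only clear the affected pow_ columns to NaN, and get_power_curve(calculate_missing=True) recomputes just the missing turbines on demand.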