Lint with ruff
mferrera committed Jan 29, 2024
1 parent 6383820 commit 6f7ebb0
Showing 71 changed files with 424 additions and 569 deletions.
5 changes: 0 additions & 5 deletions .flake8

This file was deleted.

12 changes: 4 additions & 8 deletions .github/workflows/subscript.yml
@@ -60,17 +60,13 @@ jobs:
       - name: List all installed packages
         run: pip freeze
 
-      - name: Lint with isort
+      - name: Format with ruff
         if: ${{ always() }}
-        run: isort --check-only --profile black src tests
+        run: ruff format . --check
 
-      - name: Lint with black
+      - name: Lint with ruff
         if: ${{ always() }}
-        run: black --check src tests
-
-      - name: Lint with flake8
-        if: ${{ always() }}
-        run: flake8 src tests
+        run: ruff check .
 
       - name: Check typing with mypy
         if: ${{ always() }}
5 changes: 2 additions & 3 deletions docs/contributing.rst
@@ -69,9 +69,8 @@ the linting done in CI:
 
 .. code-block:: console
 
-    isort --check-only --profile black src tests
-    black --check *.py src tests
-    flake8 src tests
+    ruff .
+    ruff format .
     mypy src/subscript
     rstcheck -r docs
31 changes: 22 additions & 9 deletions pyproject.toml
@@ -58,16 +58,14 @@ dependencies = [
 
 [project.optional-dependencies]
 tests = [
-    "black",
-    "flake8",
-    "isort",
     "mypy",
     "pytest",
     "pytest-cov",
     "pytest-mock",
     "pytest-xdist",
     "rstcheck",
     "rstcheck-core",
+    "ruff",
     "types-Jinja2",
     "types-PyYAML",
     "types-python-dateutil",
@@ -135,12 +133,6 @@ script-files = [
     "src/subscript/legacy/runeclipse",
 ]
 
-[tool.black]
-line-length = 88
-
-[tool.isort]
-profile = "black"
-
 [tool.mypy]
 ignore_missing_imports = true
 
@@ -167,3 +159,24 @@ markers = [
 
 [tool.rstcheck]
 ignore_directives = ["argparse", "automodule"]
+
+[tool.ruff]
+ignore = [
+    "C901",
+]
+select = [
+    "C",
+    "E",
+    "F",
+    "I",
+    "PIE",
+    "Q",
+    "RET",
+    "RSE",
+    "SIM",
+    "W",
+]
+line-length = 88
+
+[tool.ruff.lint.isort]
+combine-as-imports = true
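
For context on the rule codes above: F is Pyflakes, E/W are the pycodestyle errors and warnings, I is import sorting (isort), C covers mccabe complexity plus flake8-comprehensions, and PIE, Q, RET, RSE, and SIM are the flake8-pie, -quotes, -return, -raise, and -simplify plugin ports. A minimal, self-contained sketch (hypothetical snippet, not from this repository) of the SIM108 rewrite that several hunks below apply:

# Hypothetical illustration of SIM108 (flake8-simplify): an if/else that
# only assigns a single variable is flagged and can be auto-fixed.
qc_frame = {"OWC": 1000.0}

# Before (flagged by SIM108):
if "OWC" in qc_frame:
    contact = "OWC"
else:
    contact = "GWC"

# After (the conditional expression ruff suggests):
contact = "OWC" if "OWC" in qc_frame else "GWC"
print(contact)  # -> OWC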
4 changes: 1 addition & 3 deletions src/subscript/bjobsusers/bjobsusers.py
@@ -65,9 +65,7 @@ def get_jobs(status: str, bjobs_function: Callable) -> pd.DataFrame:
     data = [
         [
             uname,
-            1
-            if rex.match(hname) is None
-            else int(rex.match(hname).group(1)),  # type: ignore
+            1 if rex.match(hname) is None else int(rex.match(hname).group(1)),  # type: ignore
         ]
         for (uname, hname) in slines
     ]
5 changes: 2 additions & 3 deletions src/subscript/casegen_upcars/casegen_upcars.py
@@ -549,9 +549,8 @@ def main():
     built_in_functions = ["range"]
 
     for var in sorted(meta.find_undeclared_variables(ast)):
-        if dictionary.get(var) is None:
-            if var not in built_in_functions:
-                undefined_var.append(var)
+        if dictionary.get(var) is None and var not in built_in_functions:
+            undefined_var.append(var)
 
     if undefined_var:
         logger.warning(
18 changes: 5 additions & 13 deletions src/subscript/casegen_upcars/model.py
@@ -435,9 +435,7 @@ def __init__(
                 idx : idx + self._fracture_cell_count,
                 start_fracture_idx:end_fracture_idx,
                 start_fracture_k : end_fracture_k + 1,
-            ] = (
-                _i + 1
-            )
+            ] = _i + 1
 
         for _i, idx in enumerate(self._fracture_j):
             fracture_length = max(0.0, min(1.0, self._fracture_length_x[_i])) * self._lx
@@ -608,10 +606,7 @@ def _create_property(
         assert len(streak_property) == len(
             self._streak_k
         ), f"Number of input {keyword} is not equal to number of streak"
-        if isinstance(fracture_property, int):
-            data_type = np.int16
-        else:
-            data_type = float
+        data_type = np.int16 if isinstance(fracture_property, int) else float
         props = np.empty(
             (self._total_nx, self._total_ny, self._total_nz), dtype=data_type
         )
@@ -653,10 +648,7 @@ def _create_anisotropy_property(
             + keyword
             + " is not equal to number fault in Y- direction"
         )
-        if isinstance(fracture_x_property, int):
-            data_type = np.int16
-        else:
-            data_type = float
+        data_type = np.int16 if isinstance(fracture_x_property, int) else float
         props = np.empty(
             (self._total_nx, self._total_ny, self._total_nz), dtype=data_type
         )
@@ -810,8 +802,8 @@ def export_grdecl(self, filename):
             print("/", file=buffer_)
 
         print("COORD", file=buffer_)
-        for _i in range(0, self._xv.shape[0]):
-            for _j in range(0, self._xv.shape[1]):
+        for _i in range(self._xv.shape[0]):
+            for _j in range(self._xv.shape[1]):
                 print(
                     # pylint: disable=consider-using-f-string
                     "{{x:{0}}} {{y:{0}}} {{z:{0}}} "
20 changes: 5 additions & 15 deletions src/subscript/check_swatinit/check_swatinit.py
@@ -323,10 +323,7 @@ def qc_flag(qc_frame: pd.DataFrame) -> pd.DataFrame:
 
     qc_col = pd.Series(index=qc_frame.index, dtype=str)
 
-    if "OWC" in qc_frame:
-        contact = "OWC"
-    else:
-        contact = "GWC"
+    contact = "OWC" if "OWC" in qc_frame else "GWC"
 
     # Eclipse and libecl does not calculate cell centres to the same decimals.
     # Add some tolerance when testing towards fluid contacts.
@@ -553,10 +550,7 @@ def compute_pc(qc_frame: pd.DataFrame, satfunc_df: pd.DataFrame) -> pd.Series:
             swls = satnum_frame["SWL"].values
         else:
             swls = None
-        if "SWU" in satnum_frame:
-            swus = satnum_frame["SWU"].values
-        else:
-            swus = None
+        swus = satnum_frame["SWU"].values if "SWU" in satnum_frame else None
         p_cap[satnum_frame.index] = _evaluate_pc(
             satnum_frame["SWAT"].values,
             satnum_frame["PC_SCALING"].values,
@@ -565,10 +559,7 @@ def compute_pc(qc_frame: pd.DataFrame, satfunc_df: pd.DataFrame) -> pd.Series:
         satfunc_df[satfunc_df["SATNUM"] == satnum],
     )
     # Fix needed for OPM-flow above contact:
-    if "OWC" in qc_frame:
-        contact = "OWC"
-    else:
-        contact = "GWC"
+    contact = "OWC" if "OWC" in qc_frame else "GWC"
 
     # When SWATINIT=SWL=SWAT, PPCW as reported by Eclipse is the
     # same as PCOW_MAX, and we cannot use it to compute PC, remove it:
@@ -618,7 +609,7 @@ def merge_equil(grid_df: pd.DataFrame, equil_df: pd.DataFrame) -> pd.DataFrame:
     # Be compatible with future change in res2df:
     equil_df.rename({"ACCURACY": "OIP_INIT"}, axis="columns", inplace=True)
 
-    contacts = list(set(["OWC", "GOC", "GWC"]).intersection(set(equil_df.columns)))
+    contacts = list({"OWC", "GOC", "GWC"}.intersection(set(equil_df.columns)))
     # Rename and slice the equil dataframe:
     equil_df = equil_df.rename(
         {"Z": "Z_DATUM", "PRESSURE": "PRESSURE_DATUM"}, axis="columns"
@@ -630,8 +621,7 @@ def merge_equil(grid_df: pd.DataFrame, equil_df: pd.DataFrame) -> pd.DataFrame:
     assert (
         not pd.isnull(equil_df).any().any()
     ), f"BUG: NaNs in equil dataframe:\n{equil_df}"
-    grid_df = grid_df.merge(equil_df, on="EQLNUM", how="left")
-    return grid_df
+    return grid_df.merge(equil_df, on="EQLNUM", how="left")
 
 
 def merge_pc_max(
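
Two more auto-fixable rules are visible in the merge_equil hunks above: C405 (use a set literal instead of set([...])) and RET504 (return an expression directly rather than assigning it to a variable first). A small runnable sketch with made-up data:

import pandas as pd

equil_df = pd.DataFrame({"EQLNUM": [1], "OWC": [1000.0]})
grid_df = pd.DataFrame({"EQLNUM": [1, 1], "SWAT": [0.2, 0.3]})

# C405: a set literal avoids building a throwaway list
contacts = list({"OWC", "GOC", "GWC"}.intersection(set(equil_df.columns)))
print(contacts)  # -> ['OWC']

# RET504: return the merge result directly, as the hunk above now does
def merge_equil_sketch(grid: pd.DataFrame, equil: pd.DataFrame) -> pd.DataFrame:
    return grid.merge(equil, on="EQLNUM", how="left")

print(merge_equil_sketch(grid_df, equil_df))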
10 changes: 2 additions & 8 deletions src/subscript/convert_grid_format/convert_grid_format.py
@@ -147,10 +147,7 @@ def _convert_ecl2roff(
     if not props:
         raise SystemExit("STOP. No properties given")
 
-    if ":" in props:
-        props_list = props.split(":")
-    else:
-        props_list = props.split()
+    props_list = props.split(":") if ":" in props else props.split()
 
     fformat = mode
     fformat = fformat.replace("restart", "unrst")
@@ -162,10 +159,7 @@
 
     if os.path.exists(dates):
         dates = " ".join(Path(dates).read_text(encoding="utf8").splitlines())
-        if ":" in dates:
-            dates_list = dates.split(":")
-        else:
-            dates_list = dates.split()
+        dates_list = dates.split(":") if ":" in dates else dates.split()
     else:
         dates_list = None
 
7 changes: 2 additions & 5 deletions src/subscript/csv2ofmvol/csv2ofmvol.py
@@ -7,8 +7,7 @@
 import pandas as pd
 from dateutil.relativedelta import relativedelta
 
-from subscript import __version__
-from subscript import getLogger as subscriptlogger
+from subscript import __version__, getLogger as subscriptlogger
 from subscript.eclcompress.eclcompress import glob_patterns
 
 logger = subscriptlogger(__name__)
@@ -71,7 +70,7 @@
 
 
 def read_pdm_csv_files(
-    csvfiles: Union[pd.DataFrame, str, List[str], List[pd.DataFrame]]
+    csvfiles: Union[pd.DataFrame, str, List[str], List[pd.DataFrame]],
 ) -> pd.DataFrame:
     """Read a list of CSV files and return a dataframe
@@ -250,8 +249,6 @@ class CustomFormatter(
 
     # pylint: disable=unnecessary-pass
 
-    pass
-
 
 def get_parser() -> argparse.ArgumentParser:
     """Parse command line arguments, return a Namespace with arguments"""
3 changes: 1 addition & 2 deletions src/subscript/csv_merge/csv_merge.py
@@ -54,7 +54,6 @@ class CustomFormatter(
     """
 
     # pylint: disable=unnecessary-pass
-    pass
 
 
 class CsvMerge(ErtScript):
@@ -243,7 +242,7 @@ def taglist(strings: List[str], regexp_str: str) -> list:
     list is returned.
     """
     regexp = re.compile(regexp_str)
-    matches = map(lambda x: re.match(regexp, x), strings)
+    matches = (re.match(regexp, x) for x in strings)
     values = [x and x.group(1) for x in matches]
     if any(values):
         return values
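
The taglist change replaces map over a lambda with an equivalent generator expression (ruff's C417, from flake8-comprehensions). A tiny self-contained sketch with made-up input:

import re

strings = ["iter-0", "iter-1", "pred"]
regexp = re.compile(r"iter-(\d+)")

# Before (flagged by C417): map over a lambda
matches = map(lambda x: re.match(regexp, x), strings)

# After: the equivalent generator expression
matches = (re.match(regexp, x) for x in strings)

values = [m and m.group(1) for m in matches]
print(values)  # -> ['0', '1', None]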
11 changes: 4 additions & 7 deletions src/subscript/csv_stack/csv_stack.py
@@ -91,8 +91,6 @@ class CustomFormatter(
 
     # pylint: disable=unnecessary-pass
 
-    pass
-
 
 class CsvStack(ErtScript):
     """A class with a run() function that can be registered as an ERT plugin,
@@ -253,10 +251,9 @@ def drop_constants(
         if len(dframe[col].unique()) == 1:
             # col was a constant column
             columnstodelete.append(col)
-        if keepminimal:
-            # Also drop columns not involved in stacking operation
-            if not (stackmatcher.match(col) or col.lower() in keepthese):
-                columnstodelete.append(col)
+        # Also drop columns not involved in stacking operation
+        if keepminimal and (not (stackmatcher.match(col) or col.lower() in keepthese)):
+            columnstodelete.append(col)
     if keepminimal:
         logger.info("Deleting constant and unwanted columns %s", str(columnstodelete))
     else:
@@ -304,7 +301,7 @@ def csv_stack(
             colstostack = colstostack + 1
             dostack = True
         else:
-            tuplecols.append(tuple([col, ""]))
+            tuplecols.append((col, ""))
             nostackcolumnnames.append(col)
 
     logger.info("Found %d out of %d columns to stack", colstostack, len(dframe.columns))
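
The drop_constants hunk is SIM102 (merge a nested if into the outer condition with and), and the tuple change is C409 (write a tuple literal instead of tuple([...])). A compact sketch of both, with stand-in values:

import re

stackmatcher = re.compile(r"WOPR:")
col, keepminimal, keepthese = "SWAT", True, {"real"}
columnstodelete = []

# SIM102: "if keepminimal:" followed by a nested "if not (...):"
# collapses into a single combined test
if keepminimal and not (stackmatcher.match(col) or col.lower() in keepthese):
    columnstodelete.append(col)

# C409: a tuple literal replaces tuple([col, ""])
tuplecols = [(col, "")]
print(columnstodelete, tuplecols)  # -> ['SWAT'] [('SWAT', '')]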
(Diff truncated: the remaining changed files are not shown.)
