diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 7368092fc..000000000
--- a/.flake8
+++ /dev/null
@@ -1,5 +0,0 @@
-[flake8]
-max-line-length = 88
-ignore = E203, W503
-exclude =
-    src/subscript/version.py
diff --git a/.github/workflows/subscript.yml b/.github/workflows/subscript.yml
index 2b60a4792..1a7bc07c8 100644
--- a/.github/workflows/subscript.yml
+++ b/.github/workflows/subscript.yml
@@ -60,17 +60,13 @@ jobs:
       - name: List all installed packages
         run: pip freeze

-      - name: Lint with isort
+      - name: Format with ruff
         if: ${{ always() }}
-        run: isort --check-only --profile black src tests
+        run: ruff format . --check

-      - name: Lint with black
+      - name: Lint with ruff
         if: ${{ always() }}
-        run: black --check src tests
-
-      - name: Lint with flake8
-        if: ${{ always() }}
-        run: flake8 src tests
+        run: ruff check .

       - name: Check typing with mypy
         if: ${{ always() }}
diff --git a/docs/contributing.rst b/docs/contributing.rst
index 55800c1a5..d7ab4fe28 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -69,9 +69,8 @@ the linting done in CI:

 .. code-block:: console

-    isort --check-only --profile black src tests
-    black --check *.py src tests
-    flake8 src tests
+    ruff check .
+    ruff format . --check
     mypy src/subscript
     rstcheck -r docs

diff --git a/pyproject.toml b/pyproject.toml
index 33aeae69d..6e84cb588 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -58,9 +58,6 @@ dependencies = [

 [project.optional-dependencies]
 tests = [
-    "black",
-    "flake8",
-    "isort",
     "mypy",
     "pytest",
     "pytest-cov",
@@ -68,6 +65,7 @@ tests = [
     "pytest-xdist",
     "rstcheck",
     "rstcheck-core",
+    "ruff",
     "types-Jinja2",
     "types-PyYAML",
     "types-python-dateutil",
@@ -135,12 +133,6 @@ script-files = [
    "src/subscript/legacy/runeclipse",
 ]

-[tool.black]
-line-length = 88
-
-[tool.isort]
-profile = "black"
-
 [tool.mypy]
 ignore_missing_imports = true

@@ -167,3 +159,26 @@ markers = [

 [tool.rstcheck]
 ignore_directives = ["argparse", "automodule"]
+
+[tool.ruff]
+line-length = 88
+
+[tool.ruff.lint]
+ignore = [
+    "C901",
+]
+select = [
+    "C",
+    "E",
+    "F",
+    "I",
+    "PIE",
+    "Q",
+    "RET",
+    "RSE",
+    "SIM",
+    "W",
+]
+
+[tool.ruff.lint.isort]
+combine-as-imports = true
diff --git a/src/subscript/bjobsusers/bjobsusers.py b/src/subscript/bjobsusers/bjobsusers.py
index a18257062..23438517e 100755
--- a/src/subscript/bjobsusers/bjobsusers.py
+++ b/src/subscript/bjobsusers/bjobsusers.py
@@ -65,9 +65,7 @@ def get_jobs(status: str, bjobs_function: Callable) -> pd.DataFrame:
     data = [
         [
             uname,
-            1
-            if rex.match(hname) is None
-            else int(rex.match(hname).group(1)),  # type: ignore
+            1 if rex.match(hname) is None else int(rex.match(hname).group(1)),  # type: ignore
         ]
         for (uname, hname) in slines
     ]
diff --git a/src/subscript/casegen_upcars/casegen_upcars.py b/src/subscript/casegen_upcars/casegen_upcars.py
index 7aa8db9ea..14885cfa4 100755
--- a/src/subscript/casegen_upcars/casegen_upcars.py
+++ b/src/subscript/casegen_upcars/casegen_upcars.py
@@ -549,9 +549,8 @@ def main():
     built_in_functions = ["range"]

     for var in sorted(meta.find_undeclared_variables(ast)):
-        if dictionary.get(var) is None:
-            if var not in built_in_functions:
-                undefined_var.append(var)
+        if dictionary.get(var) is None and var not in built_in_functions:
+            undefined_var.append(var)

     if undefined_var:
         logger.warning(
diff --git a/src/subscript/casegen_upcars/model.py b/src/subscript/casegen_upcars/model.py
index 248a5f19a..2ccec4132 100755
--- a/src/subscript/casegen_upcars/model.py
+++ b/src/subscript/casegen_upcars/model.py
@@ -435,9 +435,7 @@ def __init__(
                 idx : idx + self._fracture_cell_count,
                 start_fracture_idx:end_fracture_idx,
                 start_fracture_k : end_fracture_k + 1,
-            ] = (
-                _i + 1
-            )
+            ] = _i + 1

         for _i, idx in enumerate(self._fracture_j):
             fracture_length = max(0.0, min(1.0, self._fracture_length_x[_i])) * self._lx
@@ -608,10 +606,7 @@ def _create_property(
         assert len(streak_property) == len(
             self._streak_k
         ), f"Number of input {keyword} is not equal to number of streak"
-        if isinstance(fracture_property, int):
-            data_type = np.int16
-        else:
-            data_type = float
+        data_type = np.int16 if isinstance(fracture_property, int) else float
         props = np.empty(
             (self._total_nx, self._total_ny, self._total_nz), dtype=data_type
         )
@@ -653,10 +648,7 @@ def _create_anisotropy_property(
             + keyword
             + " is not equal to number fault in Y- direction"
         )
-        if isinstance(fracture_x_property, int):
-            data_type = np.int16
-        else:
-            data_type = float
+        data_type = np.int16 if isinstance(fracture_x_property, int) else float
         props = np.empty(
             (self._total_nx, self._total_ny, self._total_nz), dtype=data_type
         )
@@ -810,8 +802,8 @@ def export_grdecl(self, filename):
         print("/", file=buffer_)

         print("COORD", file=buffer_)
-        for _i in range(0, self._xv.shape[0]):
-            for _j in range(0, self._xv.shape[1]):
+        for _i in range(self._xv.shape[0]):
+            for _j in range(self._xv.shape[1]):
                 print(
                     # pylint: disable=consider-using-f-string
                     "{{x:{0}}} {{y:{0}}} {{z:{0}}} "
diff --git a/src/subscript/check_swatinit/check_swatinit.py b/src/subscript/check_swatinit/check_swatinit.py
index a8a5b7656..6845a1705 100644
--- a/src/subscript/check_swatinit/check_swatinit.py
+++ b/src/subscript/check_swatinit/check_swatinit.py
@@ -323,10 +323,7 @@ def qc_flag(qc_frame: pd.DataFrame) -> pd.DataFrame:

     qc_col = pd.Series(index=qc_frame.index, dtype=str)

-    if "OWC" in qc_frame:
-        contact = "OWC"
-    else:
-        contact = "GWC"
+    contact = "OWC" if "OWC" in qc_frame else "GWC"

     # Eclipse and libecl does not calculate cell centres to the same decimals.
     # Add some tolerance when testing towards fluid contacts.
@@ -553,10 +550,7 @@ def compute_pc(qc_frame: pd.DataFrame, satfunc_df: pd.DataFrame) -> pd.Series:
             swls = satnum_frame["SWL"].values
         else:
             swls = None
-        if "SWU" in satnum_frame:
-            swus = satnum_frame["SWU"].values
-        else:
-            swus = None
+        swus = satnum_frame["SWU"].values if "SWU" in satnum_frame else None
         p_cap[satnum_frame.index] = _evaluate_pc(
             satnum_frame["SWAT"].values,
             satnum_frame["PC_SCALING"].values,
@@ -565,10 +559,7 @@ def compute_pc(qc_frame: pd.DataFrame, satfunc_df: pd.DataFrame) -> pd.Series:
         satfunc_df[satfunc_df["SATNUM"] == satnum],
     )
     # Fix needed for OPM-flow above contact:
-    if "OWC" in qc_frame:
-        contact = "OWC"
-    else:
-        contact = "GWC"
+    contact = "OWC" if "OWC" in qc_frame else "GWC"

     # When SWATINIT=SWL=SWAT, PPCW as reported by Eclipse is the
     # same as PCOW_MAX, and we cannot use it to compute PC, remove it:
@@ -618,7 +609,7 @@ def merge_equil(grid_df: pd.DataFrame, equil_df: pd.DataFrame) -> pd.DataFrame:
     # Be compatible with future change in res2df:
     equil_df.rename({"ACCURACY": "OIP_INIT"}, axis="columns", inplace=True)

-    contacts = list(set(["OWC", "GOC", "GWC"]).intersection(set(equil_df.columns)))
+    contacts = list({"OWC", "GOC", "GWC"}.intersection(set(equil_df.columns)))

     # Rename and slice the equil dataframe:
     equil_df = equil_df.rename(
         {"Z": "Z_DATUM", "PRESSURE": "PRESSURE_DATUM"}, axis="columns"
     )
@@ -630,8 +621,7 @@ def merge_equil(grid_df: pd.DataFrame, equil_df: pd.DataFrame) -> pd.DataFrame:
     assert (
         not pd.isnull(equil_df).any().any()
     ), f"BUG: NaNs in equil dataframe:\n{equil_df}"
-    grid_df = grid_df.merge(equil_df, on="EQLNUM", how="left")
-    return grid_df
+    return grid_df.merge(equil_df, on="EQLNUM", how="left")


 def merge_pc_max(
diff --git a/src/subscript/convert_grid_format/convert_grid_format.py b/src/subscript/convert_grid_format/convert_grid_format.py
index 2609d1709..949f9c1ef 100644
--- a/src/subscript/convert_grid_format/convert_grid_format.py
+++ b/src/subscript/convert_grid_format/convert_grid_format.py
@@ -147,10 +147,7 @@ def _convert_ecl2roff(
     if not props:
         raise SystemExit("STOP. No properties given")
No properties given") - if ":" in props: - props_list = props.split(":") - else: - props_list = props.split() + props_list = props.split(":") if ":" in props else props.split() fformat = mode fformat = fformat.replace("restart", "unrst") @@ -162,10 +159,7 @@ def _convert_ecl2roff( if os.path.exists(dates): dates = " ".join(Path(dates).read_text(encoding="utf8").splitlines()) - if ":" in dates: - dates_list = dates.split(":") - else: - dates_list = dates.split() + dates_list = dates.split(":") if ":" in dates else dates.split() else: dates_list = None diff --git a/src/subscript/csv2ofmvol/csv2ofmvol.py b/src/subscript/csv2ofmvol/csv2ofmvol.py index 7a7236a62..714bd6cb4 100644 --- a/src/subscript/csv2ofmvol/csv2ofmvol.py +++ b/src/subscript/csv2ofmvol/csv2ofmvol.py @@ -7,8 +7,7 @@ import pandas as pd from dateutil.relativedelta import relativedelta -from subscript import __version__ -from subscript import getLogger as subscriptlogger +from subscript import __version__, getLogger as subscriptlogger from subscript.eclcompress.eclcompress import glob_patterns logger = subscriptlogger(__name__) @@ -71,7 +70,7 @@ def read_pdm_csv_files( - csvfiles: Union[pd.DataFrame, str, List[str], List[pd.DataFrame]] + csvfiles: Union[pd.DataFrame, str, List[str], List[pd.DataFrame]], ) -> pd.DataFrame: """Read a list of CSV files and return a dataframe @@ -250,8 +249,6 @@ class CustomFormatter( # pylint: disable=unnecessary-pass - pass - def get_parser() -> argparse.ArgumentParser: """Parse command line arguments, return a Namespace with arguments""" diff --git a/src/subscript/csv_merge/csv_merge.py b/src/subscript/csv_merge/csv_merge.py index 91542a399..6b010484f 100755 --- a/src/subscript/csv_merge/csv_merge.py +++ b/src/subscript/csv_merge/csv_merge.py @@ -54,7 +54,6 @@ class CustomFormatter( """ # pylint: disable=unnecessary-pass - pass class CsvMerge(ErtScript): @@ -243,7 +242,7 @@ def taglist(strings: List[str], regexp_str: str) -> list: list is returned. 
""" regexp = re.compile(regexp_str) - matches = map(lambda x: re.match(regexp, x), strings) + matches = (re.match(regexp, x) for x in strings) values = [x and x.group(1) for x in matches] if any(values): return values diff --git a/src/subscript/csv_stack/csv_stack.py b/src/subscript/csv_stack/csv_stack.py index c5f3b7730..2bc89eeac 100755 --- a/src/subscript/csv_stack/csv_stack.py +++ b/src/subscript/csv_stack/csv_stack.py @@ -91,8 +91,6 @@ class CustomFormatter( # pylint: disable=unnecessary-pass - pass - class CsvStack(ErtScript): """A class with a run() function that can be registered as an ERT plugin, @@ -253,10 +251,9 @@ def drop_constants( if len(dframe[col].unique()) == 1: # col was a constant column columnstodelete.append(col) - if keepminimal: - # Also drop columns not involved in stacking operation - if not (stackmatcher.match(col) or col.lower() in keepthese): - columnstodelete.append(col) + # Also drop columns not involved in stacking operation + if keepminimal and (not (stackmatcher.match(col) or col.lower() in keepthese)): + columnstodelete.append(col) if keepminimal: logger.info("Deleting constant and unwanted columns %s", str(columnstodelete)) else: @@ -304,7 +301,7 @@ def csv_stack( colstostack = colstostack + 1 dostack = True else: - tuplecols.append(tuple([col, ""])) + tuplecols.append((col, "")) nostackcolumnnames.append(col) logger.info("Found %d out of %d columns to stack", colstostack, len(dframe.columns)) diff --git a/src/subscript/eclcompress/allowlist.py b/src/subscript/eclcompress/allowlist.py index 3448a6820..b696679f9 100644 --- a/src/subscript/eclcompress/allowlist.py +++ b/src/subscript/eclcompress/allowlist.py @@ -1,215 +1,213 @@ -ALLOWLIST_KEYWORDS = set( - [ - "ACTNUM", - "ALPHANUD", - "ALPHANUI", - "ALPHANUM", - "COLLAPSE", - "COORD", - "DEPTH", - "DIFFMMF", - "DIFFMR", - "DIFFMR-", - "DIFFMTHT", - "DIFFMTH-", - "DIFFMX", - "DIFFMX-", - "DIFFMY", - "DIFFMY-", - "DIFFMZ", - "DIFFMZ-", - "DIFFR", - "DIFFTHT", - "DIFFX", - "DIFFY", - "DIFFZ", - "DPNUM", - "DR", - "DRV", - "DSTNUM", - "DTHETA", - "DTHETAV", - "DX", - "DXV", - "DY", - "DYV", - "DZ", - "DZNET", - "DZV", - "ENDNUM", - "EOSNUM", - "EQLNUM", - "EXTHOST", - "EXTREPGL", - "FIP", - "FIPNUM", - "GASADCO", - "GASADEC", - "GASCONC", - "GASSATC", - "GI", - "HBNUM", - "HMMULTSG", - "HWKRO", - "HWKRORG", - "HWKRORW", - "HWKRW", - "HWKRWR", - "HWPCW", - "HWSNUM", - "HWSOGCR", - "HWSOWCR", - "HWSWCR", - "HWSWL", - "HWSWLPC", - "HWSWU", - "IMBNUMMF", - "IONROCK", - "ISTNUM", - "KRNUMMF", - "LANGMPL", - "LANGMULC", - "LANGMULT", - "LKRO", - "LKRORG", - "LKRORW", - "LKRW", - "LKRWR", - "LPCW", - "LSLTWNUM", - "LSNUM", - "LSOGCR", - "LSOWCR", - "LSWCR", - "LSWL", - "LSWLPC", - "LSWU", - "LWKRO", - "LWKRORG", - "LWKRORW", - "LWKRW", - "LWKRWR", - "LWPCW", - "LWSLTNUM", - "LWSNUM", - "LWSOGCR", - "LWSOWCR", - "LWSWCR", - "LWSWL", - "LWSWLPC", - "LWSWU", - "LX", - "LY", - "LZ", - "MISCNUM", - "MLANG", - "MLANGSLV", - "MPFANUM", - "MULTNUM", - "MULTPV", - "MULTR", - "MULTR-", - "MULTTHT", - "MULTTHT-", - "MULTX", - "MULTX-", - "MULTY", - "MULTY-", - "MULTZ", - "MULTZ-", - "NINENUM", - "NTG", - "OILAPI", - "OPERNUM", - "PBUB", - "PDEW", - "PENUM", - "PERMJFUN", - "PERMR", - "PERMTHT", - "PERMX", - "PERMY", - "PERMZ", - "PLYCAMAX", - "PLYKRRF", - "PLYRMDEN", - "PORO", - "PORV", - "PRESSURE", - "PSTNUM", - "PVTNUM", - "RESIDNUM", - "ROCKDEN", - "ROCKFRAC", - "ROCKNUM", - "ROCKTRMX", - "ROCKTRMY", - "ROCKTRMZ", - "RS", - "RSW", - "RV", - "SAKRO", - "SAKRORW", - "SAKRW", - "SAKRWR", - "SALT", - "SAPCW", - "SASGL", - 
"SASOWCR", - "SASWCR", - "SASWL", - "SASWU", - "SATNUM", - "SFOAM", - "SGAS", - "SGLPC", - "SGWCR", - "SKRO", - "SKRORG", - "SKRORW", - "SKRW", - "SKRWR", - "SOCRS", - "SOLVCONC", - "SOLVFRAC", - "SOLWNUM", - "SORBFRAC", - "SORBPRES", - "SPOLY", - "SSGCR", - "SSGL", - "SSOGCR", - "SSOL", - "SSOWCR", - "SSWCR", - "SSWL", - "SSWU", - "SURF", - "SURFNUM", - "SURFWNUM", - "SWAT", - "SWATINIT", - "SWGCR", - "SWLPC", - "TBLK", - "THCGAS", - "THCOIL", - "THCONR", - "THCROCK", - "THCSOLID", - "THCWATER", - "THERMNUM", - "TNUM", - "TOPS", - "TRACKREG", - "TRANR", - "TRANTHT", - "TRANX", - "TRANY", - "TRANZ", - "TRKPF", - "WH2NUM", - "WH3NUM", - "ZCORN", - ] -) +ALLOWLIST_KEYWORDS = { + "ACTNUM", + "ALPHANUD", + "ALPHANUI", + "ALPHANUM", + "COLLAPSE", + "COORD", + "DEPTH", + "DIFFMMF", + "DIFFMR", + "DIFFMR-", + "DIFFMTHT", + "DIFFMTH-", + "DIFFMX", + "DIFFMX-", + "DIFFMY", + "DIFFMY-", + "DIFFMZ", + "DIFFMZ-", + "DIFFR", + "DIFFTHT", + "DIFFX", + "DIFFY", + "DIFFZ", + "DPNUM", + "DR", + "DRV", + "DSTNUM", + "DTHETA", + "DTHETAV", + "DX", + "DXV", + "DY", + "DYV", + "DZ", + "DZNET", + "DZV", + "ENDNUM", + "EOSNUM", + "EQLNUM", + "EXTHOST", + "EXTREPGL", + "FIP", + "FIPNUM", + "GASADCO", + "GASADEC", + "GASCONC", + "GASSATC", + "GI", + "HBNUM", + "HMMULTSG", + "HWKRO", + "HWKRORG", + "HWKRORW", + "HWKRW", + "HWKRWR", + "HWPCW", + "HWSNUM", + "HWSOGCR", + "HWSOWCR", + "HWSWCR", + "HWSWL", + "HWSWLPC", + "HWSWU", + "IMBNUMMF", + "IONROCK", + "ISTNUM", + "KRNUMMF", + "LANGMPL", + "LANGMULC", + "LANGMULT", + "LKRO", + "LKRORG", + "LKRORW", + "LKRW", + "LKRWR", + "LPCW", + "LSLTWNUM", + "LSNUM", + "LSOGCR", + "LSOWCR", + "LSWCR", + "LSWL", + "LSWLPC", + "LSWU", + "LWKRO", + "LWKRORG", + "LWKRORW", + "LWKRW", + "LWKRWR", + "LWPCW", + "LWSLTNUM", + "LWSNUM", + "LWSOGCR", + "LWSOWCR", + "LWSWCR", + "LWSWL", + "LWSWLPC", + "LWSWU", + "LX", + "LY", + "LZ", + "MISCNUM", + "MLANG", + "MLANGSLV", + "MPFANUM", + "MULTNUM", + "MULTPV", + "MULTR", + "MULTR-", + "MULTTHT", + "MULTTHT-", + "MULTX", + "MULTX-", + "MULTY", + "MULTY-", + "MULTZ", + "MULTZ-", + "NINENUM", + "NTG", + "OILAPI", + "OPERNUM", + "PBUB", + "PDEW", + "PENUM", + "PERMJFUN", + "PERMR", + "PERMTHT", + "PERMX", + "PERMY", + "PERMZ", + "PLYCAMAX", + "PLYKRRF", + "PLYRMDEN", + "PORO", + "PORV", + "PRESSURE", + "PSTNUM", + "PVTNUM", + "RESIDNUM", + "ROCKDEN", + "ROCKFRAC", + "ROCKNUM", + "ROCKTRMX", + "ROCKTRMY", + "ROCKTRMZ", + "RS", + "RSW", + "RV", + "SAKRO", + "SAKRORW", + "SAKRW", + "SAKRWR", + "SALT", + "SAPCW", + "SASGL", + "SASOWCR", + "SASWCR", + "SASWL", + "SASWU", + "SATNUM", + "SFOAM", + "SGAS", + "SGLPC", + "SGWCR", + "SKRO", + "SKRORG", + "SKRORW", + "SKRW", + "SKRWR", + "SOCRS", + "SOLVCONC", + "SOLVFRAC", + "SOLWNUM", + "SORBFRAC", + "SORBPRES", + "SPOLY", + "SSGCR", + "SSGL", + "SSOGCR", + "SSOL", + "SSOWCR", + "SSWCR", + "SSWL", + "SSWU", + "SURF", + "SURFNUM", + "SURFWNUM", + "SWAT", + "SWATINIT", + "SWGCR", + "SWLPC", + "TBLK", + "THCGAS", + "THCOIL", + "THCONR", + "THCROCK", + "THCSOLID", + "THCWATER", + "THERMNUM", + "TNUM", + "TOPS", + "TRACKREG", + "TRANR", + "TRANTHT", + "TRANX", + "TRANY", + "TRANZ", + "TRKPF", + "WH2NUM", + "WH3NUM", + "ZCORN", +} diff --git a/src/subscript/eclcompress/eclcompress.py b/src/subscript/eclcompress/eclcompress.py index d55f906de..54acfe299 100755 --- a/src/subscript/eclcompress/eclcompress.py +++ b/src/subscript/eclcompress/eclcompress.py @@ -60,9 +60,7 @@ ] EPILOG = """ -Default list of files to compress is """ + " ".join( - DEFAULT_FILES_TO_COMPRESS -) +Default list of files to compress is """ 
+ " ".join(DEFAULT_FILES_TO_COMPRESS) CATEGORY = "modelling.reservoir" @@ -393,7 +391,6 @@ class CustomFormatter( """ # pylint: disable=W0107 - pass def get_parser() -> argparse.ArgumentParser: @@ -459,8 +456,7 @@ def parse_wildcardfile(filename: str) -> List[str]: lines = [line.strip() for line in lines] lines = [line.split("#")[0] for line in lines] lines = [line.split("--")[0] for line in lines] - lines = list(filter(len, lines)) - return lines + return list(filter(len, lines)) def main(): diff --git a/src/subscript/ecldiff2roff/ecldiff2roff.py b/src/subscript/ecldiff2roff/ecldiff2roff.py index 98531846b..30772f748 100644 --- a/src/subscript/ecldiff2roff/ecldiff2roff.py +++ b/src/subscript/ecldiff2roff/ecldiff2roff.py @@ -40,7 +40,6 @@ class CustomFormatter( and raw description formatter""" # pylint: disable=unnecessary-pass - pass def get_parser() -> argparse.ArgumentParser: diff --git a/src/subscript/fmuobs/fmuobs.py b/src/subscript/fmuobs/fmuobs.py index df9ebc95b..ef8c37d10 100644 --- a/src/subscript/fmuobs/fmuobs.py +++ b/src/subscript/fmuobs/fmuobs.py @@ -77,7 +77,6 @@ class CustomFormatter( and raw description formatter""" # pylint: disable=unnecessary-pass - pass def get_parser() -> argparse.ArgumentParser: @@ -238,10 +237,11 @@ def autoparse_file(filename: str) -> Tuple[Optional[str], Union[pd.DataFrame, di try: obsdict = yaml.safe_load(Path(filename).read_text(encoding="utf8")) - if isinstance(obsdict, dict): - if obsdict.get("smry", None) or obsdict.get("rft", None): - logger.info("Parsed %s as a YAML file with observations", filename) - return ("yaml", obsdict2df(obsdict)) + if isinstance(obsdict, dict) and ( + obsdict.get("smry", None) or obsdict.get("rft", None) + ): + logger.info("Parsed %s as a YAML file with observations", filename) + return ("yaml", obsdict2df(obsdict)) except yaml.scanner.ScannerError as exception: # This occurs if there are tabs in the file, which is not # allowed in a YAML file (but it can be present in ERT observation files) @@ -264,10 +264,13 @@ def autoparse_file(filename: str) -> Tuple[Optional[str], Union[pd.DataFrame, di filename, ) return ("ert", pd.DataFrame()) - if {"CLASS", "LABEL"}.issubset(dframe.columns) and not dframe.empty: - if set(dframe["CLASS"]).intersection(set(CLASS_SHORTNAME.keys())): - logger.info("Parsed %s as an ERT observation file", filename) - return ("ert", dframe) + if ( + {"CLASS", "LABEL"}.issubset(dframe.columns) + and not dframe.empty + and set(dframe["CLASS"]).intersection(set(CLASS_SHORTNAME.keys())) + ): + logger.info("Parsed %s as an ERT observation file", filename) + return ("ert", dframe) except ValueError: pass diff --git a/src/subscript/fmuobs/parsers.py b/src/subscript/fmuobs/parsers.py index 50d6253c9..ad88d5347 100644 --- a/src/subscript/fmuobs/parsers.py +++ b/src/subscript/fmuobs/parsers.py @@ -98,10 +98,7 @@ def mask_curly_braces(string: str, mask_char: str = "X") -> str: # in the strings after the loop above, mask these also: for match in re.compile( # r"(\{[\sA-Za-z=\.,:;0-9\-_/" + mask_char + r";]+\})" - r"(\{[" - + _KEY_VALUE_CHARS - + mask_char - + r";]+\})" + r"(\{[" + _KEY_VALUE_CHARS + mask_char + r";]+\})" ).findall(string): if match: string = string.replace(match, mask_char * len(match)) @@ -356,7 +353,7 @@ def flatten_observation_unit( # Inject a default segment if segments are in use: if any("SEGMENT" in key for key in subunit_keys): - obs_subunits.append({**{"SEGMENT": "DEFAULT"}, **keyvalues}) + obs_subunits.append({"SEGMENT": "DEFAULT", **keyvalues}) for subunit in subunit_keys: if 
@@ -364,7 +361,7 @@ def flatten_observation_unit(
             raise ValueError("Wrong observation subunit syntax: " + str(subunit))
         obs_subunits.append(
             {
-                **{subunit.split()[0]: subunit.split()[1]},
+                subunit.split()[0]: subunit.split()[1],
                 **keyvalues,
                 **obsunit[subunit],
             }
diff --git a/src/subscript/interp_relperm/interp_relperm.py b/src/subscript/interp_relperm/interp_relperm.py
index f95f10bfe..45440ed50 100755
--- a/src/subscript/interp_relperm/interp_relperm.py
+++ b/src/subscript/interp_relperm/interp_relperm.py
@@ -323,21 +323,20 @@ def prepend_root_path_to_relative_files(
     Returns:
         Modified configuration for interp_relperm
     """
-    if "base" in cfg.keys() and isinstance(cfg["base"], list):
+    if "base" in cfg and isinstance(cfg["base"], list):
         for idx in range(len(cfg["base"])):
             if not os.path.isabs(cfg["base"][idx]):
                 cfg["base"][idx] = str(root_path / Path(cfg["base"][idx]))
-    if "high" in cfg.keys() and isinstance(cfg["high"], list):
+    if "high" in cfg and isinstance(cfg["high"], list):
         for idx in range(len(cfg["high"])):
             if not os.path.isabs(cfg["high"][idx]):
                 cfg["high"][idx] = str(root_path / Path(cfg["high"][idx]))
-    if "low" in cfg.keys() and isinstance(cfg["low"], list):
+    if "low" in cfg and isinstance(cfg["low"], list):
         for idx in range(len(cfg["low"])):
             if not os.path.isabs(cfg["low"][idx]):
                 cfg["low"][idx] = str(root_path / Path(cfg["low"][idx]))
-    if "pyscalfile" in cfg.keys():
-        if not os.path.isabs(cfg["pyscalfile"]):
-            cfg["pyscalfile"] = str(root_path / Path(cfg["pyscalfile"]))
+    if "pyscalfile" in cfg and not os.path.isabs(cfg["pyscalfile"]):
+        cfg["pyscalfile"] = str(root_path / Path(cfg["pyscalfile"]))

     return cfg
diff --git a/src/subscript/merge_rft_ertobs/merge_rft_ertobs.py b/src/subscript/merge_rft_ertobs/merge_rft_ertobs.py
index ffbfc6e3b..bf8164555 100644
--- a/src/subscript/merge_rft_ertobs/merge_rft_ertobs.py
+++ b/src/subscript/merge_rft_ertobs/merge_rft_ertobs.py
@@ -42,7 +42,6 @@ class CustomFormatter(
     and raw description formatter"""

     # pylint: disable=unnecessary-pass
-    pass


 def get_parser() -> argparse.ArgumentParser:
@@ -239,9 +238,8 @@ def merge_rft_ertobs(gendatacsv: str, obsdir: str) -> pd.DataFrame:

     if "report_step" in sim_df.columns:
         return pd.merge(sim_df, obs_df, how="left", on=["well", "order", "report_step"])
-    else:
-        # Ensure backward compatibility where gendata_rft doesn't have report_step
-        return pd.merge(sim_df, obs_df, how="left", on=["well", "order"])
+    # Ensure backward compatibility where gendata_rft doesn't have report_step
+    return pd.merge(sim_df, obs_df, how="left", on=["well", "order"])


 def main() -> None:
diff --git a/src/subscript/ofmvol2csv/ofmvol2csv.py b/src/subscript/ofmvol2csv/ofmvol2csv.py
index 0955fa901..7e4fa9258 100644
--- a/src/subscript/ofmvol2csv/ofmvol2csv.py
+++ b/src/subscript/ofmvol2csv/ofmvol2csv.py
@@ -7,8 +7,7 @@

 import pandas as pd

-from subscript import __version__
-from subscript import getLogger as subscriptlogger
+from subscript import __version__, getLogger as subscriptlogger
 from subscript.eclcompress.eclcompress import glob_patterns

 logger = subscriptlogger(__name__)
@@ -42,8 +41,6 @@ class CustomFormatter(

     # pylint: disable=unnecessary-pass

-    pass
-

 def get_parser() -> argparse.ArgumentParser:
     """Construct a parser for the command line utility ofmvol2csv and for
@@ -105,8 +102,7 @@ def cleanse_ofm_lines(filelines: List[str]) -> List[str]:
     # Make everything upper case (not pretty, but simplifies parsing)
     filelines = [line.upper() for line in filelines]
     # OFM sometimes uses the tab character, replace by space to robustify parsing
-    filelines = [line.replace("\t", " ") for line in filelines]
-    return filelines
+    return [line.replace("\t", " ") for line in filelines]


 def unify_dateformat(lines: List[str]) -> List[str]:
@@ -158,8 +154,7 @@ def extract_columnnames(filelines: List[str]) -> List[str]:
         logger.error("Only support files with *DATE occuring once")
         raise ValueError

-    columnnames = columnnamelines[0].rstrip().replace("*", "").split()
-    return columnnames
+    return columnnamelines[0].rstrip().replace("*", "").split()


 def split_list(linelist: List[str], splitidxs: List[int]) -> List[List[str]]:
@@ -206,10 +201,7 @@ def find_wellstart_indices(filelines: List[str]) -> List[int]:
     Returns:
         List of integers
     """
-    wellnamelinenumbers = [
-        i for i in range(0, len(filelines)) if filelines[i].startswith("*NAME")
-    ]
-    return wellnamelinenumbers
+    return [i for i in range(len(filelines)) if filelines[i].startswith("*NAME")]


 def parse_well(well_lines: List[str], columnnames: List[str]) -> pd.DataFrame:
diff --git a/src/subscript/pack_sim/pack_sim.py b/src/subscript/pack_sim/pack_sim.py
index 9d8b56689..977c0297f 100755
--- a/src/subscript/pack_sim/pack_sim.py
+++ b/src/subscript/pack_sim/pack_sim.py
@@ -245,11 +245,7 @@ def inspect_file(
         line_strip_no_comment = _remove_comments(True, line_strip).strip()
         line = _remove_comments(clear_comments, line)

-        if (
-            line.upper().startswith("INCLUDE")
-            or line.startswith("GDFILE")
-            or line.startswith("IMPORT")
-        ):
+        if line.upper().startswith(("INCLUDE", "GDFILE", "IMPORT")):
             # Include keyword found!
             logger.info("%s%s", indent, "FOUND INCLUDE FILE ==>")
             new_data_file += line
@@ -268,8 +264,8 @@ def inspect_file(
                 line_strip = _remove_comments(clear_comments, line_strip)
                 include_line = _remove_comments(clear_comments, include_line)

-                if not len(include_line.strip()) == 0:
-                    if "--" not in line_strip[0:3] and not len(line_strip) == 0:
+                if len(include_line.strip()) != 0:
+                    if "--" not in line_strip[0:3] and len(line_strip) != 0:
                         # This is the include file!
                         include_full = line_strip.split("--")[0]
                         include_stripped = Path(shlex.split(include_full)[0])
@@ -376,10 +372,7 @@ def inspect_file(
                             indent,
                             new_include,
                         )
-                    if fmu:
-                        fmu_include = "../"
-                    else:
-                        fmu_include = ""
+                    fmu_include = "../" if fmu else ""

                     # Change the include path in the current file being inspected
                     if "'" in include_full or '"' in include_full:
@@ -395,8 +388,7 @@ def inspect_file(
                         # Ignore comments after the include statement
                         break

-                else:
-                    new_data_file += include_line
+                new_data_file += include_line
         elif line_strip_no_comment == "RUNSPEC" and fmu:
             section = "runspec/"
             (packing_path / "include" / section).mkdir(exist_ok=True)
diff --git a/src/subscript/params2csv/params2csv.py b/src/subscript/params2csv/params2csv.py
index 3c8ed4f96..63c628db5 100755
--- a/src/subscript/params2csv/params2csv.py
+++ b/src/subscript/params2csv/params2csv.py
@@ -77,8 +77,6 @@ class CustomFormatter(

     # pylint: disable=unnecessary-pass

-    pass
-

 class Params2Csv(ErtScript):
     """A class with a run() function that can be registered as an ERT plugin,
diff --git a/src/subscript/presentvalue/presentvalue.py b/src/subscript/presentvalue/presentvalue.py
index 7280f43b7..933cd3c10 100755
--- a/src/subscript/presentvalue/presentvalue.py
+++ b/src/subscript/presentvalue/presentvalue.py
@@ -144,15 +144,16 @@ def main() -> None:
         discountrate=args.discountrate,
     )

-    if args.basedatafiles:
-        if len(args.basedatafiles) > 1 and len(args.basedatafiles) != len(
-            args.datafiles
-        ):
-            msg = (
-                "Supply either no base case, a single base case or "
-                "exactly as many base cases as datafiles."
-            )
-            raise ValueError(msg)
+    if (
+        args.basedatafiles
+        and len(args.basedatafiles) > 1
+        and len(args.basedatafiles) != len(args.datafiles)
+    ):
+        msg = (
+            "Supply either no base case, a single base case or "
+            "exactly as many base cases as datafiles."
+        )
+        raise ValueError(msg)

     for idx, datafile in enumerate(args.datafiles):
         if args.basedatafiles:
@@ -379,7 +380,7 @@ def calc_presentvalue_df(
     prodecon["deltayears"] = prodecon.index - discountto

     prodecon["discountfactors"] = 1.0 / (
-        ((1.0 + prodecon["discountrate"] / 100.0) ** np.array(prodecon["deltayears"]))
+        (1.0 + prodecon["discountrate"] / 100.0) ** np.array(prodecon["deltayears"])
     )

     prodecon["presentvalue"] = (
@@ -474,9 +475,8 @@ def prepare_econ_table(
     if filename:
         econ_df = pd.read_csv(filename, index_col=0)
         econ_df.columns = econ_df.columns.map(str.strip)
-        if "discountrate" in econ_df:
-            if len(econ_df["discountrate"]) > 1:
-                raise ValueError("discountrate must be constant")
+        if "discountrate" in econ_df and len(econ_df["discountrate"]) > 1:
+            raise ValueError("discountrate must be constant")
         # assert first column is year.
     else:
         # Make a default dataframe if nothing provided.
@@ -520,7 +520,7 @@ def calc_pv_irr(rate: float, pv_df: pd.DataFrame, cutoffyear: int) -> float:
         Computed presentvalue
     """
     discountfactors_irr = 1.0 / (1.0 + rate / 100.0) ** np.array(
-        list(range(0, len(pv_df)))
+        list(range(len(pv_df)))
     )
     if len(pv_df) < 2:
         raise ValueError("IRR computation meaningless on a single year")
diff --git a/src/subscript/prtvol2csv/prtvol2csv.py b/src/subscript/prtvol2csv/prtvol2csv.py
index 0c08c8a82..0ddafbb3c 100644
--- a/src/subscript/prtvol2csv/prtvol2csv.py
+++ b/src/subscript/prtvol2csv/prtvol2csv.py
@@ -50,8 +50,6 @@ class CustomFormatter(

     # pylint: disable=unnecessary-pass

-    pass
-

 def get_parser() -> argparse.ArgumentParser:
     """A parser for command line argument parsing and for documentation."""
diff --git a/src/subscript/restartthinner/restartthinner.py b/src/subscript/restartthinner/restartthinner.py
index 2ad3c8af0..ba1d2c67f 100644
--- a/src/subscript/restartthinner/restartthinner.py
+++ b/src/subscript/restartthinner/restartthinner.py
@@ -73,10 +73,7 @@ def rd_repacker(rstfilename: str, slicerstindices: list, quiet: bool) -> None:
     location of the UNRST file, dump temporary files in there, and modify
     the original filename.
     """
-    if quiet:
-        out = " >/dev/null"
-    else:
-        out = ""
+    out = " >/dev/null" if quiet else ""
     # Error early if libecl tools are not available
     try:
         find_resdata_app("rd_unpack")
@@ -137,7 +134,7 @@ def restartthinner(
     rst = ResdataFile(filename)
     restart_indices = get_restart_indices(filename)
     restart_dates = [
-        rst.iget_restart_sim_time(index) for index in range(0, len(restart_indices))
+        rst.iget_restart_sim_time(index) for index in range(len(restart_indices))
     ]

     if numberofslices > 1:
@@ -161,10 +158,7 @@ def restartthinner(
         print("Selected restarts:")
         print("-----------------------")
         for idx, rstidx in enumerate(restart_indices):
-            if restart_indices[idx] in slicerstindices:
-                slicepresent = "X"
-            else:
-                slicepresent = ""
+            slicepresent = "X" if restart_indices[idx] in slicerstindices else ""
             print(
                 f"{rstidx:4d} "
                 f"{datetime.date.strftime(restart_dates[idx], '%Y-%m-%d')} "
diff --git a/src/subscript/ri_wellmod/ri_wellmod.py b/src/subscript/ri_wellmod/ri_wellmod.py
index d10230219..28286c18e 100755
--- a/src/subscript/ri_wellmod/ri_wellmod.py
+++ b/src/subscript/ri_wellmod/ri_wellmod.py
@@ -90,7 +90,6 @@ class CustomFormatter(
     """

     # pylint: disable=unnecessary-pass
-    pass


 def get_resinsight_exe() -> Optional[str]:
@@ -164,7 +163,7 @@ def get_rips_version_triplet() -> Tuple[int, int, int]:


 def find_and_wrap_resinsight_version(
-    version_triplet: Tuple[int, int, int]
+    version_triplet: Tuple[int, int, int],
 ) -> Optional[str]:
     """
     Find a ResInsight executable matching at least the major.minor version
diff --git a/src/subscript/rmsecl_volumetrics/rmsecl_volumetrics.py b/src/subscript/rmsecl_volumetrics/rmsecl_volumetrics.py
index a9d9127c1..93651de1a 100644
--- a/src/subscript/rmsecl_volumetrics/rmsecl_volumetrics.py
+++ b/src/subscript/rmsecl_volumetrics/rmsecl_volumetrics.py
@@ -92,9 +92,9 @@ def _compare_volumetrics(
     )

     # Slicing in multiindex requires a list of unique tuples:
-    regzones = set(
+    regzones = {
         tuple(regzone) for regzone in regzonfip_set[["REGION", "ZONE"]].to_numpy()
-    ).intersection(set(tuple(regzone) for regzone in volumetrics_df.index))
+    }.intersection({tuple(regzone) for regzone in volumetrics_df.index})
     if not regzones:
         # Skip sets for which there are not volumetrics:
         logger.warning(
@@ -129,13 +129,11 @@ def _disjoint_sets_to_dict(
     """From the dataframe of sets, construct a dictionary indexed by set
     index provide lists of members in the set for FIPNUM, ZONE and REGION"""
     regions = disjoint_sets_df.groupby(["SET"])["REGION"].apply(
-        lambda x: sorted(list(set(x)))
-    )
-    zones = disjoint_sets_df.groupby(["SET"])["ZONE"].apply(
-        lambda x: sorted(list(set(x)))
+        lambda x: sorted(set(x))
     )
+    zones = disjoint_sets_df.groupby(["SET"])["ZONE"].apply(lambda x: sorted(set(x)))
     fipnums = disjoint_sets_df.groupby(["SET"])["FIPNUM"].apply(
-        lambda x: sorted(list(set(x)))
+        lambda x: sorted(set(x))
     )

     return pd.concat([regions, zones, fipnums], axis=1).to_dict(orient="index")
diff --git a/src/subscript/runrms/runrms.py b/src/subscript/runrms/runrms.py
index 3205d2bc1..532d6dc1d 100644
--- a/src/subscript/runrms/runrms.py
+++ b/src/subscript/runrms/runrms.py
@@ -358,7 +358,7 @@ def parse_setup(self):

         if not pathlib.Path(setup).is_file():
             xcritical(f"Requested setup <{setup}> does not exist!")
-            raise FileNotFoundError()
+            raise FileNotFoundError

         with open(setup, "r", encoding="utf-8") as stream:
             logger.debug("Actual setup file: %s", setup)
diff --git a/src/subscript/sector2fluxnum/datafile_obj.py b/src/subscript/sector2fluxnum/datafile_obj.py
index 5527755bb..f6420deb8 100644
--- a/src/subscript/sector2fluxnum/datafile_obj.py
+++ b/src/subscript/sector2fluxnum/datafile_obj.py
@@ -437,7 +437,7 @@ def run_DUMPFLUX_nosim(self, ecl_version=None):
             print("ERROR: Some errors occured during DUMPFLUX run.\n")
             print("Please check PRT output...")
             print(line_elements)
-            raise Exception()
+            raise Exception

         if not Path(f"{self.DUMPFLUX_name.split('.')[0]}.FLUX").exists():
             raise Exception("FLUX file template not created!")
diff --git a/src/subscript/sector2fluxnum/flux_util.py b/src/subscript/sector2fluxnum/flux_util.py
index 315d5d92a..a9c640a0a 100644
--- a/src/subscript/sector2fluxnum/flux_util.py
+++ b/src/subscript/sector2fluxnum/flux_util.py
@@ -51,15 +51,13 @@ def filter_region(
     ):
         # Intersection
         region.select_all()
         # region.select_active()
-        region = region & region_i & region_j & region_k & region_fip
-        return region
+        return region & region_i & region_j & region_k & region_fip

     if combine_operator == "union":
         # Union
         region1.select_active()
         region2 = region_i | region_j | region_k | region_fip
-        region = region1 & region2
-        return region
+        return region1 & region2

     raise Exception(
         f"ERROR: '{combine_operator}' is not a valid operator to combine regions."
@@ -102,6 +100,4 @@ def unpack_ijk(i_str, j_str, k_str):
     k_start = int(k_str_split[0])
     k_end = int(k_str_split[1])

-    ijk_list = [i_start, i_end, j_start, j_end, k_start, k_end]
-
-    return ijk_list
+    return [i_start, i_end, j_start, j_end, k_start, k_end]
diff --git a/src/subscript/sector2fluxnum/fluxfile_obj.py b/src/subscript/sector2fluxnum/fluxfile_obj.py
index 837293834..f00a27c68 100644
--- a/src/subscript/sector2fluxnum/fluxfile_obj.py
+++ b/src/subscript/sector2fluxnum/fluxfile_obj.py
@@ -195,25 +195,15 @@ def write_new_fluxfile_from_rst(
                 kw_temp[1] = prev_days_flux + delta_days
                 prev_days_flux += delta_days

-            elif flux_fine[j_idx].header[0] == "WELLNAME":
-                kw_temp = flux_fine[j_idx].deep_copy()
-
-            elif flux_fine[j_idx].header[0] == "WELLFLOW":
-                kw_temp = flux_fine[j_idx].deep_copy()
-
-            elif flux_fine[j_idx].header[0] == "PMER":
-                kw_temp = flux_fine[j_idx].deep_copy()
-
-            elif flux_fine[j_idx].header[0] == "PADMAX":
-                kw_temp = flux_fine[j_idx].deep_copy()
-
-            elif flux_fine[j_idx].header[0] == "PMAX":
-                kw_temp = flux_fine[j_idx].deep_copy()
-
-            elif flux_fine[j_idx].header[0] == "PADS":
-                kw_temp = flux_fine[j_idx].deep_copy()
-
-            elif flux_fine[j_idx].header[0] == "":  # OBS!
+            elif flux_fine[j_idx].header[0] in (
+                "WELLNAME",
+                "WELLFLOW",
+                "PMER",
+                "PADMAX",
+                "PMAX",
+                "PADS",
+                "",  # OBS!
+            ):
                 kw_temp = flux_fine[j_idx].deep_copy()

             elif flux_fine[j_idx].header[0] == "POIL":
diff --git a/src/subscript/summaryplot/summaryplot.py b/src/subscript/summaryplot/summaryplot.py
index 4b07d3947..079650e71 100644
--- a/src/subscript/summaryplot/summaryplot.py
+++ b/src/subscript/summaryplot/summaryplot.py
@@ -346,7 +346,7 @@ def summaryplotter(

                 # Loop over all restart steps
                 last_step = range(rstfiles[idx].num_named_kw("SWAT"))[-1]
-                for report_step in range(0, last_step + 1):
+                for report_step in range(last_step + 1):
                     restartvectordates[rstvec][datafile].append(
                         rstfiles[idx].iget_restart_sim_time(report_step)
                     )
@@ -436,7 +436,7 @@ def summaryplotter(
             histvec = toks[0] + "H"
             if len(toks) > 1:
                 histvec = histvec + ":" + toks[1]
-            if histvec in firstsummary.keys():
+            if histvec in firstsummary:
                 values = firstsummary.numpy_vector(histvec)
                 sumlabel = "_nolegend_"
                 if normalize:
@@ -453,7 +453,7 @@ def summaryplotter(
         fig.autofmt_xdate()

         for idx, summaryfile in enumerate(summaryfiles):
-            if vector in summaryfile.keys():
+            if vector in summaryfile:
                 if idx >= maxlabels:
                     # Truncate legend if too many
                     sumlabel = "_nolegend_"
@@ -465,11 +465,8 @@ def summaryplotter(
                 values = summaryfile.numpy_vector(vector)

                 if ensemblemode:
-                    cycledcolor = colours[vector_idx]
-                    if idx == 0:
-                        sumlabel = vector
-                    else:
-                        sumlabel = "_nolegend_"
+                    cycledcolor = colours[vector_idx]
+                    sumlabel = vector if idx == 0 else "_nolegend_"
                 elif singleplot:
                     cycledcolor = colours[vector_idx]
                 else:
@@ -536,10 +532,7 @@ def summaryplotter(

             if ensemblemode:
                 cycledcolor = colours[len(matchedsummaryvectors) + rstvec_idx]
-                if datafile_idx == 0:
-                    rstlabel = rstvec
-                else:
-                    rstlabel = "_nolegend_"
+                rstlabel = rstvec if datafile_idx == 0 else "_nolegend_"
             else:
                 cycledcolor = colours[datafile_idx]

@@ -649,21 +642,21 @@ def main():

         plotprocess = Process(
             target=summaryplotter,
-            kwargs=dict(
-                summaryfiles=summaryfiles,
-                datafiles=datafiles,
-                vectors=vectors,
-                colourby=args.colourby,
-                maxlabels=args.maxlabels,
-                logcolourby=args.logcolourby,
-                parameterfiles=parameterfiles,
-                histvectors=args.hist,
-                normalize=args.normalize,
-                singleplot=args.singleplot,
-                nolegend=args.nolegend,
-                dumpimages=args.dumpimages,
-                ensemblemode=args.ensemblemode,
-            ),
+            kwargs={
+                "summaryfiles": summaryfiles,
+                "datafiles": datafiles,
+                "vectors": vectors,
+                "colourby": args.colourby,
+                "maxlabels": args.maxlabels,
+                "logcolourby": args.logcolourby,
+                "parameterfiles": parameterfiles,
+                "histvectors": args.hist,
+                "normalize": args.normalize,
+                "singleplot": args.singleplot,
+                "nolegend": args.nolegend,
+                "dumpimages": args.dumpimages,
+                "ensemblemode": args.ensemblemode,
+            },
         )

         plotprocess.start()
@@ -683,21 +676,21 @@ def main():
                 plotprocess.terminate()
                 plotprocess = Process(
                     target=summaryplotter,
-                    kwargs=dict(
-                        summaryfiles=None,  # forces reload
-                        datafiles=datafiles,
-                        vectors=vectors,
-                        colourby=args.colourby,
-                        maxlabels=args.maxlabels,
-                        logcolourby=args.logcolourby,
-                        parameterfiles=parameterfiles,
-                        histvectors=args.hist,
-                        normalize=args.normalize,
-                        singleplot=args.singleplot,
-                        nolegend=args.nolegend,
-                        dumpimages=args.dumpimages,
-                        ensemblemode=args.ensemblemode,
-                    ),
+                    kwargs={
+                        "summaryfiles": None,  # forces reload
+                        "datafiles": datafiles,
+                        "vectors": vectors,
+                        "colourby": args.colourby,
+                        "maxlabels": args.maxlabels,
+                        "logcolourby": args.logcolourby,
+                        "parameterfiles": parameterfiles,
+                        "histvectors": args.hist,
+                        "normalize": args.normalize,
+                        "singleplot": args.singleplot,
+                        "nolegend": args.nolegend,
+                        "dumpimages": args.dumpimages,
+                        "ensemblemode": args.ensemblemode,
+                    },
                 )
                 plotprocess.start()
     except KeyboardInterrupt:
diff --git a/src/subscript/sunsch/sunsch.py b/src/subscript/sunsch/sunsch.py
index efe4d15e9..7117e77fc 100755
--- a/src/subscript/sunsch/sunsch.py
+++ b/src/subscript/sunsch/sunsch.py
@@ -85,7 +85,7 @@ def __init__(self, **config):


 def datetime_from_date(
-    date: Union[str, datetime.datetime, datetime.date]
+    date: Union[str, datetime.datetime, datetime.date],
 ) -> datetime.datetime:
     """Set time to 00:00:00 in a date, keep time info if given a datetime object"""
     if isinstance(date, datetime.datetime):
diff --git a/src/subscript/sunsch/time_vector.py b/src/subscript/sunsch/time_vector.py
index 105a55c37..a86720f26 100644
--- a/src/subscript/sunsch/time_vector.py
+++ b/src/subscript/sunsch/time_vector.py
@@ -55,25 +55,25 @@ def _make_datetime(dates_record):
     date_dt = datetime.datetime(year, ecl_month[month], day)
     if len(dates_record) < 4:
         return date_dt
-    else:
-        time_str = dates_record[3].get_str(0)
-        time_list = time_str.split(":")
-        hour = minute = second = microsecond = 0
-        hour = int(time_list[0])
-        if len(time_list) > 1:
-            minute = int(time_list[1])
-        if len(time_list) > 2:
-            sec_list = time_list[2].split(".")
-            second = int(sec_list[0])
-            if len(sec_list) > 1:
-                ms_str = sec_list[1].strip()
-                npad = 6 - len(ms_str)
-                ms_str += "".join(["0" for i in range(npad)])
-                microsecond = int(ms_str)
-
-        return datetime.datetime(
-            year, ecl_month[month], day, hour, minute, second, microsecond
-        )
+
+    time_str = dates_record[3].get_str(0)
+    time_list = time_str.split(":")
+    hour = minute = second = microsecond = 0
+    hour = int(time_list[0])
+    if len(time_list) > 1:
+        minute = int(time_list[1])
+    if len(time_list) > 2:
+        sec_list = time_list[2].split(".")
+        second = int(sec_list[0])
+        if len(sec_list) > 1:
+            ms_str = sec_list[1].strip()
+            npad = 6 - len(ms_str)
+            ms_str += "".join(["0" for i in range(npad)])
+            microsecond = int(ms_str)
+
+    return datetime.datetime(
+        year, ecl_month[month], day, hour, minute, second, microsecond
+    )


 class TimeStep(object):
@@ -106,10 +106,7 @@ def __len__(self):
         return len(self.keywords)

     def __contains__(self, arg):
-        for kw in self.keywords:
-            if arg == kw.name:
-                return True
-        return False
+        return any(arg == kw.name for kw in self.keywords)

     def __str__(self):
         string = StringIO()
@@ -304,13 +301,13 @@ def __getitem__(self, index):
         """
         if isinstance(index, int):
             return self.time_steps_list[index]
-        else:
-            if not isinstance(index, datetime.datetime) and isinstance(
-                index, datetime.date
-            ):
-                index = datetime.datetime(index.year, index.month, index.day)
-
-            return self.time_steps_dict[index]
+        if not isinstance(index, datetime.datetime) and isinstance(
+            index, datetime.date
+        ):
+            index = datetime.datetime(index.year, index.month, index.day)
+
+        return self.time_steps_dict[index]

     def _add_dates_block(self, ts):
         self.time_steps_dict[ts.dt] = ts
diff --git a/src/subscript/sw_model_utilities/sw_model_utilities.py b/src/subscript/sw_model_utilities/sw_model_utilities.py
index db5d57df9..540b47426 100755
--- a/src/subscript/sw_model_utilities/sw_model_utilities.py
+++ b/src/subscript/sw_model_utilities/sw_model_utilities.py
@@ -87,7 +87,7 @@ def menu():

     hmax = float(input("Height maximum: "))

-    for i in range(0, nplot):
+    for i in range(nplot):
         print("Set no. ", i + 1)
         poro.append(float(input("Poro (frac): ")))
         perm.append(float(input("Perm (mD): ")))
diff --git a/src/subscript/vfp2csv/vfp2csv.py b/src/subscript/vfp2csv/vfp2csv.py
index b4a0373f3..2d8062c67 100644
--- a/src/subscript/vfp2csv/vfp2csv.py
+++ b/src/subscript/vfp2csv/vfp2csv.py
@@ -202,5 +202,4 @@ def vfpfile2df(filename: str) -> pd.DataFrame:

     bhp_values_stacked["TABTYPE"] = tab
     bhp_values_stacked["FILENAME"] = filename
-
-    return bhp_values_stacked
+    return bhp_values_stacked
diff --git a/src/subscript/welltest_dpds/welltest_dpds.py b/src/subscript/welltest_dpds/welltest_dpds.py
index ff3cab585..f14f1fb66 100755
--- a/src/subscript/welltest_dpds/welltest_dpds.py
+++ b/src/subscript/welltest_dpds/welltest_dpds.py
@@ -103,7 +103,6 @@ class CustomFormatter(
     """

     # pylint: disable=unnecessary-pass
-    pass


 def get_parser():
@@ -219,7 +218,7 @@ def get_buildup_indices(rates):
     for idx, rate in enumerate(rates):
         if np.isclose(rate, 0) and last > 0.0:
             buildup_indices.append(idx)
-        if rate > 0 and np.isclose(last, 0) and not idx == 0:
+        if rate > 0 and np.isclose(last, 0) and idx != 0:
             buildup_end_indices.append(idx - 1)
         if idx == len(rates) - 1 and np.isclose(rate, 0):
             buildup_end_indices.append(idx)
@@ -256,7 +255,7 @@ def supertime(time, rate, bu_start_ind, bu_end_ind):
     for bu_time_ind in range(1, bu_end_ind - bu_start_ind + 1):
         # Cannot start from zero. Hence from 1 and not 0 in loop. (Avoid ln(0))
         tot = 0.0
-        for idx in range(0, bu_start_ind):
+        for idx in range(bu_start_ind):
            # End at len-1 because n is not included - only n-1 in formul a
             tot = tot + rdiff[idx] * np.log(
                 time[bu_start_ind + bu_time_ind] - time[idx]
@@ -302,11 +301,9 @@ def weighted_avg_press_time_derivative_lag1(delta_p, dspt):
     dpdspt_backward = np.hstack((0, dpdspt))

     # Make sure that the first dpdspt_backward is set ot zero. I.e. not really defined.
-    dpdspt_weighted = (
-        dpdspt_forward * dspt_backward + dpdspt_backward * dspt_forward
-    ) / (dspt_forward + dspt_backward)
-
-    return dpdspt_weighted
+    return (dpdspt_forward * dspt_backward + dpdspt_backward * dspt_forward) / (
+        dspt_forward + dspt_backward
+    )


 def weighted_avg_press_time_derivative_lag2(
@@ -360,13 +357,11 @@ def weighted_avg_press_time_derivative_lag2(
     dspt_lag2_forward[0] = 0
     dspt_lag2_backward[-1] = 0

-    dpdspt_weighted_lag2 = (
+    return (
         dpdspt_lag2_forward * dspt_lag2_backward
         + dpdspt_lag2_backward * dspt_lag2_forward
     ) / (dspt_lag2_backward + dspt_lag2_forward)

-    return dpdspt_weighted_lag2
-

 def to_csv(filen, field_list, header_list=None, start=0, end=None, sep=","):
     """
diff --git a/tests/conftest.py b/tests/conftest.py
index edcdec148..1b10d35c5 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -2,7 +2,6 @@
 from os import path

 import pytest
-
 import subscript
diff --git a/tests/test_bjobsusers.py b/tests/test_bjobsusers.py
index 01afc9229..26a91366f 100644
--- a/tests/test_bjobsusers.py
+++ b/tests/test_bjobsusers.py
@@ -3,7 +3,6 @@

 import pandas as pd
 import pytest
-
 from subscript.bjobsusers import bjobsusers
diff --git a/tests/test_casegen_upcars.py b/tests/test_casegen_upcars.py
index 0e9c3f3e6..421423ad1 100755
--- a/tests/test_casegen_upcars.py
+++ b/tests/test_casegen_upcars.py
@@ -7,7 +7,6 @@
 import opm.io
 import pandas as pd
 import pytest
-
 from subscript.casegen_upcars import casegen_upcars

 TESTDATA = "testdata_casegen_upcars"
diff --git a/tests/test_check_swatinit.py b/tests/test_check_swatinit.py
index dcbf6d309..c52cb014f 100644
--- a/tests/test_check_swatinit.py
+++ b/tests/test_check_swatinit.py
@@ -6,7 +6,6 @@
 import pandas as pd
 import pytest
 from matplotlib import pyplot
-
 from subscript.check_swatinit.check_swatinit import (
     __FINE_EQUIL__,
     __HC_BELOW_FWL__,
@@ -342,7 +341,7 @@ def test_qc_volumes(propslist, expected_dict):
     """Test that we calculate qc volumes correctly from a cell-based qc dataframe"""
     qc_frame = pd.DataFrame(propslist)
     qc_vols = qc_volumes(qc_frame)
-    for key in expected_dict.keys():
+    for key in expected_dict:
         assert np.isclose(qc_vols[key], expected_dict[key])
diff --git a/tests/test_check_swatinit_simulators.py b/tests/test_check_swatinit_simulators.py
index b135cd41b..61e0d5f31 100644
--- a/tests/test_check_swatinit_simulators.py
+++ b/tests/test_check_swatinit_simulators.py
@@ -17,7 +17,6 @@
 import pandas as pd
 import pytest
 import res2df
-
 from subscript.check_swatinit.check_swatinit import (
     __HC_BELOW_FWL__,
     __PC_SCALED__,
@@ -331,7 +330,7 @@ def test_swatinit_1_far_above_contact(simulator, tmp_path):
         cells=200, apex=1000, owc=[2000], swatinit=[1] * 200, swl=[0.1]
     )
     qc_frame = run_reservoir_simulator(simulator, biggermodel)
-    assert set(qc_frame["QC_FLAG"]) == set([__SWATINIT_1__, __WATER__])
+    assert set(qc_frame["QC_FLAG"]) == {__SWATINIT_1__, __WATER__}
     assert qc_frame[qc_frame["Z"] < 2000]["QC_FLAG"].unique()[0] == __SWATINIT_1__
     assert qc_frame[qc_frame["Z"] > 2000]["QC_FLAG"].unique()[0] == __WATER__
     if "flow" in simulator:
@@ -396,10 +395,7 @@ def test_capillary_entry_pressure(simulator, tmp_path):
     pressure, both swat and swatinit should be less than 1."""
     os.chdir(tmp_path)

-    if "flow" in simulator:
-        pc_25m_above_contact = 0.373919
-    else:
-        pc_25m_above_contact = 0.373836
+    pc_25m_above_contact = 0.373919 if "flow" in simulator else 0.373836

     model = PillarModel(
         cells=1,
@@ -426,10 +422,7 @@ def test_below_capillary_entry_pressure(simulator, tmp_path):
     """Test what we get below the capillary entry pressure"""
entry pressure""" os.chdir(tmp_path) - if "flow" in simulator: - pc_10m_above_contact = 0.150006 - else: - pc_10m_above_contact = 0.148862 + pc_10m_above_contact = 0.150006 if "flow" in simulator else 0.148862 model = PillarModel( cells=1, @@ -460,10 +453,7 @@ def test_swatinit_almost1_slightly_above_contact(simulator, tmp_path): """ os.chdir(tmp_path) - if "flow" in simulator: - p_cap = 0.37392 - else: - p_cap = 0.3738366 + p_cap = 0.37392 if "flow" in simulator else 0.3738366 model = PillarModel(cells=1, apex=1000, owc=[1030], swatinit=[0.999], swl=[0.1]) qc_frame = run_reservoir_simulator(simulator, model) diff --git a/tests/test_convert_grid_format.py b/tests/test_convert_grid_format.py index 1edeb37c5..d9c74828a 100644 --- a/tests/test_convert_grid_format.py +++ b/tests/test_convert_grid_format.py @@ -4,9 +4,8 @@ from pathlib import Path import pytest -import xtgeo - import subscript.convert_grid_format.convert_grid_format as cgf +import xtgeo from subscript import getLogger logger = getLogger(__name__) diff --git a/tests/test_csv2ofmvol.py b/tests/test_csv2ofmvol.py index 9b9e5e2b6..3106c6a1d 100644 --- a/tests/test_csv2ofmvol.py +++ b/tests/test_csv2ofmvol.py @@ -7,7 +7,6 @@ import numpy as np import pandas as pd import pytest - from subscript.csv2ofmvol import csv2ofmvol from subscript.ofmvol2csv import ofmvol2csv diff --git a/tests/test_csv_merge.py b/tests/test_csv_merge.py index 194fb2c65..978ad3fe8 100644 --- a/tests/test_csv_merge.py +++ b/tests/test_csv_merge.py @@ -5,7 +5,6 @@ import pandas as pd import pytest - from subscript.csv_merge import csv_merge try: @@ -143,7 +142,7 @@ def test_main_merge(tmp_path, mocker): csv_merge.main() merged = pd.read_csv(merged_csv) assert "FILETYPE" in merged - assert set(merged["FILETYPE"].unique()) == set([test_csv_1, test_csv_2]) + assert set(merged["FILETYPE"].unique()) == {test_csv_1, test_csv_2} @pytest.mark.parametrize( diff --git a/tests/test_csv_stack.py b/tests/test_csv_stack.py index 56ed7bfcd..84db2d527 100644 --- a/tests/test_csv_stack.py +++ b/tests/test_csv_stack.py @@ -6,7 +6,6 @@ import pandas as pd import pytest - from subscript.csv_stack import csv_stack try: @@ -146,7 +145,7 @@ def test_csv_stack_all(): dframe["WOPR:A2"] = [20, 21, 22, 23, 24, 25, 27] all_stacked = csv_stack.csv_stack(dframe, regexp, colon, col_name) assert len(all_stacked) == 28 - assert set(all_stacked["IDENTIFIER"].unique()) == set(["1", "2", "A1", "A2"]) + assert set(all_stacked["IDENTIFIER"].unique()) == {"1", "2", "A1", "A2"} @pytest.mark.integration diff --git a/tests/test_eclcompress.py b/tests/test_eclcompress.py index e8ca5ea62..977d701db 100644 --- a/tests/test_eclcompress.py +++ b/tests/test_eclcompress.py @@ -9,7 +9,6 @@ import numpy as np import opm.io import pytest - from subscript.eclcompress.eclcompress import ( compress_multiple_keywordsets, eclcompress, diff --git a/tests/test_ecldiff2roff.py b/tests/test_ecldiff2roff.py index 8433d3ead..f23a267bc 100644 --- a/tests/test_ecldiff2roff.py +++ b/tests/test_ecldiff2roff.py @@ -7,7 +7,6 @@ import numpy as np import pytest import xtgeo - from subscript import getLogger from subscript.ecldiff2roff import ecldiff2roff diff --git a/tests/test_fmu_copy_revision.py b/tests/test_fmu_copy_revision.py index 4f4d61002..6bbcabdc3 100644 --- a/tests/test_fmu_copy_revision.py +++ b/tests/test_fmu_copy_revision.py @@ -5,7 +5,6 @@ from pathlib import Path import pytest - import subscript.fmu_copy_revision.fmu_copy_revision as fcr SCRIPTNAME = "fmu_copy_revision" diff --git a/tests/test_fmuobs.py 
index 062ea410e..8d8444dfd 100644
--- a/tests/test_fmuobs.py
+++ b/tests/test_fmuobs.py
@@ -9,7 +9,6 @@
 import pandas as pd
 import pytest
 import yaml
-
 from subscript.fmuobs.fmuobs import autoparse_file, main
 from subscript.fmuobs.parsers import ertobs2df, obsdict2df, resinsight_df2df
 from subscript.fmuobs.writers import df2ertobs, df2obsdict, df2resinsight_df
@@ -146,10 +145,9 @@ def test_roundtrip_ertobs(filename, readonly_testdata_dir):
         subframe.drop(
             ["COMMENT", "SUBCOMMENT"], axis="columns", errors="ignore", inplace=True
         )
-        if _class == "BLOCK_OBSERVATION":
-            if "WELL" in subframe:
-                # WELL as used in yaml is not preservable in roundtrips
-                del subframe["WELL"]
+        if _class == "BLOCK_OBSERVATION" and "WELL" in subframe:
+            # WELL as used in yaml is not preservable in roundtrips
+            del subframe["WELL"]

         # print(roundtrip_subframe)
         # print(subframe)
diff --git a/tests/test_fmuobs_parsers.py b/tests/test_fmuobs_parsers.py
index 996ce65cd..f3d200e7e 100644
--- a/tests/test_fmuobs_parsers.py
+++ b/tests/test_fmuobs_parsers.py
@@ -8,7 +8,6 @@

 import pandas as pd
 import pytest
-
 from subscript.fmuobs.parsers import (
     INCLUDE_RE,
     OBS_ARGS_RE,
diff --git a/tests/test_fmuobs_writers.py b/tests/test_fmuobs_writers.py
index 9668f548f..370449d5c 100644
--- a/tests/test_fmuobs_writers.py
+++ b/tests/test_fmuobs_writers.py
@@ -6,7 +6,6 @@
 import numpy as np
 import pandas as pd
 import pytest
-
 from subscript.fmuobs.parsers import ertobs2df
 from subscript.fmuobs.writers import (
     block_df2obsdict,
diff --git a/tests/test_hook_implementations.py b/tests/test_hook_implementations.py
index 31d98c213..26971bf1a 100644
--- a/tests/test_hook_implementations.py
+++ b/tests/test_hook_implementations.py
@@ -4,9 +4,8 @@

 import pytest
 import rstcheck_core.checker
-from ert.shared.plugins.plugin_manager import ErtPluginManager
-
 import subscript.hook_implementations.jobs
+from ert.shared.plugins.plugin_manager import ErtPluginManager

 # pylint: disable=redefined-outer-name
@@ -97,7 +96,7 @@ def test_hook_implementations_job_docs():

     assert set(docs.keys()) == set(installable_jobs.keys())

-    for job_name in installable_jobs.keys():
+    for job_name in installable_jobs:
         desc = docs[job_name]["description"]
         assert desc != ""
         assert not list(rstcheck_core.checker.check_source(desc))
diff --git a/tests/test_interp_relperm.py b/tests/test_interp_relperm.py
index 775a6b088..a356d3f05 100644
--- a/tests/test_interp_relperm.py
+++ b/tests/test_interp_relperm.py
@@ -9,7 +9,6 @@
 from pyscal import PyscalFactory
 from pyscal.utils.testing import sat_table_str_ok
 from res2df import satfunc
-
 from subscript.interp_relperm import interp_relperm
 from subscript.interp_relperm.interp_relperm import InterpRelpermConfig
diff --git a/tests/test_merge_rft_ertobs.py b/tests/test_merge_rft_ertobs.py
index a286fc686..1e8788531 100644
--- a/tests/test_merge_rft_ertobs.py
+++ b/tests/test_merge_rft_ertobs.py
@@ -7,7 +7,6 @@
 import numpy as np
 import pandas as pd
 import pytest
-
 from subscript import getLogger
 from subscript.merge_rft_ertobs.merge_rft_ertobs import (
     get_observations,
diff --git a/tests/test_merge_unrst_files.py b/tests/test_merge_unrst_files.py
index f450f4d11..211c025d6 100644
--- a/tests/test_merge_unrst_files.py
+++ b/tests/test_merge_unrst_files.py
@@ -4,7 +4,6 @@

 import pytest
 import resfo
-
 from subscript.merge_unrst_files import merge_unrst_files

 UNRST_HIST = (
diff --git a/tests/test_ofmvol2csv.py b/tests/test_ofmvol2csv.py
index 64099326a..93dbe8012 100644
--- a/tests/test_ofmvol2csv.py
+++ b/tests/test_ofmvol2csv.py
@@ -6,7 +6,6 @@

 import pandas as pd
 import pytest
-
 from subscript.csv2ofmvol import csv2ofmvol
 from subscript.ofmvol2csv import ofmvol2csv
diff --git a/tests/test_pack_sim.py b/tests/test_pack_sim.py
index f22f78b12..9b74a46f6 100644
--- a/tests/test_pack_sim.py
+++ b/tests/test_pack_sim.py
@@ -5,7 +5,6 @@
 from pathlib import Path

 import pytest
-
 from subscript.pack_sim import pack_sim

 ECLDIR = Path(__file__).absolute().parent / "data" / "reek" / "eclipse" / "model"
diff --git a/tests/test_params2csv.py b/tests/test_params2csv.py
index 36485a412..4d54179a3 100644
--- a/tests/test_params2csv.py
+++ b/tests/test_params2csv.py
@@ -4,7 +4,6 @@

 import pandas as pd
 import pytest
-
 from subscript.params2csv import params2csv

 try:
@@ -36,7 +35,7 @@ def test_main(tmp_path, mocker):
     assert "CONSTANT" not in result
     assert "BOGUS" not in result
     assert "filename" in result
-    assert set(result["filename"].values) == set(["parameters1.txt", "parameters2.txt"])
+    assert set(result["filename"].values) == {"parameters1.txt", "parameters2.txt"}

     # Test the cleaning mode:
     mocker.patch(
@@ -69,7 +68,7 @@ def test_main(tmp_path, mocker):
     assert "CONSTANT" not in result
     assert "BOGUS" not in result
     assert "filename" in result
-    assert set(result["filename"].values) == set(["parameters1.txt", "parameters2.txt"])
+    assert set(result["filename"].values) == {"parameters1.txt", "parameters2.txt"}


 def test_spaces_in_values(tmp_path, mocker):
diff --git a/tests/test_presentvalue.py b/tests/test_presentvalue.py
index c473c82c8..aaa7be093 100644
--- a/tests/test_presentvalue.py
+++ b/tests/test_presentvalue.py
@@ -8,7 +8,6 @@
 import pytest
 import res2df
 from resdata.summary import Summary
-
 from subscript.presentvalue import presentvalue

 ECLDIR = Path(__file__).absolute().parent / "data" / "reek" / "eclipse" / "model"
@@ -285,7 +284,7 @@ def test_main(tmp_path, mocker):
     back results to parameters.txt in the original runpath"""
     shutil.copytree(
         ECLDIR,
-        tmp_path / "model"
+        tmp_path / "model",
         # This is somewhat spacious, 39M, but the test will fail
         # if you try with a symlink (presentvalue.py looks through symlinks)
     )
diff --git a/tests/test_prtvol2csv.py b/tests/test_prtvol2csv.py
index 35dbc0509..ebd5da070 100644
--- a/tests/test_prtvol2csv.py
+++ b/tests/test_prtvol2csv.py
@@ -9,7 +9,6 @@
 import pytest
 import yaml
 from fmu.tools.fipmapper.fipmapper import FipMapper
-
 from subscript.prtvol2csv import prtvol2csv

 TESTDATADIR = Path(__file__).absolute().parent / "data/reek/eclipse/model"
@@ -492,8 +491,7 @@ def test_ert_forward_model_backwards_compat_deprecation(tmp_path):
                 "RUNPATH ",
                 "",
                 (
-                    "FORWARD_MODEL PRTVOL2CSV("
-                    "=" + str(prtfile) + ")"  # noqa
+                    "FORWARD_MODEL PRTVOL2CSV(" "=" + str(prtfile) + ")"  # noqa
                 ),
             ]
         ),
diff --git a/tests/test_restartthinner.py b/tests/test_restartthinner.py
index 20e716250..b4f693fc6 100644
--- a/tests/test_restartthinner.py
+++ b/tests/test_restartthinner.py
@@ -4,7 +4,6 @@
 from pathlib import Path

 import pytest
-
 from subscript.restartthinner import restartthinner

 ECLDIR = Path(__file__).absolute().parent / "data/reek/eclipse/model"
diff --git a/tests/test_ri_wellmod.py b/tests/test_ri_wellmod.py
index 790c523a1..27d2d3b4d 100644
--- a/tests/test_ri_wellmod.py
+++ b/tests/test_ri_wellmod.py
@@ -3,7 +3,6 @@
 from pathlib import Path

 import pytest
-
 from subscript.ri_wellmod import ri_wellmod

 pytestmark = pytest.mark.xfail()
diff --git a/tests/test_rmsecl_volumetrics.py b/tests/test_rmsecl_volumetrics.py
index 1056322b9..526e9d4bd 100644
--- a/tests/test_rmsecl_volumetrics.py
+++ b/tests/test_rmsecl_volumetrics.py
@@ -6,7 +6,6 @@

 import pytest
 import yaml
 from fmu.tools.fipmapper import fipmapper
-
 from subscript.rmsecl_volumetrics.rmsecl_volumetrics import (
     _compare_volumetrics,
     _disjoint_sets_to_dict,
diff --git a/tests/test_runrms.py b/tests/test_runrms.py
index 0f1699f6e..a08ac8408 100644
--- a/tests/test_runrms.py
+++ b/tests/test_runrms.py
@@ -7,7 +7,6 @@

 import pytest
 import yaml
-
 from subscript.runrms import runrms as rr

 # the resolve().as_posix() for pytest tmp_path fixture (workaround)
diff --git a/tests/test_sector2fluxnum.py b/tests/test_sector2fluxnum.py
index 5c635165d..68cd85492 100644
--- a/tests/test_sector2fluxnum.py
+++ b/tests/test_sector2fluxnum.py
@@ -2,7 +2,6 @@
 from pathlib import Path

 import pytest
-
 from subscript.sector2fluxnum import sector2fluxnum

 TESTDATA = Path(__file__).absolute().parent / "testdata_sector2fluxnum"
diff --git a/tests/test_summaryplot.py b/tests/test_summaryplot.py
index 00f4e4d20..79003f02c 100644
--- a/tests/test_summaryplot.py
+++ b/tests/test_summaryplot.py
@@ -5,7 +5,6 @@

 import pytest
 from resdata.summary import Summary
-
 from subscript.summaryplot import summaryplot

 DATAFILE = Path(__file__).parent / "data/reek/eclipse/model/2_R001_REEK-0.DATA"
diff --git a/tests/test_sunsch.py b/tests/test_sunsch.py
index d4c78a8d0..403dc2010 100644
--- a/tests/test_sunsch.py
+++ b/tests/test_sunsch.py
@@ -7,7 +7,6 @@
 import pytest  # noqa: F401
 import yaml
 from pydantic import ValidationError
-
 from subscript.sunsch import sunsch

 DATADIR = Path(__file__).absolute().parent / "testdata_sunsch"
@@ -173,7 +172,7 @@ def test_templating(tmp_path):
             {
                 "template": "template.tmpl",
                 "days": 10,
-                "substitute": dict(WELLNAME="A-007", ORAT=200.3, GRAT=1.4e6),
+                "substitute": {"WELLNAME": "A-007", "ORAT": 200.3, "GRAT": 1.4e6},
             }
         ],
     }
@@ -190,7 +189,7 @@ def test_templating(tmp_path):
             {
                 "template": "template.tmpl",
                 "days": 10,
-                "substitute": dict(WELLNAME="A-007"),
+                "substitute": {"WELLNAME": "A-007"},
             }
         ],
     }
@@ -203,7 +202,7 @@ def test_templating(tmp_path):
     sunschconf = {
         "startdate": datetime.date(2020, 1, 1),
         "enddate": datetime.date(2021, 1, 1),
-        "insert": [{"template": "template.tmpl", "substitute": dict(WELLNAME="A-007")}],
+        "insert": [{"template": "template.tmpl", "substitute": {"WELLNAME": "A-007"}}],
     }
     sch = sunsch.process_sch_config(sunschconf)
     # sunsch logs this as an error that there is no date defined for the template.
@@ -228,7 +227,7 @@ def test_templating(tmp_path):
             {
                 "template": "empty.tmpl",
                 "days": 10,
-                "substitute": dict(WELLNAME="A-007", ORAT=200.3, GRAT=1.4e6),
+                "substitute": {"WELLNAME": "A-007", "ORAT": 200.3, "GRAT": 1.4e6},
             }
         ],
     }
diff --git a/tests/test_sw_model_utilities.py b/tests/test_sw_model_utilities.py
index 3a07b9f35..2287779ae 100644
--- a/tests/test_sw_model_utilities.py
+++ b/tests/test_sw_model_utilities.py
@@ -1,7 +1,6 @@
 import subprocess

 import pytest
-
 from subscript.sw_model_utilities import sw_model_utilities as swtool

 # This is an interactive program, so currently only a few functions are
diff --git a/tests/test_vfp2csv.py b/tests/test_vfp2csv.py
index 21d7508a3..a976a588e 100644
--- a/tests/test_vfp2csv.py
+++ b/tests/test_vfp2csv.py
@@ -4,7 +4,6 @@

 import pandas as pd
 import pytest
-
 from subscript.vfp2csv import vfp2csv
diff --git a/tests/test_welltest_dpds.py b/tests/test_welltest_dpds.py
index 7f5a3724d..d6391b680 100644
--- a/tests/test_welltest_dpds.py
+++ b/tests/test_welltest_dpds.py
@@ -6,7 +6,6 @@
 import pandas as pd
 import pytest
 from resdata.summary import Summary
-
 from subscript.welltest_dpds import welltest_dpds

 ECLDIR = Path(__file__).parent.absolute() / Path("data/welltest/eclipse/model")