Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bump ruff to 0.9.2 #523

Merged
merged 1 commit into from
Jan 20, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v5.0.0
hooks:
- id: no-commit-to-branch
args: ['--branch', 'master']
Expand All @@ -13,7 +13,7 @@ repos:
- id: trailing-whitespace

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.5.7
rev: v0.9.2
hooks:
- id: ruff
args: [ --fix ]
Expand Down
2 changes: 1 addition & 1 deletion res2df/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -476,7 +476,7 @@ def fill_reverse_parser(
"""
parser.add_argument(
"csvfile",
help="Name of CSV file with " + modulename + " data with " "res2df format",
help="Name of CSV file with " + modulename + " data with res2df format",
)
parser.add_argument(
"-o",
Expand Down
9 changes: 3 additions & 6 deletions res2df/compdat.py
Original file line number Diff line number Diff line change
Expand Up @@ -615,8 +615,7 @@ def expand_wlist(wlist_df: pd.DataFrame) -> pd.DataFrame:
and wlist_record["NAME"] not in currentstate
):
raise ValueError(
"WLIST ADD/DEL only works on existing well lists: "
f"{str(wlist_record)}"
f"WLIST ADD/DEL only works on existing well lists: {str(wlist_record)}"
)
if wlist_record["ACTION"] == "ADD":
currentstate[wlist_record["NAME"]] = " ".join(
Expand Down Expand Up @@ -888,14 +887,12 @@ def applywelopen(
].drop_duplicates(subset=["I", "J", "K1", "K2"], keep="last")
else:
raise ValueError(
"A WELOPEN keyword contains data that could not be parsed. "
f"\n {row} "
f"A WELOPEN keyword contains data that could not be parsed. \n {row} "
)

if previous_state.empty:
raise ValueError(
"A WELOPEN keyword is not acting on any existing connection. "
f"\n {row} "
f"A WELOPEN keyword is not acting on any existing connection. \n {row} "
)

new_state = previous_state
Expand Down
3 changes: 1 addition & 2 deletions res2df/csv2res.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,8 +90,7 @@ def get_parser() -> argparse.ArgumentParser:
"satfunc",
help="Write saturation function include files",
description=(
"Write saturation function include files from CSV files with "
"res2df format."
"Write saturation function include files from CSV files with res2df format."
),
)
satfunc_fill_reverse_parser(satfunc_parser)
Expand Down
2 changes: 1 addition & 1 deletion res2df/equil.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,7 +298,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"-o",
"--output",
type=str,
help=("Name of output csv file. " "Use '-' for stdout."),
help=("Name of output csv file. Use '-' for stdout."),
default="equil.csv",
)
parser.add_argument(
Expand Down
5 changes: 1 addition & 4 deletions res2df/grid.py
Original file line number Diff line number Diff line change
Expand Up @@ -686,10 +686,7 @@ def df2res(

if "GLOBAL_INDEX" not in grid_df:
logger.warning(
(
"Global index not found in grid dataframe. "
"Assumes all cells are active"
)
("Global index not found in grid dataframe. Assumes all cells are active")
)
# Drop NaN rows for columns to be used (triggered by stacked
# dates and no global index, unlikely)
Expand Down
4 changes: 2 additions & 2 deletions res2df/gruptree.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ def df(
# Flags which will tell when a new network related keyword
# has been encountered
keywords = ["GRUPTREE", "BRANPROP", "WELSPECS", "GRUPNET", "NODEPROP"]
found_keywords = {key: False for key in keywords}
found_keywords = dict.fromkeys(keywords, False)
for kword in deck:
if kword.name in ["DATES", "START", "TSTEP"]:
# Whenever we encounter a new DATES, it means that
Expand All @@ -107,7 +107,7 @@ def df(
edgerecords += _write_edgerecords(
currentedges, nodedata, wellspecsedges, found_keywords, date
)
found_keywords = {key: False for key in keywords}
found_keywords = dict.fromkeys(keywords, False)
# Done dumping the data for the previous date, parse the fresh
# date:
if kword.name in ["DATES", "START"]:
Expand Down
6 changes: 3 additions & 3 deletions res2df/satfunc.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,9 +144,9 @@ def interpolate_defaults(dframe: pd.DataFrame) -> pd.DataFrame:
can consist of multiple SATNUMs.
"""
sat_cols: set = {"SW", "SO", "SG", "SL"}.intersection(dframe.columns)
assert (
len(sat_cols) == 1
), f"Could not determine a single saturation column in {dframe.columns}"
assert len(sat_cols) == 1, (
f"Could not determine a single saturation column in {dframe.columns}"
)
sat_col = list(sat_cols)[0]

if dframe[sat_col].isna().any():
Expand Down
4 changes: 1 addition & 3 deletions res2df/summary.py
Original file line number Diff line number Diff line change
Expand Up @@ -761,9 +761,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"-o",
"--output",
type=str,
help=(
"Name of output file. Use '-' to write to stdout. " "Default 'summary.csv'"
),
help=("Name of output file. Use '-' to write to stdout. Default 'summary.csv'"),
default="summary.csv",
)
parser.add_argument("--arrow", action="store_true", help="Write to pyarrow format")
Expand Down
Loading