diff --git a/mdbenchmark/submit.py b/mdbenchmark/submit.py
index 75f274ff..a046cc3e 100644
--- a/mdbenchmark/submit.py
+++ b/mdbenchmark/submit.py
@@ -87,7 +87,19 @@ def submit(directory, force_restart, yes):
     if not bundle:
         console.error("No benchmarks found.")
 
-    df = DataFrameFromBundle(bundle)
+    grouped_bundles = bundle.categories.groupby("started")
+    try:
+        bundles_not_yet_started = grouped_bundles[False]
+    except KeyError:
+        bundles_not_yet_started = None
+    if not bundles_not_yet_started and not force_restart:
+        console.error(
+            "All generated benchmarks were already started once. "
+            "You can force a restart with {}.",
+            "--force",
+        )
+
+    df = DataFrameFromBundle(bundle if force_restart else bundles_not_yet_started)
 
     # Reformat NaN values nicely into question marks.
     df_to_print = df.replace(np.nan, "?")
@@ -96,23 +108,12 @@ def submit(directory, force_restart, yes):
     df_short = ConsolidateDataFrame(df_to_print)
     PrintDataFrame(df_short)
 
-    # here I add the user promt to confirm the submission of the simulations
+    # Ask the user to confirm whether they want to submit the benchmarks
     if yes:
         console.info("The above benchmarks will be submitted.")
     elif not click.confirm("The above benchmarks will be submitted. Continue?"):
         console.error("Exiting. No benchmarks submitted.")
 
-    grouped_bundles = bundle.categories.groupby("started")
-    try:
-        bundles_not_yet_started = grouped_bundles[False]
-    except KeyError:
-        bundles_not_yet_started = None
-    if not bundles_not_yet_started and not force_restart:
-        console.error(
-            "All generated benchmarks were already started once. "
-            "You can force a restart with {}.",
-            "--force",
-        )
-
     # Start all benchmark simulations if a restart was requested. Otherwise
     # only start the ones that were not run yet.
     bundles_to_start = bundle