Add --cpu flag to generate #69

Merged
merged 6 commits on Jul 25, 2018
2 changes: 1 addition & 1 deletion README.rst
@@ -14,7 +14,7 @@
.. image:: https://travis-ci.org/bio-phys/MDBenchmark.svg?branch=develop
:target: https://travis-ci.org/bio-phys/MDBenchmark

.. image:: https://codecov.io/gh/bio-phys/MDBenchmark/branch/master/graph/badge.svg
.. image:: https://codecov.io/gh/bio-phys/MDBenchmark/branch/develop/graph/badge.svg
:target: https://codecov.io/gh/bio-phys/MDBenchmark

.. image:: https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square
1 change: 1 addition & 0 deletions changelog/69.feature
@@ -0,0 +1 @@
`mdbenchmark generate` now accepts `--cpu` / `--no-cpu` and `--gpu` / `--no-gpu`. The default is `--cpu` and `--no-gpu`.
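
As an illustrative sketch of the new flag combinations (not part of the PR itself; it drives the CLI the same way the tests below do, and the module/host/input values are placeholders borrowed from those tests):

    from click.testing import CliRunner

    from mdbenchmark import cli

    # As in the tests, a matching input file (e.g. protein.tpr) must exist
    # in the working directory before generate will succeed.
    runner = CliRunner()

    # CPU-only benchmarks (the default, equivalent to passing --cpu):
    runner.invoke(cli.cli, ['generate', '--name=protein', '--module=gromacs/2016',
                            '--host=draco', '--max-nodes=4'])
    # GPU-only benchmarks:
    runner.invoke(cli.cli, ['generate', '--name=protein', '--module=gromacs/2016',
                            '--host=draco', '--max-nodes=4', '--gpu', '--no-cpu'])
    # CPU and GPU benchmarks in a single call:
    runner.invoke(cli.cli, ['generate', '--name=protein', '--module=gromacs/2016',
                            '--host=draco', '--max-nodes=4', '--cpu', '--gpu'])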
81 changes: 54 additions & 27 deletions mdbenchmark/generate.py
@@ -49,6 +49,15 @@ def validate_module(ctx, param, module=None):
return module


def validate_cpu_gpu_flags(cpu, gpu):
"""Validate that either the CPU or GPU flag is set to True.
"""
if not (cpu or gpu):
raise click.BadParameter(
'You must select either CPUs or GPUs to run the benchmarks on.',
param_hint='"--cpu" / "--gpu"')
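
# Quick illustration (mirrors test_validate_cpu_gpu_flags added below; not
# part of the diff itself): only the all-off combination fails.
#   validate_cpu_gpu_flags(cpu=True, gpu=False)   # OK, returns None
#   validate_cpu_gpu_flags(cpu=False, gpu=True)   # OK
#   validate_cpu_gpu_flags(cpu=True, gpu=True)    # OK
#   validate_cpu_gpu_flags(cpu=False, gpu=False)  # raises click.BadParameter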


def validate_number_of_nodes(min_nodes, max_nodes):
"""Validate that the minimal number of nodes is smaller than the maximal
number.
@@ -101,9 +110,16 @@ def validate_hosts(ctx, param, host=None):
'--name',
help='Name of input files. All files must have the same base name.',
callback=validate_name)
@click.option(
'-c',
'--cpu/--no-cpu',
is_flag=True,
help='Use CPUs for benchmark.',
default=True,
show_default=True)
@click.option(
'-g',
'--gpu',
'--gpu/--no-gpu',
is_flag=True,
help='Use GPUs for benchmark.',
show_default=True)
@@ -148,9 +164,12 @@ def validate_hosts(ctx, param, host=None):
help='Skip the validation of module names.',
default=False,
is_flag=True)
def generate(name, gpu, module, host, min_nodes, max_nodes, time,
def generate(name, cpu, gpu, module, host, min_nodes, max_nodes, time,
skip_validation):
"""Generate benchmarks simulations from the CLI."""
# Validate the CPU and GPU flags
validate_cpu_gpu_flags(cpu, gpu)

# Validate the number of nodes
validate_number_of_nodes(min_nodes=min_nodes, max_nodes=max_nodes)

@@ -173,34 +192,42 @@ def generate(name, gpu, module, host, min_nodes, max_nodes, time,
# Here we detect the MD engine (supported: GROMACS and NAMD).
engine = mdengines.detect_md_engine(m)

directory = '{}_{}'.format(host, m)
gpu_string = ''
if gpu:
directory += '_gpu'
gpu_string = ' with GPUs'

# Check if all needed files exist. Throw an error if they do not.
engine.check_input_file_exists(name)

console.info('Creating benchmark system for {}.', m + gpu_string)
number_of_benchmarks = (len(module) * (max_nodes + 1 - min_nodes))
run_time_each = '{} minutes'.format(time)
console.info(
'Creating a total of {} benchmarks, with a run time of {} each.',
number_of_benchmarks, run_time_each)

base_directory = dtr.Tree(directory)
for n in range(min_nodes, max_nodes + 1):
write_benchmark(
engine=engine,
base_directory=base_directory,
template=template,
nodes=n,
gpu=gpu,
module=m,
name=name,
host=host,
time=time)
gpu_cpu = {'cpu': cpu, 'gpu': gpu}
for pu, state in sorted(gpu_cpu.items()):
if not state:
continue

directory = '{}_{}'.format(host, m)

gpu = False
gpu_string = ''
if pu == 'gpu':
gpu = True
directory += '_gpu'
gpu_string = ' with GPUs'

console.info('Creating benchmark system for {}.', m + gpu_string)
number_of_benchmarks = (len(module) * (max_nodes + 1 - min_nodes))
run_time_each = '{} minutes'.format(time)
console.info(
'Creating a total of {} benchmarks, with a run time of {} each.',
number_of_benchmarks, run_time_each)

base_directory = dtr.Tree(directory)
for n in range(min_nodes, max_nodes + 1):
write_benchmark(
engine=engine,
base_directory=base_directory,
template=template,
nodes=n,
gpu=gpu,
module=m,
name=name,
host=host,
time=time)

# Provide some output for the user
console.info('Finished generating all benchmarks.\n'
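A minimal standalone sketch of the new per-processing-unit loop above, reduced to just the directory-naming logic (the host and module values are illustrative; in the PR the directory string is handed to dtr.Tree):

    host, module = 'draco', 'gromacs/2016'     # illustrative values
    gpu_cpu = {'cpu': True, 'gpu': True}       # both flags enabled
    for pu, state in sorted(gpu_cpu.items()):  # 'cpu' sorts before 'gpu'
        if not state:
            continue
        directory = '{}_{}'.format(host, module)
        if pu == 'gpu':
            directory += '_gpu'
        print(directory)
    # Prints:
    #   draco_gromacs/2016
    #   draco_gromacs/2016_gpu

So a single generate call with both flags enabled produces two benchmark directory trees, the GPU one suffixed with _gpu.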
100 changes: 86 additions & 14 deletions mdbenchmark/tests/test_generate.py
@@ -19,14 +19,15 @@
# along with MDBenchmark. If not, see <http://www.gnu.org/licenses/>.
import os

import pytest
from click import exceptions

import pytest
from mdbenchmark import cli
from mdbenchmark.ext.click_test import cli_runner
from mdbenchmark.generate import (NAMD_WARNING, print_known_hosts,
validate_hosts, validate_module,
validate_name, validate_number_of_nodes)
validate_cpu_gpu_flags, validate_hosts,
validate_module, validate_name,
validate_number_of_nodes)
from mdbenchmark.mdengines import SUPPORTED_ENGINES

DIR_STRUCTURE = {
@@ -58,11 +59,34 @@ def exit(self):


@pytest.fixture
def generate_output():
return 'Creating benchmark system for {} with GPUs.\n' \
'Creating a total of 4 benchmarks, with a run time of 15' \
' minutes each.\nFinished generating all benchmarks.\nYou can' \
' now submit the jobs with mdbenchmark submit.\n'
def generate_output_create():
def _output(gpu=True, n_benchmarks=4, runtime=15):
gpu_string = '{}'
if gpu:
gpu_string = '{} with GPUs'

return 'Creating benchmark system for {}.\n' \
'Creating a total of {} benchmarks, with a run time of {}' \
' minutes each.\n'.format(gpu_string, n_benchmarks, runtime)

return _output


@pytest.fixture
def generate_output_finish():
return 'Finished generating all benchmarks.\nYou can' \
' now submit the jobs with mdbenchmark submit.\n'


@pytest.fixture
def generate_output(generate_output_create, generate_output_finish):
def _output(gpu=True, n_benchmarks=4, runtime=15):
create_string = generate_output_create(
gpu=gpu, n_benchmarks=n_benchmarks, runtime=runtime)
finish_string = generate_output_finish
return create_string + finish_string

return _output


@pytest.mark.parametrize('module, extensions',
@@ -75,9 +99,37 @@ def test_generate_simple_input(cli_runner, generate_output, module, extensions,
for ext in extensions:
open('protein.{}'.format(ext), 'a').close()

output = generate_output.format(module)
output = generate_output().format(module)
output = 'WARNING Cannot locate modules available on this host. ' \
'Not performing module name validation.\n' + output
if 'namd' in module:
output = NAMD_WARNING_FORMATTED + output

# Test that we get a warning, if no module name validation is performed.
result = cli_runner.invoke(cli.cli, [
'generate', '--module={}'.format(module), '--host=draco',
'--max-nodes=4', '--gpu', '--no-cpu', '--name=protein'
])
assert result.exit_code == 0
assert result.output == output


@pytest.mark.parametrize('module, extensions',
[('gromacs/2016', ['tpr']),
('namd/11', ['namd', 'pdb', 'psf'])])
def test_generate_simple_input_with_cpu_gpu(cli_runner, generate_output_create,
generate_output_finish, module,
extensions, tmpdir):
"""Test that we can generate benchmarks for CPUs and GPUs at once."""
with tmpdir.as_cwd():
for ext in extensions:
open('protein.{}'.format(ext), 'a').close()

output = generate_output_create(gpu=False).format(module)
output = 'WARNING Cannot locate modules available on this host. ' \
'Not performing module name validation.\n' + output
output += generate_output_create(gpu=True).format(module)
output += generate_output_finish
if 'namd' in module:
output = NAMD_WARNING_FORMATTED + output

@@ -100,7 +152,7 @@ def test_generate_simple_input_with_working_validation(
for ext in extensions:
open('protein.{}'.format(ext), 'a').close()

output = generate_output.format(module)
output = generate_output().format(module)
if 'namd' in module:
output = NAMD_WARNING_FORMATTED + output

Expand All @@ -111,7 +163,7 @@ def test_generate_simple_input_with_working_validation(
# Test that we get a warning, if no module name validation is performed.
result = cli_runner.invoke(cli.cli, [
'generate', '--module={}'.format(module), '--host=draco',
'--max-nodes=4', '--gpu', '--name=protein'
'--max-nodes=4', '--gpu', '--no-cpu', '--name=protein'
])
assert result.exit_code == 0
assert result.output == output
@@ -131,14 +183,15 @@ def test_generate_skip_validation(cli_runner, module, extensions,
monkeypatch.setattr('mdbenchmark.mdengines.get_available_modules',
lambda: {'gromacs': ['2016'], 'namd': ['11']})

output = generate_output.format(module)
output = generate_output().format(module)
output = 'WARNING Not performing module name validation.\n' + output
if 'namd' in module:
output = NAMD_WARNING_FORMATTED + output

result = cli_runner.invoke(cli.cli, [
'generate', '--module={}'.format(module), '--host=draco',
'--max-nodes=4', '--gpu', '--name=protein', '--skip-validation'
'--max-nodes=4', '--gpu', '--no-cpu', '--name=protein',
'--skip-validation'
])
assert result.exit_code == 0
assert result.output == output
@@ -194,7 +247,8 @@ def test_generate_odd_number_of_nodes(cli_runner, engine, module, extensions,

result = cli_runner.invoke(cli.cli, [
'generate', '--module={}'.format(module), '--host=draco',
'--min-nodes=6', '--max-nodes=8', '--gpu', '--name=protein'
'--min-nodes=6', '--max-nodes=8', '--gpu', '--no-cpu',
'--name=protein'
])
assert result.exit_code == 0
assert result.output == output
@@ -332,6 +386,24 @@ def test_validate_generate_module(ctx_mock):
assert validate_module(ctx_mock, None, 'gromacs/123') == 'gromacs/123'


def test_validate_cpu_gpu_flags():
"""Test that the validate_cpu_gpu_flags function works as expected."""

with pytest.raises(exceptions.BadParameter) as error:
validate_cpu_gpu_flags(
cpu=False,
gpu=False,
)

assert str(
error.value
) == 'You must select either CPUs or GPUs to run the benchmarks on.'

assert validate_cpu_gpu_flags(cpu=True, gpu=False) is None
assert validate_cpu_gpu_flags(cpu=False, gpu=True) is None
assert validate_cpu_gpu_flags(cpu=True, gpu=True) is None


def test_validate_generate_number_of_nodes():
"""Test that the validate_generate_number_of_nodes function works as expected."""
