updated tip for reference implementation #1912

Merged · 1 commit · Nov 9, 2024
main.py — 32 changes: 31 additions & 1 deletion
@@ -237,12 +237,19 @@ def mlperf_inference_implementation_readme(
            extra_docker_input_string,
        )

        common_info = get_common_info(
            spaces + 16,
            implementation
        )

        if (
            execution_env == "Native"
        ):  # Native implementation steps through virtual environment
            content += f"{cur_space3}####### Setup a virtual environment for Python\n"
            content += get_venv_command(spaces + 16)
            content += f"{cur_space3}####### Performance Estimation for Offline Scenario\n"

            content += common_info

            content += setup_run_cmd.replace(
                "--docker ", "")
@@ -258,6 +265,9 @@ def mlperf_inference_implementation_readme(
                device,
                setup_tips,
            )

            content += common_info

            content += docker_info

            content += setup_run_cmd
@@ -475,6 +485,23 @@ def get_venv_command(spaces):
{pre_space}export CM_SCRIPT_EXTRA_CMD=\"--adr.python.name=mlperf\"
{pre_space}```\n"""

# contains run command information which is common to both docker and native runs
def get_common_info(spaces, implementation):
    info = ""
    pre_space = ""
    for i in range(1, spaces):
        pre_space = pre_space + " "
    pre_space += " "
    # pre_space = " "
    info += f"\n{pre_space}!!! tip\n\n"
    info += f"{pre_space}    - The batch size can be adjusted using `--batch_size=#`, where `#` is the desired batch size. This option works only if the implementation in use supports the given batch size.\n\n"
    if implementation.lower() == "reference":
        info += f"{pre_space}    - Add `--adr.mlperf-implementation.tags=_branch.master,_repo.<CUSTOM_INFERENCE_REPO_LINK>` if you are modifying the official MLPerf Inference implementation in a custom fork.\n\n"
        info += f"{pre_space}    - Add `--adr.inference-src.tags=_repo.<CUSTOM_INFERENCE_REPO_LINK>` if you are modifying the model config accuracy script of the submission checker in a custom fork.\n\n"
        info += f"{pre_space}    - Add `--adr.inference-src.version=custom` if you are using modified MLPerf Inference code or an accuracy script for the submission checker from a custom fork.\n\n"

    return info

def get_docker_info(spaces, model, implementation,
                    device, setup_tips=True):
    info = ""
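For reference, here is a minimal usage sketch (not part of this diff) of the new helper; it assumes main.py is importable, and the expected output is inferred from the f-strings above:

```python
# Hypothetical usage sketch of the get_common_info helper added above.
from main import get_common_info  # assumes main.py is on the import path

tip = get_common_info(spaces=16, implementation="reference")
print(tip)
# Prints an MkDocs-style "!!! tip" admonition indented by 16 spaces,
# containing the batch-size bullet plus the three reference-only bullets.
```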
@@ -489,7 +516,6 @@ def get_docker_info(spaces, model, implementation,
    if model == "sdxl":
        info += f"{pre_space}    - The `--env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes` option can be used to download the model on the host so that it can be reused across different container launches.\n\n"

    info += f"{pre_space}    - Batch size could be adjusted using `--batch_size=#`, where `#` is the desired batch size. This option works only if the implementation in use is supporting the given batch size.\n\n"
    if implementation.lower() == "nvidia":
        info += f"{pre_space}    - The default batch size is assigned based on [GPU memory](https://github.com/mlcommons/cm4mlops/blob/dd0c35856969c68945524d5c80414c615f5fe42c/script/app-mlperf-inference-nvidia/_cm.yaml#L1129) or the [specified GPU](https://github.com/mlcommons/cm4mlops/blob/dd0c35856969c68945524d5c80414c615f5fe42c/script/app-mlperf-inference-nvidia/_cm.yaml#L1370). Please expand the more options for *docker launch* or *run command* to see how to specify the GPU name.\n\n"
        info += f"{pre_space}    - When run with `--all_models=yes`, all the benchmark models of the NVIDIA implementation can be executed within the same container.\n\n"
@@ -501,6 +527,10 @@
        info += f"\n{pre_space}!!! tip\n\n"
        info += f"{pre_space}    - The `--env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes` option can be used to download the model on the host so that it can be reused across different container launches.\n\n"

    # return an empty string if nothing was added inside the tip
    if info == f"\n{pre_space}!!! tip\n\n":
        return ""

    return info

def get_readme_prefix(spaces, model, implementation, extra_variation_tags):
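A similar sketch (also not part of this diff) of the new empty-tip guard in `get_docker_info`: assuming the hidden portion of the else branch follows the visible pattern, with `setup_tips=False` and a model other than `sdxl` no bullets are appended, so the function now returns an empty string rather than a bare `!!! tip` header. The model and device values below are placeholders:

```python
# Hypothetical check of the empty-tip guard added above.
from main import get_docker_info  # assumes main.py is on the import path

out = get_docker_info(16, "resnet50", "reference", "cpu", setup_tips=False)
assert out == ""  # the dangling "!!! tip" header is stripped
```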