From d1525afef233af64b05b89a2eec0fb3cffae6e67 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 22 Nov 2024 02:29:22 +0530
Subject: [PATCH 1/4] Use edge as the default system in run-mlperf-inference-app

---
 script/run-mlperf-inference-app/customize.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py
index edb27174f..d54978978 100644
--- a/script/run-mlperf-inference-app/customize.py
+++ b/script/run-mlperf-inference-app/customize.py
@@ -94,7 +94,7 @@ def preprocess(i):
         env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline"
 
     if env.get('CM_MLPERF_LOADGEN_ALL_SCENARIOS', '') == "yes":
-        env['CM_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios(env['CM_MODEL'], system_meta['system_type'], env['CM_MLPERF_LAST_RELEASE'], env['CM_MLPERF_INFERENCE_SOURCE'])
+        env['CM_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios(env['CM_MODEL'], system_meta.get('system_type', 'edge'), env['CM_MLPERF_LAST_RELEASE'], env['CM_MLPERF_INFERENCE_SOURCE'])
     else:
         system_meta = {}
         env['CM_MLPERF_LOADGEN_SCENARIOS'] = [ env['CM_MLPERF_LOADGEN_SCENARIO'] ]

From 42c0b6ff3f6d62913fbf34c9ecaecaa9aded741b Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 22 Nov 2024 02:58:08 +0530
Subject: [PATCH 2/4] Added use_model_from_host option for run-mlperf-inference-app

---
 script/app-mlperf-inference/_cm.yaml     | 28 ++++++++++++++++++------
 script/run-mlperf-inference-app/_cm.yaml |  1 +
 2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/script/app-mlperf-inference/_cm.yaml b/script/app-mlperf-inference/_cm.yaml
index e508e2997..70d656bba 100644
--- a/script/app-mlperf-inference/_cm.yaml
+++ b/script/app-mlperf-inference/_cm.yaml
@@ -769,36 +769,44 @@ variations:
   sdxl,nvidia-original:
     docker:
       deps:
-      - enable_if_env:
+      - enable_if_any_env:
           CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
           - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
         tags: get,ml-model,sdxl,_fp16,_rclone
 
   sdxl,reference,float16:
     docker:
       image_name: mlperf-inference-mlcommons-python-implementation-sdxl-float16
       deps:
-      - enable_if_env:
+      - enable_if_any_env:
           CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
           - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
         tags: get,ml-model,sdxl,_fp16,_rclone
 
   sdxl,reference,bfloat16:
     docker:
       image_name: mlperf-inference-mlcommons-python-implementation-sdxl-bfloat16
       deps:
-      - enable_if_env:
+      - enable_if_any_env:
           CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
           - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
         tags: get,ml-model,sdxl,_fp16,_rclone
 
   sdxl,reference,float32:
     docker:
       image_name: mlperf-inference-mlcommons-python-implementation-sdxl-float32
       deps:
-      - enable_if_env:
+      - enable_if_any_env:
           CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
           - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
         tags: get,ml-model,sdxl,_fp32,_rclone
 
   llama2-70b_:
@@ -849,9 +857,11 @@ variations:
     docker:
       image_name: mlperf-inference-mlcommons-python-implementation-llama2-70b
       deps:
-      - enable_if_env:
+      - enable_if_any_env:
           CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
           - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
         tags: get,ml-model,llama2
 
   llama2-70b_,amd:
     docker:
       image_name: mlperf-inference-mlcommons-python-implementation-llama2-70b-amd
       mounts:
       - "${{ CM_LLAMA2_FINAL_SAFE_TENSORS_PATH }}:${{ CM_LLAMA2_FINAL_SAFE_TENSORS_PATH }}"
       deps:
-      - enable_if_env:
+      - enable_if_any_env:
           CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
           - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
         tags: get,ml-model,llama2,_amd,_pytorch
 
   mixtral-8x7b:
@@ -901,9 +913,11 @@ variations:
         names:
        - ml-model
        - mixtral-model
-        enable_if_env:
+        enable_if_any_env:
           CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
           - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
       - tags: get,dataset-mixtral,openorca-mbxp-gsm8k-combined
         names:
         - openorca-mbxp-gsm8k-combined-preprocessed

diff --git a/script/run-mlperf-inference-app/_cm.yaml b/script/run-mlperf-inference-app/_cm.yaml
index d8413393a..8706ba3ae 100644
--- a/script/run-mlperf-inference-app/_cm.yaml
+++ b/script/run-mlperf-inference-app/_cm.yaml
@@ -116,6 +116,7 @@ input_mapping:
   all_models: CM_MLPERF_ALL_MODELS
   criteo_day23_raw_data_path: CM_CRITEO_DAY23_RAW_DATA_PATH
   use_dataset_from_host: CM_USE_DATASET_FROM_HOST
+  use_model_from_host: CM_USE_MODEL_FROM_HOST
 
 new_state_keys:
 - app_mlperf_inference_*

From 58cf0b8bffd7e1cc7dcb4a8b75989ff95bf5c413 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 21 Nov 2024 21:57:59 +0000
Subject: [PATCH 3/4] Update test-nvidia-mlperf-inference-implementations.yml

---
 .github/workflows/test-nvidia-mlperf-inference-implementations.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
index ae6afe5fa..465d8edae 100644
--- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
@@ -39,6 +39,6 @@ jobs:
           pip install --upgrade cm4mlops
           cm pull repo
 
-          cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --adr.submission-checker-src.tags=_branch.dev --execution_mode=valid --gpu_name=rtx_4090 --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=datacenter,edge --division=closed --docker_dt=yes --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --device=cuda --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
+          cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --adr.submission-checker-src.tags=_branch.dev --execution_mode=valid --gpu_name=rtx_4090 --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=datacenter,edge --division=closed --docker_dt=yes --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
 
           cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=main --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name

From 29e0974575c9cc1e458f8c02088fe298a06df475 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 22 Nov 2024 05:00:09 +0530
Subject: [PATCH 4/4] Fix typo in benchmark-program

---
 script/benchmark-program/customize.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py
index c19b26c1c..6b5cb6ebf 100644
--- a/script/benchmark-program/customize.py
+++ b/script/benchmark-program/customize.py
@@ -45,7 +45,7 @@ def preprocess(i):
     if os_info['platform'] != 'windows' and str(env.get('CM_SAVE_CONSOLE_LOG', True)).lower() not in [ "no", "false", "0"]:
         logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR'])
-        env['CM_RUN_CMD'] += r" 2>&1 | tee " + q+ os.path.join(logs_dir, "console.out") + q + r" echo \${PIPESTATUS[0]} > exitstatus"
+        env['CM_RUN_CMD'] += r" 2>&1 | tee " + q+ os.path.join(logs_dir, "console.out") + q + r"; echo \${PIPESTATUS[0]} > exitstatus"
 
     # additional arguments and tags for measuring system informations(only if 'CM_PROFILE_NVIDIA_POWER' is 'on')
     if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on":
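
Notes:

The enable_if_env -> enable_if_any_env switch in PATCH 2 appears to relax
each dependency gate from all-conditions-must-match to any-condition-may-match,
so the host-side model download is triggered when either the model-specific
CM_MLPERF_MODEL_*_DOWNLOAD_TO_HOST variable or the new CM_USE_MODEL_FROM_HOST
input is set to 'yes'.

PATCH 4 matters because, without the semicolon, the generated shell line read
`... | tee "console.out" echo \${PIPESTATUS[0]} > exitstatus`, handing `echo`
and the status expansion to tee as extra file arguments, so the exitstatus
file never recorded the benchmark's return code. A minimal sketch of the
corrected pattern, assuming bash (PIPESTATUS is bash-specific; `false` stands
in here for the benchmark command):

    # tee would otherwise mask the pipeline's exit code, since a pipeline
    # reports the status of its last command; PIPESTATUS[0] preserves the
    # status of the left-hand command.
    false 2>&1 | tee console.out; echo ${PIPESTATUS[0]} > exitstatus
    cat exitstatus   # prints 1: the status of `false`, not of `tee`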