Skip to content

Commit 9c070fd

Browse files
authored
Migrating changes from inference pr2162 + compliance run code changes-automotive (#452)
* migrating changes from inference pr2162
* update workflow file path
* rename master to main
* add round parameter
* corrected the logic
* adr given empty dictionary as the default value
* add skip calibration check
* skip calibration.md file check
* code change for supporting compliance runs
* create dummy calibration.md file when generating submission
* supply preprocessed dataset env variables to higher level scripts
1 parent 2395e55 commit 9c070fd

File tree

10 files changed

+220
-23
lines changed

10 files changed

+220
-23
lines changed

.github/workflows/test-mlperf-inference-submission-generation.yml

Lines changed: 49 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,12 @@
22

33
name: MLC based MLPerf Inference Submission Generation
44

5+
56
on:
67
pull_request:
7-
branches: [ "master", "dev" ]
8+
branches: [ "main", "dev" ]
89
paths:
9-
- '.github/workflows/mlperf-submission.yml'
10+
- '.github/workflows/test-mlperf-inference-submission-generation.yml'
1011
- '**'
1112
- '!**.md'
1213
workflow_call:
@@ -28,12 +29,44 @@ jobs:
2829
python-version: [ "3.12" ]
2930
division: ["closed", "open", "closed-open"]
3031
category: ["datacenter", "edge"]
32+
round: ["4.1", "5.0"]
3133
case: ["closed"]
3234
action: ["run", "docker"]
3335
exclude:
3436
- os: macos-latest
3537
- os: windows-latest
3638
- category: "edge"
39+
- round: "4.1"
40+
- case: "closed-edge"
41+
include:
42+
- os: ubuntu-latest
43+
python-version: "3.12"
44+
division: "closed"
45+
category: "edge"
46+
round: "5.0"
47+
case: "closed-edge"
48+
action: "run"
49+
- os: ubuntu-latest
50+
python-version: "3.12"
51+
division: "closed"
52+
category: "edge"
53+
round: "5.0"
54+
case: "closed-edge"
55+
action: "docker"
56+
- os: ubuntu-latest
57+
python-version: "3.12"
58+
division: "open"
59+
category: "edge"
60+
round: "5.0"
61+
case: "closed-edge"
62+
action: "run"
63+
- os: ubuntu-latest
64+
python-version: "3.12"
65+
division: "open"
66+
category: "edge"
67+
round: "5.0"
68+
case: "closed-edge"
69+
action: "docker"
3770

3871
steps:
3972
- uses: actions/checkout@v3
@@ -55,8 +88,7 @@ jobs:
5588
- name: Pull repo where test cases are uploaded
5689
run: |
5790
git clone -b submission-generation-examples https://github.com/mlcommons/inference.git submission_generation_examples
58-
- name: Run Submission Generation - ${{ matrix.case }} ${{ matrix.action }} ${{ matrix.category }} ${{ matrix.division }}
59-
continue-on-error: true
91+
- name: Run Submission Generation - round-${{ matrix.round }}${{ matrix.case }} ${{ matrix.action }} ${{ matrix.category }} ${{ matrix.division }}
6092
run: |
6193
if [ "${{ matrix.case }}" == "closed" ]; then
6294
description="Test submission - contains closed edge and datacenter"
@@ -65,10 +97,18 @@ jobs:
6597
fi
6698
# Dynamically set the log group to simulate a dynamic step name
6799
echo "::group::$description"
68-
if [ -z "${{ inputs.repo-url }}" ]; then
69-
mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v5.0 --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_tests/${{ matrix.case }}/ --run-checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }}
70-
mlc ${{ matrix.action }} script --tags=run,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/submissions --submission_tar_file=mysubmission.tar.gz --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }}
100+
101+
docker_tags=""
102+
if [ "${{ matrix.action }}" == "docker" ] && [ -z "${{ inputs.repo-url }}" ]; then
103+
docker_tags="--docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }}"
104+
fi
105+
106+
if [ -n "${{ inputs.repo-url }}" ]; then
107+
mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v${{ matrix.round }} --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_examples/submission_round_${{ matrix.round }}/${{ matrix.case }}/ --run_checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} --skip_calibration_check=yes $docker_tags
108+
mlc ${{ matrix.action }} script --tags=run,inference,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/mlperf_submission --submission_tar_file=mysubmission.tar.gz --quiet --src_version=v${{ matrix.round }} --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} --skip_calibration_check=yes $docker_tags
71109
else
72-
mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v5.0 --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_tests/${{ matrix.case }}/ --run-checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet
73-
mlc ${{ matrix.action }} script --tags=run,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/submissions --submission_tar_file=mysubmission.tar.gz
110+
mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v${{ matrix.round }} --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_examples/submission_round_${{ matrix.round }}/${{ matrix.case }}/ --run_checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet --skip_calibration_check=yes $docker_tags
111+
mlc ${{ matrix.action }} script --tags=run,inference,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/mlperf_submission --submission_tar_file=mysubmission.tar.gz --quiet --src_version=v${{ matrix.round }} --skip_calibration_check=yes $docker_tags
74112
fi
113+
114+
echo "::endgroup::"

automation/script/docker.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -274,7 +274,7 @@ def docker_run(self_module, i):
274274
env.setdefault('MLC_DOCKER_CACHE', docker_cache)
275275

276276
image_repo = i.get('docker_image_repo', '')
277-
add_deps_recursive = i.get('add_deps_recursive')
277+
add_deps_recursive = i.get('add_deps_recursive', {})
278278

279279
# Ensure Docker is available
280280
r = self_module.action_object.access(

script/app-mlperf-automotive-mlcommons-python/meta.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ env_key_mappings:
7676
new_env_keys:
7777
- MLC_MLPERF_*
7878
- MLC_DATASET_*
79+
- MLC_PREPROCESSED_DATASET_*
7980
- MLC_HW_NAME
8081
- MLC_COGNATA_ACCURACY_DUMP_FILE
8182
- MLC_OUTPUT_PREDICTIONS_PATH

script/app-mlperf-automotive/customize.py

Lines changed: 113 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,33 @@ def postprocess(i):
5959
model = env['MLC_MODEL']
6060
model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model)
6161

62+
if mode == "accuracy" or mode == "compliance" and env[
63+
'MLC_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01":
64+
65+
out_baseline_accuracy_string = f"""> {q}{os.path.join(output_dir, "accuracy", "baseline_accuracy.txt")}{q} """
66+
out_compliance_accuracy_string = f"""> {q}{os.path.join(output_dir, "accuracy", "compliance_accuracy.txt")}{q} """
67+
68+
if model == "ssd":
69+
accuracy_filename = "accuracy_cognata.py"
70+
accuracy_file_path = os.path.join(
71+
env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'], accuracy_filename)
72+
dataset_args = f""" --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --config baseline_8MP_ss_scales_fm1_5x5_all """
73+
accuracy_log_file_option_name = " --mlperf-accuracy-file "
74+
75+
if model == "bevformer":
76+
accuracy_filename = "accuracy_nuscenes_cpu.py"
77+
accuracy_file_path = os.path.join(
78+
env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'], accuracy_filename)
79+
dataset_args = f""" --nuscenes-dir {env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH']} --config {os.path.join(env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'], "projects" + "configs" + "bevformer" + "bevformer_tiny.py")} """
80+
accuracy_log_file_option_name = " --mlperf-accuracy-file "
81+
82+
if model == "deeplabv3plus":
83+
accuracy_filename = "accuracy_cognata.py"
84+
accuracy_file_path = os.path.join(
85+
env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'], accuracy_filename)
86+
dataset_args = f""" --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} """
87+
accuracy_log_file_option_name = " --mlperf-accuracy-file "
88+
6289
scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
6390

6491
if not os.path.exists(output_dir) or not os.path.exists(
@@ -357,6 +384,92 @@ def postprocess(i):
357384
with open("README-extra.md", "w") as fp:
358385
fp.write(extra_readme)
359386

387+
elif mode == "compliance":
388+
test = env.get("MLC_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
389+
390+
RESULT_DIR = os.path.split(output_dir)[0]
391+
COMPLIANCE_DIR = output_dir
392+
OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR)
393+
394+
SCRIPT_PATH = os.path.join(
395+
env['MLC_MLPERF_INFERENCE_SOURCE'],
396+
"compliance",
397+
test,
398+
"run_verification.py")
399+
if test == "TEST06":
400+
cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {q}{SCRIPT_PATH}{q} -c {q}{COMPLIANCE_DIR}{q} -o {q}{OUTPUT_DIR}{q} --scenario {scenario} --dtype int32"""
401+
else:
402+
cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {q}{SCRIPT_PATH}{q} -r {q}{RESULT_DIR}{q} -c {q}{COMPLIANCE_DIR}{q} -o {q}{OUTPUT_DIR}{q}"""
403+
404+
logger.info(cmd)
405+
os.system(cmd)
406+
407+
if test == "TEST01":
408+
409+
run_script_input = i['run_script_input']
410+
automation = i['automation']
411+
412+
SCRIPT_PATH = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "compliance", test,
413+
"create_accuracy_baseline.sh")
414+
TEST01_DIR = os.path.join(OUTPUT_DIR, "TEST01")
415+
OUTPUT_DIR = os.path.join(OUTPUT_DIR, "TEST01", "accuracy")
416+
if not os.path.exists(OUTPUT_DIR):
417+
os.makedirs(OUTPUT_DIR)
418+
419+
ACCURACY_DIR = os.path.join(RESULT_DIR, "accuracy")
420+
if not os.path.exists(ACCURACY_DIR):
421+
logger.warning("Accuracy run not yet completed")
422+
return {
423+
'return': 1, 'error': 'TEST01 needs accuracy run to be completed first'}
424+
425+
cmd = f"""cd {q}{TEST01_DIR}{q} && bash {q}{SCRIPT_PATH}{q} {q}{os.path.join(ACCURACY_DIR, "mlperf_log_accuracy.json")}{q} {q}{os.path.join(COMPLIANCE_DIR, "mlperf_log_accuracy.json")}{q} """
426+
env['CMD'] = cmd
427+
logger.info(cmd)
428+
r = automation.run_native_script(
429+
{'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'})
430+
if r['return'] > 0:
431+
return r
432+
433+
verify_accuracy_file = os.path.join(
434+
TEST01_DIR, "verify_accuracy.txt")
435+
with open(verify_accuracy_file, 'r') as file:
436+
data = file.read().replace('\n', '\t')
437+
438+
if 'TEST PASS' not in data:
439+
logger.warning(
440+
"\nDeterministic TEST01 failed... Trying with non-determinism.\n")
441+
# #Normal test failed, trying the check with non-determinism
442+
443+
baseline_accuracy_file = os.path.join(
444+
TEST01_DIR, "mlperf_log_accuracy_baseline.json")
445+
CMD = f"""cd {q}{ACCURACY_DIR}{q} && {q}{env['MLC_PYTHON_BIN_WITH_PATH']}{q} {q}{accuracy_filepath}{q} \
446+
{accuracy_log_file_option_name} {q}{baseline_accuracy_file}{q} {dataset_args} {datatype_option} {out_baseline_accuracy_string} """
447+
448+
env['CMD'] = CMD
449+
r = automation.run_native_script(
450+
{'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'})
451+
if r['return'] > 0:
452+
return r
453+
454+
if os.stat(baseline_accuracy_file).st_size == 0:
455+
return {'return': 1,
456+
'error': f"{baseline_accuracy_file} is empty"}
457+
458+
CMD = f"""cd {q}{ACCURACY_DIR}{q} && {q}{env['MLC_PYTHON_BIN_WITH_PATH']}{q} {q}{accuracy_filepath}{q} \
459+
{accuracy_log_file_option_name} {q}{os.path.join(TEST01_DIR, "mlperf_log_accuracy.json")}{q} {dataset_args} {datatype_option} \
460+
{out_compliance_accuracy_string} """
461+
462+
env['CMD'] = CMD
463+
r = automation.run_native_script(
464+
{'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'})
465+
if r['return'] > 0:
466+
return r
467+
import submission_checker as checker
468+
is_valid = checker.check_compliance_perf_dir(
469+
COMPLIANCE_DIR) if test != "TEST06" else True
470+
state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME']
471+
][model][scenario][test] = "passed" if is_valid else "failed"
472+
360473
if state.get(
361474
'abtf-inference-implementation') and state['abtf-inference-implementation'].get('version_info'):
362475
env['MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join(

script/generate-mlperf-inference-submission/customize.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -166,6 +166,21 @@ def generate_submission(env, state, inp, submission_division, logger):
166166
if not os.path.isdir(path_submission):
167167
os.makedirs(path_submission)
168168

169+
# Save empty calibration.md file in the root directory and make it
170+
# available for the submitters to fill
171+
try:
172+
calibration_readme_path = os.path.join(
173+
path_submission, "calibration.md")
174+
with open(calibration_readme_path, "w") as fp:
175+
fp.write("MLPerf Inference Calibration and Quantization Details\n")
176+
logger.info(
177+
f"Created calibration.md file at {calibration_readme_path}")
178+
except Exception as e:
179+
logger.error(f"Error creating calibration.md file: {e}")
180+
return {'return': 1, 'error': f"Error creating calibration.md file: {e}"}
181+
182+
logger.info(
183+
f"Created calibration.md file at {calibration_readme_path}")
169184
# SUT base
170185
system = env.get('MLC_HW_NAME', 'default').replace(' ', '_')
171186

script/generate-mlperf-inference-user-conf/customize.py

Lines changed: 28 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -265,12 +265,20 @@ def preprocess(i):
265265
else:
266266
audit_path = test
267267

268-
audit_full_path = os.path.join(
269-
env['MLC_MLPERF_INFERENCE_SOURCE'],
270-
"compliance",
271-
"nvidia",
272-
audit_path,
273-
"audit.config")
268+
if env['MLC_BENCHMARK_GROUP'] == "automotive":
269+
audit_full_path = os.path.join(
270+
env['MLC_MLPERF_INFERENCE_SOURCE'],
271+
"compliance",
272+
audit_path,
273+
"audit.config")
274+
else:
275+
audit_full_path = os.path.join(
276+
env['MLC_MLPERF_INFERENCE_SOURCE'],
277+
"compliance",
278+
"nvidia",
279+
audit_path,
280+
"audit.config")
281+
274282
env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] = audit_full_path
275283
# copy the audit conf to the run directory incase the implementation is
276284
# not supporting the audit-conf path
@@ -518,12 +526,20 @@ def run_files_exist(mode, OUTPUT_DIR, run_files, env, logger):
518526

519527
test = env['MLC_MLPERF_LOADGEN_COMPLIANCE_TEST']
520528

521-
SCRIPT_PATH = os.path.join(
522-
env['MLC_MLPERF_INFERENCE_SOURCE'],
523-
"compliance",
524-
"nvidia",
525-
test,
526-
"run_verification.py")
529+
if env['MLC_BENCHMARK_GROUP'] == "automotive":
530+
SCRIPT_PATH = os.path.join(
531+
env['MLC_MLPERF_INFERENCE_SOURCE'],
532+
"compliance",
533+
test,
534+
"run_verification.py")
535+
else:
536+
SCRIPT_PATH = os.path.join(
537+
env['MLC_MLPERF_INFERENCE_SOURCE'],
538+
"compliance",
539+
"nvidia",
540+
test,
541+
"run_verification.py")
542+
527543
if test == "TEST06":
528544
cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32"
529545
else:

script/get-ml-model-bevformer/meta.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ variations:
2525
env:
2626
MLC_MODEL_FORMAT: pth
2727
MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_bevformer/bevformer_tiny_epoch_24.pth
28-
MLC_ML_MODEL_FILENAME: bevformer_tiny_epoch_24.onnx
28+
MLC_ML_MODEL_FILENAME: bevformer_tiny_epoch_24.pth
2929
mlc:
3030
group: download-src
3131
default: true

script/run-mlperf-automotive-app/customize.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -116,6 +116,12 @@ def preprocess(i):
116116

117117
test_list = []
118118

119+
# Add the compliance tests required for the models
120+
if env['MLC_MODEL'] in ['ssd', 'bevformer', 'deeplabv3plus']:
121+
test_list.append('TEST01')
122+
if env['MLC_MODEL'] in ['bevformer', 'deeplabv3plus']:
123+
test_list.append('TEST04')
124+
119125
variation_benchmark_version = "_" + env["MLC_MLPERF_INFERENCE_VERSION"]
120126
variation_implementation = ",_" + \
121127
env.get("MLC_MLPERF_IMPLEMENTATION", "reference")

script/run-mlperf-inference-submission-checker/customize.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,11 @@ def preprocess(i):
5858
else:
5959
power_check = ""
6060

61+
if is_true(env.get('MLC_MLPERF_SKIP_CALIBRATION_CHECK', 'no')):
62+
calibration_check = " --skip-calibration-check"
63+
else:
64+
calibration_check = ""
65+
6166
extra_args = ' ' + env.get('MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '')
6267

6368
x_submitter = ' --submitter ' + q + submitter + q if submitter != '' else ''

script/run-mlperf-inference-submission-checker/meta.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,7 @@ input_mapping:
8686
repo_owner: MLC_MLPERF_RESULTS_GIT_REPO_OWNER
8787
skip_compliance: MLC_MLPERF_SKIP_COMPLIANCE
8888
skip_power_check: MLC_MLPERF_SKIP_POWER_CHECK
89+
skip_calibration_check: MLC_MLPERF_SKIP_CALIBRATION_CHECK
8990
src_version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION
9091
submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
9192
submitter: MLC_MLPERF_SUBMITTER

0 commit comments

Comments (0)