diff --git a/.github/workflows/test-mlperf-inference-submission-generation.yml b/.github/workflows/test-mlperf-inference-submission-generation.yml index 4889e202e..1b25e3b8c 100644 --- a/.github/workflows/test-mlperf-inference-submission-generation.yml +++ b/.github/workflows/test-mlperf-inference-submission-generation.yml @@ -2,11 +2,12 @@ name: MLC based MLPerf Inference Submission Generation + on: pull_request: - branches: [ "master", "dev" ] + branches: [ "main", "dev" ] paths: - - '.github/workflows/mlperf-submission.yml' + - '.github/workflows/test-mlperf-inference-submission-generation.yml' - '**' - '!**.md' workflow_call: @@ -28,12 +29,44 @@ jobs: python-version: [ "3.12" ] division: ["closed", "open", "closed-open"] category: ["datacenter", "edge"] + round: ["4.1", "5.0"] case: ["closed"] action: ["run", "docker"] exclude: - os: macos-latest - os: windows-latest - category: "edge" + - round: "4.1" + - case: "closed-edge" + include: + - os: ubuntu-latest + python-version: "3.12" + division: "closed" + category: "edge" + round: "5.0" + case: "closed-edge" + action: "run" + - os: ubuntu-latest + python-version: "3.12" + division: "closed" + category: "edge" + round: "5.0" + case: "closed-edge" + action: "docker" + - os: ubuntu-latest + python-version: "3.12" + division: "open" + category: "edge" + round: "5.0" + case: "closed-edge" + action: "run" + - os: ubuntu-latest + python-version: "3.12" + division: "open" + category: "edge" + round: "5.0" + case: "closed-edge" + action: "docker" steps: - uses: actions/checkout@v3 @@ -55,8 +88,7 @@ jobs: - name: Pull repo where test cases are uploaded run: | git clone -b submission-generation-examples https://github.com/mlcommons/inference.git submission_generation_examples - - name: Run Submission Generation - ${{ matrix.case }} ${{ matrix.action }} ${{ matrix.category }} ${{ matrix.division }} - continue-on-error: true + - name: Run Submission Generation - round-${{ matrix.round }}${{ matrix.case }} ${{ matrix.action }} ${{ matrix.category }} ${{ matrix.division }} run: | if [ "${{ matrix.case }}" == "closed" ]; then description="Test submission - contains closed edge and datacenter" @@ -65,10 +97,18 @@ jobs: fi # Dynamically set the log group to simulate a dynamic step name echo "::group::$description" - if [ -z "${{ inputs.repo-url }}" ]; then - mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v5.0 --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_tests/${{ matrix.case }}/ --run-checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} - mlc ${{ matrix.action }} script --tags=run,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/submissions --submission_tar_file=mysubmission.tar.gz --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} + + docker_tags="" + if [ "${{ matrix.action }}" == "docker" ] && [ -z "${{ inputs.repo-url }}" ]; then + docker_tags="--docker_mlc_repo=${{ github.event.pull_request.head.repo.html_url }} --docker_mlc_repo_branch=${{ github.event.pull_request.head.ref }}" + fi + + if [ -n "${{ inputs.repo-url }}" ]; then + mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v${{ matrix.round }} --clean --preprocess_submission=yes 
--submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_examples/submission_round_${{ matrix.round }}/${{ matrix.case }}/ --run_checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} --skip_calibration_check=yes $docker_tags + mlc ${{ matrix.action }} script --tags=run,inference,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/mlperf_submission --submission_tar_file=mysubmission.tar.gz --quiet --src_version=v${{ matrix.round }} --adr.inference-src.tags=_repo.${{ inputs.repo-url }},_branch.${{ inputs.ref }} --skip_calibration_check=yes $docker_tags else - mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v5.0 --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_tests/${{ matrix.case }}/ --run-checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet - mlc ${{ matrix.action }} script --tags=run,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/submissions --submission_tar_file=mysubmission.tar.gz + mlc ${{ matrix.action }} script --tags=generate,inference,submission --adr.compiler.tags=gcc --version=v${{ matrix.round }} --clean --preprocess_submission=yes --submission_base_dir=mysubmissions --results_dir=$PWD/submission_generation_examples/submission_round_${{ matrix.round }}/${{ matrix.case }}/ --run_checker --submitter=MLCommons --tar=yes --division=${{ matrix.division }} --env.MLC_DETERMINE_MEMORY_CONFIGURATION=yes --quiet --skip_calibration_check=yes $docker_tags + mlc ${{ matrix.action }} script --tags=run,inference,submission,checker --submitter_id_off=mysubmitter_id --tar=yes --submission_dir=mysubmissions/mlperf_submission --submission_tar_file=mysubmission.tar.gz --quiet --src_version=v${{ matrix.round }} --skip_calibration_check=yes $docker_tags fi + + echo "::endgroup::" \ No newline at end of file diff --git a/automation/script/docker.py b/automation/script/docker.py index 37fa84ab0..0d33391e7 100644 --- a/automation/script/docker.py +++ b/automation/script/docker.py @@ -274,7 +274,7 @@ def docker_run(self_module, i): env.setdefault('MLC_DOCKER_CACHE', docker_cache) image_repo = i.get('docker_image_repo', '') - add_deps_recursive = i.get('add_deps_recursive') + add_deps_recursive = i.get('add_deps_recursive', {}) # Ensure Docker is available r = self_module.action_object.access( diff --git a/script/app-mlperf-automotive-mlcommons-python/meta.yaml b/script/app-mlperf-automotive-mlcommons-python/meta.yaml index 785e67166..c574cc7c0 100644 --- a/script/app-mlperf-automotive-mlcommons-python/meta.yaml +++ b/script/app-mlperf-automotive-mlcommons-python/meta.yaml @@ -76,6 +76,7 @@ env_key_mappings: new_env_keys: - MLC_MLPERF_* - MLC_DATASET_* + - MLC_PREPROCESSED_DATASET_* - MLC_HW_NAME - MLC_COGNATA_ACCURACY_DUMP_FILE - MLC_OUTPUT_PREDICTIONS_PATH diff --git a/script/app-mlperf-automotive/customize.py b/script/app-mlperf-automotive/customize.py index f1c0f5958..32f5b3416 100644 --- a/script/app-mlperf-automotive/customize.py +++ b/script/app-mlperf-automotive/customize.py @@ -59,6 +59,33 @@ def postprocess(i): model = env['MLC_MODEL'] model_full_name = env.get('MLC_ML_MODEL_FULL_NAME', model) + if mode == "accuracy" or mode == "compliance" and env[ 
+            'MLC_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01":
+
+        out_baseline_accuracy_string = f"""> {q}{os.path.join(output_dir, "accuracy", "baseline_accuracy.txt")}{q} """
+        out_compliance_accuracy_string = f"""> {q}{os.path.join(output_dir, "accuracy", "compliance_accuracy.txt")}{q} """
+
+        if model == "ssd":
+            accuracy_filename = "accuracy_cognata.py"
+            accuracy_file_path = os.path.join(
+                env['MLC_MLPERF_INFERENCE_SSD_RESNET50_PATH'], accuracy_filename)
+            dataset_args = f""" --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} --config baseline_8MP_ss_scales_fm1_5x5_all """
+            accuracy_log_file_option_name = " --mlperf-accuracy-file "
+
+        if model == "bevformer":
+            accuracy_filename = "accuracy_nuscenes_cpu.py"
+            accuracy_file_path = os.path.join(
+                env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'], accuracy_filename)
+            dataset_args = f""" --nuscenes-dir {env['MLC_PREPROCESSED_DATASET_NUSCENES_ACC_CHECKER_MIN_FILES_PATH']} --config {os.path.join(env['MLC_MLPERF_INFERENCE_BEVFORMER_PATH'], "projects", "configs", "bevformer", "bevformer_tiny.py")} """
+            accuracy_log_file_option_name = " --mlperf-accuracy-file "
+
+        if model == "deeplabv3plus":
+            accuracy_filename = "accuracy_cognata.py"
+            accuracy_file_path = os.path.join(
+                env['MLC_MLPERF_INFERENCE_DEEPLABV3PLUS_PATH'], accuracy_filename)
+            dataset_args = f""" --dataset-path {env['MLC_PREPROCESSED_DATASET_COGNATA_PATH']} """
+            accuracy_log_file_option_name = " --mlperf-accuracy-file "
+
     scenario = env['MLC_MLPERF_LOADGEN_SCENARIO']
 
     if not os.path.exists(output_dir) or not os.path.exists(
@@ -357,6 +384,92 @@ def postprocess(i):
         with open("README-extra.md", "w") as fp:
             fp.write(extra_readme)
 
+    elif mode == "compliance":
+        test = env.get("MLC_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01")
+
+        RESULT_DIR = os.path.split(output_dir)[0]
+        COMPLIANCE_DIR = output_dir
+        OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR)
+
+        SCRIPT_PATH = os.path.join(
+            env['MLC_MLPERF_INFERENCE_SOURCE'],
+            "compliance",
+            test,
+            "run_verification.py")
+        if test == "TEST06":
+            cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {q}{SCRIPT_PATH}{q} -c {q}{COMPLIANCE_DIR}{q} -o {q}{OUTPUT_DIR}{q} --scenario {scenario} --dtype int32"""
+        else:
+            cmd = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {q}{SCRIPT_PATH}{q} -r {q}{RESULT_DIR}{q} -c {q}{COMPLIANCE_DIR}{q} -o {q}{OUTPUT_DIR}{q}"""
+
+        logger.info(cmd)
+        os.system(cmd)
+
+        if test == "TEST01":
+
+            run_script_input = i['run_script_input']
+            automation = i['automation']
+
+            SCRIPT_PATH = os.path.join(env['MLC_MLPERF_INFERENCE_SOURCE'], "compliance", test,
+                                       "create_accuracy_baseline.sh")
+            TEST01_DIR = os.path.join(OUTPUT_DIR, "TEST01")
+            OUTPUT_DIR = os.path.join(OUTPUT_DIR, "TEST01", "accuracy")
+            if not os.path.exists(OUTPUT_DIR):
+                os.makedirs(OUTPUT_DIR)
+
+            ACCURACY_DIR = os.path.join(RESULT_DIR, "accuracy")
+            if not os.path.exists(ACCURACY_DIR):
+                logger.warning("Accuracy run not yet completed")
+                return {
+                    'return': 1, 'error': 'TEST01 needs accuracy run to be completed first'}
+
+            cmd = f"""cd {q}{TEST01_DIR}{q} && bash {q}{SCRIPT_PATH}{q} {q}{os.path.join(ACCURACY_DIR, "mlperf_log_accuracy.json")}{q} {q}{os.path.join(COMPLIANCE_DIR, "mlperf_log_accuracy.json")}{q} """
+            env['CMD'] = cmd
+            logger.info(cmd)
+            r = automation.run_native_script(
+                {'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'})
+            if r['return'] > 0:
+                return r
+
+            verify_accuracy_file = os.path.join(
+                TEST01_DIR, "verify_accuracy.txt")
+            with open(verify_accuracy_file, 'r') as file:
+                data = file.read().replace('\n', '\t')
+
+            if 'TEST PASS' not in data:
+                logger.warning(
+                    "\nDeterministic TEST01 failed... Trying with non-determinism.\n")
+                # Normal test failed, trying the check with non-determinism
+
+                baseline_accuracy_file = os.path.join(
+                    TEST01_DIR, "mlperf_log_accuracy_baseline.json")
+                CMD = f"""cd {q}{ACCURACY_DIR}{q} && {q}{env['MLC_PYTHON_BIN_WITH_PATH']}{q} {q}{accuracy_file_path}{q} \
+{accuracy_log_file_option_name} {q}{baseline_accuracy_file}{q} {dataset_args} {datatype_option} {out_baseline_accuracy_string} """
+
+                env['CMD'] = CMD
+                r = automation.run_native_script(
+                    {'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'})
+                if r['return'] > 0:
+                    return r
+
+                if os.stat(baseline_accuracy_file).st_size == 0:
+                    return {'return': 1,
+                            'error': f"{baseline_accuracy_file} is empty"}
+
+                CMD = f"""cd {q}{ACCURACY_DIR}{q} && {q}{env['MLC_PYTHON_BIN_WITH_PATH']}{q} {q}{accuracy_file_path}{q} \
+{accuracy_log_file_option_name} {q}{os.path.join(TEST01_DIR, "mlperf_log_accuracy.json")}{q} {dataset_args} {datatype_option} \
+{out_compliance_accuracy_string} """
+
+                env['CMD'] = CMD
+                r = automation.run_native_script(
+                    {'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'})
+                if r['return'] > 0:
+                    return r
+
+        import submission_checker as checker
+        is_valid = checker.check_compliance_perf_dir(
+            COMPLIANCE_DIR) if test != "TEST06" else True
+        state['mlc-mlperf-inference-results'][state['MLC_SUT_CONFIG_NAME'
+                                                    ]][model][scenario][test] = "passed" if is_valid else "failed"
+
     if state.get(
             'abtf-inference-implementation') and state['abtf-inference-implementation'].get('version_info'):
         env['MLC_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join(
diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py
index 05c681b20..eb614ecd5 100644
--- a/script/generate-mlperf-inference-submission/customize.py
+++ b/script/generate-mlperf-inference-submission/customize.py
@@ -166,6 +166,19 @@ def generate_submission(env, state, inp, submission_division, logger):
     if not os.path.isdir(path_submission):
         os.makedirs(path_submission)
 
+    # Save a stub calibration.md file in the root directory and make it
+    # available for the submitters to fill
+    try:
+        calibration_readme_path = os.path.join(
+            path_submission, "calibration.md")
+        with open(calibration_readme_path, "w") as fp:
+            fp.write("MLPerf Inference Calibration and Quantization Details\n")
+        logger.info(
+            f"Created calibration.md file at {calibration_readme_path}")
+    except Exception as e:
+        logger.error(f"Error creating calibration.md file: {e}")
+        return {'return': 1, 'error': f"Error creating calibration.md file: {e}"}
+
     # SUT base
     system = env.get('MLC_HW_NAME', 'default').replace(' ', '_')
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index ce3ad9664..f80b9d893 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -265,12 +265,20 @@ def preprocess(i):
         else:
             audit_path = test
 
-        audit_full_path = os.path.join(
-            env['MLC_MLPERF_INFERENCE_SOURCE'],
-            "compliance",
-            "nvidia",
-            audit_path,
-            "audit.config")
+        if env['MLC_BENCHMARK_GROUP'] == "automotive":
+            audit_full_path = os.path.join(
+                env['MLC_MLPERF_INFERENCE_SOURCE'],
+                "compliance",
+                audit_path,
+                "audit.config")
+        else:
+            audit_full_path = os.path.join(
+                env['MLC_MLPERF_INFERENCE_SOURCE'],
+                "compliance",
+                "nvidia",
+                audit_path,
+                "audit.config")
+
         env['MLC_MLPERF_INFERENCE_AUDIT_PATH'] = audit_full_path
 
         # copy the audit conf to the run directory incase the implementation is
         # not supporting the audit-conf path
@@ -518,12 +526,20 @@ def run_files_exist(mode, OUTPUT_DIR, run_files, env, logger):
 
         test = env['MLC_MLPERF_LOADGEN_COMPLIANCE_TEST']
 
-        SCRIPT_PATH = os.path.join(
-            env['MLC_MLPERF_INFERENCE_SOURCE'],
-            "compliance",
-            "nvidia",
-            test,
-            "run_verification.py")
+        if env['MLC_BENCHMARK_GROUP'] == "automotive":
+            SCRIPT_PATH = os.path.join(
+                env['MLC_MLPERF_INFERENCE_SOURCE'],
+                "compliance",
+                test,
+                "run_verification.py")
+        else:
+            SCRIPT_PATH = os.path.join(
+                env['MLC_MLPERF_INFERENCE_SOURCE'],
+                "compliance",
+                "nvidia",
+                test,
+                "run_verification.py")
+
         if test == "TEST06":
             cmd = f"{env['MLC_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32"
         else:
diff --git a/script/get-ml-model-bevformer/meta.yaml b/script/get-ml-model-bevformer/meta.yaml
index e4c156030..4c877ed68 100644
--- a/script/get-ml-model-bevformer/meta.yaml
+++ b/script/get-ml-model-bevformer/meta.yaml
@@ -25,7 +25,7 @@ variations:
     env:
       MLC_MODEL_FORMAT: pth
       MLC_MODEL_RCLONE_FILEPATH: model_checkpoint_bevformer/bevformer_tiny_epoch_24.pth
-      MLC_ML_MODEL_FILENAME: bevformer_tiny_epoch_24.onnx
+      MLC_ML_MODEL_FILENAME: bevformer_tiny_epoch_24.pth
     mlc:
       group: download-src
       default: true
diff --git a/script/run-mlperf-automotive-app/customize.py b/script/run-mlperf-automotive-app/customize.py
index 5e26568db..fc7c22134 100644
--- a/script/run-mlperf-automotive-app/customize.py
+++ b/script/run-mlperf-automotive-app/customize.py
@@ -116,6 +116,12 @@ def preprocess(i):
 
     test_list = []
 
+    # Add the compliance tests required for the models
+    if env['MLC_MODEL'] in ['ssd', 'bevformer', 'deeplabv3plus']:
+        test_list.append('TEST01')
+    if env['MLC_MODEL'] in ['bevformer', 'deeplabv3plus']:
+        test_list.append('TEST04')
+
     variation_benchmark_version = "_" + env["MLC_MLPERF_INFERENCE_VERSION"]
     variation_implementation = ",_" + \
         env.get("MLC_MLPERF_IMPLEMENTATION", "reference")
diff --git a/script/run-mlperf-inference-submission-checker/customize.py b/script/run-mlperf-inference-submission-checker/customize.py
index a23bf2947..533cc2e46 100644
--- a/script/run-mlperf-inference-submission-checker/customize.py
+++ b/script/run-mlperf-inference-submission-checker/customize.py
@@ -58,6 +58,11 @@ def preprocess(i):
     else:
         power_check = ""
 
+    if is_true(env.get('MLC_MLPERF_SKIP_CALIBRATION_CHECK', 'no')):
+        calibration_check = " --skip-calibration-check"
+    else:
+        calibration_check = ""
+
     extra_args = ' ' + env.get('MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '')
 
     x_submitter = ' --submitter ' + q + submitter + q if submitter != '' else ''
diff --git a/script/run-mlperf-inference-submission-checker/meta.yaml b/script/run-mlperf-inference-submission-checker/meta.yaml
index 95117baa5..fb263740a 100644
--- a/script/run-mlperf-inference-submission-checker/meta.yaml
+++ b/script/run-mlperf-inference-submission-checker/meta.yaml
@@ -86,6 +86,7 @@ input_mapping:
   repo_owner: MLC_MLPERF_RESULTS_GIT_REPO_OWNER
   skip_compliance: MLC_MLPERF_SKIP_COMPLIANCE
   skip_power_check: MLC_MLPERF_SKIP_POWER_CHECK
+  skip_calibration_check: MLC_MLPERF_SKIP_CALIBRATION_CHECK
   src_version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION
   submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
   submitter: MLC_MLPERF_SUBMITTER
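Illustrative usage of the new skip_calibration_check input added above (a minimal sketch based on the workflow step in this patch; the submission directory path is the one the workflow generates and may differ locally). The input maps to MLC_MLPERF_SKIP_CALIBRATION_CHECK, which customize.py turns into the submission checker's --skip-calibration-check flag:

    mlc run script --tags=run,inference,submission,checker \
        --submission_dir=mysubmissions/mlperf_submission \
        --src_version=v5.0 --quiet \
        --skip_calibration_check=yes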