Skip to content

Commit 76c088d

Browse files
Authored by ZePan110
Add model environment variable (#1660)
Signed-off-by: ZePan110 <[email protected]>
1 parent cee24a0 commit 76c088d

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

benchmark.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -402,7 +402,7 @@ def run_benchmark(benchmark_config, chart_name, namespace, node_num=1, llm_model
402402

403403
dataset = None
404404
query_data = None
405-
405+
os.environ["MODEL_NAME"] = test_suite_config.get("llm_model", "meta-llama/Meta-Llama-3-8B-Instruct")
406406
# Do benchmark in for-loop for different llm_max_token_size
407407
for llm_max_token in parsed_data["llm_max_token_size"]:
408408
print(f"[OPEA BENCHMARK] 🚀 Run benchmark on {dataset} with llm max-output-token {llm_max_token}.")

0 commit comments

Comments (0)