diff --git a/AudioQnA/docker_compose/amd/gpu/rocm/set_env.sh b/AudioQnA/docker_compose/amd/gpu/rocm/set_env.sh
index 4ee4320b03..d4a0bda6d1 100644
--- a/AudioQnA/docker_compose/amd/gpu/rocm/set_env.sh
+++ b/AudioQnA/docker_compose/amd/gpu/rocm/set_env.sh
@@ -6,8 +6,8 @@

 # export host_ip=
 # export host_ip=$(hostname -I | awk '{print $1}')
-export host_ip="192.165.1.21"
-export HUGGINGFACEHUB_API_TOKEN=${YOUR_HUGGINGFACEHUB_API_TOKEN}
+export host_ip=${ip_address}
+export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}

 # export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
diff --git a/AudioQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh b/AudioQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh
index 2eb724dc6e..9cd8934f49 100644
--- a/AudioQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh
+++ b/AudioQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh
@@ -6,8 +6,8 @@

 # export host_ip=
 # export host_ip=$(hostname -I | awk '{print $1}')
-export host_ip=""
-export external_host_ip=""
+export host_ip=${ip_address}
+export external_host_ip=${ip_address}
 export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 export HF_CACHE_DIR="./data"
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
diff --git a/AudioQnA/docker_compose/intel/cpu/xeon/set_env.sh b/AudioQnA/docker_compose/intel/cpu/xeon/set_env.sh
index adc652f169..4a63ef65b3 100644
--- a/AudioQnA/docker_compose/intel/cpu/xeon/set_env.sh
+++ b/AudioQnA/docker_compose/intel/cpu/xeon/set_env.sh
@@ -14,7 +14,8 @@ export MEGA_SERVICE_HOST_IP=${host_ip}
 export WHISPER_SERVER_HOST_IP=${host_ip}
 export SPEECHT5_SERVER_HOST_IP=${host_ip}
 export LLM_SERVER_HOST_IP=${host_ip}
-
+export GPT_SOVITS_SERVER_HOST_IP=${host_ip}
+export GPT_SOVITS_SERVER_PORT=9880
 export WHISPER_SERVER_PORT=7066
 export SPEECHT5_SERVER_PORT=7055
 export LLM_SERVER_PORT=3006
diff --git a/AudioQnA/tests/README.md b/AudioQnA/tests/README.md
new file mode 100644
index 0000000000..390c182447
--- /dev/null
+++ b/AudioQnA/tests/README.md
@@ -0,0 +1,45 @@
+# AudioQnA E2E test scripts
+
+## Set the required environment variable
+
+```bash
+export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
+```
+
+## Run test
+
+On Intel Xeon with TGI:
+
+```bash
+bash test_compose_tgi_on_xeon.sh
+```
+
+On Intel Xeon with vLLM:
+
+```bash
+bash test_compose_on_xeon.sh
+```
+
+On Intel Gaudi with TGI:
+
+```bash
+bash test_compose_tgi_on_gaudi.sh
+```
+
+On Intel Gaudi with vLLM:
+
+```bash
+bash test_compose_on_gaudi.sh
+```
+
+On AMD ROCm with TGI:
+
+```bash
+bash test_compose_on_rocm.sh
+```
+
+On AMD ROCm with vLLM:
+
+```bash
+bash test_compose_vllm_on_rocm.sh
+```
diff --git a/AudioQnA/tests/test_compose_multilang_on_xeon.sh b/AudioQnA/tests/test_compose_multilang_on_xeon.sh
index 2bf05b3529..f958c91c19 100644
--- a/AudioQnA/tests/test_compose_multilang_on_xeon.sh
+++ b/AudioQnA/tests/test_compose_multilang_on_xeon.sh
@@ -40,21 +40,8 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon/
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
-
-    export MEGA_SERVICE_HOST_IP=${ip_address}
-    export WHISPER_SERVER_HOST_IP=${ip_address}
-    export GPT_SOVITS_SERVER_HOST_IP=${ip_address}
-    export LLM_SERVER_HOST_IP=${ip_address}
-
-    export WHISPER_SERVER_PORT=7066
-    export GPT_SOVITS_SERVER_PORT=9880
-    export LLM_SERVER_PORT=3006
-
-    export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
     export host_ip=${ip_address}
-
+    source set_env.sh
     # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

     # Start Docker Containers
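The recurring refactor in these test scripts is to drop the per-script `export` block in favor of `export host_ip=${ip_address}` plus a single `source set_env.sh`. A minimal sketch of the resulting flow, assuming `ip_address` is derived the way the commented-out hint in `set_env.sh` suggests (the CI harness may set it differently):

```bash
# Sketch of the consolidated start_services() pattern, not the literal script.
ip_address=$(hostname -I | awk '{print $1}')  # assumption: harness provides this
export host_ip=${ip_address}
source set_env.sh   # supplies LLM_MODEL_ID, *_SERVER_HOST_IP, *_SERVER_PORT, etc.
docker compose up -d > start_services_with_compose.log
```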
"s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env # Start Docker Containers diff --git a/AudioQnA/tests/test_compose_on_gaudi.sh b/AudioQnA/tests/test_compose_on_gaudi.sh index d999cf5183..e2d58b72e9 100644 --- a/AudioQnA/tests/test_compose_on_gaudi.sh +++ b/AudioQnA/tests/test_compose_on_gaudi.sh @@ -40,24 +40,8 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct - export NUM_CARDS=1 - export BLOCK_SIZE=128 - export MAX_NUM_SEQS=256 - export MAX_SEQ_LEN_TO_CAPTURE=2048 - - export MEGA_SERVICE_HOST_IP=${ip_address} - export WHISPER_SERVER_HOST_IP=${ip_address} - export SPEECHT5_SERVER_HOST_IP=${ip_address} - export LLM_SERVER_HOST_IP=${ip_address} - - export WHISPER_SERVER_PORT=7066 - export SPEECHT5_SERVER_PORT=7055 - export LLM_SERVER_PORT=3006 - - export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna export host_ip=${ip_address} + source set_env.sh # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env # Start Docker Containers diff --git a/AudioQnA/tests/test_compose_on_rocm.sh b/AudioQnA/tests/test_compose_on_rocm.sh index 117e92971d..f30abe355f 100644 --- a/AudioQnA/tests/test_compose_on_rocm.sh +++ b/AudioQnA/tests/test_compose_on_rocm.sh @@ -35,20 +35,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/amd/gpu/rocm/ - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3 - - export MEGA_SERVICE_HOST_IP=${ip_address} - export WHISPER_SERVER_HOST_IP=${ip_address} - export SPEECHT5_SERVER_HOST_IP=${ip_address} - export LLM_SERVER_HOST_IP=${ip_address} - - export WHISPER_SERVER_PORT=7066 - export SPEECHT5_SERVER_PORT=7055 - export LLM_SERVER_PORT=3006 - - export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna - + source set_env.sh # Start Docker Containers docker compose up -d > ${LOG_PATH}/start_services_with_compose.log n=0 diff --git a/AudioQnA/tests/test_compose_on_xeon.sh b/AudioQnA/tests/test_compose_on_xeon.sh index 8abda55023..9803591bc9 100644 --- a/AudioQnA/tests/test_compose_on_xeon.sh +++ b/AudioQnA/tests/test_compose_on_xeon.sh @@ -40,21 +40,8 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct - - export MEGA_SERVICE_HOST_IP=${ip_address} - export WHISPER_SERVER_HOST_IP=${ip_address} - export SPEECHT5_SERVER_HOST_IP=${ip_address} - export LLM_SERVER_HOST_IP=${ip_address} - - export WHISPER_SERVER_PORT=7066 - export SPEECHT5_SERVER_PORT=7055 - export LLM_SERVER_PORT=3006 - - export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna export host_ip=${ip_address} - + source set_env.sh # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env # Start Docker Containers diff --git a/AudioQnA/tests/test_compose_tgi_on_gaudi.sh b/AudioQnA/tests/test_compose_tgi_on_gaudi.sh index 156dce92cd..dd68dfe770 100644 --- a/AudioQnA/tests/test_compose_tgi_on_gaudi.sh +++ b/AudioQnA/tests/test_compose_tgi_on_gaudi.sh @@ -34,21 +34,8 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct - - export 
diff --git a/AudioQnA/tests/test_compose_tgi_on_xeon.sh b/AudioQnA/tests/test_compose_tgi_on_xeon.sh
index 3190818124..0e0b2d571a 100644
--- a/AudioQnA/tests/test_compose_tgi_on_xeon.sh
+++ b/AudioQnA/tests/test_compose_tgi_on_xeon.sh
@@ -34,21 +34,8 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon/
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
-
-    export MEGA_SERVICE_HOST_IP=${ip_address}
-    export WHISPER_SERVER_HOST_IP=${ip_address}
-    export SPEECHT5_SERVER_HOST_IP=${ip_address}
-    export LLM_SERVER_HOST_IP=${ip_address}
-
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_PORT=3006
-
-    export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
     export host_ip=${ip_address}
-
+    source set_env.sh
     # Start Docker Containers
     docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
     n=0
diff --git a/AudioQnA/tests/test_compose_vllm_on_rocm.sh b/AudioQnA/tests/test_compose_vllm_on_rocm.sh
index 982bc74de9..924c8c25fa 100644
--- a/AudioQnA/tests/test_compose_vllm_on_rocm.sh
+++ b/AudioQnA/tests/test_compose_vllm_on_rocm.sh
@@ -33,27 +33,7 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/amd/gpu/rocm/
-
-    export host_ip=${ip_address}
-    export external_host_ip=${ip_address}
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export HF_CACHE_DIR="./data"
-    export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
-    export VLLM_SERVICE_PORT="8081"
-
-    export MEGA_SERVICE_HOST_IP=${host_ip}
-    export WHISPER_SERVER_HOST_IP=${host_ip}
-    export SPEECHT5_SERVER_HOST_IP=${host_ip}
-    export LLM_SERVER_HOST_IP=${host_ip}
-
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_PORT=${VLLM_SERVICE_PORT}
-    export BACKEND_SERVICE_PORT=3008
-    export FRONTEND_SERVICE_PORT=5173
-
-    export BACKEND_SERVICE_ENDPOINT=http://${external_host_ip}:${BACKEND_SERVICE_PORT}/v1/audioqna
-
+    source set_env_vllm.sh
     sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

     # Start Docker Containers
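Since every `start_services()` now depends on a sourced env script, and the new README makes `HUGGINGFACEHUB_API_TOKEN` a prerequisite, a guard like the following could precede each `source` line. This is a suggested hardening, not part of the patch:

```bash
# Fail fast if the prerequisite token is missing (see AudioQnA/tests/README.md).
if [ -z "${HUGGINGFACEHUB_API_TOKEN}" ]; then
    echo "HUGGINGFACEHUB_API_TOKEN must be set before running the tests" >&2
    exit 1
fi
```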