Integrate AudioQnA set_env to ut scripts. #1897

Merged on May 8, 2025 (8 commits; changes shown from 6 commits).

4 changes: 2 additions & 2 deletions in AudioQnA/docker_compose/amd/gpu/rocm/set_env.sh

```diff
@@ -6,8 +6,8 @@

 # export host_ip=<your External Public IP> # export host_ip=$(hostname -I | awk '{print $1}')

-export host_ip="192.165.1.21"
-export HUGGINGFACEHUB_API_TOKEN=${YOUR_HUGGINGFACEHUB_API_TOKEN}
+export host_ip=${ip_address}
+export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 # <token>

 export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
```

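The script now resolves `host_ip` from `ip_address`, which the calling test script is expected to export before sourcing. A minimal sketch of that caller-side setup, assuming the commented `hostname -I` hint above is the intended source of the address:

```bash
# Hypothetical caller-side setup (not part of this PR): derive ip_address first,
# then source the shared defaults so host_ip resolves to it.
export ip_address=$(hostname -I | awk '{print $1}')  # first local IP, per the commented hint
source set_env.sh
echo "host_ip=${host_ip}"
```
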
4 changes: 2 additions & 2 deletions in AudioQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh

```diff
@@ -6,8 +6,8 @@

 # export host_ip=<your External Public IP> # export host_ip=$(hostname -I | awk '{print $1}')

-export host_ip=""
-export external_host_ip=""
+export host_ip=${ip_address}
+export external_host_ip=${ip_address}
 export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
 export HF_CACHE_DIR="./data"
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
```

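Because both scripts now dereference `ip_address` unconditionally, sourcing them without it silently yields empty values. A small guard, shown here as a sketch rather than anything in this PR, would make the dependency explicit:

```bash
# Sketch only: abort with a clear message if the caller forgot to export ip_address
: "${ip_address:?export ip_address before sourcing set_env_vllm.sh}"
source set_env_vllm.sh
```
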
45 changes: 45 additions & 0 deletions in AudioQnA/tests/README.md (new file)

# AudioQnA E2E test scripts

## Set the required environment variable

```bash
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```

## Run tests

On Intel Xeon with TGI:

```bash
bash test_compose_tgi_on_xeon.sh
```

On Intel Xeon with vLLM:

```bash
bash test_compose_on_xeon.sh
```

On Intel Gaudi with TGI:

```bash
bash test_compose_tgi_on_gaudi.sh
```

On Intel Gaudi with vLLM:

```bash
bash test_compose_on_gaudi.sh
```

On AMD ROCm with TGI:

```bash
bash test_compose_on_rocm.sh
```

On AMD ROCm with vLLM:

```bash
bash test_compose_vllm_on_rocm.sh
```
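
For completeness, a combined invocation might look like the following; the token value is a placeholder, not a real credential:

```bash
# Example session: set the token once, then run a test variant
export HUGGINGFACEHUB_API_TOKEN="hf_xxxxxxxxxxxxxxxx"  # placeholder value
cd AudioQnA/tests
bash test_compose_on_xeon.sh   # Xeon + vLLM variant
```
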
13 changes: 1 addition & 12 deletions in AudioQnA/tests/test_compose_multilang_on_xeon.sh

```diff
@@ -40,21 +40,10 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon/
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
-
-    export MEGA_SERVICE_HOST_IP=${ip_address}
-    export WHISPER_SERVER_HOST_IP=${ip_address}
-    export GPT_SOVITS_SERVER_HOST_IP=${ip_address}
-    export LLM_SERVER_HOST_IP=${ip_address}
-
-    export WHISPER_SERVER_PORT=7066
-    export GPT_SOVITS_SERVER_PORT=9880
-    export LLM_SERVER_PORT=3006
-
-    export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
-    export host_ip=${ip_address}
-
+    source set_env.sh
     # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

     # Start Docker Containers
```

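The same consolidation repeats in the remaining test scripts below: the inline exports move into the shared `set_env.sh`, and each `start_services` shrinks to a `source` plus `docker compose up`. If a test ever needs a non-default value, overriding after sourcing would still work, as in this sketch (not part of the PR):

```bash
# Sketch: shared defaults first, then a hypothetical per-test override
source set_env.sh
export LLM_SERVER_PORT=3007   # hypothetical override of the former inline default 3006
docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
```
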
18 changes: 1 addition & 17 deletions in AudioQnA/tests/test_compose_on_gaudi.sh

```diff
@@ -40,24 +40,8 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/intel/hpu/gaudi
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
-    export NUM_CARDS=1
-    export BLOCK_SIZE=128
-    export MAX_NUM_SEQS=256
-    export MAX_SEQ_LEN_TO_CAPTURE=2048
-
-    export MEGA_SERVICE_HOST_IP=${ip_address}
-    export WHISPER_SERVER_HOST_IP=${ip_address}
-    export SPEECHT5_SERVER_HOST_IP=${ip_address}
-    export LLM_SERVER_HOST_IP=${ip_address}
-
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_PORT=3006
-
-    export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
-    export host_ip=${ip_address}
+    source set_env.sh
     # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

     # Start Docker Containers
```

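This Gaudi variant also retires the vLLM tuning knobs (`NUM_CARDS`, `BLOCK_SIZE`, `MAX_NUM_SEQS`, `MAX_SEQ_LEN_TO_CAPTURE`) from the test script, presumably because the Gaudi `set_env.sh` now carries them. A sketch of re-tuning after sourcing, under that assumption:

```bash
# Sketch: override the Gaudi/vLLM knobs after sourcing the shared defaults
source set_env.sh
export NUM_CARDS=2        # hypothetical: two HPU cards instead of the former inline default 1
export MAX_NUM_SEQS=128   # hypothetical: halve the former inline default 256
```
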
15 changes: 1 addition & 14 deletions in AudioQnA/tests/test_compose_on_rocm.sh

```diff
@@ -35,20 +35,7 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/amd/gpu/rocm/
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
-
-    export MEGA_SERVICE_HOST_IP=${ip_address}
-    export WHISPER_SERVER_HOST_IP=${ip_address}
-    export SPEECHT5_SERVER_HOST_IP=${ip_address}
-    export LLM_SERVER_HOST_IP=${ip_address}
-
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_PORT=3006
-
-    export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
-
+    source set_env.sh
     # Start Docker Containers
     docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
     n=0
```

15 changes: 1 addition & 14 deletions in AudioQnA/tests/test_compose_on_xeon.sh

```diff
@@ -40,21 +40,8 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon/
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
-
-    export MEGA_SERVICE_HOST_IP=${ip_address}
-    export WHISPER_SERVER_HOST_IP=${ip_address}
-    export SPEECHT5_SERVER_HOST_IP=${ip_address}
-    export LLM_SERVER_HOST_IP=${ip_address}
-
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_PORT=3006
-
-    export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
-    export host_ip=${ip_address}
-
+    source set_env.sh
     # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

     # Start Docker Containers
```

15 changes: 1 addition & 14 deletions in AudioQnA/tests/test_compose_tgi_on_gaudi.sh

```diff
@@ -34,21 +34,8 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/intel/hpu/gaudi
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
-
-    export MEGA_SERVICE_HOST_IP=${ip_address}
-    export WHISPER_SERVER_HOST_IP=${ip_address}
-    export SPEECHT5_SERVER_HOST_IP=${ip_address}
-    export LLM_SERVER_HOST_IP=${ip_address}
-
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_PORT=3006
-
-    export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
-    export host_ip=${ip_address}
-
+    source set_env.sh
     # Start Docker Containers
     docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
     n=0
```

15 changes: 1 addition & 14 deletions in AudioQnA/tests/test_compose_tgi_on_xeon.sh

```diff
@@ -34,21 +34,8 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/intel/cpu/xeon/
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
-
-    export MEGA_SERVICE_HOST_IP=${ip_address}
-    export WHISPER_SERVER_HOST_IP=${ip_address}
-    export SPEECHT5_SERVER_HOST_IP=${ip_address}
-    export LLM_SERVER_HOST_IP=${ip_address}
-
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_PORT=3006
-
-    export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
-    export host_ip=${ip_address}
-
+    source set_env.sh
     # Start Docker Containers
     docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
     n=0
```

22 changes: 1 addition & 21 deletions in AudioQnA/tests/test_compose_vllm_on_rocm.sh

```diff
@@ -33,27 +33,7 @@ function build_docker_images() {

 function start_services() {
     cd $WORKPATH/docker_compose/amd/gpu/rocm/
-
-    export host_ip=${ip_address}
-    export external_host_ip=${ip_address}
-    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export HF_CACHE_DIR="./data"
-    export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
-    export VLLM_SERVICE_PORT="8081"
-
-    export MEGA_SERVICE_HOST_IP=${host_ip}
-    export WHISPER_SERVER_HOST_IP=${host_ip}
-    export SPEECHT5_SERVER_HOST_IP=${host_ip}
-    export LLM_SERVER_HOST_IP=${host_ip}
-
-    export WHISPER_SERVER_PORT=7066
-    export SPEECHT5_SERVER_PORT=7055
-    export LLM_SERVER_PORT=${VLLM_SERVICE_PORT}
-    export BACKEND_SERVICE_PORT=3008
-    export FRONTEND_SERVICE_PORT=5173
-
-    export BACKEND_SERVICE_ENDPOINT=http://${external_host_ip}:${BACKEND_SERVICE_PORT}/v1/audioqna
-
+    source set_env_vllm.sh
     sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

     # Start Docker Containers
```

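After `set_env_vllm.sh` is sourced and the containers come up, the vLLM server should answer on `VLLM_SERVICE_PORT` (8081 in this script's former inline exports). A quick readiness probe, offered as a sketch and assuming vLLM's OpenAI-compatible `/v1/models` route:

```bash
# Sketch: poll the vLLM endpoint for up to ~60s before exercising the stack
for i in $(seq 1 12); do
    curl -sf "http://${host_ip}:${VLLM_SERVICE_PORT}/v1/models" > /dev/null && break
    sleep 5
done
```
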