Skip to content

Commit 5499ac8

Browse files
ZePan110 and yongfengdu
authored and committed
Integrate AudioQnA set_env to ut scripts. (opea-project#1897)
Signed-off-by: ZePan110 <[email protected]>
1 parent 0cfb145 commit 5499ac8

11 files changed

+58
-113
lines changed

AudioQnA/docker_compose/amd/gpu/rocm/set_env.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,8 @@
66

77
# export host_ip=<your External Public IP> # export host_ip=$(hostname -I | awk '{print $1}')
88

9-
export host_ip="192.165.1.21"
10-
export HUGGINGFACEHUB_API_TOKEN=${YOUR_HUGGINGFACEHUB_API_TOKEN}
9+
export host_ip=${ip_address}
10+
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
1111
# <token>
1212

1313
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3

AudioQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,8 @@
66

77
# export host_ip=<your External Public IP> # export host_ip=$(hostname -I | awk '{print $1}')
88

9-
export host_ip=""
10-
export external_host_ip=""
9+
export host_ip=${ip_address}
10+
export external_host_ip=${ip_address}
1111
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
1212
export HF_CACHE_DIR="./data"
1313
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"

AudioQnA/docker_compose/intel/cpu/xeon/set_env.sh

+2-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,8 @@ export MEGA_SERVICE_HOST_IP=${host_ip}
1414
export WHISPER_SERVER_HOST_IP=${host_ip}
1515
export SPEECHT5_SERVER_HOST_IP=${host_ip}
1616
export LLM_SERVER_HOST_IP=${host_ip}
17-
17+
export GPT_SOVITS_SERVER_HOST_IP=${host_ip}
18+
export GPT_SOVITS_SERVER_PORT=9880
1819
export WHISPER_SERVER_PORT=7066
1920
export SPEECHT5_SERVER_PORT=7055
2021
export LLM_SERVER_PORT=3006

AudioQnA/tests/README.md

+45
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
# AudioQnA E2E test scripts
2+
3+
## Set the required environment variable
4+
5+
```bash
6+
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
7+
```
8+
9+
## Run test
10+
11+
On Intel Xeon with TGI:
12+
13+
```bash
14+
bash test_compose_tgi_on_xeon.sh
15+
```
16+
17+
On Intel Xeon with vLLM:
18+
19+
```bash
20+
bash test_compose_on_xeon.sh
21+
```
22+
23+
On Intel Gaudi with TGI:
24+
25+
```bash
26+
bash test_compose_tgi_on_gaudi.sh
27+
```
28+
29+
On Intel Gaudi with vLLM:
30+
31+
```bash
32+
bash test_compose_on_gaudi.sh
33+
```
34+
35+
On AMD ROCm with TGI:
36+
37+
```bash
38+
bash test_compose_on_rocm.sh
39+
```
40+
41+
On AMD ROCm with vLLM:
42+
43+
```bash
44+
bash test_compose_vllm_on_rocm.sh
45+
```

AudioQnA/tests/test_compose_multilang_on_xeon.sh

+1-14
Original file line numberDiff line numberDiff line change
@@ -40,21 +40,8 @@ function build_docker_images() {
4040

4141
function start_services() {
4242
cd $WORKPATH/docker_compose/intel/cpu/xeon/
43-
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
44-
export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
45-
46-
export MEGA_SERVICE_HOST_IP=${ip_address}
47-
export WHISPER_SERVER_HOST_IP=${ip_address}
48-
export GPT_SOVITS_SERVER_HOST_IP=${ip_address}
49-
export LLM_SERVER_HOST_IP=${ip_address}
50-
51-
export WHISPER_SERVER_PORT=7066
52-
export GPT_SOVITS_SERVER_PORT=9880
53-
export LLM_SERVER_PORT=3006
54-
55-
export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
5643
export host_ip=${ip_address}
57-
44+
source set_env.sh
5845
# sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
5946

6047
# Start Docker Containers

AudioQnA/tests/test_compose_on_gaudi.sh

+1-17
Original file line numberDiff line numberDiff line change
@@ -40,24 +40,8 @@ function build_docker_images() {
4040

4141
function start_services() {
4242
cd $WORKPATH/docker_compose/intel/hpu/gaudi
43-
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
44-
export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
45-
export NUM_CARDS=1
46-
export BLOCK_SIZE=128
47-
export MAX_NUM_SEQS=256
48-
export MAX_SEQ_LEN_TO_CAPTURE=2048
49-
50-
export MEGA_SERVICE_HOST_IP=${ip_address}
51-
export WHISPER_SERVER_HOST_IP=${ip_address}
52-
export SPEECHT5_SERVER_HOST_IP=${ip_address}
53-
export LLM_SERVER_HOST_IP=${ip_address}
54-
55-
export WHISPER_SERVER_PORT=7066
56-
export SPEECHT5_SERVER_PORT=7055
57-
export LLM_SERVER_PORT=3006
58-
59-
export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
6043
export host_ip=${ip_address}
44+
source set_env.sh
6145
# sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
6246

6347
# Start Docker Containers

AudioQnA/tests/test_compose_on_rocm.sh

+1-14
Original file line numberDiff line numberDiff line change
@@ -35,20 +35,7 @@ function build_docker_images() {
3535

3636
function start_services() {
3737
cd $WORKPATH/docker_compose/amd/gpu/rocm/
38-
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
39-
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
40-
41-
export MEGA_SERVICE_HOST_IP=${ip_address}
42-
export WHISPER_SERVER_HOST_IP=${ip_address}
43-
export SPEECHT5_SERVER_HOST_IP=${ip_address}
44-
export LLM_SERVER_HOST_IP=${ip_address}
45-
46-
export WHISPER_SERVER_PORT=7066
47-
export SPEECHT5_SERVER_PORT=7055
48-
export LLM_SERVER_PORT=3006
49-
50-
export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
51-
38+
source set_env.sh
5239
# Start Docker Containers
5340
docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
5441
n=0

AudioQnA/tests/test_compose_on_xeon.sh

+1-14
Original file line numberDiff line numberDiff line change
@@ -40,21 +40,8 @@ function build_docker_images() {
4040

4141
function start_services() {
4242
cd $WORKPATH/docker_compose/intel/cpu/xeon/
43-
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
44-
export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
45-
46-
export MEGA_SERVICE_HOST_IP=${ip_address}
47-
export WHISPER_SERVER_HOST_IP=${ip_address}
48-
export SPEECHT5_SERVER_HOST_IP=${ip_address}
49-
export LLM_SERVER_HOST_IP=${ip_address}
50-
51-
export WHISPER_SERVER_PORT=7066
52-
export SPEECHT5_SERVER_PORT=7055
53-
export LLM_SERVER_PORT=3006
54-
55-
export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
5643
export host_ip=${ip_address}
57-
44+
source set_env.sh
5845
# sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
5946

6047
# Start Docker Containers

AudioQnA/tests/test_compose_tgi_on_gaudi.sh

+1-14
Original file line numberDiff line numberDiff line change
@@ -34,21 +34,8 @@ function build_docker_images() {
3434

3535
function start_services() {
3636
cd $WORKPATH/docker_compose/intel/hpu/gaudi
37-
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
38-
export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
39-
40-
export MEGA_SERVICE_HOST_IP=${ip_address}
41-
export WHISPER_SERVER_HOST_IP=${ip_address}
42-
export SPEECHT5_SERVER_HOST_IP=${ip_address}
43-
export LLM_SERVER_HOST_IP=${ip_address}
44-
45-
export WHISPER_SERVER_PORT=7066
46-
export SPEECHT5_SERVER_PORT=7055
47-
export LLM_SERVER_PORT=3006
48-
49-
export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
5037
export host_ip=${ip_address}
51-
38+
source set_env.sh
5239
# Start Docker Containers
5340
docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
5441
n=0

AudioQnA/tests/test_compose_tgi_on_xeon.sh

+1-14
Original file line numberDiff line numberDiff line change
@@ -34,21 +34,8 @@ function build_docker_images() {
3434

3535
function start_services() {
3636
cd $WORKPATH/docker_compose/intel/cpu/xeon/
37-
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
38-
export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
39-
40-
export MEGA_SERVICE_HOST_IP=${ip_address}
41-
export WHISPER_SERVER_HOST_IP=${ip_address}
42-
export SPEECHT5_SERVER_HOST_IP=${ip_address}
43-
export LLM_SERVER_HOST_IP=${ip_address}
44-
45-
export WHISPER_SERVER_PORT=7066
46-
export SPEECHT5_SERVER_PORT=7055
47-
export LLM_SERVER_PORT=3006
48-
49-
export BACKEND_SERVICE_ENDPOINT=http://${ip_address}:3008/v1/audioqna
5037
export host_ip=${ip_address}
51-
38+
source set_env.sh
5239
# Start Docker Containers
5340
docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log
5441
n=0

AudioQnA/tests/test_compose_vllm_on_rocm.sh

+1-21
Original file line numberDiff line numberDiff line change
@@ -33,27 +33,7 @@ function build_docker_images() {
3333

3434
function start_services() {
3535
cd $WORKPATH/docker_compose/amd/gpu/rocm/
36-
37-
export host_ip=${ip_address}
38-
export external_host_ip=${ip_address}
39-
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
40-
export HF_CACHE_DIR="./data"
41-
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
42-
export VLLM_SERVICE_PORT="8081"
43-
44-
export MEGA_SERVICE_HOST_IP=${host_ip}
45-
export WHISPER_SERVER_HOST_IP=${host_ip}
46-
export SPEECHT5_SERVER_HOST_IP=${host_ip}
47-
export LLM_SERVER_HOST_IP=${host_ip}
48-
49-
export WHISPER_SERVER_PORT=7066
50-
export SPEECHT5_SERVER_PORT=7055
51-
export LLM_SERVER_PORT=${VLLM_SERVICE_PORT}
52-
export BACKEND_SERVICE_PORT=3008
53-
export FRONTEND_SERVICE_PORT=5173
54-
55-
export BACKEND_SERVICE_ENDPOINT=http://${external_host_ip}:${BACKEND_SERVICE_PORT}/v1/audioqna
56-
36+
source set_env_vllm.sh
5737
sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
5838

5939
# Start Docker Containers

0 commit comments

Comments
 (0)