Skip to content

Commit b467a13

Browse files
authored
daily update vLLM&vLLM-fork version (#1914)
Signed-off-by: Sun, Xuehao <[email protected]>
1 parent 05011eb commit b467a13

12 files changed

+119
-26
lines changed

.github/workflows/_build_image.yml

+3-3
Original file line numberDiff line numberDiff line change
@@ -83,9 +83,9 @@ jobs:
8383
fi
8484
if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
8585
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
86-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
87-
echo "Check out vLLM tag ${VLLM_VER}"
88-
git checkout ${VLLM_VER} &> /dev/null && cd ../
86+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
87+
echo "Check out vLLM tag ${VLLM_FORK_VER}"
88+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
8989
fi
9090
git clone --depth 1 --branch ${{ inputs.opea_branch }} https://github.com/opea-project/GenAIComps.git
9191
cd GenAIComps && git rev-parse HEAD && cd ../
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Nightly job that bumps the pinned vLLM / vLLM-fork tag across the repo
# (shell test scripts and .github/workflows/_build_image.yml) and opens or
# refreshes a pull request with the change. One matrix leg per tracked repo.
name: Daily update vLLM & vLLM-fork version

on:
  schedule:
    - cron: "30 22 * * *"  # daily at 22:30 UTC
  workflow_dispatch:

env:
  BRANCH_NAME: "update"
  USER_NAME: "CICD-at-OPEA"
  USER_EMAIL: "[email protected]"

jobs:
  freeze-tag:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - repo: vLLM
            repo_name: vllm-project/vllm
            ver_name: VLLM_VER
          # FIX: key must be `repo_name` (not `repo_url`) — the "Run script"
          # step reads ${{ matrix.repo_name }}, so with `repo_url` this leg
          # queried https://api.github.com/repos//tags and never resolved a tag.
          - repo: vLLM-fork
            repo_name: HabanaAI/vllm-fork
            ver_name: VLLM_FORK_VER
    permissions:
      contents: write
      pull-requests: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.ref }}

      - name: Set up Git
        run: |
          git config --global user.name ${{ env.USER_NAME }}
          git config --global user.email ${{ env.USER_EMAIL }}
          git remote set-url origin https://${{ env.USER_NAME }}:"${{ secrets.ACTION_TOKEN }}"@github.com/${{ github.repository }}.git
          git fetch

          # Reuse the per-repo update branch if it already exists upstream;
          # otherwise create it and publish it so the PR head exists.
          if git ls-remote https://github.com/${{ github.repository }}.git "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | grep -q "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}"; then
            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} exists"
            git checkout ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
          else
            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} not exists"
            git checkout -b ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
            git push origin ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} created successfully"
          fi

      - name: Run script
        run: |
          # FIX: use `jq -r` so the tag comes back without surrounding JSON
          # quotes; otherwise sed pins VLLM_VER="vX.Y.Z" (quotes included,
          # unlike the existing unquoted pins) and the commit message / PR
          # title carry literal quote characters.
          latest_vllm_ver=$(curl -s "https://api.github.com/repos/${{ matrix.repo_name }}/tags" | jq -r '.[0].name')
          # Export via GITHUB_ENV so later steps can expand ${latest_vllm_ver}.
          echo "latest_vllm_ver=${latest_vllm_ver}" >> "$GITHUB_ENV"
          find . -type f \( -name "*.sh" -o -name "_build_image.yml" \) -exec sed -i "s/${{ matrix.ver_name }}=.*/${{ matrix.ver_name }}=${latest_vllm_ver}/" {} \;

      - name: Commit changes
        run: |
          git add .
          if git diff-index --quiet HEAD --; then
            # Intentionally fail the job when nothing changed so the
            # "Create Pull Request" step is skipped for an up-to-date pin.
            echo "No changes detected, skipping commit."
            exit 1
          else
            git commit -s -m "Update ${{ matrix.repo }} version to ${latest_vllm_ver}"
            git push
          fi

      - name: Create Pull Request
        run: |
          # One PR per tracked repo, keyed by the head branch: refresh and
          # reopen the existing PR when present, otherwise create a new one.
          pr_count=$(curl -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -s "https://api.github.com/repos/${{ github.repository }}/pulls?state=all&head=${{ env.USER_NAME }}:${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | jq '. | length')
          if [ "$pr_count" -gt 0 ]; then
            echo "Pull Request exists"
            pr_number=$(curl -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -s "https://api.github.com/repos/${{ github.repository }}/pulls?state=all&head=${{ env.USER_NAME }}:${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | jq '.[0].number')
            curl -X PATCH -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -d "{
              \"title\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
              \"body\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
              \"state\":\"open\"
            }" "https://api.github.com/repos/${{ github.repository }}/pulls/${pr_number}"
            echo "Pull Request updated successfully"
          else
            echo "Pull Request not exists..."
            curl -H "Authorization: token ${{ secrets.ACTION_TOKEN }}" -d "{
              \"title\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
              \"body\":\"Update ${{ matrix.repo }} version to ${latest_vllm_ver}\",
              \"head\":\"${{ env.USER_NAME }}:${{ env.BRANCH_NAME }}_${{ matrix.repo }}\",
              \"base\":\"main\"
            }" "https://api.github.com/repos/${{ github.repository }}/pulls"
            echo "Pull Request created successfully"
          fi

AgentQnA/tests/step1_build_images.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -37,8 +37,8 @@ function build_agent_docker_image_gaudi_vllm() {
3737
get_genai_comps
3838

3939
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
40-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
41-
git checkout ${VLLM_VER} &> /dev/null && cd ../
40+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
41+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
4242

4343
echo "Build agent image with --no-cache..."
4444
service_list="agent agent-ui vllm-gaudi"

AudioQnA/tests/test_compose_on_gaudi.sh

+3-3
Original file line numberDiff line numberDiff line change
@@ -27,9 +27,9 @@ function build_docker_images() {
2727

2828
git clone https://github.com/HabanaAI/vllm-fork.git
2929
cd vllm-fork/
30-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
31-
echo "Check out vLLM tag ${VLLM_VER}"
32-
git checkout ${VLLM_VER} &> /dev/null && cd ../
30+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
31+
echo "Check out vLLM tag ${VLLM_FORK_VER}"
32+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
3333

3434
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
3535
service_list="audioqna audioqna-ui whisper-gaudi speecht5-gaudi vllm-gaudi"

ChatQnA/tests/test_compose_faqgen_on_gaudi.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ function build_docker_images() {
2424
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
2525
popd && sleep 1s
2626
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
27-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
28-
git checkout ${VLLM_VER} &> /dev/null && cd ../
27+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
28+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
2929

3030
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
3131
service_list="chatqna chatqna-ui dataprep retriever llm-faqgen vllm-gaudi nginx"

ChatQnA/tests/test_compose_guardrails_on_gaudi.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ function build_docker_images() {
2424
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
2525
popd && sleep 1s
2626
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
27-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
28-
git checkout ${VLLM_VER} &> /dev/null && cd ../
27+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
28+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
2929

3030
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
3131
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi guardrails nginx"

ChatQnA/tests/test_compose_on_gaudi.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ function build_docker_images() {
2424
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
2525
popd && sleep 1s
2626
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
27-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
28-
git checkout ${VLLM_VER} &> /dev/null && cd ../
27+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
28+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
2929

3030
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
3131
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx"

ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ function build_docker_images() {
2424
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
2525
popd && sleep 1s
2626
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
27-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
28-
git checkout ${VLLM_VER} &> /dev/null && cd ../
27+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
28+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
2929

3030
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
3131
service_list="chatqna chatqna-ui dataprep retriever vllm-gaudi nginx"

CodeGen/tests/test_compose_on_gaudi.sh

+3-3
Original file line numberDiff line numberDiff line change
@@ -43,9 +43,9 @@ function build_docker_images() {
4343

4444
# Download Gaudi vllm of latest tag
4545
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
46-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
47-
echo "Check out vLLM tag ${VLLM_VER}"
48-
git checkout ${VLLM_VER} &> /dev/null && cd ../
46+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
47+
echo "Check out vLLM tag ${VLLM_FORK_VER}"
48+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
4949

5050
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
5151
service_list="codegen codegen-gradio-ui llm-textgen vllm-gaudi dataprep retriever embedding"

CodeTrans/tests/test_compose_on_gaudi.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,8 @@ function build_docker_images() {
3131
cd $WORKPATH/docker_image_build
3232
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
3333
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
34-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
35-
git checkout ${VLLM_VER} &> /dev/null && cd ../
34+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
35+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
3636

3737
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
3838
service_list="codetrans codetrans-ui llm-textgen vllm-gaudi nginx"

DocSum/tests/test_compose_on_gaudi.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ function build_docker_images() {
5050
popd && sleep 1s
5151

5252
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
53-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
54-
git checkout ${VLLM_VER} &> /dev/null && cd ../
53+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
54+
git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
5555

5656
echo "Build all the images with --no-cache, check docker_image_build.log for details..."
5757
service_list="docsum docsum-gradio-ui whisper llm-docsum vllm-gaudi"

FinanceAgent/tests/test_compose_on_gaudi.sh

+3-3
Original file line numberDiff line numberDiff line change
@@ -59,9 +59,9 @@ function build_vllm_docker_image() {
5959
git clone https://github.com/HabanaAI/vllm-fork.git
6060
fi
6161
cd ./vllm-fork
62-
# VLLM_VER=$(git describe --tags "$(git rev-list --tags --max-count=1)")
63-
VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
64-
git checkout ${VLLM_VER} &> /dev/null
62+
63+
VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
64+
git checkout ${VLLM_FORK_VER} &> /dev/null
6565
docker build --no-cache -f Dockerfile.hpu -t $vllm_image --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
6666
if [ $? -ne 0 ]; then
6767
echo "$vllm_image failed"

0 commit comments

Comments
 (0)