Skip to content
This repository was archived by the owner on Oct 25, 2024. It is now read-only.

Commit 12882d8

Browse files
authored
remove chatbot test dependency on local code (#716)
1 parent 674ae4d commit 12882d8

10 files changed: +22 −24 lines changed

.github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -32,13 +32,13 @@ jobs:
3232
run: |
3333
cid=$(docker ps -q --filter "name=chatbotfinetune-hpu-s0")
3434
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
35-
docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-hpu-s0" --hostname="chatbotfinetune-container-mpi-s0" chatbotfinetune-hpu:latest
35+
docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-hpu-s0" --hostname="chatbotfinetune-container-mpi-s0" chatbotfinetune-hpu:latest
3636
3737
- name: Run Finetuning
3838
run: |
39-
cmd="python3 /root/chatbot/workflows/chatbot/fine_tuning/instruction_tuning_pipeline/finetune_clm.py \
39+
cmd="python3 /intel-extension-for-transformers/workflows/chatbot/fine_tuning/instruction_tuning_pipeline/finetune_clm.py \
4040
--model_name_or_path mosaicml/mpt-7b-chat \
41-
--train_file /root/chatbot/.github/workflows/sample_data/alpaca_data_sample_45.json \
41+
--train_file /intel-extension-for-transformers/.github/workflows/sample_data/alpaca_data_sample_45.json \
4242
--bf16 True \
4343
--output_dir ./mpt_peft_finetuned_model \
4444
--num_train_epochs 3 \

.github/workflows/chatbot-finetune-mpt-7b-chat.yml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ jobs:
3232
run: |
3333
cid=$(docker ps -q --filter "name=chatbotfinetune-mpi-s0")
3434
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
35-
numactl --cpunodebind=0 -- docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-mpi-s0" --hostname="chatbotfinetune-container-mpi-s0" chatbotfinetune-mpi:latest
35+
numactl --cpunodebind=0 -- docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-mpi-s0" --hostname="chatbotfinetune-container-mpi-s0" chatbotfinetune-mpi:latest
3636
master=$(docker inspect -f "{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}" "chatbotfinetune-mpi-s0")
3737
echo "master_node=$master" >> $GITHUB_OUTPUT
3838
@@ -41,16 +41,16 @@ jobs:
4141
run: |
4242
cid=$(docker ps -q --filter "name=chatbotfinetune-mpi-s1")
4343
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
44-
numactl --cpunodebind=1 -- docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-mpi-s1" --hostname="chatbotfinetune-container-mpi-s1" chatbotfinetune-mpi:latest
44+
numactl --cpunodebind=1 -- docker run -tid -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotfinetune-mpi-s1" --hostname="chatbotfinetune-container-mpi-s1" chatbotfinetune-mpi:latest
4545
slave=$(docker inspect -f "{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}" "chatbotfinetune-mpi-s1")
4646
echo "slave_node=$slave" >> $GITHUB_OUTPUT
4747
4848
- name: Run Finetuning
4949
run: |
5050
sh .github/workflows/script/chatbot/prepare_ft_mpt-7b-chat_mpi.sh ${{ steps.master_container.outputs.master_node }} ${{ steps.slave_container.outputs.slave_node }}
51-
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /root/chatbot && source activate && conda activate neuralchat && pip uninstall intel-extension-for-transformers -y && python setup.py install"
52-
docker exec "chatbotfinetune-mpi-s1" bash -c "cd /root/chatbot && source activate && conda activate neuralchat && pip uninstall intel-extension-for-transformers -y && python setup.py install"
53-
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /root/chatbot; source ./bash_setup.sh; mpirun -f ./hosts2 -n 2 -ppn 1 -genv OMP_NUM_THREADS=48 sh .github/workflows/script/chatbot/start_ft_mpt-7b-chat_mpi.sh"
51+
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /intel-extension-for-transformers && source activate && conda activate neuralchat && pip uninstall intel-extension-for-transformers -y && python setup.py install"
52+
docker exec "chatbotfinetune-mpi-s1" bash -c "cd /intel-extension-for-transformers && source activate && conda activate neuralchat && pip uninstall intel-extension-for-transformers -y && python setup.py install"
53+
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /intel-extension-for-transformers; source ./bash_setup.sh; mpirun -f ./hosts2 -n 2 -ppn 1 -genv OMP_NUM_THREADS=48 sh .github/workflows/script/chatbot/start_ft_mpt-7b-chat_mpi.sh"
5454
5555
- name: Print Logs and Check Finetuning Status
5656
if: success() || failure()

.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,11 @@ jobs:
2828
run: |
2929
cid=$(docker ps -q --filter "name=chatbotinfer-gha")
3030
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
31-
docker run -tid -v /home/sdp/.cache/huggingface/hub:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-gha" --hostname="chatbotinfer-gha-container" chatbotinfer-gha:latest
31+
docker run -tid -v /home/sdp/.cache/huggingface/hub:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-gha" --hostname="chatbotinfer-gha-container" chatbotinfer-gha:latest
3232
3333
- name: Run Inference Test
3434
run: |
35-
docker exec "chatbotinfer-gha" bash -c "cd /root/chatbot && source activate && conda activate neuralchat;\
35+
docker exec "chatbotinfer-gha" bash -c "cd /intel-extension-for-transformers && source activate && conda activate neuralchat;\
3636
git config --global --add safe.directory '*' && \
3737
git submodule update --init --recursive && \
3838
pip uninstall intel-extension-for-transformers -y; \
@@ -45,7 +45,7 @@ jobs:
4545
if: always()
4646
run: |
4747
cid=$(docker ps -q --filter "name=chatbotinfer-gha")
48-
if [[ ! -z "$cid" ]]; then docker exec "chatbotinfer-gha" bash -c "rm -rf /root/chatbot/* && rm -rf /root/chatbot/.* || echo Clean" && docker stop $cid && docker rm $cid; fi
48+
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
4949
5050
- name: Test Summary
5151
run: echo "Inference completed successfully"

.github/workflows/chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,15 +29,15 @@ jobs:
2929
run: |
3030
cid=$(docker ps -q --filter "name=chatbotinfer-hpu")
3131
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
32-
docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-hpu" --hostname="chatbotinfer-hpu-container" chatbotinfer-hpu:latest
32+
docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-hpu" --hostname="chatbotinfer-hpu-container" chatbotinfer-hpu:latest
3333
3434
- name: Run Inference Test without DeepSpeed
3535
run: |
36-
docker exec "chatbotinfer-hpu" bash -c "cd /root/chatbot; python workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"${{ env.HF_ACCESS_TOKEN }}\" --habana --use_hpu_graphs --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
36+
docker exec "chatbotinfer-hpu" bash -c "cd /intel-extension-for-transformers; python workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"${{ env.HF_ACCESS_TOKEN }}\" --habana --use_hpu_graphs --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
3737
3838
- name: Run Inference Test with DeepSpeed
3939
run: |
40-
docker exec "chatbotinfer-hpu" bash -c "cd /root/chatbot; export HABANA_VISIBLE_MODULES=\"0,1\"; python workflows/chatbot/utils/gaudi_spawn.py --use_deepspeed --world_size 2 workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"${{ env.HF_ACCESS_TOKEN }}\" --habana --use_hpu_graphs --use_kv_cache --task chat --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
40+
docker exec "chatbotinfer-hpu" bash -c "cd /intel-extension-for-transformers; export HABANA_VISIBLE_MODULES=\"0,1\"; python workflows/chatbot/utils/gaudi_spawn.py --use_deepspeed --world_size 2 workflows/chatbot/inference/generate.py --base_model_path \"meta-llama/Llama-2-7b-chat-hf\" --hf_access_token \"${{ env.HF_ACCESS_TOKEN }}\" --habana --use_hpu_graphs --use_kv_cache --task chat --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
4141
4242
- name: Stop Container
4343
if: success() || failure()

.github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,11 +30,11 @@ jobs:
3030
run: |
3131
cid=$(docker ps -q --filter "name=chatbotinfer-hpu")
3232
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
33-
docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-hpu" --hostname="chatbotinfer-hpu-container" chatbotinfer-hpu:latest
33+
docker run -tid --runtime=habana -v /mnt/DP_disk1/huggingface/cache/:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-hpu" --hostname="chatbotinfer-hpu-container" chatbotinfer-hpu:latest
3434
3535
- name: Run Inference Test
3636
run: |
37-
docker exec "chatbotinfer-hpu" bash -c "cd /root/chatbot; python workflows/chatbot/inference/generate.py --base_model_path \"mosaicml/mpt-7b-chat\" --habana --use_hpu_graphs --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
37+
docker exec "chatbotinfer-hpu" bash -c "cd /intel-extension-for-transformers; python workflows/chatbot/inference/generate.py --base_model_path \"mosaicml/mpt-7b-chat\" --habana --use_hpu_graphs --instructions \"Transform the following sentence into one that shows contrast. The tree is rotten.\" "
3838
3939
- name: Stop Container
4040
if: success() || failure()

.github/workflows/chatbot-inference-mpt-7b-chat.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,11 @@ jobs:
2929
run: |
3030
cid=$(docker ps -q --filter "name=chatbotinfer-gha")
3131
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
32-
docker run -tid -v /home/sdp/.cache/huggingface/hub:/root/.cache/huggingface/hub -v .:/root/chatbot -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-gha" --hostname="chatbotinfer-gha-container" chatbotinfer-gha:latest
32+
docker run -tid -v /home/sdp/.cache/huggingface/hub:/root/.cache/huggingface/hub -e http_proxy="${{ env.HTTP_PROXY_CONTAINER_RUN }}" -e https_proxy="${{ env.HTTPS_PROXY_CONTAINER_RUN }}" --name="chatbotinfer-gha" --hostname="chatbotinfer-gha-container" chatbotinfer-gha:latest
3333
3434
- name: Run Inference Test
3535
run: |
36-
docker exec "chatbotinfer-gha" bash -c "cd /root/chatbot && source activate && conda activate neuralchat; \
36+
docker exec "chatbotinfer-gha" bash -c "cd /intel-extension-for-transformers && source activate && conda activate neuralchat; \
3737
git config --global --add safe.directory '*' && \
3838
git submodule update --init --recursive && \
3939
pip uninstall intel-extension-for-transformers -y; \
@@ -46,7 +46,7 @@ jobs:
4646
if: always()
4747
run: |
4848
cid=$(docker ps -q --filter "name=chatbotinfer-gha")
49-
if [[ ! -z "$cid" ]]; then docker exec "chatbotinfer-gha" bash -c "rm -rf /root/chatbot/* && rm -rf /root/chatbot/.* || echo Clean" && docker stop $cid && docker rm $cid; fi
49+
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
5050
5151
- name: Test Summary
5252
run: echo "Inference completed successfully"

.github/workflows/script/chatbot/hpu_check/run_check.sh

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,5 @@ cid=$(docker ps -q --filter "name=$cont_name")
1818
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
1919

2020
# run checks
21-
script_dir=$(dirname "$0")
22-
docker run --rm --runtime=habana -v $script_dir:/root/chatbot --name="$cont_name" --hostname="chatbot-hpu-check-container" "$image_name" bash -c "python /root/chatbot/to_hpu.py"
21+
docker run --rm --runtime=habana --name="$cont_name" --hostname="chatbot-hpu-check-container" "$image_name" bash -c "python /intel-extension-for-transformers/.github/workflows/script/chatbot/hpu_check/to_hpu.py"
2322

.github/workflows/script/chatbot/hpu_check/to_hpu.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
import torch
2-
from optimum.habana.transformers.modeling_utils import adapt_transformers_to_gaudi
32

43
ly = torch.nn.Linear(2, 4)
54
ly.to("hpu")

.github/workflows/script/chatbot/prepare_ft_mpt-7b-chat_mpi.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ export I_MPI_HYDRA_IFACE=eth0
88
EOF
99
)"
1010
# for launching mpirun from yaml
11-
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /root/chatbot; echo \"source activate && conda activate neuralchat\" > bash_setup.sh; echo export MASTER_ADDR=$master_node >> bash_setup.sh"
11+
docker exec "chatbotfinetune-mpi-s0" bash -c "cd /intel-extension-for-transformers; echo \"source activate && conda activate neuralchat\" > bash_setup.sh; echo export MASTER_ADDR=$master_node >> bash_setup.sh"
1212
# for ssh setup mpi and oneccl properly
1313
docker exec "chatbotfinetune-mpi-s0" bash -c "echo \"$prepare_script\" >> ~/.bashrc; echo export MASTER_ADDR=$master_node >> ~/.bashrc"
1414

.github/workflows/script/chatbot/start_ft_mpt-7b-chat_mpi.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
cd /root/chatbot
1+
cd /intel-extension-for-transformers
22
hname=$(hostname -s)
33
python3 workflows/chatbot/fine_tuning/instruction_tuning_pipeline/finetune_clm.py \
44
--model_name_or_path mosaicml/mpt-7b-chat \

0 commit comments

Comments (0)