
Commit dc196df

yuekaiz committed: fix decoupled mode
1 parent 178da09 commit dc196df


3 files changed: +52 −33 lines changed


runtime/triton_trtllm/model_repo/cosyvoice2/1/model.py

Lines changed: 18 additions & 7 deletions
@@ -295,11 +295,26 @@ def execute(self, requests):
             if self.decoupled:
                 response_sender = request.get_response_sender()
                 request_id = request.request_id()
-                for generated_ids in generated_ids_iter:
-                    raise NotImplementedError("Decoupled mode is not implemented")
+                generated_ids = []
+                for generated_id in generated_ids_iter:
+                    # each iteration yields a numpy array holding one token id
+                    generated_id = generated_id.tolist()
+                    if len(generated_id) > 0:
+                        assert len(generated_id) == 1, "Generated ID is not a single integer"
+                        generated_ids.append(generated_id[0])
+                generated_ids = torch.tensor(generated_ids).unsqueeze(0).to(torch.int32).to(self.device)
+                prompt_spk_embedding = self._extract_spk_embedding(wav_tensor)
+                audio = self.forward_token2wav(prompt_speech_tokens, prompt_speech_feat, prompt_spk_embedding, generated_ids)
+
+                # Prepare response
+                audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(audio))
+                inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
+                response_sender.send(inference_response)
+                response_sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
+                self.logger.log_info("Sent TRITONSERVER_RESPONSE_COMPLETE_FINAL to end the stream")
             else:
                 generated_ids = next(generated_ids_iter)
-                generated_ids = torch.tensor([generated_ids]).to(self.device)
+                generated_ids = torch.tensor(generated_ids).unsqueeze(0).to(self.device)
                 if generated_ids is None or len(generated_ids) == 0:
                     raise pb_utils.TritonModelException("Generated IDs is None or empty")

@@ -311,9 +326,5 @@ def execute(self, requests):
             inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
             responses.append(inference_response)

-            if self.decoupled:
-                response_sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
-                self.logger.log_info(f"send tritonserver_response_complete_final to end")
-
         if not self.decoupled:
             return responses
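
For context on the pattern this change adopts: in Triton's decoupled (streaming) mode, a Python backend writes results through each request's response sender and terminates the stream with a final flag, instead of returning a response list from execute(). A minimal sketch of that pattern, using a placeholder "waveform" output rather than the repo's full token2wav pipeline:

import numpy as np
import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    def execute(self, requests):
        for request in requests:
            sender = request.get_response_sender()
            # One send() per result; a streaming TTS model would call this
            # once per synthesized audio chunk.
            waveform = np.zeros((1, 16000), dtype=np.float32)  # placeholder audio
            out = pb_utils.Tensor("waveform", waveform)
            sender.send(pb_utils.InferenceResponse(output_tensors=[out]))
            # Close the stream; decoupled models must not return responses
            # from execute().
            sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
        return None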

runtime/triton_trtllm/requirements.txt

Lines changed: 2 additions & 1 deletion
@@ -10,4 +10,5 @@ wget
 librosa
 pyworld
 openai-whisper
-tritonclient
+tritonclient
+modelscope
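
modelscope is pulled in for the model download step: stage 0 of run.sh below fetches the CosyVoice2-0.5B checkpoint with the modelscope CLI.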

runtime/triton_trtllm/run.sh

Lines changed: 32 additions & 25 deletions
@@ -1,25 +1,31 @@
 
 export CUDA_VISIBLE_DEVICES=0
-export PYTHONPATH=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice:$PYTHONPATH
-export PYTHONPATH=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice/third_party/Matcha-TTS:$PYTHONPATH
+cosyvoice_path=/workspace/CosyVoice
+export PYTHONPATH=${cosyvoice_path}:$PYTHONPATH
+export PYTHONPATH=${cosyvoice_path}/third_party/Matcha-TTS:$PYTHONPATH
 stage=$1
 stop_stage=$2
 
-huggingface_model_local_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/cosyvoice2_llm
-model_scope_model_local_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/CosyVoice2-0.5B
+huggingface_model_local_dir=./cosyvoice2_llm
+model_scope_model_local_dir=./CosyVoice2-0.5B
 trt_dtype=bfloat16
-trt_weights_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/trt_weights_${trt_dtype}
-trt_engines_dir=/home/scratch.yuekaiz_wwfo_1/tts/cosyvoice/trt_engines_${trt_dtype}
+trt_weights_dir=./trt_weights_${trt_dtype}
+trt_engines_dir=./trt_engines_${trt_dtype}
 
 model_repo=./model_repo_cosyvoice2
 
-if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
-  echo " "
-  huggingface-cli download --local-dir cosyvoice2_llm yuekai/cosyvoice2_llm
-  modelscope download --model iic/CosyVoice2-0.5B --local_dir ./CosyVoice2-0.5B/
-  git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git
-  cd CosyVoice
+if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
+  echo "Cloning CosyVoice"
+  git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git $cosyvoice_path
+  cd $cosyvoice_path
   git submodule update --init --recursive
+  cd runtime/triton_trtllm
+fi
+
+if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
+  echo "Downloading CosyVoice2-0.5B"
+  huggingface-cli download --local-dir $huggingface_model_local_dir yuekai/cosyvoice2_llm
+  modelscope download --model iic/CosyVoice2-0.5B --local_dir $model_scope_model_local_dir
 fi
 
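Usage note, inferred from the [ $stage -le N ] && [ $stop_stage -ge N ] guards: the script's two positional arguments select an inclusive range of stages to run, so with the renumbered stages a typical end-to-end run looks like these hypothetical invocations:

bash run.sh -1 -1   # clone CosyVoice into $cosyvoice_path
bash run.sh 0 2     # download models, build and test the TRT-LLM engines, create the model repo
bash run.sh 3 3     # start the Triton server (blocks; run the clients from another shell)
bash run.sh 4 4     # single-request HTTP test
bash run.sh 5 5     # gRPC benchmark client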

@@ -35,17 +41,15 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
     --max_batch_size 16 \
     --max_num_tokens 32768 \
     --gemm_plugin $trt_dtype || exit 1
-fi
 
-if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
   echo "Testing TensorRT engines"
-  python3 ./test_llm.py --input_text "你好,请问你叫什么?" \
+  python3 ./scripts/test_llm.py --input_text "你好,请问你叫什么?" \
     --tokenizer_dir $huggingface_model_local_dir \
     --top_k 50 --top_p 0.95 --temperature 0.8 \
     --engine_dir=$trt_engines_dir || exit 1
 fi
 
-if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
   echo "Creating model repository"
   rm -rf $model_repo
   mkdir -p $model_repo
@@ -71,28 +75,31 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
 
 fi
 
-if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
-
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
+  echo "Starting Triton server"
   tritonserver --model-repository $model_repo
 fi
 
-if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
-  echo "Testing TensorRT engines"
+if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
+  echo "Running single-request HTTP test"
   python3 client_http.py \
-    --reference-audio ./prompt_audio.wav \
+    --reference-audio ./assets/prompt_audio.wav \
     --reference-text "吃燕窝就选燕之屋,本节目由26年专注高品质燕窝的燕之屋冠名播出。豆奶牛奶换着喝,营养更均衡,本节目由豆本豆豆奶特约播出。" \
     --target-text "身临其境,换新体验。塑造开源语音合成新范式,让智能语音更自然。" \
     --model-name cosyvoice2
 fi
 
-if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
-  echo "Running benchmark client"
+if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
+  echo "Running gRPC benchmark client"
   num_task=4
+  # set mode=streaming when decoupled=True
+  # set mode=offline when decoupled=False
+  mode=offline
   python3 client_grpc.py \
     --server-addr localhost \
     --model-name cosyvoice2 \
     --num-tasks $num_task \
-    --mode offline \
+    --mode $mode \
     --huggingface-dataset yuekai/seed_tts_cosy2 \
-    --log-dir ./log_concurrent_tasks_${num_task}_offline_bls_4_${trt_dtype}
+    --log-dir ./log_concurrent_tasks_${num_task}_${mode}_bls_4_${trt_dtype}
 fi
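
The mode flag must match the server side: streaming only works when the model's config.pbtxt sets model_transaction_policy { decoupled: True }, which is the decoupled path fixed in model.py above. For reference, a minimal sketch of driving a decoupled endpoint with tritonclient's gRPC streaming API; the single target_text input is a simplification (the real cosyvoice2 model also takes reference audio and text), and this is not the repo's client_grpc.py:

import queue

import numpy as np
import tritonclient.grpc as grpcclient

responses = queue.Queue()

def callback(result, error):
    # Invoked once per streamed response; errors are delivered here too.
    responses.put(error if error is not None else result)

client = grpcclient.InferenceServerClient(url="localhost:8001")
client.start_stream(callback=callback)

# Simplified single text input (see caveat above).
text = np.array([["身临其境,换新体验。"]], dtype=object)
inp = grpcclient.InferInput("target_text", text.shape, "BYTES")
inp.set_data_from_numpy(text)

client.async_stream_infer(model_name="cosyvoice2", inputs=[inp], request_id="0")
first = responses.get()  # blocks until the first chunk (or an error) arrives
client.stop_stream()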
