@@ -5,6 +5,7 @@ import { Message } from 'llama-api-client/resources/chat';
 import { CompletionCreateParams } from 'llama-api-client/resources/chat';
 
 const client = new LlamaAPIClient();
+const model = 'Llama-3.3-70B-Instruct';
 
 const tools: CompletionCreateParams.Tool[] = [
   {
@@ -38,18 +39,14 @@ interface getWeatherArgs {
 
 async function run_streaming(): Promise<void> {
   const messages: Message[] = [
-    {
-      role: 'system',
-      content: 'You are a helpful assistant.',
-    },
     {
       role: 'user',
       content: 'Is it raining in Bellevue?',
     },
   ];
 
   const response = await client.chat.completions.create({
-    model: 'Llama-3.3-70B-Instruct',
+    model: model,
     messages: messages,
     tools: tools,
     stream: true,
@@ -78,7 +75,7 @@ async function run_streaming(): Promise<void> {
 
   const completionMessage: Message = {
     role: 'assistant',
-    content: '',
+    content: { type: 'text', text: '' },
     tool_calls: [toolCall],
     stop_reason: stopReason as 'stop' | 'tool_calls' | 'length',
   };
@@ -87,7 +84,6 @@ async function run_streaming(): Promise<void> {
   if (toolCall.function.name === 'get_weather') {
     const getWeatherArgs: getWeatherArgs = JSON.parse(toolCall.function.arguments);
     const toolResult: string = await getWeather(getWeatherArgs.location);
-    toolCall.function.arguments = toolResult;
     messages.push({
       role: 'tool',
       tool_call_id: toolCall.id,
@@ -97,7 +93,7 @@ async function run_streaming(): Promise<void> {
   }
 
   const nextResponse = await client.chat.completions.create({
-    model: 'Llama-3.3-70B-Instruct',
+    model: model,
     messages: messages,
     tools: tools,
     stream: true,
@@ -113,18 +109,14 @@ async function run_streaming(): Promise<void> {
 
 async function run(): Promise<void> {
   const messages: Message[] = [
-    {
-      role: 'system',
-      content: 'You are a helpful assistant.',
-    },
     {
       role: 'user',
       content: 'Is it raining in Bellevue?',
     },
   ];
 
   const response = await client.chat.completions.create({
-    model: 'Llama-3.3-70B-Instruct',
+    model: model,
     messages: messages,
     tools: tools,
     max_completion_tokens: 2048,
@@ -155,7 +147,7 @@ async function run(): Promise<void> {
   }
   // Next Turn
   const nextResponse = await client.chat.completions.create({
-    model: 'Llama-3.3-70B-Instruct',
+    model: model,
     messages: messages,
     tools: tools,
     max_completion_tokens: 2048,
0 commit comments