@@ -69,18 +69,31 @@ type FunctionCall struct {
6969 Arguments string `json:"arguments,omitempty"`
7070}
7171
// ChatCompletionResponseFormatType enumerates the output formats the model
// can be asked to produce via the request's `response_format` field.
type ChatCompletionResponseFormatType string

const (
	// ChatCompletionResponseFormatTypeJSONObject requests that the model
	// emit a valid JSON object ("json_object" on the wire).
	ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object"
	// ChatCompletionResponseFormatTypeText requests plain free-form text
	// ("text" on the wire).
	ChatCompletionResponseFormatTypeText ChatCompletionResponseFormatType = "text"
)

// ChatCompletionResponseFormat carries the desired response format type;
// it is serialized as the `response_format` object of a chat completion
// request.
type ChatCompletionResponseFormat struct {
	Type ChatCompletionResponseFormatType `json:"type"`
}
82+
7283// ChatCompletionRequest represents a request structure for chat completion API.
7384type ChatCompletionRequest struct {
74- Model string `json:"model"`
75- Messages []ChatCompletionMessage `json:"messages"`
76- MaxTokens int `json:"max_tokens,omitempty"`
77- Temperature float32 `json:"temperature,omitempty"`
78- TopP float32 `json:"top_p,omitempty"`
79- N int `json:"n,omitempty"`
80- Stream bool `json:"stream,omitempty"`
81- Stop []string `json:"stop,omitempty"`
82- PresencePenalty float32 `json:"presence_penalty,omitempty"`
83- FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
85+ Model string `json:"model"`
86+ Messages []ChatCompletionMessage `json:"messages"`
87+ MaxTokens int `json:"max_tokens,omitempty"`
88+ Temperature float32 `json:"temperature,omitempty"`
89+ TopP float32 `json:"top_p,omitempty"`
90+ N int `json:"n,omitempty"`
91+ Stream bool `json:"stream,omitempty"`
92+ Stop []string `json:"stop,omitempty"`
93+ PresencePenalty float32 `json:"presence_penalty,omitempty"`
94+ ResponseFormat ChatCompletionResponseFormat `json:"response_format,omitempty"`
95+ Seed * int `json:"seed,omitempty"`
96+ FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
8497 // LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string.
8598 // incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
8699 // refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
0 commit comments