Skip to content

Commit 1dfcbe4

Browse files
committed Mar 3, 2025
rewrite ollama format
1 parent 22ae204 commit 1dfcbe4

File tree

4 files changed

+163
-117
lines changed

4 files changed

+163
-117
lines changed
 

‎convert/entity.go

+58-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,64 @@ type ClientResponse struct {
2626
Error string `json:"error"`
2727
}
2828

29+
// Message represents a single message in the conversation.
2930
type Message struct {
30-
Role string `json:"role"`
31+
// Role indicates the role of the message sender (e.g., "system", "user", "assistant").
32+
Role string `json:"role"`
33+
// Content contains the actual text of the message.
3134
Content string `json:"content"`
3235
}
36+
37+
// Response represents the response payload received from the OpenAI API.
38+
type Response struct {
39+
// Id is the unique identifier for the response.
40+
Id string `json:"id"`
41+
// Object specifies the type of object returned (e.g., "response").
42+
Object string `json:"object"`
43+
// Created is the timestamp of when the response was generated.
44+
Created int `json:"created"`
45+
// Model indicates the model used to generate the response.
46+
Model string `json:"model"`
47+
// SystemFingerprint is a fingerprint of the system generating the response.
48+
SystemFingerprint string `json:"system_fingerprint"`
49+
// Choices contains the list of choices generated by the model.
50+
Choices []ResponseChoice `json:"choices"`
51+
// Usage provides information about token usage for the request and response.
52+
Usage Usage `json:"usage"`
53+
Error Error `json:"error"`
54+
}
55+
56+
type Error struct {
57+
Message string `json:"message"`
58+
Type string `json:"type"`
59+
}
60+
61+
// ResponseChoice represents a single choice in the response.
62+
type ResponseChoice struct {
63+
// Index is the position of the choice in the response list.
64+
Index int `json:"index"`
65+
// Message contains the generated message for this choice.
66+
Message Message `json:"message"`
67+
// Logprobs provides log probability data for the tokens (if available).
68+
Logprobs interface{} `json:"logprobs"`
69+
// FinishReason indicates why the generation stopped (e.g., "length", "stop").
70+
FinishReason string `json:"finish_reason"`
71+
}
72+
73+
// Usage provides information about the token counts for the request and response.
74+
type Usage struct {
75+
// PromptTokens is the number of tokens used in the input prompt.
76+
PromptTokens int `json:"prompt_tokens"`
77+
// CompletionTokens is the number of tokens generated in the response.
78+
CompletionTokens int `json:"completion_tokens"`
79+
// TotalTokens is the total number of tokens used (prompt + completion).
80+
TotalTokens int `json:"total_tokens"`
81+
// CompletionTokensDetails provides a breakdown of completion tokens.
82+
CompletionTokensDetails CompletionTokensDetails `json:"completion_tokens_details"`
83+
}
84+
85+
// CompletionTokensDetails provides detailed information about completion tokens.
86+
type CompletionTokensDetails struct {
87+
// ReasoningTokens is the number of tokens used for reasoning in the response.
88+
ReasoningTokens int `json:"reasoning_tokens"`
89+
}

‎drivers/ai-provider/ollama/message.go

+13-15
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,5 @@
11
package ollama
22

3-
import "time"
4-
53
// ClientRequest represents the request payload sent to the Ollama API.
64
type ClientRequest struct {
75
// Messages is a list of messages forming the conversation history.
@@ -16,16 +14,16 @@ type Message struct {
1614
Content string `json:"content"`
1715
}
1816

19-
type Response struct {
20-
Model string `json:"model"`
21-
CreatedAt time.Time `json:"created_at"`
22-
Message *Message `json:"message"`
23-
Done bool `json:"done"`
24-
TotalDuration int64 `json:"total_duration"`
25-
LoadDuration int `json:"load_duration"`
26-
PromptEvalCount int `json:"prompt_eval_count"`
27-
PromptEvalDuration int `json:"prompt_eval_duration"`
28-
EvalCount int `json:"eval_count"`
29-
EvalDuration int64 `json:"eval_duration"`
30-
Error string `json:"error"`
31-
}
17+
//type Response struct {
18+
// Model string `json:"model"`
19+
// CreatedAt time.Time `json:"created_at"`
20+
// Message *Message `json:"message"`
21+
// Done bool `json:"done"`
22+
// TotalDuration int64 `json:"total_duration"`
23+
// LoadDuration int `json:"load_duration"`
24+
// PromptEvalCount int `json:"prompt_eval_count"`
25+
// PromptEvalDuration int `json:"prompt_eval_duration"`
26+
// EvalCount int `json:"eval_count"`
27+
// EvalDuration int64 `json:"eval_duration"`
28+
// Error string `json:"error"`
29+
//}

‎drivers/ai-provider/ollama/mode.go

+78-87
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ type Chat struct {
2626
// NewChat initializes and returns a new Chat instance.
2727
func NewChat() *Chat {
2828
return &Chat{
29-
endPoint: "/api/chat",
29+
endPoint: "/v1/chat/completions",
3030
}
3131
}
3232

@@ -85,58 +85,58 @@ func (c *Chat) RequestConvert(ctx eocontext.EoContext, extender map[string]inter
8585

8686
// SetProvider the modified body in the HTTP context.
8787
httpContext.Proxy().Body().SetRaw("application/json", body)
88-
httpContext.Response().AppendStreamFunc(c.streamFunc())
88+
//httpContext.Response().AppendStreamFunc(c.streamFunc())
8989
return nil
9090
}
9191

92-
func (c *Chat) streamFunc() http_context.StreamFunc {
93-
return func(ctx http_context.IHttpContext, p []byte) ([]byte, error) {
94-
data := eosc.NewBase[Response]()
95-
err := json.Unmarshal(p, data)
96-
if err != nil {
97-
return nil, err
98-
}
99-
status := ctx.Response().StatusCode()
100-
switch status {
101-
case 200:
102-
// Calculate the token consumption for a successful request.
103-
usage := data.Config
104-
if usage.Done {
105-
convert.SetAIStatusNormal(ctx)
106-
convert.SetAIModelInputToken(ctx, usage.PromptEvalCount)
107-
convert.SetAIModelOutputToken(ctx, usage.EvalCount)
108-
convert.SetAIModelTotalToken(ctx, usage.PromptEvalCount+usage.EvalCount)
109-
}
110-
case 404:
111-
convert.SetAIStatusInvalid(ctx)
112-
case 429:
113-
convert.SetAIStatusExceeded(ctx)
114-
}
115-
116-
// Prepare the response body for the client.
117-
responseBody := &convert.ClientResponse{}
118-
resp := data.Config
119-
if resp.Message != nil {
120-
responseBody.Message = &convert.Message{
121-
Role: resp.Message.Role,
122-
Content: resp.Message.Content,
123-
}
124-
if resp.Done {
125-
responseBody.FinishReason = convert.FinishStop
126-
}
127-
} else {
128-
responseBody.Code = -1
129-
responseBody.Error = "response message is nil"
130-
}
131-
132-
// Marshal the modified response body back into JSON.
133-
body, err := json.Marshal(responseBody)
134-
if err != nil {
135-
return nil, err
136-
}
137-
return body, nil
138-
}
139-
}
92+
//func (c *Chat) streamFunc() http_context.StreamFunc {
93+
// return func(ctx http_context.IHttpContext, p []byte) ([]byte, error) {
94+
// data := eosc.NewBase[Response]()
95+
// err := json.Unmarshal(p, data)
96+
// if err != nil {
97+
// return nil, err
98+
// }
99+
// status := ctx.Response().StatusCode()
100+
// switch status {
101+
// case 200:
102+
// // Calculate the token consumption for a successful request.
103+
// usage := data.Config
104+
// if usage.Done {
105+
// convert.SetAIStatusNormal(ctx)
106+
// convert.SetAIModelInputToken(ctx, usage.PromptEvalCount)
107+
// convert.SetAIModelOutputToken(ctx, usage.EvalCount)
108+
// convert.SetAIModelTotalToken(ctx, usage.PromptEvalCount+usage.EvalCount)
109+
// }
110+
// case 404:
111+
// convert.SetAIStatusInvalid(ctx)
112+
// case 429:
113+
// convert.SetAIStatusExceeded(ctx)
114+
// }
115+
//
116+
// // Prepare the response body for the client.
117+
// responseBody := &convert.ClientResponse{}
118+
// resp := data.Config
119+
// if resp.Message != nil {
120+
// responseBody.Message = &convert.Message{
121+
// Role: resp.Message.Role,
122+
// Content: resp.Message.Content,
123+
// }
124+
// if resp.Done {
125+
// responseBody.FinishReason = convert.FinishStop
126+
// }
127+
// } else {
128+
// responseBody.Code = -1
129+
// responseBody.Error = "response message is nil"
130+
// }
131+
//
132+
// // Marshal the modified response body back into JSON.
133+
// body, err := json.Marshal(responseBody)
134+
// if err != nil {
135+
// return nil, err
136+
// }
137+
// return body, nil
138+
// }
139+
//}
140140

141141
// ResponseConvert converts the response body for the Chat mode.
142142
// It processes the response to ensure it conforms to the expected format and encoding.
@@ -147,58 +147,49 @@ func (c *Chat) ResponseConvert(ctx eocontext.EoContext) error {
147147
return err
148148
}
149149

150-
status := httpContext.Response().StatusCode()
151-
switch status {
152-
case 200:
153-
convert.SetAIStatusNormal(ctx)
154-
}
155-
if httpContext.Response().IsBodyStream() {
156-
157-
return nil
158-
}
159150
// Retrieve the response body.
160151
body := httpContext.Response().GetBody()
161152
if body == nil {
162153
return nil
163154
}
164155

165156
// Parse the response body into a base configuration.
166-
data := eosc.NewBase[Response]()
157+
data := eosc.NewBase[convert.Response]()
167158
err = json.Unmarshal(body, data)
168159
if err != nil {
169160
return err
170161
}
171162
switch httpContext.Response().StatusCode() {
172163
case 200:
173164
// Calculate the token consumption for a successful request.
174-
usage := data.Config
165+
usage := data.Config.Usage
175166
convert.SetAIStatusNormal(ctx)
176-
convert.SetAIModelInputToken(ctx, usage.PromptEvalCount)
177-
convert.SetAIModelOutputToken(ctx, usage.EvalCount)
178-
convert.SetAIModelTotalToken(ctx, usage.PromptEvalCount+usage.EvalCount)
179-
}
180-
181-
// Prepare the response body for the client.
182-
responseBody := &convert.ClientResponse{}
183-
resp := data.Config
184-
if resp.Message != nil {
185-
responseBody.Message = &convert.Message{
186-
Role: resp.Message.Role,
187-
Content: resp.Message.Content,
188-
}
189-
responseBody.FinishReason = convert.FinishStop
190-
} else {
191-
responseBody.Code = -1
192-
responseBody.Error = resp.Error
193-
}
194-
195-
// Marshal the modified response body back into JSON.
196-
body, err = json.Marshal(responseBody)
197-
if err != nil {
198-
return err
199-
}
200-
201-
httpContext.Response().SetBody(body)
167+
convert.SetAIModelInputToken(ctx, usage.PromptTokens)
168+
convert.SetAIModelOutputToken(ctx, usage.CompletionTokens)
169+
convert.SetAIModelTotalToken(ctx, usage.TotalTokens)
170+
}
171+
//
172+
//// Prepare the response body for the client.
173+
//responseBody := &convert.ClientResponse{}
174+
//resp := data.Config
175+
//if resp.Choices != nil {
176+
// responseBody.Message = &convert.Message{
177+
// Role: resp.Message.Role,
178+
// Content: resp.Message.Content,
179+
// }
180+
// responseBody.FinishReason = convert.FinishStop
181+
//} else {
182+
// responseBody.Code = -1
183+
// responseBody.Error = resp.Error
184+
//}
185+
//
186+
//// Marshal the modified response body back into JSON.
187+
//body, err = json.Marshal(responseBody)
188+
//if err != nil {
189+
// return err
190+
//}
191+
//
192+
//httpContext.Response().SetBody(body)
202193

203194
// SetProvider the modified response in the HTTP context.
204195
return nil

‎drivers/ai-provider/openAI/mode.go

+14-14
Original file line numberDiff line numberDiff line change
@@ -146,22 +146,22 @@ func (c *Chat) ResponseConvert(ctx eocontext.EoContext) error {
146146
convert.SetAIStatusInvalid(ctx)
147147
}
148148

149-
// Prepare the response body for the client.
150-
responseBody := &convert.ClientResponse{}
151-
if len(data.Config.Choices) > 0 {
152-
msg := data.Config.Choices[0]
153-
responseBody.Message = &convert.Message{
154-
Role: msg.Message.Role,
155-
Content: msg.Message.Content,
156-
}
157-
responseBody.FinishReason = msg.FinishReason
158-
} else {
159-
responseBody.Code = -1
160-
responseBody.Error = data.Config.Error.Message
161-
}
149+
//// Prepare the response body for the client.
150+
//responseBody := &convert.ClientResponse{}
151+
//if len(data.Config.Choices) > 0 {
152+
// msg := data.Config.Choices[0]
153+
// responseBody.Message = &convert.Message{
154+
// Role: msg.Message.Role,
155+
// Content: msg.Message.Content,
156+
// }
157+
// responseBody.FinishReason = msg.FinishReason
158+
//} else {
159+
// responseBody.Code = -1
160+
// responseBody.Error = data.Config.Error.Message
161+
//}
162162

163163
// Marshal the modified response body back into JSON.
164-
body, err = json.Marshal(responseBody)
164+
body, err = json.Marshal(data)
165165
if err != nil {
166166
return err
167167
}

0 commit comments

Comments
 (0)
Please sign in to comment.