Commit 87c8971

fix: max_tokens change to max_completion_tokens (#68)
1 parent 09f5221 commit 87c8971

File tree: 1 file changed (+10 −0 lines)

src/providers/openai/provider.rs

Lines changed: 10 additions & 0 deletions
@@ -25,6 +25,16 @@ impl From<ChatCompletionRequest> for OpenAIChatCompletionRequest {
     fn from(mut base: ChatCompletionRequest) -> Self {
         let reasoning_effort = base.reasoning.as_ref().and_then(|r| r.to_openai_effort());
 
+        // Handle max_completion_tokens logic - use max_completion_tokens if provided and > 0,
+        // otherwise fall back to max_tokens
+        base.max_completion_tokens = match (base.max_completion_tokens, base.max_tokens) {
+            (Some(v), _) if v > 0 => Some(v),
+            (_, Some(v)) if v > 0 => Some(v),
+            _ => None,
+        };
+
+        base.max_tokens = None;
+
         // Remove reasoning field from base request since OpenAI uses reasoning_effort
         base.reasoning = None;
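For illustration, the following is a minimal, standalone sketch of the same fallback rule. The struct name Request, the function normalize, and the u32 field types are hypothetical stand-ins; only the field names max_completion_tokens and max_tokens and the match logic come from the diff above.

// Stripped-down stand-in for the request type; only the two fields the diff touches.
struct Request {
    max_completion_tokens: Option<u32>,
    max_tokens: Option<u32>,
}

// Same fallback rule as the diff: prefer a positive max_completion_tokens,
// otherwise fall back to a positive max_tokens, otherwise leave the field unset.
fn normalize(mut req: Request) -> Request {
    req.max_completion_tokens = match (req.max_completion_tokens, req.max_tokens) {
        (Some(v), _) if v > 0 => Some(v),
        (_, Some(v)) if v > 0 => Some(v),
        _ => None,
    };
    // Mirror the diff: max_tokens is cleared after the decision is made.
    req.max_tokens = None;
    req
}

fn main() {
    // max_completion_tokens wins when both are set and positive.
    let a = normalize(Request { max_completion_tokens: Some(512), max_tokens: Some(128) });
    assert_eq!(a.max_completion_tokens, Some(512));
    assert_eq!(a.max_tokens, None);

    // A zero max_completion_tokens falls back to max_tokens.
    let b = normalize(Request { max_completion_tokens: Some(0), max_tokens: Some(128) });
    assert_eq!(b.max_completion_tokens, Some(128));

    // Neither value positive leaves the field unset.
    let c = normalize(Request { max_completion_tokens: None, max_tokens: Some(0) });
    assert_eq!(c.max_completion_tokens, None);

    println!("fallback behaves as expected");
}

The effect of the change is that requests specifying only the older max_tokens field still produce a usable limit, while the outgoing OpenAI request carries it under max_completion_tokens.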
