Skip to content

Commit 59ec0eb

Browse files
authored
Merge branch 'main' into fix-voice-stt-task-cleanup
2 parents 43d79b1 + 4bc33e3 commit 59ec0eb

File tree

4 files changed

+173
-0
lines changed

4 files changed

+173
-0
lines changed

.github/workflows/tests.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@ jobs:
2222
enable-cache: true
2323
- name: Install dependencies
2424
run: make sync
25+
- name: Verify formatting
26+
run: make format-check
2527
- name: Run lint
2628
run: make lint
2729

src/agents/extensions/models/litellm_model.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -326,6 +326,23 @@ async def _fetch_response(
326326
)
327327

328328
reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
329+
# Enable developers to pass non-OpenAI compatible reasoning_effort data like "none"
330+
# Priority order:
331+
# 1. model_settings.reasoning.effort
332+
# 2. model_settings.extra_body["reasoning_effort"]
333+
# 3. model_settings.extra_args["reasoning_effort"]
334+
if (
335+
reasoning_effort is None # Unset in model_settings
336+
and isinstance(model_settings.extra_body, dict)
337+
and "reasoning_effort" in model_settings.extra_body
338+
):
339+
reasoning_effort = model_settings.extra_body["reasoning_effort"]
340+
if (
341+
reasoning_effort is None # Unset in both model_settings and model_settings.extra_body
342+
and model_settings.extra_args
343+
and "reasoning_effort" in model_settings.extra_args
344+
):
345+
reasoning_effort = model_settings.extra_args["reasoning_effort"]
329346

330347
stream_options = None
331348
if stream and model_settings.include_usage is not None:
@@ -343,6 +360,9 @@ async def _fetch_response(
343360
if model_settings.extra_args:
344361
extra_kwargs.update(model_settings.extra_args)
345362

363+
# Prevent duplicate reasoning_effort kwargs when it was promoted to a top-level argument.
364+
extra_kwargs.pop("reasoning_effort", None)
365+
346366
ret = await litellm.acompletion(
347367
model=self.model,
348368
messages=converted_messages,

tests/models/test_kwargs_functionality.py

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -176,3 +176,41 @@ async def fake_acompletion(model, messages=None, **kwargs):
176176

177177
# Should work without error and include regular parameters
178178
assert captured["temperature"] == 0.3
179+
180+
181+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_reasoning_effort_falls_back_to_extra_args(monkeypatch):
    """A reasoning_effort supplied only via extra_args is promoted to a top-level kwarg.

    Regression coverage for
    https://github.com/openai/openai-agents-python/issues/1764.
    """
    seen: dict[str, object] = {}

    async def stub_acompletion(model, messages=None, **kwargs):
        # Record everything forwarded to litellm and hand back a minimal response.
        seen.update(kwargs)
        reply = Message(role="assistant", content="test response")
        return ModelResponse(
            choices=[Choices(index=0, message=reply)],
            usage=Usage(0, 0, 0),
        )

    monkeypatch.setattr(litellm, "acompletion", stub_acompletion)

    extra = {"reasoning_effort": "none", "custom_param": "custom_value"}
    settings = ModelSettings(extra_args=dict(extra))

    await LitellmModel(model="test-model").get_response(
        system_instructions=None,
        input="test input",
        model_settings=settings,
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    )

    # The effort was hoisted out of extra_args into the litellm call itself,
    # while the remaining extra_args still pass through untouched.
    assert seen["reasoning_effort"] == "none"
    assert seen["custom_param"] == "custom_value"
    # The caller's settings object must not be mutated by the promotion.
    assert settings.extra_args == {"reasoning_effort": "none", "custom_param": "custom_value"}

tests/models/test_litellm_extra_body.py

Lines changed: 113 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,3 +42,116 @@ async def fake_acompletion(model, messages=None, **kwargs):
4242
)
4343

4444
assert {"cached_content": "some_cache", "foo": 123}.items() <= captured.items()
45+
46+
47+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_extra_body_reasoning_effort_is_promoted(monkeypatch):
    """A reasoning_effort placed in extra_body becomes the top-level parameter.

    Regression coverage for
    https://github.com/openai/openai-agents-python/issues/1764.
    """
    recorded: dict[str, object] = {}

    async def stub_acompletion(model, messages=None, **kwargs):
        # Capture the kwargs litellm receives; return a minimal assistant reply.
        recorded.update(kwargs)
        reply = Message(role="assistant", content="ok")
        return ModelResponse(
            choices=[Choices(index=0, message=reply)],
            usage=Usage(0, 0, 0),
        )

    monkeypatch.setattr(litellm, "acompletion", stub_acompletion)

    settings = ModelSettings(
        extra_body={"reasoning_effort": "none", "cached_content": "some_cache"}
    )

    await LitellmModel(model="test-model").get_response(
        system_instructions=None,
        input=[],
        model_settings=settings,
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    )

    # Promoted value arrives top-level; unrelated extra_body keys still flow through.
    assert recorded["reasoning_effort"] == "none"
    assert recorded["cached_content"] == "some_cache"
    # The caller's extra_body dict is left exactly as provided.
    assert settings.extra_body == {"reasoning_effort": "none", "cached_content": "some_cache"}
82+
83+
84+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_reasoning_effort_prefers_model_settings(monkeypatch):
    """An explicit ModelSettings.reasoning wins over a reasoning_effort in extra_body."""
    from openai.types.shared import Reasoning

    recorded: dict[str, object] = {}

    async def stub_acompletion(model, messages=None, **kwargs):
        # Capture the kwargs litellm receives; return a minimal assistant reply.
        recorded.update(kwargs)
        reply = Message(role="assistant", content="ok")
        return ModelResponse(
            choices=[Choices(index=0, message=reply)],
            usage=Usage(0, 0, 0),
        )

    monkeypatch.setattr(litellm, "acompletion", stub_acompletion)

    settings = ModelSettings(
        reasoning=Reasoning(effort="low"),
        extra_body={"reasoning_effort": "high"},
    )

    await LitellmModel(model="test-model").get_response(
        system_instructions=None,
        input=[],
        model_settings=settings,
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    )

    # The explicit reasoning setting takes precedence; extra_body is not consumed.
    assert recorded["reasoning_effort"] == "low"
    assert settings.extra_body == {"reasoning_effort": "high"}
120+
121+
122+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_extra_body_reasoning_effort_overrides_extra_args(monkeypatch):
    """When both extra_body and extra_args carry reasoning_effort, extra_body wins.

    Regression coverage for
    https://github.com/openai/openai-agents-python/issues/1764.
    """
    recorded: dict[str, object] = {}

    async def stub_acompletion(model, messages=None, **kwargs):
        # Capture the kwargs litellm receives; return a minimal assistant reply.
        recorded.update(kwargs)
        reply = Message(role="assistant", content="ok")
        return ModelResponse(
            choices=[Choices(index=0, message=reply)],
            usage=Usage(0, 0, 0),
        )

    monkeypatch.setattr(litellm, "acompletion", stub_acompletion)

    settings = ModelSettings(
        extra_body={"reasoning_effort": "none"},
        extra_args={"reasoning_effort": "low", "custom_param": "custom"},
    )

    await LitellmModel(model="test-model").get_response(
        system_instructions=None,
        input=[],
        model_settings=settings,
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    )

    # extra_body's value is the one promoted; the extra_args duplicate is dropped
    # from the call, but other extra_args keys still pass through.
    assert recorded["reasoning_effort"] == "none"
    assert recorded["custom_param"] == "custom"
    # The caller's extra_args dict itself is never mutated.
    assert settings.extra_args == {"reasoning_effort": "low", "custom_param": "custom"}

0 commit comments

Comments
 (0)