diff --git a/CHANGELOG.md b/CHANGELOG.md
index b33e557a0b..b03cbad7a8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,8 @@
 
 - Agent Bridge: Consolidate bridged tools implementation into the existing sandbox model proxy service (eliminate Python requirement for using bridged tools).
 - Anthropic: Correctly replay reasoning when sourced from Inspect cache.
+- OpenAI Compatible: Don't ever send the `background` parameter, as it is OpenAI service-specific.
+- OpenAI Compatible: Added support for disabling reasoning history emulation.
 - Grok: Correctly replay tool calling errors in message history.
 - VLLM and SGLang: Don't require API key environment variable to be set when running in local mode.
 - Google: Support `minimal` and `medium` reasoning effort levels for Gemini 3 Flash.
@@ -13,7 +15,6 @@
 - Inspect View: Scale ANSI display in messages view to preserve row/column layout without wrapping.
 - Inspect View: Render custom tool view when viewing messages.
 - Bugfix: Prevent component not found error during Human Agent transition.
-- OpenAI Compatible: Added support for disabling reasoning history emulation.
 
 ## 0.3.159 (03 January 2026)
 
diff --git a/src/inspect_ai/model/_providers/openai_compatible.py b/src/inspect_ai/model/_providers/openai_compatible.py
index 821ad4c7f1..062beb71a5 100644
--- a/src/inspect_ai/model/_providers/openai_compatible.py
+++ b/src/inspect_ai/model/_providers/openai_compatible.py
@@ -161,7 +161,7 @@ async def generate(
             tools=tools,
             tool_choice=tool_choice,
             config=config,
-            background=False,
+            background=None,
             service_tier=None,
             prompt_cache_key=NOT_GIVEN,
             prompt_cache_retention=NOT_GIVEN,
diff --git a/src/inspect_ai/model/_providers/openai_responses.py b/src/inspect_ai/model/_providers/openai_responses.py
index f5fe3a8b37..6098011540 100644
--- a/src/inspect_ai/model/_providers/openai_responses.py
+++ b/src/inspect_ai/model/_providers/openai_responses.py
@@ -69,14 +69,14 @@ async def generate_responses(
     handle_bad_request: Callable[[APIStatusError], ModelOutput | Exception] | None = None,
 ) -> ModelOutput | tuple[ModelOutput | Exception, ModelCall]:
 
-    # batch mode and background are incompatible
-    if batcher:
-        background = False
-
     # background in extra_body should be applied
     if background is None and config.extra_body:
         background = config.extra_body.pop("background", None)
 
+    # batch mode and background are incompatible
+    if batcher:
+        background = None
+
     # allocate request_id (so we can see it from ModelCall)
     request_id = http_hooks.start_request()
 
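
A minimal sketch of the `background` resolution order that the `openai_responses.py` hunk establishes: an explicit value wins, then a value passed via `extra_body` is consumed, and batch mode finally forces the parameter back to unset (`None`) rather than `False`, so it is never serialized for services that don't support it. `resolve_background`, its parameters, and the `batching` flag are hypothetical stand-ins for illustration (the real code operates on `config.extra_body` and a `batcher` object); this is not the library's API.

```python
from typing import Any


def resolve_background(
    background: bool | None,
    extra_body: dict[str, Any] | None,
    batching: bool,
) -> bool | None:
    # apply background from extra_body only when not explicitly specified;
    # pop it so the raw value isn't also sent verbatim in the request body
    if background is None and extra_body:
        background = extra_body.pop("background", None)
    # batch mode and background are incompatible: force the parameter back
    # to None (omitted from the request) rather than sending background=False
    if batching:
        background = None
    return background


# batch mode drops background even when it was requested via extra_body
extra: dict[str, Any] = {"background": True}
assert resolve_background(None, extra, batching=True) is None
assert "background" not in extra  # consumed, so never forwarded to the service
```

Leaving the parameter unset rather than sending `False` is what the changelog entry refers to: `background` is OpenAI service-specific, so OpenAI-compatible endpoints should never receive it, which is also why `openai_compatible.py` now passes `background=None` instead of `background=False`.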