From 68a20ca55a1c1b378e95cd3ad8346c565763db2a Mon Sep 17 00:00:00 2001
From: Elias Hohl
Date: Sat, 11 Oct 2025 23:51:00 +0200
Subject: [PATCH] Fix "No call message found" bug with gpt-oss-120b when using
 responses API

---
 .../openai/langchain_openai/chat_models/base.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 8303242ebbc8c..ca2ee2f5cdd33 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -4117,7 +4117,7 @@ def _construct_lc_result_from_responses_api(
                 {"type": "refusal", "refusal": content.refusal, "id": output.id}
             )
         elif output.type == "function_call":
-            content_blocks.append(output.model_dump(exclude_none=True, mode="json"))
+            content_blocks.append(output.model_dump(exclude_none=False, mode="json"))
             try:
                 args = json.loads(output.arguments, strict=False)
                 error = None
@@ -4142,7 +4142,7 @@ def _construct_lc_result_from_responses_api(
             }
             invalid_tool_calls.append(tool_call)
         elif output.type == "custom_tool_call":
-            content_blocks.append(output.model_dump(exclude_none=True, mode="json"))
+            content_blocks.append(output.model_dump(exclude_none=False, mode="json"))
             tool_call = {
                 "type": "tool_call",
                 "name": output.name,
@@ -4161,7 +4161,7 @@ def _construct_lc_result_from_responses_api(
             "mcp_approval_request",
             "image_generation_call",
         ):
-            content_blocks.append(output.model_dump(exclude_none=True, mode="json"))
+            content_blocks.append(output.model_dump(exclude_none=False, mode="json"))
 
     # Workaround for parsing structured output in the streaming case.
     # from openai import OpenAI
@@ -4290,7 +4290,7 @@ def _advance(output_idx: int, sub_idx: int | None = None) -> None:
                 # Appears to be a breaking change in openai==1.82.0
                 annotation = chunk.annotation
             else:
-                annotation = chunk.annotation.model_dump(exclude_none=True, mode="json")
+                annotation = chunk.annotation.model_dump(exclude_none=False, mode="json")
 
             content.append(
                 {
@@ -4362,7 +4362,7 @@ def _advance(output_idx: int, sub_idx: int | None = None) -> None:
         "image_generation_call",
     ):
         _advance(chunk.output_index)
-        tool_output = chunk.item.model_dump(exclude_none=True, mode="json")
+        tool_output = chunk.item.model_dump(exclude_none=False, mode="json")
         tool_output["index"] = current_index
         content.append(tool_output)
     elif (
@@ -4370,7 +4370,7 @@ def _advance(output_idx: int, sub_idx: int | None = None) -> None:
         and chunk.item.type == "custom_tool_call"
     ):
         _advance(chunk.output_index)
-        tool_output = chunk.item.model_dump(exclude_none=True, mode="json")
+        tool_output = chunk.item.model_dump(exclude_none=False, mode="json")
         tool_output["index"] = current_index
         content.append(tool_output)
         tool_call_chunks.append(
@@ -4395,7 +4395,7 @@ def _advance(output_idx: int, sub_idx: int | None = None) -> None:
     elif chunk.type == "response.output_item.added" and chunk.item.type == "reasoning":
         _advance(chunk.output_index)
         current_sub_index = 0
-        reasoning = chunk.item.model_dump(exclude_none=True, mode="json")
+        reasoning = chunk.item.model_dump(exclude_none=False, mode="json")
         reasoning["index"] = current_index
         content.append(reasoning)
     elif chunk.type == "response.reasoning_summary_part.added":