
Feature Request: Support all AgentHooks when Agent.as_tool() #2036

@hayescode

Description

When running the Manager pattern, the sub-agents I run as tools do not trigger the on_tool_start or on_tool_end methods of their AgentHooks. The documentation does not mention this limitation, so I'm not sure whether this is a bug report or a feature request. Maybe related to #1057.
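For context, my rough mental model of Agent.as_tool() (a simplified sketch, not the actual SDK source; the helper name and details below are assumptions for illustration) is that it wraps the sub-agent in an ordinary function tool that starts a nested Runner.run() without forwarding any run-level hooks, so the outer run's hooks never see what happens inside the nested run:

from agents import Agent, Runner, function_tool
from agents.run_context import RunContextWrapper

# Hypothetical reconstruction for illustration only -- not the SDK's
# actual implementation of Agent.as_tool().
def as_tool_sketch(agent: Agent, tool_name: str, tool_description: str):
    @function_tool(name_override=tool_name, description_override=tool_description)
    async def run_agent(ctx: RunContextWrapper, input: str) -> str:
        # Nested run: no `hooks=` is passed through here, so run-level hooks
        # from the outer run do not observe the sub-agent's tool calls.
        result = await Runner.run(agent, input, context=ctx.context)
        return str(result.final_output)

    return run_agent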

####### AgentHooks ########

from typing import Any, Optional

from agents import AgentHooks
from agents.agent import Agent
from agents.items import ModelResponse, TResponseInputItem
from agents.run_context import RunContextWrapper
from agents.tool import Tool

class TestAgentHooks(AgentHooks):

    async def on_start(self, context: RunContextWrapper, agent: Agent) -> None:
        """Called before the agent is invoked. Called each time the running agent is changed to this
        agent."""
        print(f"### AgentHook: on_start ({agent.name}) Agent {agent.name} starting...")

    async def on_end(
        self,
        context: RunContextWrapper,
        agent: Agent,
        output: Any,
    ) -> None:
        """Called when the agent produces a final output."""
        print(f"### AgentHook: on_end Agent ({agent.name}) finished with output: {output}")

    async def on_handoff(
        self,
        context: RunContextWrapper,
        agent: Agent,
        source: Agent,
    ) -> None:
        """Called when the agent is being handed off to. The `source` is the agent that is handing
        off to this agent."""
        print(f"### AgentHook: on_handoff Agent({agent.name}) is being handed off to from {source.name}...")

    async def on_tool_start(
        self,
        context: RunContextWrapper,
        agent: Agent,
        tool: Tool,
    ) -> None:
        """Called concurrently with tool invocation."""
        print(f"### AgentHook: on_tool_start Agent {agent.name} is invoking tool {tool.name}... Tool Context: {context.context}")

    async def on_tool_end(
        self,
        context: RunContextWrapper,
        agent: Agent,
        tool: Tool,
        result: str,
    ) -> None:
        """Called after a tool is invoked."""
        print(f"### AgentHook: on_tool_end Agent ({agent.name})  finished invoking tool {tool.name} with result: {result}")

    async def on_llm_start(
        self,
        context: RunContextWrapper,
        agent: Agent,
        system_prompt: Optional[str],
        input_items: list[TResponseInputItem],
    ) -> None:
        """Called immediately before the agent issues an LLM call."""
        print(f"### AgentHook: on_llm_start Agent ({agent.name}) is about to call LLM with system prompt: {system_prompt} and input items: {input_items}")

    async def on_llm_end(
        self,
        context: RunContextWrapper,
        agent: Agent,
        response: ModelResponse,
    ) -> None:
        """Called immediately after the agent receives the LLM response."""
        print(f"### AgentHook: on_llm_end Agent ({agent.name}) received LLM response: {response}")

####### Agents ########

from agents import CodeInterpreterTool, HostedMCPTool, OpenAIResponsesModel, RunConfig, Runner

# openai_client, AzureDevOps, and agent_context are defined elsewhere in my
# application and are omitted here.
coding_agent = Agent(
    name="Coding Agent",
    instructions="You love doing math.",
    model=OpenAIResponsesModel(model="gpt-4.1", openai_client=openai_client),
    tools=[CodeInterpreterTool(tool_config={"type": "code_interpreter", "container": {"type": "auto"}})],
    hooks=TestAgentHooks(),
)

devops_mcp_agent = Agent(
    name="DevOps MCP Agent",
    instructions="You are responsible for managing DevOps pipelines and MCP configurations.",
    tools=[HostedMCPTool(tool_config=AzureDevOps.tool_definition())],
    model=OpenAIResponsesModel(model="gpt-4.1", openai_client=openai_client),
    hooks=TestAgentHooks(),
)

orchestrator = Agent(
    name="Orchestrator",
    instructions="Call the relevant tools when specialized expertise is needed.",
    model=OpenAIResponsesModel(model="gpt-4.1", openai_client=openai_client),
    tools=[
        coding_agent.as_tool(
            tool_name="code_interpreter_agent",
            tool_description="Perform calculations and data analysis using code.",
        ),
        devops_mcp_agent.as_tool(
            tool_name="devops_mcp_agent",
            tool_description="Manage DevOps pipelines and MCP configurations.",
        ),
    ],
    hooks=TestAgentHooks(),
)


stream_result = Runner.run_streamed(
    orchestrator,
    "Write and run some python to give me the result of 589725*27363/7.23474.",
    context=agent_context,
    run_config=RunConfig(trace_include_sensitive_data=True, tracing_disabled=False),
)

# Drain the stream (run inside an async function).
async for event in stream_result.stream_events():
    pass

This is the result. I can see the on_llm_* hooks and the agent-level on_start/on_end hooks fire, but not the sub-agent's on_tool_* hooks.

### AgentHook: on_start (Orchestrator) Agent Orchestrator starting...
### AgentHook: on_llm_start Agent (Orchestrator) is about to call LLM with system prompt: Call the relevant tools when specialized expertise is needed. and input items: [{'content': 'Write and run some python to give me the result of 589725*27363/7.23474.', 'role': 'user'}]
### AgentHook: on_llm_end Agent (Orchestrator) received LLM response: ModelResponse(output=[ResponseFunctionToolCall(arguments='{"input":"result = 589725 * 27363 / 7.23474\\nresult"}', call_id='call_moMTYk41AvtNZ1eXaa3lQxVV', name='code_interpreter_agent', type='function_call', id='fc_0d3f5cda170b02ae00690927651ba0819684ae7baebe84bd7a', status='completed')], usage=Usage(requests=1, input_tokens=167, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=33, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=200), response_id='resp_0d3f5cda170b02ae0069092764d6d8819698ce15cfa6a2876f')
### AgentHook: on_tool_start Agent (Orchestrator) is invoking tool code_interpreter_agent...
### AgentHook: on_start (Coding Agent) Agent Coding Agent starting...
### AgentHook: on_llm_start Agent (Coding Agent) is about to call LLM with system prompt: You love doing math. 

### THIS IS WHERE I'M EXPECTING on_tool_start for Agent (Coding Agent) 
### THIS IS WHERE I'M EXPECTING on_tool_end for Agent (Coding Agent) 

### AgentHook: on_llm_end Agent (Coding Agent) received LLM response: ModelResponse(output=[ResponseCodeInterpreterToolCall(id='ci_07f8ca8d3798d8ed0069092767686c819381ca4407d3c64f44', code='# Calculate the result of the given expression\nresult = 589725 * 27363 / 7.23474\nresult', container_id='cntr_690927660b0c819097e29c2794c956f603aaf67281e4765a', outputs=None, status='completed', type='code_interpreter_call'), ResponseOutputMessage(id='msg_07f8ca8d3798d8ed00690927687cc88193a6eda7fa80d34b07', content=[ResponseOutputText(annotations=[], text='The result of the expression \\( 589725 \\times 27363 \\div 7.23474 \\) is approximately **2,230,438,851.29**.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], usage=Usage(requests=1, input_tokens=426, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=71, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=497), response_id='resp_07f8ca8d3798d8ed00690927656e4c8193a0f25882da6ce3fc')
### AgentHook: on_end Agent (Coding Agent) finished with output: The result of the expression \( 589725 \times 27363 \div 7.23474 \) is approximately **2,230,438,851.29**.
### AgentHook: on_tool_end Agent (Orchestrator) finished invoking tool code_interpreter_agent with result: The result of the expression \( 589725 \times 27363 \div 7.23474 \) is approximately **2,230,438,851.29**.
### AgentHook: on_llm_start Agent (Orchestrator) is about to call LLM with system prompt: Call the relevant tools when specialized expertise is needed. and input items: [{'content': 'Write and run some python to give me the result of 589725*27363/7.23474.', 'role': 'user'}, {'arguments': '{"input":"result = 589725 * 27363 / 7.23474\\nresult"}', 'call_id': 'call_moMTYk41AvtNZ1eXaa3lQxVV', 'name': 'code_interpreter_agent', 'type': 'function_call', 'id': 'fc_0d3f5cda170b02ae00690927651ba0819684ae7baebe84bd7a', 'status': 'completed'}, {'call_id': 'call_moMTYk41AvtNZ1eXaa3lQxVV', 'output': 'The result of the expression \\( 589725 \\times 27363 \\div 7.23474 \\) is approximately **2,230,438,851.29**.', 'type': 'function_call_output'}]
### AgentHook: on_llm_end Agent (Orchestrator) received LLM response: ModelResponse(output=[ResponseOutputMessage(id='msg_0d3f5cda170b02ae0069092769a9288196a608df25aeedeef4', content=[ResponseOutputText(annotations=[], text='The result of \\( 589725 \\times 27363 \\div 7.23474 \\) is approximately **2,230,438,851.29**.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], usage=Usage(requests=1, input_tokens=247, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=37, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=284), response_id='resp_0d3f5cda170b02ae006909276959d0819684d08ffba1f7ab9a')
### AgentHook: on_end Agent (Orchestrator) finished with output: The result of \( 589725 \times 27363 \div 7.23474 \) is approximately **2,230,438,851.29**.
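A possible stopgap, sketched under the assumption that Runner.run() accepts an explicit hooks= RunHooks argument (the names NestedRunHooks and code_interpreter_agent_tool below are mine, not SDK API): skip as_tool() and hand-write the function tool, which at least gives a way to pass run-level hooks into the nested run:

from agents import Runner, RunHooks, function_tool
from agents.run_context import RunContextWrapper

class NestedRunHooks(RunHooks):
    # Run-level tool hooks for the nested run. Assumption: these fire for
    # tools invoked by the sub-agent during the nested run.
    async def on_tool_start(self, context, agent, tool) -> None:
        print(f"### RunHook: on_tool_start Agent {agent.name} is invoking tool {tool.name}...")

    async def on_tool_end(self, context, agent, tool, result: str) -> None:
        print(f"### RunHook: on_tool_end Agent {agent.name} finished tool {tool.name} with result: {result}")

@function_tool(
    name_override="code_interpreter_agent",
    description_override="Perform calculations and data analysis using code.",
)
async def code_interpreter_agent_tool(ctx: RunContextWrapper, input: str) -> str:
    # Hand-rolled replacement for coding_agent.as_tool(...): unlike as_tool(),
    # this lets us forward hooks into the nested run explicitly.
    result = await Runner.run(
        coding_agent,
        input,
        context=ctx.context,
        hooks=NestedRunHooks(),
    )
    return str(result.final_output)

Even with that, I'm not sure the Coding Agent's on_tool_* would fire here: CodeInterpreterTool is a hosted tool that executes server-side inside the model response, so the SDK may never "invoke" it locally at all. If so, having as_tool() forward hooks might not be enough on its own for this case.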
