-
|
I've been playing around with workflows. When running a workflow directly, it is easy to get the outputs. For example, using this workflow: import asyncio
from agent_framework import (
AgentExecutorRequest,
ChatAgent,
ChatMessage,
WorkflowBuilder,
WorkflowContext,
executor,
)
from agent_framework.azure import AzureOpenAIChatClient
from azure.identity import AzureCliCredential
@executor
async def start(messages: list[ChatMessage], ctx: WorkflowContext[AgentExecutorRequest, str]) -> None:
    """Kick off the workflow: emit an initial output, then hand the messages downstream."""
    await ctx.yield_output("Start...")
    # Forward the conversation so the next executor (the writer agent) responds to it.
    request = AgentExecutorRequest(messages=messages, should_respond=True)
    await ctx.send_message(request)
def create_writer_agent() -> ChatAgent:
    """Build the content-writer agent backed by Azure OpenAI (CLI credential)."""
    return AzureOpenAIChatClient(credential=AzureCliCredential()).create_agent(
        name="writer",
        instructions=(
            "You are an excellent content writer. You create new content and edit contents based on the feedback."
        ),
    )
def create_reviewer_agent() -> ChatAgent:
    """Build the content-reviewer agent backed by Azure OpenAI (CLI credential)."""
    return AzureOpenAIChatClient(credential=AzureCliCredential()).create_agent(
        name="reviewer",
        instructions=(
            "You are an excellent content reviewer."
            "Provide actionable feedback to the writer about the provided content."
            "Provide the feedback in the most concise manner possible."
        ),
    )
async def main():
# Build the workflow using the fluent builder.
# Set the start node and connect an edge from writer to reviewer.
workflow = (
WorkflowBuilder()
.register_executor(lambda: start, "start")
.register_agent(create_writer_agent, "writer")
.register_agent(create_reviewer_agent, "reviewer", output_response=True)
.set_start_executor("start")
.add_edge("start", "writer")
.add_edge("writer", "reviewer")
.build()
)I can get the outputs when running the workflow directly: prompt = "Create a slogan for a new electric SUV that is affordable and fun to drive."
res = await workflow.run([ChatMessage(role="user", text=prompt)])
for output in res.get_outputs():
print(f"Workflow Output: {output}")But how do you get the outputs if you run it as an agent? workflow_agent = workflow.as_agent()
thread = workflow_agent.get_new_thread()
res = await workflow_agent.run(prompt, thread=thread)
If you loop through |
Beta Was this translation helpful? Give feedback.
Replies: 2 comments 2 replies
-
|
Hi @cecheta, This behavior was a bug that has been fixed. When running a workflow as an agent via The fix (now on workflow_agent = workflow.as_agent()
thread = workflow_agent.get_new_thread()
res = await workflow_agent.run(prompt, thread=thread)
# res.text now contains all yield_output() values
# res.messages contains each output as a separate ChatMessageFor streaming: async for update in workflow_agent.run_stream(prompt, thread=thread):
print(update.text) # Each yield_output() surfaces as an updateSome other notes:
Please update to the latest version to get this fix. |
Beta Was this translation helpful? Give feedback.
-
|
Hi @moonbox3 , I've just checked I'm on the latest version, but the behaviour you've described isn't what I'm seeing. Here is the code I'm running: Code
import asyncio
from agent_framework import (
AgentExecutorRequest,
ChatAgent,
ChatMessage,
WorkflowBuilder,
WorkflowContext,
executor,
)
from agent_framework.azure import AzureOpenAIChatClient
from azure.identity import DefaultAzureCredential
@executor
async def start(
    messages: list[ChatMessage], ctx: WorkflowContext[AgentExecutorRequest, str]
) -> None:
    """Initial executor: publish a first workflow output, then forward the prompt."""
    # Surface an output before any agent has run.
    await ctx.yield_output("Start...")
    outgoing = AgentExecutorRequest(messages=messages, should_respond=True)
    await ctx.send_message(outgoing)
def create_writer_agent() -> ChatAgent:
    """Construct the writer agent using the default Azure credential chain."""
    client = AzureOpenAIChatClient(credential=DefaultAzureCredential())
    return client.create_agent(
        name="writer",
        instructions=(
            "You are an excellent content writer. You create new content and edit contents based on the feedback."
        ),
    )
def create_reviewer_agent() -> ChatAgent:
    """Construct the reviewer agent using the default Azure credential chain."""
    client = AzureOpenAIChatClient(credential=DefaultAzureCredential())
    return client.create_agent(
        name="reviewer",
        instructions=(
            "You are an excellent content reviewer."
            "Provide actionable feedback to the writer about the provided content."
            "Provide the feedback in the most concise manner possible."
        ),
    )
async def main():
    """Assemble the start -> writer -> reviewer workflow, run it as an agent, and print every message."""
    # Wire the graph: the custom executor feeds the writer, which feeds the reviewer.
    builder = (
        WorkflowBuilder()
        .register_executor(lambda: start, "start")
        .register_agent(create_writer_agent, "writer")
        .register_agent(create_reviewer_agent, "reviewer", output_response=True)
        .set_start_executor("start")
        .add_edge("start", "writer")
        .add_edge("writer", "reviewer")
    )
    workflow = builder.build()
    prompt = "Create a slogan for a new electric SUV that is affordable and fun to drive."
    # Wrap the workflow as an agent and run it on a fresh thread.
    workflow_agent = workflow.as_agent()
    thread = workflow_agent.get_new_thread()
    res = await workflow_agent.run(prompt, thread=thread)
    for msg in res.messages:
        print(f"{msg.role}: {msg.text}")
if __name__ == "__main__":
asyncio.run(main())And this is the output: It seems like there are two messages for the two workflow outputs, but I also get the agent responses from both agent executors, regardless of whether A similar thing happens when streaming: async for update in workflow_agent.run_stream(prompt, thread=thread):
print(update.text)Output
Interestingly, if I use |
Beta Was this translation helpful? Give feedback.
@cecheta I've converted this to an issue to better track it. Please follow along with #2957.