
Commit a1697ac

feat: replace based chat configuration (#33)
1 parent fc93374 commit a1697ac

File tree: 4 files changed (+191, -41 lines)


.rubocop.yml

Lines changed: 7 additions & 9 deletions
@@ -10,20 +10,18 @@ Style/StringLiterals:
 Style/StringLiteralsInInterpolation:
   EnforcedStyle: double_quotes
 
+Metrics/MethodLength:
+  Max: 20
+Metrics/ClassLength:
+  Enabled: false
+
+RSpec/MultipleDescribes:
+  Enabled: false
 RSpec/MultipleExpectations:
   Max: 10
-
 RSpec/ExampleLength:
   Max: 20
-
 RSpec/MultipleMemoizedHelpers:
   Max: 15
-
 RSpec/SpecFilePathFormat:
   Enabled: false
-
-Metrics/MethodLength:
-  Max: 20
-
-RSpec/MultipleDescribes:
-  Enabled: false

lib/agents/runner.rb

Lines changed: 56 additions & 29 deletions
@@ -55,6 +55,7 @@ class Runner
     DEFAULT_MAX_TURNS = 10
 
     class MaxTurnsExceeded < StandardError; end
+    class AgentNotFoundError < StandardError; end
 
     # Create a thread-safe agent runner for multi-agent conversations.
     # The first agent becomes the default entry point for new conversations.
@@ -91,14 +92,15 @@ def run(starting_agent, input, context: {}, registry: {}, max_turns: DEFAULT_MAX
       current_turn = 0
 
       # Create chat and restore conversation history
-      chat = create_chat(current_agent, context_wrapper)
+      chat = RubyLLM::Chat.new(model: current_agent.model)
+      configure_chat_for_agent(chat, current_agent, context_wrapper, replace: false)
       restore_conversation_history(chat, context_wrapper)
 
       loop do
         current_turn += 1
         raise MaxTurnsExceeded, "Exceeded maximum turns: #{max_turns}" if current_turn > max_turns
 
-        # Get response from LLM (Extended Chat handles tool execution with handoff detection)
+        # Get response from LLM (RubyLLM handles tool execution with halting based handoff detection)
         result = if current_turn == 1
                    # Emit agent thinking event for initial message
                    context_wrapper.callback_manager.emit_agent_thinking(current_agent.name, input)
@@ -118,14 +120,14 @@ def run(starting_agent, input, context: {}, registry: {}, max_turns: DEFAULT_MAX
           # Validate that the target agent is in our registry
           # This prevents handoffs to agents that weren't explicitly provided
           unless registry[next_agent.name]
-            puts "[Agents] Warning: Handoff to unregistered agent '#{next_agent.name}', continuing with current agent"
-            # Return the halt content as the final response
             save_conversation_state(chat, context_wrapper, current_agent)
+            error = AgentNotFoundError.new("Handoff failed: Agent '#{next_agent.name}' not found in registry")
             return RunResult.new(
-              output: response.content,
+              output: nil,
              messages: MessageExtractor.extract_messages(chat, current_agent),
              usage: context_wrapper.usage,
-              context: context_wrapper.context
+              context: context_wrapper.context,
+              error: error
            )
          end
 
@@ -139,9 +141,8 @@ def run(starting_agent, input, context: {}, registry: {}, max_turns: DEFAULT_MAX
          current_agent = next_agent
          context_wrapper.context[:current_agent] = next_agent.name
 
-          # Create new chat for new agent with restored history
-          chat = create_chat(current_agent, context_wrapper)
-          restore_conversation_history(chat, context_wrapper)
+          # Reconfigure existing chat for new agent - preserves conversation history automatically
+          configure_chat_for_agent(chat, current_agent, context_wrapper, replace: true)
 
          # Force the new agent to respond to the conversation context
          # This ensures the user gets a response from the new agent
@@ -201,6 +202,11 @@ def run(starting_agent, input, context: {}, registry: {}, max_turns: DEFAULT_MAX
 
     private
 
+    # Creates a deep copy of context data for thread safety.
+    # Preserves conversation history array structure while avoiding agent mutation.
+    #
+    # @param context [Hash] The context to copy
+    # @return [Hash] Thread-safe deep copy of the context
     def deep_copy_context(context)
       # Handle deep copying for thread safety
       context.dup.tap do |copied|
@@ -211,6 +217,11 @@ def deep_copy_context(context)
       end
     end
 
+    # Restores conversation history from context into RubyLLM chat.
+    # Converts stored message hashes back into RubyLLM::Message objects with proper content handling.
+    #
+    # @param chat [RubyLLM::Chat] The chat instance to restore history into
+    # @param context_wrapper [RunContext] Context containing conversation history
     def restore_conversation_history(chat, context_wrapper)
       history = context_wrapper.context[:conversation_history] || []
 
@@ -228,18 +239,15 @@ def restore_conversation_history(chat, context_wrapper)
           content: content
         )
         chat.add_message(message)
-      rescue StandardError => e
-        # Continue with partial history on error
-        # TODO: Remove this, and let the error propagate up the call stack
-        puts "[Agents] Failed to restore message: #{e.message}\n#{e.backtrace.join("\n")}"
       end
-    rescue StandardError => e
-      # If history restoration completely fails, continue with empty history
-      # TODO: Remove this, and let the error propagate up the call stack
-      puts "[Agents] Failed to restore conversation history: #{e.message}"
-      context_wrapper.context[:conversation_history] = []
     end
 
+    # Saves current conversation state from RubyLLM chat back to context for persistence.
+    # Maintains conversation continuity across agent handoffs and process boundaries.
+    #
+    # @param chat [RubyLLM::Chat] The chat instance to extract state from
+    # @param context_wrapper [RunContext] Context to save state into
+    # @param current_agent [Agents::Agent] The currently active agent
     def save_conversation_state(chat, context_wrapper, current_agent)
       # Extract messages from chat
       messages = MessageExtractor.extract_messages(chat, current_agent)
@@ -254,14 +262,39 @@ def save_conversation_state(chat, context_wrapper, current_agent)
       context_wrapper.context.delete(:pending_handoff)
     end
 
-    def create_chat(agent, context_wrapper)
+    # Configures a RubyLLM chat instance with agent-specific settings.
+    # Uses RubyLLM's replace option to swap agent context while preserving conversation history during handoffs.
+    #
+    # @param chat [RubyLLM::Chat] The chat instance to configure
+    # @param agent [Agents::Agent] The agent whose configuration to apply
+    # @param context_wrapper [RunContext] Thread-safe context wrapper
+    # @param replace [Boolean] Whether to replace existing configuration (true for handoffs, false for initial setup)
+    # @return [RubyLLM::Chat] The configured chat instance
+    def configure_chat_for_agent(chat, agent, context_wrapper, replace: false)
       # Get system prompt (may be dynamic)
       system_prompt = agent.get_system_prompt(context_wrapper)
 
-      # Create standard RubyLLM chat
-      chat = RubyLLM::Chat.new(model: agent.model)
-
       # Combine all tools - both handoff and regular tools need wrapping
+      all_tools = build_agent_tools(agent, context_wrapper)
+
+      # Switch model if different (important for handoffs between agents using different models)
+      chat.with_model(agent.model) if replace
+
+      # Configure chat with instructions, temperature, tools, and schema
+      chat.with_instructions(system_prompt, replace: replace) if system_prompt
+      chat.with_temperature(agent.temperature) if agent.temperature
+      chat.with_tools(*all_tools, replace: replace)
+      chat.with_schema(agent.response_schema) if agent.response_schema
+
+      chat
+    end
+
+    # Builds thread-safe tool wrappers for an agent's tools and handoff tools.
+    #
+    # @param agent [Agents::Agent] The agent whose tools to wrap
+    # @param context_wrapper [RunContext] Thread-safe context wrapper for tool execution
+    # @return [Array<ToolWrapper>] Array of wrapped tools ready for RubyLLM
+    def build_agent_tools(agent, context_wrapper)
       all_tools = []
 
       # Add handoff tools
@@ -275,13 +308,7 @@ def create_chat(agent, context_wrapper)
         all_tools << ToolWrapper.new(tool, context_wrapper)
       end
 
-      # Configure chat with instructions, temperature, tools, and schema
-      chat.with_instructions(system_prompt) if system_prompt
-      chat.with_temperature(agent.temperature) if agent.temperature
-      chat.with_tools(*all_tools) if all_tools.any?
-      chat.with_schema(agent.response_schema) if agent.response_schema
-
-      chat
+      all_tools
     end
   end
 end
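
A minimal sketch of the pattern this diff adopts, using only the RubyLLM calls visible above (RubyLLM::Chat.new, with_model, with_instructions, with_temperature, and the replace: option); the model names, prompts, and temperature below are illustrative, not taken from the gem:

require "ruby_llm"

# One chat object per conversation; its message history carries across handoffs.
chat = RubyLLM::Chat.new(model: "gpt-4o")

# Initial setup for the first agent (the replace: false path above).
chat.with_instructions("You are the triage agent.")
chat.with_temperature(0.7)

# On handoff, the same chat is reconfigured for the next agent.
# replace: true swaps the system prompt (and, in the runner, the tool set via
# with_tools(*tools, replace: true)) instead of appending, so the transcript
# stays intact while only the agent-specific configuration changes.
chat.with_model("gpt-4o")
chat.with_instructions("You are the billing specialist.", replace: true)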

spec/agents/runner_spec.rb

Lines changed: 81 additions & 3 deletions
@@ -196,6 +196,50 @@
         expect(result.output).to eq("Hello, I'm the specialist. How can I help?")
         expect(result.context[:current_agent]).to eq("HandoffAgent")
       end
+
+      it "returns error when handoff to unregistered agent is attempted" do
+        # Only register the triage agent, not the handoff target
+        registry = { "TriageAgent" => agent_with_handoffs }
+
+        # Mock only the first tool call that triggers handoff
+        stub_request(:post, "https://api.openai.com/v1/chat/completions")
+          .to_return(
+            status: 200,
+            body: {
+              id: "chatcmpl-handoff",
+              object: "chat.completion",
+              created: 1_677_652_288,
+              model: "gpt-4o",
+              choices: [{
+                index: 0,
+                message: {
+                  role: "assistant",
+                  content: nil,
+                  tool_calls: [{
+                    id: "call_handoff",
+                    type: "function",
+                    function: {
+                      name: "handoff_to_handoffagent",
+                      arguments: "{}"
+                    }
+                  }]
+                },
+                finish_reason: "tool_calls"
+              }],
+              usage: { prompt_tokens: 20, completion_tokens: 5, total_tokens: 25 }
+            }.to_json,
+            headers: { "Content-Type" => "application/json" }
+          )
+
+        result = runner.run(agent_with_handoffs, "I need specialist help", registry: registry)
+
+        expect(result.failed?).to be true
+        expect(result.error).to be_a(Agents::Runner::AgentNotFoundError)
+        expect(result.error.message).to eq("Handoff failed: Agent 'HandoffAgent' not found in registry")
+        expect(result.output).to be_nil
+        expect(result.context[:current_agent]).to eq("TriageAgent")
+        expect(result.context[:pending_handoff]).to be_nil # Should clear pending handoff
+      end
     end
 
     context "when max_turns is exceeded" do
@@ -204,8 +248,9 @@
         mock_chat = instance_double(RubyLLM::Chat)
         mock_response = instance_double(RubyLLM::Message, tool_call?: true)
 
+        allow(RubyLLM::Chat).to receive(:new).and_return(mock_chat)
         allow(runner).to receive_messages(
-          create_chat: mock_chat,
+          configure_chat_for_agent: mock_chat,
          restore_conversation_history: nil,
          save_conversation_state: nil
        )
@@ -224,7 +269,7 @@
     context "when standard error occurs" do
       it "handles errors gracefully and returns error result" do
         # Mock chat creation to raise an error
-        allow(runner).to receive(:create_chat).and_raise(StandardError, "Test error")
+        allow(RubyLLM::Chat).to receive(:new).and_raise(StandardError, "Test error")
 
         result = runner.run(agent, "Error test")
 
@@ -272,8 +317,9 @@
         mock_halt = instance_double(RubyLLM::Tool::Halt, content: "Processing complete", is_a?: true)
 
         allow(mock_halt).to receive(:is_a?).with(RubyLLM::Tool::Halt).and_return(true)
+        allow(RubyLLM::Chat).to receive(:new).and_return(mock_chat)
         allow(runner).to receive_messages(
-          create_chat: mock_chat,
+          configure_chat_for_agent: mock_chat,
          restore_conversation_history: nil,
          save_conversation_state: nil
        )
@@ -368,5 +414,37 @@
         end
       end
     end
+
+    context "when agent has regular tools" do
+      let(:agent_with_tools) do
+        instance_double(Agents::Agent,
+                        name: "ToolAgent",
+                        model: "gpt-4o",
+                        tools: [test_tool],
+                        handoff_agents: [],
+                        temperature: 0.7,
+                        response_schema: nil,
+                        get_system_prompt: "You are an agent with tools")
+      end
+
+      it "wraps regular tools in ToolWrapper" do
+        # Spy on ToolWrapper constructor
+        allow(Agents::ToolWrapper).to receive(:new).and_call_original
+
+        # Stub a simple response that doesn't use tools
+        stub_simple_chat("I have tools available")
+
+        runner.run(
+          agent_with_tools,
+          "Hello",
+          context: {},
+          registry: { "ToolAgent" => agent_with_tools },
+          max_turns: 1
+        )
+
+        # Verify ToolWrapper was called with the regular tool
+        expect(Agents::ToolWrapper).to have_received(:new).with(test_tool, anything)
+      end
+    end
   end
 end
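
As a usage note, a hedged sketch of how calling code might consume the failure path these specs pin down; it assumes only the RunResult readers exercised above (failed?, error, output) and a result obtained from Runner#run as in the specs:

# result = runner.run(agent_with_handoffs, "I need specialist help", registry: registry)

if result.failed?
  case result.error
  when Agents::Runner::AgentNotFoundError
    # The handoff target was missing from the registry; conversation state was
    # still saved, so the caller can retry with a corrected registry.
    warn "Handoff failed: #{result.error.message}"
  else
    warn "Run failed: #{result.error.message}"
  end
else
  puts result.output
end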

spec/agents/tool_context_spec.rb

Lines changed: 47 additions & 0 deletions
@@ -48,4 +48,51 @@
       expect(tool_context.retry_count).to eq(2)
     end
   end
+
+  describe "#state" do
+    context "when state exists in context" do
+      let(:existing_state) { { customer_id: 456, customer_name: "John" } }
+      let(:context_hash) { { user_id: 123, session: "test", state: existing_state } }
+
+      it "returns the existing state hash" do
+        result = tool_context.state
+        expect(result).to eq(existing_state)
+      end
+
+      it "allows modifications to the state" do
+        tool_context.state[:new_key] = "new_value"
+        expect(tool_context.state[:new_key]).to eq("new_value")
+      end
+    end
+
+    context "when state does not exist in context" do
+      let(:context_hash) { { user_id: 123, session: "test" } }
+
+      it "initializes state as empty hash" do
+        result = tool_context.state
+        expect(result).to eq({})
+      end
+
+      it "persists the initialized state in context" do
+        tool_context.state
+        expect(context_hash[:state]).to eq({})
+      end
+
+      it "allows adding to the initialized state" do
+        tool_context.state[:customer_id] = 789
+        expect(tool_context.state[:customer_id]).to eq(789)
+        expect(context_hash[:state][:customer_id]).to eq(789)
+      end
+    end
+
+    context "when state is nil in context" do
+      let(:context_hash) { { user_id: 123, session: "test", state: nil } }
+
+      it "replaces nil with empty hash" do
+        result = tool_context.state
+        expect(result).to eq({})
+        expect(context_hash[:state]).to eq({})
+      end
+    end
+  end
 end
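
These examples pin down the contract of ToolContext#state. A minimal implementation consistent with them could look like the sketch below; it is illustrative only and assumes a context reader that returns the underlying context hash, which the real class may expose differently:

module Agents
  class ToolContext
    # Lazily-initialized state bucket shared by tools during a run.
    # Returns the existing hash, or stores and returns {} when the :state key
    # is missing or nil, so later writes persist in the underlying context
    # hash (matching the specs above).
    def state
      context[:state] ||= {}
    end
  end
end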
