Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions openhands-sdk/openhands/sdk/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,13 +181,13 @@ def step(
include=None,
store=False,
add_security_risk_prediction=self._add_security_risk_prediction,
metadata=self.llm.metadata,
extra_body=self.llm.litellm_extra_body,
)
else:
llm_response = self.llm.completion(
messages=_messages,
tools=list(self.tools_map.values()),
extra_body={"metadata": self.llm.metadata},
extra_body=self.llm.litellm_extra_body,
add_security_risk_prediction=self._add_security_risk_prediction,
)
except Exception as e:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ def get_condensation(self, view: View) -> Condensation:

llm_response = self.llm.completion(
messages=messages,
extra_body={"metadata": self.llm.metadata},
extra_body=self.llm.litellm_extra_body,
)
# Extract summary from the LLMResponse message
summary = None
Expand Down
6 changes: 4 additions & 2 deletions openhands-sdk/openhands/sdk/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,10 +225,12 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
"telemetry, and spend tracking."
),
)
metadata: dict[str, Any] = Field(
litellm_extra_body: dict[str, Any] = Field(
default_factory=dict,
description=(
"Additional metadata for the LLM instance. "
"Additional key-value pairs to pass to litellm's extra_body parameter. "
"This is useful for custom inference clusters that need additional "
"metadata for logging, tracking, or routing purposes. "
"Example structure: "
"{'trace_version': '1.0.0', 'tags': ['model:gpt-4', 'agent:my-agent'], "
"'session_id': 'session-123', 'trace_user_id': 'user-456'}"
Expand Down
6 changes: 5 additions & 1 deletion openhands-sdk/openhands/sdk/llm/options/chat_options.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,12 @@ def select_chat_options(
out.pop("tools", None)
out.pop("tool_choice", None)

# Pass through litellm_extra_body if provided
if llm.litellm_extra_body:
out["extra_body"] = llm.litellm_extra_body
# non litellm proxy special-case: keep `extra_body` off unless model requires it
if "litellm_proxy" not in llm.model:
# or user provided it
elif "litellm_proxy" not in llm.model:
out.pop("extra_body", None)

return out
4 changes: 4 additions & 0 deletions openhands-sdk/openhands/sdk/llm/options/responses_options.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,4 +43,8 @@ def select_responses_options(
effort = llm.reasoning_effort or "high"
out["reasoning"] = {"effort": effort, "summary": "detailed"}

# Pass through litellm_extra_body if provided
if llm.litellm_extra_body:
out["extra_body"] = llm.litellm_extra_body

return out
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ def create_completion_result(content: str) -> LLMResponse:
mock_llm.custom_tokenizer = None
mock_llm.base_url = None
mock_llm.reasoning_effort = None
mock_llm.metadata = {}
mock_llm.litellm_extra_body = {}

# Explicitly set pricing attributes required by LLM -> Telemetry wiring
mock_llm.input_cost_per_token = None
Expand Down
87 changes: 87 additions & 0 deletions tests/sdk/llm/test_llm_litellm_extra_body.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
from openhands.sdk.llm import LLM


def test_llm_litellm_extra_body_default():
    """An LLM constructed without litellm_extra_body gets an empty dict."""
    instance = LLM(model="gpt-4o", usage_id="test")
    assert instance.litellm_extra_body == {}


def test_llm_litellm_extra_body_initialization():
    """litellm_extra_body passed at construction time is stored verbatim."""
    payload = dict(
        trace_version="1.0.0",
        tags=["model:gpt-4", "agent:my-agent"],
        session_id="session-123",
        trace_user_id="user-456",
    )
    llm = LLM(model="gpt-4o", usage_id="test", litellm_extra_body=payload)
    assert llm.litellm_extra_body == payload


def test_llm_litellm_extra_body_modification():
    """The litellm_extra_body dict stays mutable after the LLM is built."""
    llm = LLM(model="gpt-4o", usage_id="test")

    # Fresh instance starts with nothing in extra_body.
    assert llm.litellm_extra_body == {}

    # Insert entries after construction and read them back.
    updates = {"custom_key": "custom_value", "session_id": "session-123"}
    for key, value in updates.items():
        llm.litellm_extra_body[key] = value

    assert llm.litellm_extra_body["custom_key"] == "custom_value"
    assert llm.litellm_extra_body["session_id"] == "session-123"
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hah, Sonnet! 😅

This test would be more useful if it verified what we actually send to litellm's completion() call, rather than only checking that the field stores its value.



def test_llm_litellm_extra_body_complex_structure():
    """Nested dicts and lists survive round-tripping through the field."""
    nested_payload = {
        "trace_version": "2.1.0",
        "tags": ["model:claude-3", "agent:coding-agent", "env:production"],
        "session_info": {
            "id": "session-789",
            "user_id": "user-101",
            "created_at": "2024-01-01T00:00:00Z",
        },
        "metrics": {
            "tokens_used": 1500,
            "response_time_ms": 250,
        },
    }
    llm = LLM(
        model="claude-3-5-sonnet",
        usage_id="test",
        litellm_extra_body=nested_payload,
    )
    assert llm.litellm_extra_body == nested_payload

    # Inner structures remain reachable by key, unchanged.
    session_info = llm.litellm_extra_body["session_info"]
    metrics = llm.litellm_extra_body["metrics"]
    assert session_info["id"] == "session-789"
    assert metrics["tokens_used"] == 1500


def test_llm_litellm_extra_body_for_custom_inference():
    """Typical custom-inference-cluster metadata is stored untouched."""
    # Metadata a custom cluster might use for logging, routing, or tracking.
    inference_metadata = {
        "cluster_id": "prod-cluster-1",
        "routing_key": "high-priority",
        "user_tier": "premium",
        "request_id": "req-12345",
        "experiment_id": "exp-abc123",
        "custom_headers": {
            "X-Custom-Auth": "bearer-token",
            "X-Request-Source": "openhands-agent",
        },
    }
    llm = LLM(model="gpt-4o", usage_id="test", litellm_extra_body=inference_metadata)

    # The whole mapping round-trips unchanged.
    assert llm.litellm_extra_body == inference_metadata

    # Spot-check the fields a routing layer would rely on.
    body = llm.litellm_extra_body
    assert body["cluster_id"] == "prod-cluster-1"
    assert body["routing_key"] == "high-priority"
    assert body["custom_headers"]["X-Request-Source"] == "openhands-agent"
57 changes: 0 additions & 57 deletions tests/sdk/llm/test_llm_metadata.py

This file was deleted.

Loading