Skip to content

Commit 18fceff

Browse files
authored
Merge pull request #16523 from BerriAI/litellm_add_openai_metadata_field
Enable OpenAI metadata passthrough in the request
2 parents 9120a02 + 25ac10b commit 18fceff

File tree

4 files changed

+70
-5
lines changed

4 files changed

+70
-5
lines changed

docs/my-website/docs/providers/openai.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,18 @@ response = completion(
2929
)
3030
```
3131

32+
:::info Metadata passthrough (preview)
33+
When `litellm.enable_preview_features = True`, LiteLLM forwards the user-supplied `metadata` values to OpenAI, stripping litellm-internal keys (such as `hidden_params`) from the payload.
34+
35+
```python
36+
completion(
37+
model="gpt-4o",
38+
messages=[{"role": "user", "content": "hi"}],
39+
    metadata={"custom_meta_key": "value"},
40+
)
41+
```
42+
:::
43+
3244
### Usage - LiteLLM Proxy Server
3345

3446
Here's how to call OpenAI models with the LiteLLM Proxy Server

litellm/main.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@
105105
ProviderConfigManager,
106106
Usage,
107107
_get_model_info_helper,
108-
add_openai_metadata,
108+
get_requester_metadata,
109109
add_provider_specific_params_to_optional_params,
110110
async_mock_completion_streaming_obj,
111111
convert_to_model_response_object,
@@ -2086,10 +2086,12 @@ def completion( # type: ignore # noqa: PLR0915
20862086
if extra_headers is not None:
20872087
optional_params["extra_headers"] = extra_headers
20882088

2089-
if litellm.enable_preview_features:
2090-
metadata_payload = add_openai_metadata(metadata)
2091-
if metadata_payload is not None:
2092-
optional_params["metadata"] = metadata_payload
2089+
if (
2090+
litellm.enable_preview_features and metadata is not None
2091+
): # [PREVIEW] allow metadata to be passed to OPENAI
2092+
openai_metadata = get_requester_metadata(metadata)
2093+
if openai_metadata is not None:
2094+
optional_params["metadata"] = openai_metadata
20932095

20942096
## LOAD CONFIG - if set
20952097
config = litellm.OpenAIConfig.get_config()

litellm/utils.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8088,6 +8088,21 @@ def add_openai_metadata(metadata: Optional[Mapping[str, Any]]) -> Optional[Dict[
80888088

80898089
return visible_metadata.copy()
80908090

8091+
def get_requester_metadata(
    metadata: Optional[dict],
) -> Optional[Dict[str, Any]]:
    """Extract the metadata payload that should be forwarded to OpenAI.

    Prefers the nested ``requester_metadata`` dict when it is present and
    survives cleaning; otherwise falls back to cleaning the top-level
    ``metadata`` mapping.

    Args:
        metadata: litellm request metadata; may be ``None`` or empty.

    Returns:
        The cleaned metadata dict to send to OpenAI, or ``None`` when
        there is nothing worth forwarding.
    """
    if not metadata:
        return None

    requester_metadata = metadata.get("requester_metadata")
    if isinstance(requester_metadata, dict):
        cleaned_metadata = add_openai_metadata(requester_metadata)
        if cleaned_metadata:
            return cleaned_metadata

    # No usable requester_metadata — fall back to cleaning the
    # top-level metadata mapping itself.
    cleaned_metadata = add_openai_metadata(metadata)
    if cleaned_metadata:
        return cleaned_metadata

    return None
80918106

80928107
def return_raw_request(endpoint: CallTypes, kwargs: dict) -> RawRequestTypedDict:
80938108
"""

tests/llm_translation/test_optional_params.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
get_optional_params,
2424
get_optional_params_embeddings,
2525
get_optional_params_image_gen,
26+
get_requester_metadata,
2627
)
2728

2829
## get_optional_params_embeddings
@@ -67,6 +68,41 @@ def test_anthropic_optional_params(stop_sequence, expected_count):
6768
assert len(optional_params) == expected_count
6869

6970

71+
def test_get_requester_metadata_returns_none_for_empty():
    """An empty nested requester_metadata dict yields no payload."""
    result = get_requester_metadata({"requester_metadata": {}})
    assert result is None
74+
75+
76+
@patch("litellm.main.openai_chat_completions.completion")
def test_requester_metadata_forwarded_to_openai(mock_completion):
    """With preview features enabled, values nested under
    ``requester_metadata`` are cleaned and forwarded to OpenAI."""
    mock_completion.return_value = MagicMock()

    metadata = {
        "requester_metadata": {
            "custom_meta_key": "value",
            "hidden_params": "secret",
            "int_value": 123,
        }
    }

    # Save global state up front so the finally-block can restore it.
    original_api_key = litellm.api_key
    original_preview_flag = litellm.enable_preview_features
    litellm.api_key = "sk-test"
    litellm.enable_preview_features = True

    try:
        litellm.completion(
            model="gpt-4o",
            messages=[{"role": "user", "content": "hi"}],
            metadata=metadata,
        )
    finally:
        litellm.api_key = original_api_key
        litellm.enable_preview_features = original_preview_flag

    sent_metadata = mock_completion.call_args.kwargs["optional_params"]["metadata"]
    assert sent_metadata == {"custom_meta_key": "value"}
104+
105+
70106
def test_get_optional_params_with_allowed_openai_params():
71107
"""
72108
Test if use can dynamically pass in allowed_openai_params to override default behavior

0 commit comments

Comments
 (0)