diff --git a/docs/static/llama-stack-spec.html b/docs/static/llama-stack-spec.html
index 3da721a4e5..349720f4e9 100644
--- a/docs/static/llama-stack-spec.html
+++ b/docs/static/llama-stack-spec.html
@@ -252,15 +252,15 @@
"deprecated": false
}
},
- "/v1/embeddings": {
+ "/v1/conversations": {
"post": {
"responses": {
"200": {
- "description": "An OpenAIEmbeddingsResponse containing the embeddings.",
+ "description": "The created conversation object.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIEmbeddingsResponse"
+ "$ref": "#/components/schemas/Conversation"
}
}
}
@@ -279,16 +279,16 @@
}
},
"tags": [
- "Inference"
+ "Conversations"
],
- "summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
- "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
+ "summary": "Create a conversation.",
+ "description": "Create a conversation.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenaiEmbeddingsRequest"
+ "$ref": "#/components/schemas/CreateConversationRequest"
}
}
},
@@ -297,15 +297,15 @@
"deprecated": false
}
},
- "/v1/files": {
+ "/v1/conversations/{conversation_id}": {
"get": {
"responses": {
"200": {
- "description": "An ListOpenAIFileResponse containing the list of files.",
+ "description": "The conversation object.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenAIFileResponse"
+ "$ref": "#/components/schemas/Conversation"
}
}
}
@@ -324,46 +324,19 @@
}
},
"tags": [
- "Files"
+ "Conversations"
],
- "summary": "Returns a list of files that belong to the user's organization.",
- "description": "Returns a list of files that belong to the user's organization.",
+ "summary": "Get a conversation with the given ID.",
+ "description": "Get a conversation with the given ID.",
"parameters": [
{
- "name": "after",
- "in": "query",
- "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.",
- "required": false,
+ "name": "conversation_id",
+ "in": "path",
+ "description": "The conversation identifier.",
+ "required": true,
"schema": {
"type": "string"
}
- },
- {
- "name": "limit",
- "in": "query",
- "description": "A limit on the number of objects to be returned. Limit can range between 1 and 10,000, and the default is 10,000.",
- "required": false,
- "schema": {
- "type": "integer"
- }
- },
- {
- "name": "order",
- "in": "query",
- "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/Order"
- }
- },
- {
- "name": "purpose",
- "in": "query",
- "description": "Only return files with the given purpose.",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/OpenAIFilePurpose"
- }
}
],
"deprecated": false
@@ -371,11 +344,11 @@
"post": {
"responses": {
"200": {
- "description": "An OpenAIFileObject representing the uploaded file.",
+ "description": "The updated conversation object.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIFileObject"
+ "$ref": "#/components/schemas/Conversation"
}
}
}
@@ -394,49 +367,41 @@
}
},
"tags": [
- "Files"
+ "Conversations"
+ ],
+ "summary": "Update a conversation's metadata with the given ID.",
+ "description": "Update a conversation's metadata with the given ID.",
+ "parameters": [
+ {
+ "name": "conversation_id",
+ "in": "path",
+ "description": "The conversation identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
],
- "summary": "Upload a file that can be used across various endpoints.",
- "description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
- "parameters": [],
"requestBody": {
"content": {
- "multipart/form-data": {
+ "application/json": {
"schema": {
- "type": "object",
- "properties": {
- "file": {
- "type": "string",
- "format": "binary"
- },
- "purpose": {
- "$ref": "#/components/schemas/OpenAIFilePurpose"
- },
- "expires_after": {
- "$ref": "#/components/schemas/ExpiresAfter"
- }
- },
- "required": [
- "file",
- "purpose"
- ]
+ "$ref": "#/components/schemas/UpdateConversationRequest"
}
}
},
"required": true
},
"deprecated": false
- }
- },
- "/v1/files/{file_id}": {
- "get": {
+ },
+ "delete": {
"responses": {
"200": {
- "description": "An OpenAIFileObject containing file information.",
+ "description": "The deleted conversation resource.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIFileObject"
+ "$ref": "#/components/schemas/ConversationDeletedResource"
}
}
}
@@ -455,15 +420,15 @@
}
},
"tags": [
- "Files"
+ "Conversations"
],
- "summary": "Returns information about a specific file.",
- "description": "Returns information about a specific file.",
+ "summary": "Delete a conversation with the given ID.",
+ "description": "Delete a conversation with the given ID.",
"parameters": [
{
- "name": "file_id",
+ "name": "conversation_id",
"in": "path",
- "description": "The ID of the file to use for this request.",
+ "description": "The conversation identifier.",
"required": true,
"schema": {
"type": "string"
@@ -471,15 +436,17 @@
}
],
"deprecated": false
- },
- "delete": {
+ }
+ },
+ "/v1/conversations/{conversation_id}/items": {
+ "get": {
"responses": {
"200": {
- "description": "An OpenAIFileDeleteResponse indicating successful deletion.",
+ "description": "List of conversation items.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIFileDeleteResponse"
+ "$ref": "#/components/schemas/ConversationItemList"
}
}
}
@@ -498,33 +465,118 @@
}
},
"tags": [
- "Files"
+ "Conversations"
],
- "summary": "Delete a file.",
- "description": "Delete a file.",
+ "summary": "List items in the conversation.",
+ "description": "List items in the conversation.",
"parameters": [
{
- "name": "file_id",
+ "name": "conversation_id",
"in": "path",
- "description": "The ID of the file to use for this request.",
+ "description": "The conversation identifier.",
"required": true,
"schema": {
"type": "string"
}
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "An item ID to list items after, used in pagination.",
+            "required": false,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+ "title": "NotGiven",
+ "description": "A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).\nFor example:\n\n```py\ndef get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...\n\n\nget(timeout=1) # 1s timeout\nget(timeout=None) # No timeout\nget() # Default timeout behavior, which may not be statically known at the method definition.\n```"
+ }
+ ]
+ }
+ },
+ {
+ "name": "include",
+ "in": "query",
+ "description": "Specify additional output data to include in the response.",
+            "required": false,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content"
+ ]
+ }
+ },
+ {
+ "type": "object",
+ "title": "NotGiven",
+ "description": "A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).\nFor example:\n\n```py\ndef get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...\n\n\nget(timeout=1) # 1s timeout\nget(timeout=None) # No timeout\nget() # Default timeout behavior, which may not be statically known at the method definition.\n```"
+ }
+ ]
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "A limit on the number of objects to be returned (1-100, default 20).",
+            "required": false,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "object",
+ "title": "NotGiven",
+ "description": "A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).\nFor example:\n\n```py\ndef get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...\n\n\nget(timeout=1) # 1s timeout\nget(timeout=None) # No timeout\nget() # Default timeout behavior, which may not be statically known at the method definition.\n```"
+ }
+ ]
+ }
+ },
+ {
+ "name": "order",
+ "in": "query",
+ "description": "The order to return items in (asc or desc, default desc).",
+            "required": false,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "asc",
+ "desc"
+ ]
+ },
+ {
+ "type": "object",
+ "title": "NotGiven",
+ "description": "A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).\nFor example:\n\n```py\ndef get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...\n\n\nget(timeout=1) # 1s timeout\nget(timeout=None) # No timeout\nget() # Default timeout behavior, which may not be statically known at the method definition.\n```"
+ }
+ ]
+ }
}
],
"deprecated": false
- }
- },
- "/v1/files/{file_id}/content": {
- "get": {
+ },
+ "post": {
"responses": {
"200": {
- "description": "The raw file content as a binary response.",
+ "description": "List of created items.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Response"
+ "$ref": "#/components/schemas/ConversationItemList"
}
}
}
@@ -543,33 +595,43 @@
}
},
"tags": [
- "Files"
+ "Conversations"
],
- "summary": "Returns the contents of the specified file.",
- "description": "Returns the contents of the specified file.",
+ "summary": "Create items in the conversation.",
+ "description": "Create items in the conversation.",
"parameters": [
{
- "name": "file_id",
+ "name": "conversation_id",
"in": "path",
- "description": "The ID of the file to use for this request.",
+ "description": "The conversation identifier.",
"required": true,
"schema": {
"type": "string"
}
}
],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/AddItemsRequest"
+ }
+ }
+ },
+ "required": true
+ },
"deprecated": false
}
},
- "/v1/health": {
+ "/v1/conversations/{conversation_id}/items/{item_id}": {
"get": {
"responses": {
"200": {
- "description": "Health information indicating if the service is operational.",
+ "description": "The conversation item.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/HealthInfo"
+ "$ref": "#/components/schemas/ConversationItem"
}
}
}
@@ -588,23 +650,40 @@
}
},
"tags": [
- "Inspect"
+ "Conversations"
+ ],
+ "summary": "Retrieve a conversation item.",
+ "description": "Retrieve a conversation item.",
+ "parameters": [
+ {
+ "name": "conversation_id",
+ "in": "path",
+ "description": "The conversation identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "item_id",
+ "in": "path",
+ "description": "The item identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
],
- "summary": "Get the current health status of the service.",
- "description": "Get the current health status of the service.",
- "parameters": [],
"deprecated": false
- }
- },
- "/v1/inspect/routes": {
- "get": {
+ },
+ "delete": {
"responses": {
"200": {
- "description": "Response containing information about all available routes.",
+ "description": "The deleted item resource.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListRoutesResponse"
+ "$ref": "#/components/schemas/ConversationItemDeletedResource"
}
}
}
@@ -623,23 +702,42 @@
}
},
"tags": [
- "Inspect"
+ "Conversations"
+ ],
+ "summary": "Delete a conversation item.",
+ "description": "Delete a conversation item.",
+ "parameters": [
+ {
+ "name": "conversation_id",
+ "in": "path",
+ "description": "The conversation identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "item_id",
+ "in": "path",
+ "description": "The item identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
],
- "summary": "List all available API routes with their methods and implementing providers.",
- "description": "List all available API routes with their methods and implementing providers.",
- "parameters": [],
"deprecated": false
}
},
- "/v1/models": {
- "get": {
+ "/v1/embeddings": {
+ "post": {
"responses": {
"200": {
- "description": "A ListModelsResponse.",
+ "description": "An OpenAIEmbeddingsResponse containing the embeddings.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListModelsResponse"
+ "$ref": "#/components/schemas/OpenAIEmbeddingsResponse"
}
}
}
@@ -658,21 +756,103 @@
}
},
"tags": [
- "Models"
+ "Inference"
+ ],
+ "summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
+ "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/OpenaiEmbeddingsRequest"
+ }
+ }
+ },
+ "required": true
+ },
+ "deprecated": false
+ }
+ },
+ "/v1/files": {
+ "get": {
+ "responses": {
+ "200": {
+            "description": "A ListOpenAIFileResponse containing the list of files.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListOpenAIFileResponse"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "Files"
+ ],
+ "summary": "Returns a list of files that belong to the user's organization.",
+ "description": "Returns a list of files that belong to the user's organization.",
+ "parameters": [
+ {
+ "name": "after",
+ "in": "query",
+ "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "A limit on the number of objects to be returned. Limit can range between 1 and 10,000, and the default is 10,000.",
+ "required": false,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "order",
+ "in": "query",
+ "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/Order"
+ }
+ },
+ {
+ "name": "purpose",
+ "in": "query",
+ "description": "Only return files with the given purpose.",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/OpenAIFilePurpose"
+ }
+ }
],
- "summary": "List all models.",
- "description": "List all models.",
- "parameters": [],
"deprecated": false
},
"post": {
"responses": {
"200": {
- "description": "A Model.",
+ "description": "An OpenAIFileObject representing the uploaded file.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Model"
+ "$ref": "#/components/schemas/OpenAIFileObject"
}
}
}
@@ -691,16 +871,32 @@
}
},
"tags": [
- "Models"
+ "Files"
],
- "summary": "Register a model.",
- "description": "Register a model.",
+ "summary": "Upload a file that can be used across various endpoints.",
+ "description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"parameters": [],
"requestBody": {
"content": {
- "application/json": {
+ "multipart/form-data": {
"schema": {
- "$ref": "#/components/schemas/RegisterModelRequest"
+ "type": "object",
+ "properties": {
+ "file": {
+ "type": "string",
+ "format": "binary"
+ },
+ "purpose": {
+ "$ref": "#/components/schemas/OpenAIFilePurpose"
+ },
+ "expires_after": {
+ "$ref": "#/components/schemas/ExpiresAfter"
+ }
+ },
+ "required": [
+ "file",
+ "purpose"
+ ]
}
}
},
@@ -709,15 +905,15 @@
"deprecated": false
}
},
- "/v1/models/{model_id}": {
+ "/v1/files/{file_id}": {
"get": {
"responses": {
"200": {
- "description": "A Model.",
+ "description": "An OpenAIFileObject containing file information.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Model"
+ "$ref": "#/components/schemas/OpenAIFileObject"
}
}
}
@@ -736,15 +932,15 @@
}
},
"tags": [
- "Models"
+ "Files"
],
- "summary": "Get a model by its identifier.",
- "description": "Get a model by its identifier.",
+ "summary": "Returns information about a specific file.",
+ "description": "Returns information about a specific file.",
"parameters": [
{
- "name": "model_id",
+ "name": "file_id",
"in": "path",
- "description": "The identifier of the model to get.",
+ "description": "The ID of the file to use for this request.",
"required": true,
"schema": {
"type": "string"
@@ -756,7 +952,14 @@
"delete": {
"responses": {
"200": {
- "description": "OK"
+ "description": "An OpenAIFileDeleteResponse indicating successful deletion.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/OpenAIFileDeleteResponse"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -772,15 +975,15 @@
}
},
"tags": [
- "Models"
+ "Files"
],
- "summary": "Unregister a model.",
- "description": "Unregister a model.",
+ "summary": "Delete a file.",
+ "description": "Delete a file.",
"parameters": [
{
- "name": "model_id",
+ "name": "file_id",
"in": "path",
- "description": "The identifier of the model to unregister.",
+ "description": "The ID of the file to use for this request.",
"required": true,
"schema": {
"type": "string"
@@ -790,15 +993,15 @@
"deprecated": false
}
},
- "/v1/moderations": {
- "post": {
+ "/v1/files/{file_id}/content": {
+ "get": {
"responses": {
"200": {
- "description": "A moderation object.",
+ "description": "The raw file content as a binary response.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ModerationObject"
+ "$ref": "#/components/schemas/Response"
}
}
}
@@ -817,33 +1020,33 @@
}
},
"tags": [
- "Safety"
+ "Files"
],
- "summary": "Classifies if text and/or image inputs are potentially harmful.",
- "description": "Classifies if text and/or image inputs are potentially harmful.",
- "parameters": [],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/RunModerationRequest"
- }
+ "summary": "Returns the contents of the specified file.",
+ "description": "Returns the contents of the specified file.",
+ "parameters": [
+ {
+ "name": "file_id",
+ "in": "path",
+ "description": "The ID of the file to use for this request.",
+ "required": true,
+ "schema": {
+ "type": "string"
}
- },
- "required": true
- },
+ }
+ ],
"deprecated": false
}
},
- "/v1/prompts": {
+ "/v1/health": {
"get": {
"responses": {
"200": {
- "description": "A ListPromptsResponse containing all prompts.",
+ "description": "Health information indicating if the service is operational.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListPromptsResponse"
+ "$ref": "#/components/schemas/HealthInfo"
}
}
}
@@ -862,21 +1065,23 @@
}
},
"tags": [
- "Prompts"
+ "Inspect"
],
- "summary": "List all prompts.",
- "description": "List all prompts.",
+ "summary": "Get the current health status of the service.",
+ "description": "Get the current health status of the service.",
"parameters": [],
"deprecated": false
- },
- "post": {
+ }
+ },
+ "/v1/inspect/routes": {
+ "get": {
"responses": {
"200": {
- "description": "The created Prompt resource.",
+ "description": "Response containing information about all available routes.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Prompt"
+ "$ref": "#/components/schemas/ListRoutesResponse"
}
}
}
@@ -895,33 +1100,23 @@
}
},
"tags": [
- "Prompts"
+ "Inspect"
],
- "summary": "Create a new prompt.",
- "description": "Create a new prompt.",
+ "summary": "List all available API routes with their methods and implementing providers.",
+ "description": "List all available API routes with their methods and implementing providers.",
"parameters": [],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreatePromptRequest"
- }
- }
- },
- "required": true
- },
"deprecated": false
}
},
- "/v1/prompts/{prompt_id}": {
+ "/v1/models": {
"get": {
"responses": {
"200": {
- "description": "A Prompt resource.",
+ "description": "A ListModelsResponse.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Prompt"
+ "$ref": "#/components/schemas/ListModelsResponse"
}
}
}
@@ -940,40 +1135,21 @@
}
},
"tags": [
- "Prompts"
- ],
- "summary": "Get a prompt by its identifier and optional version.",
- "description": "Get a prompt by its identifier and optional version.",
- "parameters": [
- {
- "name": "prompt_id",
- "in": "path",
- "description": "The identifier of the prompt to get.",
- "required": true,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "version",
- "in": "query",
- "description": "The version of the prompt to get (defaults to latest).",
- "required": false,
- "schema": {
- "type": "integer"
- }
- }
+ "Models"
],
+ "summary": "List all models.",
+ "description": "List all models.",
+ "parameters": [],
"deprecated": false
},
"post": {
"responses": {
"200": {
- "description": "The updated Prompt resource with incremented version.",
+ "description": "A Model.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Prompt"
+ "$ref": "#/components/schemas/Model"
}
}
}
@@ -992,37 +1168,36 @@
}
},
"tags": [
- "Prompts"
- ],
- "summary": "Update an existing prompt (increments version).",
- "description": "Update an existing prompt (increments version).",
- "parameters": [
- {
- "name": "prompt_id",
- "in": "path",
- "description": "The identifier of the prompt to update.",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
+ "Models"
],
+ "summary": "Register a model.",
+ "description": "Register a model.",
+ "parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/UpdatePromptRequest"
+ "$ref": "#/components/schemas/RegisterModelRequest"
}
}
},
"required": true
},
"deprecated": false
- },
- "delete": {
+ }
+ },
+ "/v1/models/{model_id}": {
+ "get": {
"responses": {
"200": {
- "description": "OK"
+ "description": "A Model.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Model"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1038,15 +1213,15 @@
}
},
"tags": [
- "Prompts"
+ "Models"
],
- "summary": "Delete a prompt.",
- "description": "Delete a prompt.",
+ "summary": "Get a model by its identifier.",
+ "description": "Get a model by its identifier.",
"parameters": [
{
- "name": "prompt_id",
+ "name": "model_id",
"in": "path",
- "description": "The identifier of the prompt to delete.",
+ "description": "The identifier of the model to get.",
"required": true,
"schema": {
"type": "string"
@@ -1054,20 +1229,11 @@
}
],
"deprecated": false
- }
- },
- "/v1/prompts/{prompt_id}/set-default-version": {
- "post": {
+ },
+ "delete": {
"responses": {
"200": {
- "description": "The prompt with the specified version now set as default.",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Prompt"
- }
- }
- }
+ "description": "OK"
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1083,43 +1249,33 @@
}
},
"tags": [
- "Prompts"
+ "Models"
],
- "summary": "Set which version of a prompt should be the default in get_prompt (latest).",
- "description": "Set which version of a prompt should be the default in get_prompt (latest).",
+ "summary": "Unregister a model.",
+ "description": "Unregister a model.",
"parameters": [
{
- "name": "prompt_id",
+ "name": "model_id",
"in": "path",
- "description": "The identifier of the prompt.",
+ "description": "The identifier of the model to unregister.",
"required": true,
"schema": {
"type": "string"
}
}
],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/SetDefaultVersionRequest"
- }
- }
- },
- "required": true
- },
"deprecated": false
}
},
- "/v1/prompts/{prompt_id}/versions": {
- "get": {
+ "/v1/moderations": {
+ "post": {
"responses": {
"200": {
- "description": "A ListPromptsResponse containing all versions of the prompt.",
+ "description": "A moderation object.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListPromptsResponse"
+ "$ref": "#/components/schemas/ModerationObject"
}
}
}
@@ -1138,33 +1294,33 @@
}
},
"tags": [
- "Prompts"
+ "Safety"
],
- "summary": "List all versions of a specific prompt.",
- "description": "List all versions of a specific prompt.",
- "parameters": [
- {
- "name": "prompt_id",
- "in": "path",
- "description": "The identifier of the prompt to list versions for.",
- "required": true,
- "schema": {
- "type": "string"
+ "summary": "Classifies if text and/or image inputs are potentially harmful.",
+ "description": "Classifies if text and/or image inputs are potentially harmful.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/RunModerationRequest"
+ }
}
- }
- ],
+ },
+ "required": true
+ },
"deprecated": false
}
},
- "/v1/providers": {
+ "/v1/prompts": {
"get": {
"responses": {
"200": {
- "description": "A ListProvidersResponse containing information about all providers.",
+ "description": "A ListPromptsResponse containing all prompts.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListProvidersResponse"
+ "$ref": "#/components/schemas/ListPromptsResponse"
}
}
}
@@ -1183,23 +1339,21 @@
}
},
"tags": [
- "Providers"
+ "Prompts"
],
- "summary": "List all available providers.",
- "description": "List all available providers.",
+ "summary": "List all prompts.",
+ "description": "List all prompts.",
"parameters": [],
"deprecated": false
- }
- },
- "/v1/providers/{provider_id}": {
- "get": {
+ },
+ "post": {
"responses": {
"200": {
- "description": "A ProviderInfo object containing the provider's details.",
+ "description": "The created Prompt resource.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ProviderInfo"
+ "$ref": "#/components/schemas/Prompt"
}
}
}
@@ -1218,33 +1372,33 @@
}
},
"tags": [
- "Providers"
+ "Prompts"
],
- "summary": "Get detailed information about a specific provider.",
- "description": "Get detailed information about a specific provider.",
- "parameters": [
- {
- "name": "provider_id",
- "in": "path",
- "description": "The ID of the provider to inspect.",
- "required": true,
- "schema": {
- "type": "string"
+ "summary": "Create a new prompt.",
+ "description": "Create a new prompt.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CreatePromptRequest"
+ }
}
- }
- ],
+ },
+ "required": true
+ },
"deprecated": false
}
},
- "/v1/responses": {
+ "/v1/prompts/{prompt_id}": {
"get": {
"responses": {
"200": {
- "description": "A ListOpenAIResponseObject.",
+ "description": "A Prompt resource.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenAIResponseObject"
+ "$ref": "#/components/schemas/Prompt"
}
}
}
@@ -1263,46 +1417,28 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
],
- "summary": "List all OpenAI responses.",
- "description": "List all OpenAI responses.",
+ "summary": "Get a prompt by its identifier and optional version.",
+ "description": "Get a prompt by its identifier and optional version.",
"parameters": [
{
- "name": "after",
- "in": "query",
- "description": "The ID of the last response to return.",
- "required": false,
+ "name": "prompt_id",
+ "in": "path",
+ "description": "The identifier of the prompt to get.",
+ "required": true,
"schema": {
"type": "string"
}
},
{
- "name": "limit",
+ "name": "version",
"in": "query",
- "description": "The number of responses to return.",
+ "description": "The version of the prompt to get (defaults to latest).",
"required": false,
"schema": {
"type": "integer"
}
- },
- {
- "name": "model",
- "in": "query",
- "description": "The model to filter responses by.",
- "required": false,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "order",
- "in": "query",
- "description": "The order to sort responses by when sorted by created_at ('asc' or 'desc').",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/Order"
- }
}
],
"deprecated": false
@@ -1310,11 +1446,11 @@
"post": {
"responses": {
"200": {
- "description": "A ListOpenAIResponseObject.",
+ "description": "The updated Prompt resource with incremented version.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenAIResponseObject"
+ "$ref": "#/components/schemas/Prompt"
}
}
}
@@ -1333,36 +1469,37 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
+ ],
+ "summary": "Update an existing prompt (increments version).",
+ "description": "Update an existing prompt (increments version).",
+ "parameters": [
+ {
+ "name": "prompt_id",
+ "in": "path",
+ "description": "The identifier of the prompt to update.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
],
- "summary": "List all OpenAI responses.",
- "description": "List all OpenAI responses.",
- "parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenaiResponsesRequest"
+ "$ref": "#/components/schemas/UpdatePromptRequest"
}
}
},
"required": true
},
"deprecated": false
- }
- },
- "/v1/responses/{response_id}": {
- "get": {
+ },
+ "delete": {
"responses": {
"200": {
- "description": "An OpenAIResponseObject.",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/OpenAIResponseObject"
- }
- }
- }
+ "description": "OK"
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1378,15 +1515,15 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
],
- "summary": "Retrieve an OpenAI response by its ID.",
- "description": "Retrieve an OpenAI response by its ID.",
+ "summary": "Delete a prompt.",
+ "description": "Delete a prompt.",
"parameters": [
{
- "name": "response_id",
+ "name": "prompt_id",
"in": "path",
- "description": "The ID of the OpenAI response to retrieve.",
+ "description": "The identifier of the prompt to delete.",
"required": true,
"schema": {
"type": "string"
@@ -1394,15 +1531,17 @@
}
],
"deprecated": false
- },
- "delete": {
+ }
+ },
+ "/v1/prompts/{prompt_id}/set-default-version": {
+ "post": {
"responses": {
"200": {
- "description": "An OpenAIDeleteResponseObject",
+ "description": "The prompt with the specified version now set as default.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIDeleteResponseObject"
+ "$ref": "#/components/schemas/Prompt"
}
}
}
@@ -1421,33 +1560,43 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
],
- "summary": "Delete an OpenAI response by its ID.",
- "description": "Delete an OpenAI response by its ID.",
+ "summary": "Set which version of a prompt should be the default in get_prompt (latest).",
+ "description": "Set which version of a prompt should be the default in get_prompt (latest).",
"parameters": [
{
- "name": "response_id",
+ "name": "prompt_id",
"in": "path",
- "description": "The ID of the OpenAI response to delete.",
+ "description": "The identifier of the prompt.",
"required": true,
"schema": {
"type": "string"
}
}
],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/SetDefaultVersionRequest"
+ }
+ }
+ },
+ "required": true
+ },
"deprecated": false
}
},
- "/v1/responses/{response_id}/input_items": {
+ "/v1/prompts/{prompt_id}/versions": {
"get": {
"responses": {
"200": {
- "description": "An ListOpenAIResponseInputItem.",
+ "description": "A ListPromptsResponse containing all versions of the prompt.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenAIResponseInputItem"
+ "$ref": "#/components/schemas/ListPromptsResponse"
}
}
}
@@ -1466,81 +1615,68 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
],
- "summary": "List input items for a given OpenAI response.",
- "description": "List input items for a given OpenAI response.",
+ "summary": "List all versions of a specific prompt.",
+ "description": "List all versions of a specific prompt.",
"parameters": [
{
- "name": "response_id",
+ "name": "prompt_id",
"in": "path",
- "description": "The ID of the response to retrieve input items for.",
+ "description": "The identifier of the prompt to list versions for.",
"required": true,
"schema": {
"type": "string"
}
- },
- {
- "name": "after",
- "in": "query",
- "description": "An item ID to list items after, used for pagination.",
- "required": false,
- "schema": {
- "type": "string"
+ }
+ ],
+ "deprecated": false
+ }
+ },
+ "/v1/providers": {
+ "get": {
+ "responses": {
+ "200": {
+ "description": "A ListProvidersResponse containing information about all providers.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListProvidersResponse"
+ }
+ }
}
},
- {
- "name": "before",
- "in": "query",
- "description": "An item ID to list items before, used for pagination.",
- "required": false,
- "schema": {
- "type": "string"
- }
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
},
- {
- "name": "include",
- "in": "query",
- "description": "Additional fields to include in the response.",
- "required": false,
- "schema": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
},
- {
- "name": "limit",
- "in": "query",
- "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.",
- "required": false,
- "schema": {
- "type": "integer"
- }
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
},
- {
- "name": "order",
- "in": "query",
- "description": "The order to return the input items in. Default is desc.",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/Order"
- }
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
}
+ },
+ "tags": [
+ "Providers"
],
+ "summary": "List all available providers.",
+ "description": "List all available providers.",
+ "parameters": [],
"deprecated": false
}
},
- "/v1/safety/run-shield": {
- "post": {
+ "/v1/providers/{provider_id}": {
+ "get": {
"responses": {
"200": {
- "description": "A RunShieldResponse.",
+ "description": "A ProviderInfo object containing the provider's details.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/RunShieldResponse"
+ "$ref": "#/components/schemas/ProviderInfo"
}
}
}
@@ -1559,33 +1695,33 @@
}
},
"tags": [
- "Safety"
+ "Providers"
],
- "summary": "Run a shield.",
- "description": "Run a shield.",
- "parameters": [],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/RunShieldRequest"
- }
+ "summary": "Get detailed information about a specific provider.",
+ "description": "Get detailed information about a specific provider.",
+ "parameters": [
+ {
+ "name": "provider_id",
+ "in": "path",
+ "description": "The ID of the provider to inspect.",
+ "required": true,
+ "schema": {
+ "type": "string"
}
- },
- "required": true
- },
+ }
+ ],
"deprecated": false
}
},
- "/v1/scoring-functions": {
+ "/v1/responses": {
"get": {
"responses": {
"200": {
- "description": "A ListScoringFunctionsResponse.",
+ "description": "A ListOpenAIResponseObject.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListScoringFunctionsResponse"
+ "$ref": "#/components/schemas/ListOpenAIResponseObject"
}
}
}
@@ -1604,17 +1740,61 @@
}
},
"tags": [
- "ScoringFunctions"
+ "Agents"
+ ],
+ "summary": "List all OpenAI responses.",
+ "description": "List all OpenAI responses.",
+ "parameters": [
+ {
+ "name": "after",
+ "in": "query",
+ "description": "The ID of the last response to return.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "The number of responses to return.",
+ "required": false,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "model",
+ "in": "query",
+ "description": "The model to filter responses by.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "order",
+ "in": "query",
+ "description": "The order to sort responses by when sorted by created_at ('asc' or 'desc').",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/Order"
+ }
+ }
],
- "summary": "List all scoring functions.",
- "description": "List all scoring functions.",
- "parameters": [],
"deprecated": false
},
"post": {
"responses": {
"200": {
- "description": "OK"
+ "description": "A ListOpenAIResponseObject.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListOpenAIResponseObject"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1630,16 +1810,16 @@
}
},
"tags": [
- "ScoringFunctions"
+ "Agents"
],
- "summary": "Register a scoring function.",
- "description": "Register a scoring function.",
+ "summary": "List all OpenAI responses.",
+ "description": "List all OpenAI responses.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/RegisterScoringFunctionRequest"
+ "$ref": "#/components/schemas/ListOpenaiResponsesRequest"
}
}
},
@@ -1648,15 +1828,15 @@
"deprecated": false
}
},
- "/v1/scoring-functions/{scoring_fn_id}": {
+ "/v1/responses/{response_id}": {
"get": {
"responses": {
"200": {
- "description": "A ScoringFn.",
+ "description": "An OpenAIResponseObject.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ScoringFn"
+ "$ref": "#/components/schemas/OpenAIResponseObject"
}
}
}
@@ -1675,15 +1855,15 @@
}
},
"tags": [
- "ScoringFunctions"
+ "Agents"
],
- "summary": "Get a scoring function by its ID.",
- "description": "Get a scoring function by its ID.",
+ "summary": "Retrieve an OpenAI response by its ID.",
+ "description": "Retrieve an OpenAI response by its ID.",
"parameters": [
{
- "name": "scoring_fn_id",
+ "name": "response_id",
"in": "path",
- "description": "The ID of the scoring function to get.",
+ "description": "The ID of the OpenAI response to retrieve.",
"required": true,
"schema": {
"type": "string"
@@ -1695,7 +1875,14 @@
"delete": {
"responses": {
"200": {
- "description": "OK"
+ "description": "An OpenAIDeleteResponseObject.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/OpenAIDeleteResponseObject"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1711,15 +1898,15 @@
}
},
"tags": [
- "ScoringFunctions"
+ "Agents"
],
- "summary": "Unregister a scoring function.",
- "description": "Unregister a scoring function.",
+ "summary": "Delete an OpenAI response by its ID.",
+ "description": "Delete an OpenAI response by its ID.",
"parameters": [
{
- "name": "scoring_fn_id",
+ "name": "response_id",
"in": "path",
- "description": "The ID of the scoring function to unregister.",
+ "description": "The ID of the OpenAI response to delete.",
"required": true,
"schema": {
"type": "string"
@@ -1729,15 +1916,15 @@
"deprecated": false
}
},
- "/v1/scoring/score": {
- "post": {
+ "/v1/responses/{response_id}/input_items": {
+ "get": {
"responses": {
"200": {
- "description": "A ScoreResponse object containing rows and aggregated results.",
+ "description": "A ListOpenAIResponseInputItem.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ScoreResponse"
+ "$ref": "#/components/schemas/ListOpenAIResponseInputItem"
}
}
}
@@ -1756,33 +1943,81 @@
}
},
"tags": [
- "Scoring"
+ "Agents"
],
- "summary": "Score a list of rows.",
- "description": "Score a list of rows.",
- "parameters": [],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ScoreRequest"
+ "summary": "List input items for a given OpenAI response.",
+ "description": "List input items for a given OpenAI response.",
+ "parameters": [
+ {
+ "name": "response_id",
+ "in": "path",
+ "description": "The ID of the response to retrieve input items for.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "An item ID to list items after, used for pagination.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "An item ID to list items before, used for pagination.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "include",
+ "in": "query",
+ "description": "Additional fields to include in the response.",
+ "required": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string"
}
}
},
- "required": true
- },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.",
+ "required": false,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "order",
+ "in": "query",
+ "description": "The order to return the input items in. Default is desc.",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/Order"
+ }
+ }
+ ],
"deprecated": false
}
},
- "/v1/scoring/score-batch": {
+ "/v1/safety/run-shield": {
"post": {
"responses": {
"200": {
- "description": "A ScoreBatchResponse.",
+ "description": "A RunShieldResponse.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ScoreBatchResponse"
+ "$ref": "#/components/schemas/RunShieldResponse"
}
}
}
@@ -1801,16 +2036,16 @@
}
},
"tags": [
- "Scoring"
+ "Safety"
],
- "summary": "Score a batch of rows.",
- "description": "Score a batch of rows.",
+ "summary": "Run a shield.",
+ "description": "Run a shield.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ScoreBatchRequest"
+ "$ref": "#/components/schemas/RunShieldRequest"
}
}
},
@@ -1819,15 +2054,15 @@
"deprecated": false
}
},
- "/v1/shields": {
+ "/v1/scoring-functions": {
"get": {
"responses": {
"200": {
- "description": "A ListShieldsResponse.",
+ "description": "A ListScoringFunctionsResponse.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListShieldsResponse"
+ "$ref": "#/components/schemas/ListScoringFunctionsResponse"
}
}
}
@@ -1846,24 +2081,17 @@
}
},
"tags": [
- "Shields"
+ "ScoringFunctions"
],
- "summary": "List all shields.",
- "description": "List all shields.",
+ "summary": "List all scoring functions.",
+ "description": "List all scoring functions.",
"parameters": [],
"deprecated": false
},
"post": {
"responses": {
"200": {
- "description": "A Shield.",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Shield"
- }
- }
- }
+ "description": "OK"
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1879,16 +2107,16 @@
}
},
"tags": [
- "Shields"
+ "ScoringFunctions"
],
- "summary": "Register a shield.",
- "description": "Register a shield.",
+ "summary": "Register a scoring function.",
+ "description": "Register a scoring function.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/RegisterShieldRequest"
+ "$ref": "#/components/schemas/RegisterScoringFunctionRequest"
}
}
},
@@ -1897,15 +2125,15 @@
"deprecated": false
}
},
- "/v1/shields/{identifier}": {
+ "/v1/scoring-functions/{scoring_fn_id}": {
"get": {
"responses": {
"200": {
- "description": "A Shield.",
+ "description": "A ScoringFn.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Shield"
+ "$ref": "#/components/schemas/ScoringFn"
}
}
}
@@ -1924,15 +2152,15 @@
}
},
"tags": [
- "Shields"
+ "ScoringFunctions"
],
- "summary": "Get a shield by its identifier.",
- "description": "Get a shield by its identifier.",
+ "summary": "Get a scoring function by its ID.",
+ "description": "Get a scoring function by its ID.",
"parameters": [
{
- "name": "identifier",
+ "name": "scoring_fn_id",
"in": "path",
- "description": "The identifier of the shield to get.",
+ "description": "The ID of the scoring function to get.",
"required": true,
"schema": {
"type": "string"
@@ -1960,15 +2188,15 @@
}
},
"tags": [
- "Shields"
+ "ScoringFunctions"
],
- "summary": "Unregister a shield.",
- "description": "Unregister a shield.",
+ "summary": "Unregister a scoring function.",
+ "description": "Unregister a scoring function.",
"parameters": [
{
- "name": "identifier",
+ "name": "scoring_fn_id",
"in": "path",
- "description": "The identifier of the shield to unregister.",
+ "description": "The ID of the scoring function to unregister.",
"required": true,
"schema": {
"type": "string"
@@ -1978,15 +2206,15 @@
"deprecated": false
}
},
- "/v1/synthetic-data-generation/generate": {
+ "/v1/scoring/score": {
"post": {
"responses": {
"200": {
- "description": "Response containing filtered synthetic data samples and optional statistics",
+ "description": "A ScoreResponse object containing rows and aggregated results.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/SyntheticDataGenerationResponse"
+ "$ref": "#/components/schemas/ScoreResponse"
}
}
}
@@ -2005,16 +2233,16 @@
}
},
"tags": [
- "SyntheticDataGeneration (Coming Soon)"
+ "Scoring"
],
- "summary": "Generate synthetic data based on input dialogs and apply filtering.",
- "description": "Generate synthetic data based on input dialogs and apply filtering.",
+ "summary": "Score a list of rows.",
+ "description": "Score a list of rows.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/SyntheticDataGenerateRequest"
+ "$ref": "#/components/schemas/ScoreRequest"
}
}
},
@@ -2023,11 +2251,18 @@
"deprecated": false
}
},
- "/v1/telemetry/events": {
+ "/v1/scoring/score-batch": {
"post": {
"responses": {
"200": {
- "description": "OK"
+ "description": "A ScoreBatchResponse.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ScoreBatchResponse"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -2043,16 +2278,16 @@
}
},
"tags": [
- "Telemetry"
+ "Scoring"
],
- "summary": "Log an event.",
- "description": "Log an event.",
+ "summary": "Score a batch of rows.",
+ "description": "Score a batch of rows.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/LogEventRequest"
+ "$ref": "#/components/schemas/ScoreBatchRequest"
}
}
},
@@ -2061,15 +2296,48 @@
"deprecated": false
}
},
- "/v1/tool-runtime/invoke": {
+ "/v1/shields": {
+ "get": {
+ "responses": {
+ "200": {
+ "description": "A ListShieldsResponse.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListShieldsResponse"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "Shields"
+ ],
+ "summary": "List all shields.",
+ "description": "List all shields.",
+ "parameters": [],
+ "deprecated": false
+ },
"post": {
"responses": {
"200": {
- "description": "A ToolInvocationResult.",
+ "description": "A Shield.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ToolInvocationResult"
+ "$ref": "#/components/schemas/Shield"
}
}
}
@@ -2088,16 +2356,16 @@
}
},
"tags": [
- "ToolRuntime"
+ "Shields"
],
- "summary": "Run a tool with the given arguments.",
- "description": "Run a tool with the given arguments.",
+ "summary": "Register a shield.",
+ "description": "Register a shield.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/InvokeToolRequest"
+ "$ref": "#/components/schemas/RegisterShieldRequest"
}
}
},
@@ -2106,15 +2374,15 @@
"deprecated": false
}
},
- "/v1/tool-runtime/list-tools": {
+ "/v1/shields/{identifier}": {
"get": {
"responses": {
"200": {
- "description": "A ListToolDefsResponse.",
+ "description": "A Shield.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListToolDefsResponse"
+ "$ref": "#/components/schemas/Shield"
}
}
}
@@ -2133,28 +2401,237 @@
}
},
"tags": [
- "ToolRuntime"
+ "Shields"
],
- "summary": "List all tools in the runtime.",
- "description": "List all tools in the runtime.",
+ "summary": "Get a shield by its identifier.",
+ "description": "Get a shield by its identifier.",
"parameters": [
{
- "name": "tool_group_id",
- "in": "query",
- "description": "The ID of the tool group to list tools for.",
- "required": false,
+ "name": "identifier",
+ "in": "path",
+ "description": "The identifier of the shield to get.",
+ "required": true,
"schema": {
"type": "string"
}
- },
- {
- "name": "mcp_endpoint",
- "in": "query",
- "description": "The MCP endpoint to use for the tool group.",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/URL"
- }
+ }
+ ],
+ "deprecated": false
+ },
+ "delete": {
+ "responses": {
+ "200": {
+ "description": "OK"
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "Shields"
+ ],
+ "summary": "Unregister a shield.",
+ "description": "Unregister a shield.",
+ "parameters": [
+ {
+ "name": "identifier",
+ "in": "path",
+ "description": "The identifier of the shield to unregister.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "deprecated": false
+ }
+ },
+ "/v1/synthetic-data-generation/generate": {
+ "post": {
+ "responses": {
+ "200": {
+ "description": "Response containing filtered synthetic data samples and optional statistics",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/SyntheticDataGenerationResponse"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "SyntheticDataGeneration (Coming Soon)"
+ ],
+ "summary": "Generate synthetic data based on input dialogs and apply filtering.",
+ "description": "Generate synthetic data based on input dialogs and apply filtering.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/SyntheticDataGenerateRequest"
+ }
+ }
+ },
+ "required": true
+ },
+ "deprecated": false
+ }
+ },
+ "/v1/telemetry/events": {
+ "post": {
+ "responses": {
+ "200": {
+ "description": "OK"
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "Telemetry"
+ ],
+ "summary": "Log an event.",
+ "description": "Log an event.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/LogEventRequest"
+ }
+ }
+ },
+ "required": true
+ },
+ "deprecated": false
+ }
+ },
+ "/v1/tool-runtime/invoke": {
+ "post": {
+ "responses": {
+ "200": {
+ "description": "A ToolInvocationResult.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ToolInvocationResult"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "ToolRuntime"
+ ],
+ "summary": "Run a tool with the given arguments.",
+ "description": "Run a tool with the given arguments.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/InvokeToolRequest"
+ }
+ }
+ },
+ "required": true
+ },
+ "deprecated": false
+ }
+ },
+ "/v1/tool-runtime/list-tools": {
+ "get": {
+ "responses": {
+ "200": {
+ "description": "A ListToolDefsResponse.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListToolDefsResponse"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "ToolRuntime"
+ ],
+ "summary": "List all tools in the runtime.",
+ "description": "List all tools in the runtime.",
+ "parameters": [
+ {
+ "name": "tool_group_id",
+ "in": "query",
+ "description": "The ID of the tool group to list tools for.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "mcp_endpoint",
+ "in": "query",
+ "description": "The MCP endpoint to use for the tool group.",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/URL"
+ }
}
],
"deprecated": false
@@ -5106,877 +5583,988 @@
"title": "OpenAICompletionChoice",
"description": "A choice from an OpenAI-compatible completion response."
},
- "OpenaiEmbeddingsRequest": {
- "type": "object",
- "properties": {
- "model": {
- "type": "string",
- "description": "The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint."
+ "ConversationItem": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMessage"
},
- "input": {
- "oneOf": [
- {
- "type": "string"
- },
- {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- ],
- "description": "Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings."
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
},
- "encoding_format": {
- "type": "string",
- "description": "(Optional) The format to return the embeddings in. Can be either \"float\" or \"base64\". Defaults to \"float\"."
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
},
- "dimensions": {
- "type": "integer",
- "description": "(Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models."
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
},
- "user": {
- "type": "string",
- "description": "(Optional) A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse."
- }
- },
- "additionalProperties": false,
- "required": [
- "model",
- "input"
- ],
- "title": "OpenaiEmbeddingsRequest"
- },
- "OpenAIEmbeddingData": {
- "type": "object",
- "properties": {
- "object": {
- "type": "string",
- "const": "embedding",
- "default": "embedding",
- "description": "The object type, which will be \"embedding\""
- },
- "embedding": {
- "oneOf": [
- {
- "type": "array",
- "items": {
- "type": "number"
- }
- },
- {
- "type": "string"
- }
- ],
- "description": "The embedding vector as a list of floats (when encoding_format=\"float\") or as a base64-encoded string (when encoding_format=\"base64\")"
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall"
},
- "index": {
- "type": "integer",
- "description": "The index of the embedding in the input list"
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools"
}
- },
- "additionalProperties": false,
- "required": [
- "object",
- "embedding",
- "index"
],
- "title": "OpenAIEmbeddingData",
- "description": "A single embedding data object from an OpenAI-compatible embeddings response."
- },
- "OpenAIEmbeddingUsage": {
- "type": "object",
- "properties": {
- "prompt_tokens": {
- "type": "integer",
- "description": "The number of tokens in the input"
- },
- "total_tokens": {
- "type": "integer",
- "description": "The total number of tokens used"
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "message": "#/components/schemas/OpenAIResponseMessage",
+ "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall",
+ "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall",
+ "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall",
+ "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall",
+ "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools"
}
- },
- "additionalProperties": false,
- "required": [
- "prompt_tokens",
- "total_tokens"
- ],
- "title": "OpenAIEmbeddingUsage",
- "description": "Usage information for an OpenAI-compatible embeddings response."
+ }
},
- "OpenAIEmbeddingsResponse": {
+ "OpenAIResponseAnnotationCitation": {
"type": "object",
"properties": {
- "object": {
+ "type": {
"type": "string",
- "const": "list",
- "default": "list",
- "description": "The object type, which will be \"list\""
+ "const": "url_citation",
+ "default": "url_citation",
+ "description": "Annotation type identifier, always \"url_citation\""
},
- "data": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIEmbeddingData"
- },
- "description": "List of embedding data objects"
+ "end_index": {
+ "type": "integer",
+ "description": "End position of the citation span in the content"
},
- "model": {
+ "start_index": {
+ "type": "integer",
+ "description": "Start position of the citation span in the content"
+ },
+ "title": {
"type": "string",
- "description": "The model that was used to generate the embeddings"
+ "description": "Title of the referenced web resource"
},
- "usage": {
- "$ref": "#/components/schemas/OpenAIEmbeddingUsage",
- "description": "Usage information"
+ "url": {
+ "type": "string",
+ "description": "URL of the referenced web resource"
}
},
"additionalProperties": false,
"required": [
- "object",
- "data",
- "model",
- "usage"
- ],
- "title": "OpenAIEmbeddingsResponse",
- "description": "Response from an OpenAI-compatible embeddings request."
- },
- "OpenAIFilePurpose": {
- "type": "string",
- "enum": [
- "assistants",
- "batch"
+ "type",
+ "end_index",
+ "start_index",
+ "title",
+ "url"
],
- "title": "OpenAIFilePurpose",
- "description": "Valid purpose values for OpenAI Files API."
+ "title": "OpenAIResponseAnnotationCitation",
+ "description": "URL citation annotation for referencing external web resources."
},
- "ListOpenAIFileResponse": {
+ "OpenAIResponseAnnotationContainerFileCitation": {
"type": "object",
"properties": {
- "data": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIFileObject"
- },
- "description": "List of file objects"
+ "type": {
+ "type": "string",
+ "const": "container_file_citation",
+ "default": "container_file_citation"
},
- "has_more": {
- "type": "boolean",
- "description": "Whether there are more files available beyond this page"
+ "container_id": {
+ "type": "string"
},
- "first_id": {
- "type": "string",
- "description": "ID of the first file in the list for pagination"
+ "end_index": {
+ "type": "integer"
},
- "last_id": {
- "type": "string",
- "description": "ID of the last file in the list for pagination"
+ "file_id": {
+ "type": "string"
},
- "object": {
- "type": "string",
- "const": "list",
- "default": "list",
- "description": "The object type, which is always \"list\""
+ "filename": {
+ "type": "string"
+ },
+ "start_index": {
+ "type": "integer"
}
},
"additionalProperties": false,
"required": [
- "data",
- "has_more",
- "first_id",
- "last_id",
- "object"
+ "type",
+ "container_id",
+ "end_index",
+ "file_id",
+ "filename",
+ "start_index"
],
- "title": "ListOpenAIFileResponse",
- "description": "Response for listing files in OpenAI Files API."
+ "title": "OpenAIResponseAnnotationContainerFileCitation"
},
- "OpenAIFileObject": {
+ "OpenAIResponseAnnotationFileCitation": {
"type": "object",
"properties": {
- "object": {
+ "type": {
"type": "string",
- "const": "file",
- "default": "file",
- "description": "The object type, which is always \"file\""
+ "const": "file_citation",
+ "default": "file_citation",
+ "description": "Annotation type identifier, always \"file_citation\""
},
- "id": {
+ "file_id": {
"type": "string",
- "description": "The file identifier, which can be referenced in the API endpoints"
- },
- "bytes": {
- "type": "integer",
- "description": "The size of the file, in bytes"
- },
- "created_at": {
- "type": "integer",
- "description": "The Unix timestamp (in seconds) for when the file was created"
- },
- "expires_at": {
- "type": "integer",
- "description": "The Unix timestamp (in seconds) for when the file expires"
+ "description": "Unique identifier of the referenced file"
},
"filename": {
"type": "string",
- "description": "The name of the file"
+ "description": "Name of the referenced file"
},
- "purpose": {
- "type": "string",
- "enum": [
- "assistants",
- "batch"
- ],
- "description": "The intended purpose of the file"
+ "index": {
+ "type": "integer",
+ "description": "Position index of the citation within the content"
}
},
"additionalProperties": false,
"required": [
- "object",
- "id",
- "bytes",
- "created_at",
- "expires_at",
+ "type",
+ "file_id",
"filename",
- "purpose"
+ "index"
],
- "title": "OpenAIFileObject",
- "description": "OpenAI File object as defined in the OpenAI Files API."
+ "title": "OpenAIResponseAnnotationFileCitation",
+ "description": "File citation annotation for referencing specific files in response content."
},
- "ExpiresAfter": {
+ "OpenAIResponseAnnotationFilePath": {
"type": "object",
"properties": {
- "anchor": {
+ "type": {
"type": "string",
- "const": "created_at"
+ "const": "file_path",
+ "default": "file_path"
},
- "seconds": {
+ "file_id": {
+ "type": "string"
+ },
+ "index": {
"type": "integer"
}
},
"additionalProperties": false,
"required": [
- "anchor",
- "seconds"
+ "type",
+ "file_id",
+ "index"
],
- "title": "ExpiresAfter",
- "description": "Control expiration of uploaded files.\nParams:\n - anchor, must be \"created_at\"\n - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)"
+ "title": "OpenAIResponseAnnotationFilePath"
},
- "OpenAIFileDeleteResponse": {
- "type": "object",
- "properties": {
- "id": {
- "type": "string",
- "description": "The file identifier that was deleted"
+ "OpenAIResponseAnnotations": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseAnnotationFileCitation"
},
- "object": {
+ {
+ "$ref": "#/components/schemas/OpenAIResponseAnnotationCitation"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseAnnotationFilePath"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "file_citation": "#/components/schemas/OpenAIResponseAnnotationFileCitation",
+ "url_citation": "#/components/schemas/OpenAIResponseAnnotationCitation",
+ "container_file_citation": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation",
+ "file_path": "#/components/schemas/OpenAIResponseAnnotationFilePath"
+ }
+ }
+ },
+ "OpenAIResponseInputMessageContent": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText",
+ "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage"
+ }
+ }
+ },
+ "OpenAIResponseInputMessageContentImage": {
+ "type": "object",
+ "properties": {
+ "detail": {
+ "oneOf": [
+ {
+ "type": "string",
+ "const": "low"
+ },
+ {
+ "type": "string",
+ "const": "high"
+ },
+ {
+ "type": "string",
+ "const": "auto"
+ }
+ ],
+ "default": "auto",
+ "description": "Level of detail for image processing, can be \"low\", \"high\", or \"auto\""
+ },
+ "type": {
"type": "string",
- "const": "file",
- "default": "file",
- "description": "The object type, which is always \"file\""
+ "const": "input_image",
+ "default": "input_image",
+ "description": "Content type identifier, always \"input_image\""
},
- "deleted": {
- "type": "boolean",
- "description": "Whether the file was successfully deleted"
+ "image_url": {
+ "type": "string",
+ "description": "(Optional) URL of the image content"
}
},
"additionalProperties": false,
"required": [
- "id",
- "object",
- "deleted"
+ "detail",
+ "type"
],
- "title": "OpenAIFileDeleteResponse",
- "description": "Response for deleting a file in OpenAI Files API."
- },
- "Response": {
- "type": "object",
- "title": "Response"
+ "title": "OpenAIResponseInputMessageContentImage",
+ "description": "Image content for input messages in OpenAI response format."
},
- "HealthInfo": {
+ "OpenAIResponseInputMessageContentText": {
"type": "object",
"properties": {
- "status": {
+ "text": {
"type": "string",
- "enum": [
- "OK",
- "Error",
- "Not Implemented"
- ],
- "description": "Current health status of the service"
+ "description": "The text content of the input message"
+ },
+ "type": {
+ "type": "string",
+ "const": "input_text",
+ "default": "input_text",
+ "description": "Content type identifier, always \"input_text\""
}
},
"additionalProperties": false,
"required": [
- "status"
+ "text",
+ "type"
],
- "title": "HealthInfo",
- "description": "Health status information for the service."
+ "title": "OpenAIResponseInputMessageContentText",
+ "description": "Text content for input messages in OpenAI response format."
},
- "RouteInfo": {
+ "OpenAIResponseMessage": {
"type": "object",
"properties": {
- "route": {
- "type": "string",
- "description": "The API endpoint path"
+ "content": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseInputMessageContent"
+ }
+ },
+ {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageContent"
+ }
+ }
+ ]
},
- "method": {
+ "role": {
+ "oneOf": [
+ {
+ "type": "string",
+ "const": "system"
+ },
+ {
+ "type": "string",
+ "const": "developer"
+ },
+ {
+ "type": "string",
+ "const": "user"
+ },
+ {
+ "type": "string",
+ "const": "assistant"
+ }
+ ]
+ },
+ "type": {
"type": "string",
- "description": "HTTP method for the route"
+ "const": "message",
+ "default": "message"
},
- "provider_types": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "List of provider types that implement this route"
+ "id": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
}
},
"additionalProperties": false,
"required": [
- "route",
- "method",
- "provider_types"
+ "content",
+ "role",
+ "type"
],
- "title": "RouteInfo",
- "description": "Information about an API route including its path, method, and implementing providers."
+ "title": "OpenAIResponseMessage",
+ "description": "Corresponds to the various Message types in the Responses API. They are all under one type because the Responses API gives them all the same \"type\" value, and there is no way to tell them apart in certain scenarios."
},
- "ListRoutesResponse": {
+ "OpenAIResponseOutputMessageContent": {
"type": "object",
"properties": {
- "data": {
+ "text": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string",
+ "const": "output_text",
+ "default": "output_text"
+ },
+ "annotations": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/RouteInfo"
- },
- "description": "List of available route information objects"
+ "$ref": "#/components/schemas/OpenAIResponseAnnotations"
+ }
}
},
"additionalProperties": false,
"required": [
- "data"
+ "text",
+ "type",
+ "annotations"
],
- "title": "ListRoutesResponse",
- "description": "Response containing a list of all available API routes."
+ "title": "OpenAIResponseOutputMessageContentOutputText"
},
- "Model": {
+ "OpenAIResponseOutputMessageFileSearchToolCall": {
"type": "object",
"properties": {
- "identifier": {
+ "id": {
"type": "string",
- "description": "Unique identifier for this resource in llama stack"
+ "description": "Unique identifier for this tool call"
},
- "provider_resource_id": {
- "type": "string",
- "description": "Unique identifier for this resource in the provider"
+ "queries": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of search queries executed"
},
- "provider_id": {
+ "status": {
"type": "string",
- "description": "ID of the provider that owns this resource"
+ "description": "Current status of the file search operation"
},
"type": {
"type": "string",
- "enum": [
- "model",
- "shield",
- "vector_db",
- "dataset",
- "scoring_function",
- "benchmark",
- "tool",
- "tool_group",
- "prompt"
- ],
- "const": "model",
- "default": "model",
- "description": "The resource type, always 'model' for model resources"
+ "const": "file_search_call",
+ "default": "file_search_call",
+ "description": "Tool call type identifier, always \"file_search_call\""
},
- "metadata": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
+ "results": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "attributes": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "(Optional) Key-value attributes associated with the file"
},
- {
- "type": "array"
+ "file_id": {
+ "type": "string",
+ "description": "Unique identifier of the file containing the result"
},
- {
- "type": "object"
+ "filename": {
+ "type": "string",
+ "description": "Name of the file containing the result"
+ },
+ "score": {
+ "type": "number",
+ "description": "Relevance score for this search result (between 0 and 1)"
+ },
+ "text": {
+ "type": "string",
+ "description": "Text content of the search result"
}
- ]
+ },
+ "additionalProperties": false,
+ "required": [
+ "attributes",
+ "file_id",
+ "filename",
+ "score",
+ "text"
+ ],
+ "title": "OpenAIResponseOutputMessageFileSearchToolCallResults",
+ "description": "Search results returned by the file search operation."
},
- "description": "Any additional metadata for this model"
- },
- "model_type": {
- "$ref": "#/components/schemas/ModelType",
- "default": "llm",
- "description": "The type of model (LLM or embedding model)"
- }
- },
- "additionalProperties": false,
- "required": [
- "identifier",
- "provider_id",
- "type",
- "metadata",
- "model_type"
- ],
- "title": "Model",
- "description": "A model resource representing an AI model registered in Llama Stack."
- },
- "ModelType": {
- "type": "string",
- "enum": [
- "llm",
- "embedding"
- ],
- "title": "ModelType",
- "description": "Enumeration of supported model types in Llama Stack."
- },
- "ListModelsResponse": {
- "type": "object",
- "properties": {
- "data": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Model"
- }
+ "description": "(Optional) Search results returned by the file search operation"
}
},
"additionalProperties": false,
"required": [
- "data"
+ "id",
+ "queries",
+ "status",
+ "type"
],
- "title": "ListModelsResponse"
+ "title": "OpenAIResponseOutputMessageFileSearchToolCall",
+ "description": "File search tool call output message for OpenAI responses."
},
- "RegisterModelRequest": {
+ "OpenAIResponseOutputMessageFunctionToolCall": {
"type": "object",
"properties": {
- "model_id": {
+ "call_id": {
"type": "string",
- "description": "The identifier of the model to register."
+ "description": "Unique identifier for the function call"
},
- "provider_model_id": {
+ "name": {
"type": "string",
- "description": "The identifier of the model in the provider."
+ "description": "Name of the function being called"
},
- "provider_id": {
+ "arguments": {
"type": "string",
- "description": "The identifier of the provider."
+ "description": "JSON string containing the function arguments"
},
- "metadata": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
- }
- ]
- },
- "description": "Any additional metadata for this model."
+ "type": {
+ "type": "string",
+ "const": "function_call",
+ "default": "function_call",
+ "description": "Tool call type identifier, always \"function_call\""
},
- "model_type": {
- "$ref": "#/components/schemas/ModelType",
- "description": "The type of model to register."
- }
- },
- "additionalProperties": false,
- "required": [
- "model_id"
- ],
- "title": "RegisterModelRequest"
- },
- "RunModerationRequest": {
- "type": "object",
- "properties": {
- "input": {
- "oneOf": [
- {
- "type": "string"
- },
- {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- ],
- "description": "Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models."
+ "id": {
+ "type": "string",
+ "description": "(Optional) Additional identifier for the tool call"
},
- "model": {
+ "status": {
"type": "string",
- "description": "The content moderation model you would like to use."
+ "description": "(Optional) Current status of the function call execution"
}
},
"additionalProperties": false,
"required": [
- "input",
- "model"
+ "call_id",
+ "name",
+ "arguments",
+ "type"
],
- "title": "RunModerationRequest"
+ "title": "OpenAIResponseOutputMessageFunctionToolCall",
+ "description": "Function tool call output message for OpenAI responses."
},
- "ModerationObject": {
+ "OpenAIResponseOutputMessageMCPCall": {
"type": "object",
"properties": {
"id": {
"type": "string",
- "description": "The unique identifier for the moderation request."
+ "description": "Unique identifier for this MCP call"
},
- "model": {
+ "type": {
"type": "string",
- "description": "The model used to generate the moderation results."
+ "const": "mcp_call",
+ "default": "mcp_call",
+ "description": "Tool call type identifier, always \"mcp_call\""
},
- "results": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ModerationObjectResults"
- },
- "description": "A list of moderation objects"
+ "arguments": {
+ "type": "string",
+ "description": "JSON string containing the MCP call arguments"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the MCP method being called"
+ },
+ "server_label": {
+ "type": "string",
+ "description": "Label identifying the MCP server handling the call"
+ },
+ "error": {
+ "type": "string",
+ "description": "(Optional) Error message if the MCP call failed"
+ },
+ "output": {
+ "type": "string",
+ "description": "(Optional) Output result from the successful MCP call"
}
},
"additionalProperties": false,
"required": [
"id",
- "model",
- "results"
+ "type",
+ "arguments",
+ "name",
+ "server_label"
],
- "title": "ModerationObject",
- "description": "A moderation object."
+ "title": "OpenAIResponseOutputMessageMCPCall",
+ "description": "Model Context Protocol (MCP) call output message for OpenAI responses."
},
- "ModerationObjectResults": {
+ "OpenAIResponseOutputMessageMCPListTools": {
"type": "object",
"properties": {
- "flagged": {
- "type": "boolean",
- "description": "Whether any of the below categories are flagged."
- },
- "categories": {
- "type": "object",
- "additionalProperties": {
- "type": "boolean"
- },
- "description": "A list of the categories, and whether they are flagged or not."
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for this MCP list tools operation"
},
- "category_applied_input_types": {
- "type": "object",
- "additionalProperties": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "description": "A list of the categories along with the input type(s) that the score applies to."
- },
- "category_scores": {
- "type": "object",
- "additionalProperties": {
- "type": "number"
- },
- "description": "A list of the categories along with their scores as predicted by model."
+ "type": {
+ "type": "string",
+ "const": "mcp_list_tools",
+ "default": "mcp_list_tools",
+ "description": "Tool call type identifier, always \"mcp_list_tools\""
},
- "user_message": {
- "type": "string"
+ "server_label": {
+ "type": "string",
+ "description": "Label identifying the MCP server providing the tools"
},
- "metadata": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
+ "tools": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "input_schema": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "JSON schema defining the tool's input parameters"
},
- {
- "type": "array"
+ "name": {
+ "type": "string",
+ "description": "Name of the tool"
},
- {
- "type": "object"
+ "description": {
+ "type": "string",
+ "description": "(Optional) Description of what the tool does"
}
- ]
- }
+ },
+ "additionalProperties": false,
+ "required": [
+ "input_schema",
+ "name"
+ ],
+ "title": "MCPListToolsTool",
+ "description": "Tool definition returned by MCP list tools operation."
+ },
+ "description": "List of available tools provided by the MCP server"
}
},
"additionalProperties": false,
"required": [
- "flagged",
- "metadata"
+ "id",
+ "type",
+ "server_label",
+ "tools"
],
- "title": "ModerationObjectResults",
- "description": "A moderation object."
+ "title": "OpenAIResponseOutputMessageMCPListTools",
+ "description": "MCP list tools output message containing available tools from an MCP server."
},
- "Prompt": {
+ "OpenAIResponseOutputMessageWebSearchToolCall": {
"type": "object",
"properties": {
- "prompt": {
+ "id": {
"type": "string",
- "description": "The system prompt text with variable placeholders. Variables are only supported when using the Responses API."
- },
- "version": {
- "type": "integer",
- "description": "Version (integer starting at 1, incremented on save)"
+ "description": "Unique identifier for this tool call"
},
- "prompt_id": {
+ "status": {
"type": "string",
- "description": "Unique identifier formatted as 'pmpt_<48-digit-hash>'"
+ "description": "Current status of the web search operation"
},
- "variables": {
+ "type": {
+ "type": "string",
+ "const": "web_search_call",
+ "default": "web_search_call",
+ "description": "Tool call type identifier, always \"web_search_call\""
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "id",
+ "status",
+ "type"
+ ],
+ "title": "OpenAIResponseOutputMessageWebSearchToolCall",
+ "description": "Web search tool call output message for OpenAI responses."
+ },
+ "CreateConversationRequest": {
+ "type": "object",
+ "properties": {
+ "items": {
"type": "array",
"items": {
- "type": "string"
+ "$ref": "#/components/schemas/ConversationItem"
},
- "description": "List of prompt variable names that can be used in the prompt template"
+ "description": "Initial items to include in the conversation context."
},
- "is_default": {
- "type": "boolean",
- "default": false,
- "description": "Boolean indicating whether this version is the default version for this prompt"
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Set of key-value pairs that can be attached to an object."
}
},
"additionalProperties": false,
- "required": [
- "version",
- "prompt_id",
- "variables",
- "is_default"
- ],
- "title": "Prompt",
- "description": "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack."
+ "title": "CreateConversationRequest"
},
- "ListPromptsResponse": {
+ "Conversation": {
"type": "object",
"properties": {
- "data": {
+ "id": {
+ "type": "string"
+ },
+ "object": {
+ "type": "string",
+ "const": "conversation",
+ "default": "conversation"
+ },
+ "created_at": {
+ "type": "integer"
+ },
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "items": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/Prompt"
+ "type": "object",
+ "title": "dict",
+ "description": "dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: dict(one=1, two=2)"
}
}
},
"additionalProperties": false,
"required": [
- "data"
+ "id",
+ "object",
+ "created_at"
],
- "title": "ListPromptsResponse",
- "description": "Response model to list prompts."
+ "title": "Conversation",
+ "description": "OpenAI-compatible conversation object."
},
- "CreatePromptRequest": {
+ "UpdateConversationRequest": {
"type": "object",
"properties": {
- "prompt": {
- "type": "string",
- "description": "The prompt text content with variable placeholders."
- },
- "variables": {
- "type": "array",
- "items": {
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
"type": "string"
},
- "description": "List of variable names that can be used in the prompt template."
+ "description": "Set of key-value pairs that can be attached to an object."
}
},
"additionalProperties": false,
"required": [
- "prompt"
+ "metadata"
],
- "title": "CreatePromptRequest"
+ "title": "UpdateConversationRequest"
},
- "UpdatePromptRequest": {
+ "ConversationDeletedResource": {
"type": "object",
"properties": {
- "prompt": {
+ "id": {
+ "type": "string"
+ },
+ "object": {
"type": "string",
- "description": "The updated prompt text content."
+ "default": "conversation.deleted"
},
- "version": {
- "type": "integer",
- "description": "The current version of the prompt being updated."
+ "deleted": {
+ "type": "boolean",
+ "default": true
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "id",
+ "object",
+ "deleted"
+ ],
+ "title": "ConversationDeletedResource",
+ "description": "Response for deleted conversation."
+ },
+ "ConversationItemList": {
+ "type": "object",
+ "properties": {
+ "object": {
+ "type": "string",
+ "default": "list"
},
- "variables": {
+ "data": {
"type": "array",
"items": {
- "type": "string"
- },
- "description": "Updated list of variable names that can be used in the prompt template."
+ "$ref": "#/components/schemas/ConversationItem"
+ }
},
- "set_as_default": {
+ "first_id": {
+ "type": "string"
+ },
+ "last_id": {
+ "type": "string"
+ },
+ "has_more": {
"type": "boolean",
- "description": "Set the new version as the default (default=True)."
+ "default": false
}
},
"additionalProperties": false,
"required": [
- "prompt",
- "version",
- "set_as_default"
+ "object",
+ "data",
+ "has_more"
],
- "title": "UpdatePromptRequest"
+ "title": "ConversationItemList",
+ "description": "List of conversation items with pagination."
},
- "SetDefaultVersionRequest": {
+ "AddItemsRequest": {
"type": "object",
"properties": {
- "version": {
- "type": "integer",
- "description": "The version to set as default."
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ConversationItem"
+ },
+ "description": "Items to include in the conversation context."
}
},
"additionalProperties": false,
"required": [
- "version"
+ "items"
],
- "title": "SetDefaultVersionRequest"
+ "title": "AddItemsRequest"
},
- "ProviderInfo": {
+ "ConversationItemDeletedResource": {
"type": "object",
"properties": {
- "api": {
- "type": "string",
- "description": "The API name this provider implements"
+ "id": {
+ "type": "string"
},
- "provider_id": {
+ "object": {
"type": "string",
- "description": "Unique identifier for the provider"
+ "default": "conversation.item.deleted"
},
- "provider_type": {
+ "deleted": {
+ "type": "boolean",
+ "default": true
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "id",
+ "object",
+ "deleted"
+ ],
+ "title": "ConversationItemDeletedResource",
+ "description": "Response for deleted conversation item."
+ },
+ "OpenaiEmbeddingsRequest": {
+ "type": "object",
+ "properties": {
+ "model": {
"type": "string",
- "description": "The type of provider implementation"
+ "description": "The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint."
},
- "config": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
+ "input": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "items": {
"type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
}
- ]
- },
- "description": "Configuration parameters for the provider"
+ }
+ ],
+ "description": "Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings."
},
- "health": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
+ "encoding_format": {
+ "type": "string",
+ "description": "(Optional) The format to return the embeddings in. Can be either \"float\" or \"base64\". Defaults to \"float\"."
+ },
+ "dimensions": {
+ "type": "integer",
+ "description": "(Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models."
+ },
+ "user": {
+ "type": "string",
+ "description": "(Optional) A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse."
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "model",
+ "input"
+ ],
+ "title": "OpenaiEmbeddingsRequest"
+ },
+ "OpenAIEmbeddingData": {
+ "type": "object",
+ "properties": {
+ "object": {
+ "type": "string",
+ "const": "embedding",
+ "default": "embedding",
+ "description": "The object type, which will be \"embedding\""
+ },
+ "embedding": {
+ "oneOf": [
+ {
+ "type": "array",
+ "items": {
"type": "number"
- },
- {
- "type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
}
- ]
- },
- "description": "Current health status of the provider"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "description": "The embedding vector as a list of floats (when encoding_format=\"float\") or as a base64-encoded string (when encoding_format=\"base64\")"
+ },
+ "index": {
+ "type": "integer",
+ "description": "The index of the embedding in the input list"
}
},
"additionalProperties": false,
"required": [
- "api",
- "provider_id",
- "provider_type",
- "config",
- "health"
+ "object",
+ "embedding",
+ "index"
],
- "title": "ProviderInfo",
- "description": "Information about a registered provider including its configuration and health status."
+ "title": "OpenAIEmbeddingData",
+ "description": "A single embedding data object from an OpenAI-compatible embeddings response."
},
- "ListProvidersResponse": {
+ "OpenAIEmbeddingUsage": {
+ "type": "object",
+ "properties": {
+ "prompt_tokens": {
+ "type": "integer",
+ "description": "The number of tokens in the input"
+ },
+ "total_tokens": {
+ "type": "integer",
+ "description": "The total number of tokens used"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "prompt_tokens",
+ "total_tokens"
+ ],
+ "title": "OpenAIEmbeddingUsage",
+ "description": "Usage information for an OpenAI-compatible embeddings response."
+ },
+ "OpenAIEmbeddingsResponse": {
"type": "object",
"properties": {
+ "object": {
+ "type": "string",
+ "const": "list",
+ "default": "list",
+ "description": "The object type, which will be \"list\""
+ },
"data": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/ProviderInfo"
+ "$ref": "#/components/schemas/OpenAIEmbeddingData"
},
- "description": "List of provider information objects"
+ "description": "List of embedding data objects"
+ },
+ "model": {
+ "type": "string",
+ "description": "The model that was used to generate the embeddings"
+ },
+ "usage": {
+ "$ref": "#/components/schemas/OpenAIEmbeddingUsage",
+ "description": "Usage information"
}
},
"additionalProperties": false,
"required": [
- "data"
+ "object",
+ "data",
+ "model",
+ "usage"
],
- "title": "ListProvidersResponse",
- "description": "Response containing a list of all available providers."
+ "title": "OpenAIEmbeddingsResponse",
+ "description": "Response from an OpenAI-compatible embeddings request."
},
- "ListOpenAIResponseObject": {
+ "OpenAIFilePurpose": {
+ "type": "string",
+ "enum": [
+ "assistants",
+ "batch"
+ ],
+ "title": "OpenAIFilePurpose",
+ "description": "Valid purpose values for OpenAI Files API."
+ },
+ "ListOpenAIFileResponse": {
"type": "object",
"properties": {
"data": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseObjectWithInput"
+ "$ref": "#/components/schemas/OpenAIFileObject"
},
- "description": "List of response objects with their input context"
+ "description": "List of file objects"
},
"has_more": {
"type": "boolean",
- "description": "Whether there are more results available beyond this page"
+ "description": "Whether there are more files available beyond this page"
},
"first_id": {
"type": "string",
- "description": "Identifier of the first item in this page"
+ "description": "ID of the first file in the list for pagination"
},
"last_id": {
"type": "string",
- "description": "Identifier of the last item in this page"
+ "description": "ID of the last file in the list for pagination"
},
"object": {
"type": "string",
"const": "list",
"default": "list",
- "description": "Object type identifier, always \"list\""
+ "description": "The object type, which is always \"list\""
}
},
"additionalProperties": false,
@@ -5987,381 +6575,332 @@
"last_id",
"object"
],
- "title": "ListOpenAIResponseObject",
- "description": "Paginated list of OpenAI response objects with navigation metadata."
+ "title": "ListOpenAIFileResponse",
+ "description": "Response for listing files in OpenAI Files API."
},
- "OpenAIResponseAnnotationCitation": {
+ "OpenAIFileObject": {
"type": "object",
"properties": {
- "type": {
+ "object": {
"type": "string",
- "const": "url_citation",
- "default": "url_citation",
- "description": "Annotation type identifier, always \"url_citation\""
+ "const": "file",
+ "default": "file",
+ "description": "The object type, which is always \"file\""
},
- "end_index": {
+ "id": {
+ "type": "string",
+ "description": "The file identifier, which can be referenced in the API endpoints"
+ },
+ "bytes": {
"type": "integer",
- "description": "End position of the citation span in the content"
+ "description": "The size of the file, in bytes"
},
- "start_index": {
+ "created_at": {
"type": "integer",
- "description": "Start position of the citation span in the content"
+ "description": "The Unix timestamp (in seconds) for when the file was created"
},
- "title": {
+ "expires_at": {
+ "type": "integer",
+ "description": "The Unix timestamp (in seconds) for when the file expires"
+ },
+ "filename": {
"type": "string",
- "description": "Title of the referenced web resource"
+ "description": "The name of the file"
},
- "url": {
+ "purpose": {
"type": "string",
- "description": "URL of the referenced web resource"
+ "enum": [
+ "assistants",
+ "batch"
+ ],
+ "description": "The intended purpose of the file"
}
},
"additionalProperties": false,
"required": [
- "type",
- "end_index",
- "start_index",
- "title",
- "url"
- ],
- "title": "OpenAIResponseAnnotationCitation",
- "description": "URL citation annotation for referencing external web resources."
+ "object",
+ "id",
+ "bytes",
+ "created_at",
+ "expires_at",
+ "filename",
+ "purpose"
+ ],
+ "title": "OpenAIFileObject",
+ "description": "OpenAI File object as defined in the OpenAI Files API."
},
- "OpenAIResponseAnnotationContainerFileCitation": {
+ "ExpiresAfter": {
"type": "object",
"properties": {
- "type": {
+ "anchor": {
"type": "string",
- "const": "container_file_citation",
- "default": "container_file_citation"
- },
- "container_id": {
- "type": "string"
- },
- "end_index": {
- "type": "integer"
- },
- "file_id": {
- "type": "string"
- },
- "filename": {
- "type": "string"
+ "const": "created_at"
},
- "start_index": {
+ "seconds": {
"type": "integer"
}
},
"additionalProperties": false,
"required": [
- "type",
- "container_id",
- "end_index",
- "file_id",
- "filename",
- "start_index"
+ "anchor",
+ "seconds"
],
- "title": "OpenAIResponseAnnotationContainerFileCitation"
+ "title": "ExpiresAfter",
+ "description": "Control expiration of uploaded files.\nParams:\n - anchor, must be \"created_at\"\n - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)"
},
- "OpenAIResponseAnnotationFileCitation": {
+ "OpenAIFileDeleteResponse": {
"type": "object",
"properties": {
- "type": {
- "type": "string",
- "const": "file_citation",
- "default": "file_citation",
- "description": "Annotation type identifier, always \"file_citation\""
- },
- "file_id": {
+ "id": {
"type": "string",
- "description": "Unique identifier of the referenced file"
+ "description": "The file identifier that was deleted"
},
- "filename": {
+ "object": {
"type": "string",
- "description": "Name of the referenced file"
+ "const": "file",
+ "default": "file",
+ "description": "The object type, which is always \"file\""
},
- "index": {
- "type": "integer",
- "description": "Position index of the citation within the content"
+ "deleted": {
+ "type": "boolean",
+ "description": "Whether the file was successfully deleted"
}
},
"additionalProperties": false,
"required": [
- "type",
- "file_id",
- "filename",
- "index"
+ "id",
+ "object",
+ "deleted"
],
- "title": "OpenAIResponseAnnotationFileCitation",
- "description": "File citation annotation for referencing specific files in response content."
+ "title": "OpenAIFileDeleteResponse",
+ "description": "Response for deleting a file in OpenAI Files API."
},
- "OpenAIResponseAnnotationFilePath": {
+ "Response": {
+ "type": "object",
+ "title": "Response"
+ },
+ "HealthInfo": {
"type": "object",
"properties": {
- "type": {
+ "status": {
"type": "string",
- "const": "file_path",
- "default": "file_path"
- },
- "file_id": {
- "type": "string"
- },
- "index": {
- "type": "integer"
+ "enum": [
+ "OK",
+ "Error",
+ "Not Implemented"
+ ],
+ "description": "Current health status of the service"
}
},
"additionalProperties": false,
"required": [
- "type",
- "file_id",
- "index"
- ],
- "title": "OpenAIResponseAnnotationFilePath"
- },
- "OpenAIResponseAnnotations": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/OpenAIResponseAnnotationFileCitation"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseAnnotationCitation"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseAnnotationFilePath"
- }
+ "status"
],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "file_citation": "#/components/schemas/OpenAIResponseAnnotationFileCitation",
- "url_citation": "#/components/schemas/OpenAIResponseAnnotationCitation",
- "container_file_citation": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation",
- "file_path": "#/components/schemas/OpenAIResponseAnnotationFilePath"
- }
- }
+ "title": "HealthInfo",
+ "description": "Health status information for the service."
},
- "OpenAIResponseError": {
+ "RouteInfo": {
"type": "object",
"properties": {
- "code": {
+ "route": {
"type": "string",
- "description": "Error code identifying the type of failure"
+ "description": "The API endpoint path"
},
- "message": {
+ "method": {
"type": "string",
- "description": "Human-readable error message describing the failure"
+ "description": "HTTP method for the route"
+ },
+ "provider_types": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of provider types that implement this route"
}
},
"additionalProperties": false,
"required": [
- "code",
- "message"
+ "route",
+ "method",
+ "provider_types"
],
- "title": "OpenAIResponseError",
- "description": "Error details for failed OpenAI response requests."
- },
- "OpenAIResponseInput": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseMessage"
- }
- ]
+ "title": "RouteInfo",
+ "description": "Information about an API route including its path, method, and implementing providers."
},
- "OpenAIResponseInputFunctionToolCallOutput": {
+ "ListRoutesResponse": {
"type": "object",
"properties": {
- "call_id": {
- "type": "string"
- },
- "output": {
- "type": "string"
- },
- "type": {
- "type": "string",
- "const": "function_call_output",
- "default": "function_call_output"
- },
- "id": {
- "type": "string"
- },
- "status": {
- "type": "string"
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/RouteInfo"
+ },
+ "description": "List of available route information objects"
}
},
"additionalProperties": false,
"required": [
- "call_id",
- "output",
- "type"
- ],
- "title": "OpenAIResponseInputFunctionToolCallOutput",
- "description": "This represents the output of a function call that gets passed back to the model."
- },
- "OpenAIResponseInputMessageContent": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage"
- }
+ "data"
],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText",
- "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage"
- }
- }
+ "title": "ListRoutesResponse",
+ "description": "Response containing a list of all available API routes."
},
- "OpenAIResponseInputMessageContentImage": {
+ "Model": {
"type": "object",
"properties": {
- "detail": {
- "oneOf": [
- {
- "type": "string",
- "const": "low"
- },
- {
- "type": "string",
- "const": "high"
- },
- {
- "type": "string",
- "const": "auto"
- }
- ],
- "default": "auto",
- "description": "Level of detail for image processing, can be \"low\", \"high\", or \"auto\""
+ "identifier": {
+ "type": "string",
+ "description": "Unique identifier for this resource in llama stack"
},
- "type": {
+ "provider_resource_id": {
"type": "string",
- "const": "input_image",
- "default": "input_image",
- "description": "Content type identifier, always \"input_image\""
+ "description": "Unique identifier for this resource in the provider"
},
- "image_url": {
- "type": "string",
- "description": "(Optional) URL of the image content"
- }
- },
- "additionalProperties": false,
- "required": [
- "detail",
- "type"
- ],
- "title": "OpenAIResponseInputMessageContentImage",
- "description": "Image content for input messages in OpenAI response format."
- },
- "OpenAIResponseInputMessageContentText": {
- "type": "object",
- "properties": {
- "text": {
+ "provider_id": {
"type": "string",
- "description": "The text content of the input message"
+ "description": "ID of the provider that owns this resource"
},
"type": {
"type": "string",
- "const": "input_text",
- "default": "input_text",
- "description": "Content type identifier, always \"input_text\""
+ "enum": [
+ "model",
+ "shield",
+ "vector_db",
+ "dataset",
+ "scoring_function",
+ "benchmark",
+ "tool",
+ "tool_group",
+ "prompt"
+ ],
+ "const": "model",
+ "default": "model",
+ "description": "The resource type, always 'model' for model resources"
+ },
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "Any additional metadata for this model"
+ },
+ "model_type": {
+ "$ref": "#/components/schemas/ModelType",
+ "default": "llm",
+ "description": "The type of model (LLM or embedding model)"
}
},
"additionalProperties": false,
"required": [
- "text",
- "type"
+ "identifier",
+ "provider_id",
+ "type",
+ "metadata",
+ "model_type"
],
- "title": "OpenAIResponseInputMessageContentText",
- "description": "Text content for input messages in OpenAI response format."
+ "title": "Model",
+ "description": "A model resource representing an AI model registered in Llama Stack."
},
- "OpenAIResponseMCPApprovalRequest": {
+ "ModelType": {
+ "type": "string",
+ "enum": [
+ "llm",
+ "embedding"
+ ],
+ "title": "ModelType",
+ "description": "Enumeration of supported model types in Llama Stack."
+ },
+ "ListModelsResponse": {
"type": "object",
"properties": {
- "arguments": {
- "type": "string"
- },
- "id": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "server_label": {
- "type": "string"
- },
- "type": {
- "type": "string",
- "const": "mcp_approval_request",
- "default": "mcp_approval_request"
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Model"
+ }
}
},
"additionalProperties": false,
"required": [
- "arguments",
- "id",
- "name",
- "server_label",
- "type"
+ "data"
],
- "title": "OpenAIResponseMCPApprovalRequest",
- "description": "A request for human approval of a tool invocation."
+ "title": "ListModelsResponse"
},
- "OpenAIResponseMCPApprovalResponse": {
+ "RegisterModelRequest": {
"type": "object",
"properties": {
- "approval_request_id": {
- "type": "string"
+ "model_id": {
+ "type": "string",
+ "description": "The identifier of the model to register."
},
- "approve": {
- "type": "boolean"
+ "provider_model_id": {
+ "type": "string",
+ "description": "The identifier of the model in the provider."
},
- "type": {
+ "provider_id": {
"type": "string",
- "const": "mcp_approval_response",
- "default": "mcp_approval_response"
+ "description": "The identifier of the provider."
},
- "id": {
- "type": "string"
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "Any additional metadata for this model."
},
- "reason": {
- "type": "string"
+ "model_type": {
+ "$ref": "#/components/schemas/ModelType",
+ "description": "The type of model to register."
}
},
"additionalProperties": false,
"required": [
- "approval_request_id",
- "approve",
- "type"
+ "model_id"
],
- "title": "OpenAIResponseMCPApprovalResponse",
- "description": "A response to an MCP approval request."
+ "title": "RegisterModelRequest"
},
- "OpenAIResponseMessage": {
+ "RunModerationRequest": {
"type": "object",
"properties": {
- "content": {
+ "input": {
"oneOf": [
{
"type": "string"
@@ -6369,490 +6908,635 @@
{
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseInputMessageContent"
- }
- },
- {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageContent"
+ "type": "string"
}
}
- ]
- },
- "role": {
- "oneOf": [
- {
- "type": "string",
- "const": "system"
- },
- {
- "type": "string",
- "const": "developer"
- },
- {
- "type": "string",
- "const": "user"
- },
- {
- "type": "string",
- "const": "assistant"
- }
- ]
+ ],
+ "description": "Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models."
},
- "type": {
+ "model": {
"type": "string",
- "const": "message",
- "default": "message"
- },
- "id": {
- "type": "string"
- },
- "status": {
- "type": "string"
+ "description": "The content moderation model you would like to use."
}
},
"additionalProperties": false,
"required": [
- "content",
- "role",
- "type"
+ "input",
+ "model"
],
- "title": "OpenAIResponseMessage",
- "description": "Corresponds to the various Message types in the Responses API. They are all under one type because the Responses API gives them all the same \"type\" value, and there is no way to tell them apart in certain scenarios."
+ "title": "RunModerationRequest"
},
- "OpenAIResponseObjectWithInput": {
+ "ModerationObject": {
"type": "object",
"properties": {
- "created_at": {
- "type": "integer",
- "description": "Unix timestamp when the response was created"
- },
- "error": {
- "$ref": "#/components/schemas/OpenAIResponseError",
- "description": "(Optional) Error details if the response generation failed"
- },
"id": {
"type": "string",
- "description": "Unique identifier for this response"
+ "description": "The unique identifier for the moderation request."
},
"model": {
"type": "string",
- "description": "Model identifier used for generation"
- },
- "object": {
- "type": "string",
- "const": "response",
- "default": "response",
- "description": "Object type identifier, always \"response\""
+ "description": "The model used to generate the moderation results."
},
- "output": {
+ "results": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseOutput"
+ "$ref": "#/components/schemas/ModerationObjectResults"
},
- "description": "List of generated output items (messages, tool calls, etc.)"
- },
- "parallel_tool_calls": {
+ "description": "A list of moderation objects"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "id",
+ "model",
+ "results"
+ ],
+ "title": "ModerationObject",
+ "description": "A moderation object."
+ },
+ "ModerationObjectResults": {
+ "type": "object",
+ "properties": {
+ "flagged": {
"type": "boolean",
- "default": false,
- "description": "Whether tool calls can be executed in parallel"
+ "description": "Whether any of the below categories are flagged."
},
- "previous_response_id": {
- "type": "string",
- "description": "(Optional) ID of the previous response in a conversation"
+ "categories": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "boolean"
+ },
+ "description": "A list of the categories, and whether they are flagged or not."
},
- "status": {
- "type": "string",
- "description": "Current status of the response generation"
+ "category_applied_input_types": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "description": "A list of the categories along with the input type(s) that the score applies to."
},
- "temperature": {
- "type": "number",
- "description": "(Optional) Sampling temperature used for generation"
+ "category_scores": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "number"
+ },
+ "description": "A list of the categories along with their scores as predicted by model."
},
- "text": {
- "$ref": "#/components/schemas/OpenAIResponseText",
- "description": "Text formatting configuration for the response"
+ "user_message": {
+ "type": "string"
},
- "top_p": {
- "type": "number",
- "description": "(Optional) Nucleus sampling parameter used for generation"
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ }
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "flagged",
+ "metadata"
+ ],
+ "title": "ModerationObjectResults",
+ "description": "A moderation object."
+ },
+ "Prompt": {
+ "type": "object",
+ "properties": {
+ "prompt": {
+ "type": "string",
+ "description": "The system prompt text with variable placeholders. Variables are only supported when using the Responses API."
},
- "truncation": {
+ "version": {
+ "type": "integer",
+ "description": "Version (integer starting at 1, incremented on save)"
+ },
+ "prompt_id": {
"type": "string",
- "description": "(Optional) Truncation strategy applied to the response"
+ "description": "Unique identifier formatted as 'pmpt_<48-digit-hash>'"
},
- "input": {
+ "variables": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseInput"
+ "type": "string"
},
- "description": "List of input items that led to this response"
+ "description": "List of prompt variable names that can be used in the prompt template"
+ },
+ "is_default": {
+ "type": "boolean",
+ "default": false,
+ "description": "Boolean indicating whether this version is the default version for this prompt"
}
},
"additionalProperties": false,
"required": [
- "created_at",
- "id",
- "model",
- "object",
- "output",
- "parallel_tool_calls",
- "status",
- "text",
- "input"
+ "version",
+ "prompt_id",
+ "variables",
+ "is_default"
],
- "title": "OpenAIResponseObjectWithInput",
- "description": "OpenAI response object extended with input context information."
+ "title": "Prompt",
+ "description": "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack."
},
- "OpenAIResponseOutput": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/OpenAIResponseMessage"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
+ "ListPromptsResponse": {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Prompt"
+ }
}
+ },
+ "additionalProperties": false,
+ "required": [
+ "data"
],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "message": "#/components/schemas/OpenAIResponseMessage",
- "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall",
- "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall",
- "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall",
- "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall",
- "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools",
- "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
- }
- }
+ "title": "ListPromptsResponse",
+ "description": "Response model to list prompts."
},
- "OpenAIResponseOutputMessageContent": {
+ "CreatePromptRequest": {
"type": "object",
"properties": {
- "text": {
- "type": "string"
- },
- "type": {
+ "prompt": {
"type": "string",
- "const": "output_text",
- "default": "output_text"
+ "description": "The prompt text content with variable placeholders."
},
- "annotations": {
+ "variables": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseAnnotations"
- }
+ "type": "string"
+ },
+ "description": "List of variable names that can be used in the prompt template."
}
},
"additionalProperties": false,
"required": [
- "text",
- "type",
- "annotations"
+ "prompt"
],
- "title": "OpenAIResponseOutputMessageContentOutputText"
+ "title": "CreatePromptRequest"
},
- "OpenAIResponseOutputMessageFileSearchToolCall": {
+ "UpdatePromptRequest": {
"type": "object",
"properties": {
- "id": {
+ "prompt": {
"type": "string",
- "description": "Unique identifier for this tool call"
+ "description": "The updated prompt text content."
},
- "queries": {
+ "version": {
+ "type": "integer",
+ "description": "The current version of the prompt being updated."
+ },
+ "variables": {
"type": "array",
"items": {
"type": "string"
},
- "description": "List of search queries executed"
+ "description": "Updated list of variable names that can be used in the prompt template."
},
- "status": {
+ "set_as_default": {
+ "type": "boolean",
+ "description": "Set the new version as the default (default=True)."
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "prompt",
+ "version",
+ "set_as_default"
+ ],
+ "title": "UpdatePromptRequest"
+ },
+ "SetDefaultVersionRequest": {
+ "type": "object",
+ "properties": {
+ "version": {
+ "type": "integer",
+ "description": "The version to set as default."
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "version"
+ ],
+ "title": "SetDefaultVersionRequest"
+ },
+ "ProviderInfo": {
+ "type": "object",
+ "properties": {
+ "api": {
"type": "string",
- "description": "Current status of the file search operation"
+ "description": "The API name this provider implements"
},
- "type": {
+ "provider_id": {
"type": "string",
- "const": "file_search_call",
- "default": "file_search_call",
- "description": "Tool call type identifier, always \"file_search_call\""
+ "description": "Unique identifier for the provider"
},
- "results": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "attributes": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
- }
- ]
- },
- "description": "(Optional) Key-value attributes associated with the file"
+ "provider_type": {
+ "type": "string",
+ "description": "The type of provider implementation"
+ },
+ "config": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
},
- "file_id": {
- "type": "string",
- "description": "Unique identifier of the file containing the result"
+ {
+ "type": "boolean"
},
- "filename": {
- "type": "string",
- "description": "Name of the file containing the result"
+ {
+ "type": "number"
},
- "score": {
- "type": "number",
- "description": "Relevance score for this search result (between 0 and 1)"
+ {
+ "type": "string"
},
- "text": {
- "type": "string",
- "description": "Text content of the search result"
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
}
- },
- "additionalProperties": false,
- "required": [
- "attributes",
- "file_id",
- "filename",
- "score",
- "text"
- ],
- "title": "OpenAIResponseOutputMessageFileSearchToolCallResults",
- "description": "Search results returned by the file search operation."
+ ]
},
- "description": "(Optional) Search results returned by the file search operation"
+ "description": "Configuration parameters for the provider"
+ },
+ "health": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "Current health status of the provider"
}
},
"additionalProperties": false,
"required": [
- "id",
- "queries",
- "status",
- "type"
+ "api",
+ "provider_id",
+ "provider_type",
+ "config",
+ "health"
],
- "title": "OpenAIResponseOutputMessageFileSearchToolCall",
- "description": "File search tool call output message for OpenAI responses."
+ "title": "ProviderInfo",
+ "description": "Information about a registered provider including its configuration and health status."
},
- "OpenAIResponseOutputMessageFunctionToolCall": {
+ "ListProvidersResponse": {
"type": "object",
"properties": {
- "call_id": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ProviderInfo"
+ },
+ "description": "List of provider information objects"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "data"
+ ],
+ "title": "ListProvidersResponse",
+ "description": "Response containing a list of all available providers."
+ },
+ "ListOpenAIResponseObject": {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseObjectWithInput"
+ },
+ "description": "List of response objects with their input context"
+ },
+ "has_more": {
+ "type": "boolean",
+ "description": "Whether there are more results available beyond this page"
+ },
+ "first_id": {
"type": "string",
- "description": "Unique identifier for the function call"
+ "description": "Identifier of the first item in this page"
},
- "name": {
+ "last_id": {
"type": "string",
- "description": "Name of the function being called"
+ "description": "Identifier of the last item in this page"
},
- "arguments": {
+ "object": {
"type": "string",
- "description": "JSON string containing the function arguments"
+ "const": "list",
+ "default": "list",
+ "description": "Object type identifier, always \"list\""
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "data",
+ "has_more",
+ "first_id",
+ "last_id",
+ "object"
+ ],
+ "title": "ListOpenAIResponseObject",
+ "description": "Paginated list of OpenAI response objects with navigation metadata."
+ },
+ "OpenAIResponseError": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "Error code identifying the type of failure"
+ },
+ "message": {
+ "type": "string",
+ "description": "Human-readable error message describing the failure"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "code",
+ "message"
+ ],
+ "title": "OpenAIResponseError",
+ "description": "Error details for failed OpenAI response requests."
+ },
+ "OpenAIResponseInput": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMessage"
+ }
+ ]
+ },
+ "OpenAIResponseInputFunctionToolCallOutput": {
+ "type": "object",
+ "properties": {
+ "call_id": {
+ "type": "string"
+ },
+ "output": {
+ "type": "string"
},
"type": {
"type": "string",
- "const": "function_call",
- "default": "function_call",
- "description": "Tool call type identifier, always \"function_call\""
+ "const": "function_call_output",
+ "default": "function_call_output"
},
"id": {
- "type": "string",
- "description": "(Optional) Additional identifier for the tool call"
+ "type": "string"
},
"status": {
- "type": "string",
- "description": "(Optional) Current status of the function call execution"
+ "type": "string"
}
},
"additionalProperties": false,
"required": [
"call_id",
- "name",
- "arguments",
+ "output",
"type"
],
- "title": "OpenAIResponseOutputMessageFunctionToolCall",
- "description": "Function tool call output message for OpenAI responses."
+ "title": "OpenAIResponseInputFunctionToolCallOutput",
+ "description": "This represents the output of a function call that gets passed back to the model."
},
- "OpenAIResponseOutputMessageMCPCall": {
+ "OpenAIResponseMCPApprovalRequest": {
"type": "object",
"properties": {
- "id": {
- "type": "string",
- "description": "Unique identifier for this MCP call"
- },
- "type": {
- "type": "string",
- "const": "mcp_call",
- "default": "mcp_call",
- "description": "Tool call type identifier, always \"mcp_call\""
- },
"arguments": {
- "type": "string",
- "description": "JSON string containing the MCP call arguments"
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
},
"name": {
- "type": "string",
- "description": "Name of the MCP method being called"
+ "type": "string"
},
"server_label": {
- "type": "string",
- "description": "Label identifying the MCP server handling the call"
- },
- "error": {
- "type": "string",
- "description": "(Optional) Error message if the MCP call failed"
+ "type": "string"
},
- "output": {
+ "type": {
"type": "string",
- "description": "(Optional) Output result from the successful MCP call"
+ "const": "mcp_approval_request",
+ "default": "mcp_approval_request"
}
},
"additionalProperties": false,
"required": [
- "id",
- "type",
"arguments",
+ "id",
"name",
- "server_label"
+ "server_label",
+ "type"
],
- "title": "OpenAIResponseOutputMessageMCPCall",
- "description": "Model Context Protocol (MCP) call output message for OpenAI responses."
+ "title": "OpenAIResponseMCPApprovalRequest",
+ "description": "A request for human approval of a tool invocation."
},
- "OpenAIResponseOutputMessageMCPListTools": {
+ "OpenAIResponseMCPApprovalResponse": {
"type": "object",
"properties": {
- "id": {
- "type": "string",
- "description": "Unique identifier for this MCP list tools operation"
+ "approval_request_id": {
+ "type": "string"
+ },
+ "approve": {
+ "type": "boolean"
},
"type": {
"type": "string",
- "const": "mcp_list_tools",
- "default": "mcp_list_tools",
- "description": "Tool call type identifier, always \"mcp_list_tools\""
+ "const": "mcp_approval_response",
+ "default": "mcp_approval_response"
},
- "server_label": {
- "type": "string",
- "description": "Label identifying the MCP server providing the tools"
+ "id": {
+ "type": "string"
},
- "tools": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "input_schema": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
- }
- ]
- },
- "description": "JSON schema defining the tool's input parameters"
- },
- "name": {
- "type": "string",
- "description": "Name of the tool"
- },
- "description": {
- "type": "string",
- "description": "(Optional) Description of what the tool does"
- }
- },
- "additionalProperties": false,
- "required": [
- "input_schema",
- "name"
- ],
- "title": "MCPListToolsTool",
- "description": "Tool definition returned by MCP list tools operation."
- },
- "description": "List of available tools provided by the MCP server"
+ "reason": {
+ "type": "string"
}
},
"additionalProperties": false,
"required": [
- "id",
- "type",
- "server_label",
- "tools"
+ "approval_request_id",
+ "approve",
+ "type"
],
- "title": "OpenAIResponseOutputMessageMCPListTools",
- "description": "MCP list tools output message containing available tools from an MCP server."
+ "title": "OpenAIResponseMCPApprovalResponse",
+ "description": "A response to an MCP approval request."
},
- "OpenAIResponseOutputMessageWebSearchToolCall": {
+ "OpenAIResponseObjectWithInput": {
"type": "object",
"properties": {
+ "created_at": {
+ "type": "integer",
+ "description": "Unix timestamp when the response was created"
+ },
+ "error": {
+ "$ref": "#/components/schemas/OpenAIResponseError",
+ "description": "(Optional) Error details if the response generation failed"
+ },
"id": {
"type": "string",
- "description": "Unique identifier for this tool call"
+ "description": "Unique identifier for this response"
+ },
+ "model": {
+ "type": "string",
+ "description": "Model identifier used for generation"
+ },
+ "object": {
+ "type": "string",
+ "const": "response",
+ "default": "response",
+ "description": "Object type identifier, always \"response\""
+ },
+ "output": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseOutput"
+ },
+ "description": "List of generated output items (messages, tool calls, etc.)"
+ },
+ "parallel_tool_calls": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether tool calls can be executed in parallel"
+ },
+ "previous_response_id": {
+ "type": "string",
+ "description": "(Optional) ID of the previous response in a conversation"
},
"status": {
"type": "string",
- "description": "Current status of the web search operation"
+ "description": "Current status of the response generation"
},
- "type": {
+ "temperature": {
+ "type": "number",
+ "description": "(Optional) Sampling temperature used for generation"
+ },
+ "text": {
+ "$ref": "#/components/schemas/OpenAIResponseText",
+ "description": "Text formatting configuration for the response"
+ },
+ "top_p": {
+ "type": "number",
+ "description": "(Optional) Nucleus sampling parameter used for generation"
+ },
+ "truncation": {
"type": "string",
- "const": "web_search_call",
- "default": "web_search_call",
- "description": "Tool call type identifier, always \"web_search_call\""
+ "description": "(Optional) Truncation strategy applied to the response"
+ },
+ "input": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseInput"
+ },
+ "description": "List of input items that led to this response"
}
},
"additionalProperties": false,
"required": [
+ "created_at",
"id",
+ "model",
+ "object",
+ "output",
+ "parallel_tool_calls",
"status",
- "type"
+ "text",
+ "input"
],
- "title": "OpenAIResponseOutputMessageWebSearchToolCall",
- "description": "Web search tool call output message for OpenAI responses."
+ "title": "OpenAIResponseObjectWithInput",
+ "description": "OpenAI response object extended with input context information."
+ },
+ "OpenAIResponseOutput": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMessage"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "message": "#/components/schemas/OpenAIResponseMessage",
+ "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall",
+ "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall",
+ "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall",
+ "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall",
+ "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools",
+ "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
+ }
+ }
},
"OpenAIResponseText": {
"type": "object",
@@ -12397,6 +13081,11 @@
"description": "APIs for creating and interacting with agentic systems.\n\n## Responses API\n\nThe Responses API provides OpenAI-compatible functionality with enhanced capabilities for dynamic, stateful interactions.\n\n> **✅ STABLE**: This API is production-ready with backward compatibility guarantees. Recommended for production applications.\n\n### ✅ Supported Tools\n\nThe Responses API supports the following tool types:\n\n- **`web_search`**: Search the web for current information and real-time data\n- **`file_search`**: Search through uploaded files and vector stores\n - Supports dynamic `vector_store_ids` per call\n - Compatible with OpenAI file search patterns\n- **`function`**: Call custom functions with JSON schema validation\n- **`mcp_tool`**: Model Context Protocol integration\n\n### ✅ Supported Fields & Features\n\n**Core Capabilities:**\n- **Dynamic Configuration**: Switch models, vector stores, and tools per request without pre-configuration\n- **Conversation Branching**: Use `previous_response_id` to branch conversations and explore different paths\n- **Rich Annotations**: Automatic file citations, URL citations, and container file citations\n- **Status Tracking**: Monitor tool call execution status and handle failures gracefully\n\n### 🚧 Work in Progress\n\n- Full real-time response streaming support\n- `tool_choice` parameter\n- `max_tool_calls` parameter\n- Built-in tools (code interpreter, containers API)\n- Safety & guardrails\n- `reasoning` capabilities\n- `service_tier`\n- `logprobs`\n- `max_output_tokens`\n- `metadata` handling\n- `instructions`\n- `incomplete_details`\n- `background`",
"x-displayName": "Agents"
},
+ {
+ "name": "Conversations",
+ "description": "Protocol for conversation management operations.",
+ "x-displayName": "Conversations"
+ },
{
"name": "Files",
"description": ""
@@ -12470,6 +13159,7 @@
"name": "Operations",
"tags": [
"Agents",
+ "Conversations",
"Files",
"Inference",
"Inspect",
diff --git a/docs/static/llama-stack-spec.yaml b/docs/static/llama-stack-spec.yaml
index 3927d3a94a..a2fcb812a1 100644
--- a/docs/static/llama-stack-spec.yaml
+++ b/docs/static/llama-stack-spec.yaml
@@ -167,16 +167,15 @@ paths:
$ref: '#/components/schemas/OpenaiCompletionRequest'
required: true
deprecated: false
- /v1/embeddings:
+ /v1/conversations:
post:
responses:
'200':
- description: >-
- An OpenAIEmbeddingsResponse containing the embeddings.
+ description: The created conversation object.
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenAIEmbeddingsResponse'
+ $ref: '#/components/schemas/Conversation'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -188,31 +187,26 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Inference
- summary: >-
- Generate OpenAI-compatible embeddings for the given input using the specified
- model.
- description: >-
- Generate OpenAI-compatible embeddings for the given input using the specified
- model.
+ - Conversations
+ summary: Create a conversation.
+ description: Create a conversation.
parameters: []
requestBody:
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenaiEmbeddingsRequest'
+ $ref: '#/components/schemas/CreateConversationRequest'
required: true
deprecated: false
- /v1/files:
+ /v1/conversations/{conversation_id}:
get:
responses:
'200':
- description: >-
- An ListOpenAIFileResponse containing the list of files.
+ description: The conversation object.
content:
application/json:
schema:
- $ref: '#/components/schemas/ListOpenAIFileResponse'
+ $ref: '#/components/schemas/Conversation'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -224,55 +218,25 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
- summary: >-
- Returns a list of files that belong to the user's organization.
- description: >-
- Returns a list of files that belong to the user's organization.
+ - Conversations
+ summary: Get a conversation with the given ID.
+ description: Get a conversation with the given ID.
parameters:
- - name: after
- in: query
- description: >-
- A cursor for use in pagination. `after` is an object ID that defines your
- place in the list. For instance, if you make a list request and receive
- 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo
- in order to fetch the next page of the list.
- required: false
+ - name: conversation_id
+ in: path
+ description: The conversation identifier.
+ required: true
schema:
type: string
- - name: limit
- in: query
- description: >-
- A limit on the number of objects to be returned. Limit can range between
- 1 and 10,000, and the default is 10,000.
- required: false
- schema:
- type: integer
- - name: order
- in: query
- description: >-
- Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
- required: false
- schema:
- $ref: '#/components/schemas/Order'
- - name: purpose
- in: query
- description: >-
- Only return files with the given purpose.
- required: false
- schema:
- $ref: '#/components/schemas/OpenAIFilePurpose'
deprecated: false
post:
responses:
'200':
- description: >-
- An OpenAIFileObject representing the uploaded file.
+ description: The updated conversation object.
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenAIFileObject'
+ $ref: '#/components/schemas/Conversation'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -284,48 +248,33 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
+ - Conversations
summary: >-
- Upload a file that can be used across various endpoints.
+ Update a conversation's metadata with the given ID.
description: >-
- Upload a file that can be used across various endpoints.
-
- The file upload should be a multipart form request with:
-
- - file: The File object (not file name) to be uploaded.
-
- - purpose: The intended purpose of the uploaded file.
-
- - expires_after: Optional form values describing expiration for the file.
- parameters: []
+ Update a conversation's metadata with the given ID.
+ parameters:
+ - name: conversation_id
+ in: path
+ description: The conversation identifier.
+ required: true
+ schema:
+ type: string
requestBody:
content:
- multipart/form-data:
+ application/json:
schema:
- type: object
- properties:
- file:
- type: string
- format: binary
- purpose:
- $ref: '#/components/schemas/OpenAIFilePurpose'
- expires_after:
- $ref: '#/components/schemas/ExpiresAfter'
- required:
- - file
- - purpose
+ $ref: '#/components/schemas/UpdateConversationRequest'
required: true
deprecated: false
- /v1/files/{file_id}:
- get:
+ delete:
responses:
'200':
- description: >-
- An OpenAIFileObject containing file information.
+ description: The deleted conversation resource.
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenAIFileObject'
+ $ref: '#/components/schemas/ConversationDeletedResource'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -337,29 +286,26 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
- summary: >-
- Returns information about a specific file.
- description: >-
- Returns information about a specific file.
+ - Conversations
+ summary: Delete a conversation with the given ID.
+ description: Delete a conversation with the given ID.
parameters:
- - name: file_id
+ - name: conversation_id
in: path
- description: >-
- The ID of the file to use for this request.
+ description: The conversation identifier.
required: true
schema:
type: string
deprecated: false
- delete:
+ /v1/conversations/{conversation_id}/items:
+ get:
responses:
'200':
- description: >-
- An OpenAIFileDeleteResponse indicating successful deletion.
+ description: List of conversation items.
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenAIFileDeleteResponse'
+ $ref: '#/components/schemas/ConversationItemList'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -371,28 +317,169 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
- summary: Delete a file.
- description: Delete a file.
+ - Conversations
+ summary: List items in the conversation.
+ description: List items in the conversation.
parameters:
- - name: file_id
+ - name: conversation_id
in: path
- description: >-
- The ID of the file to use for this request.
+ description: The conversation identifier.
required: true
schema:
type: string
+ - name: after
+ in: query
+ description: >-
+ An item ID to list items after, used in pagination.
+ required: false
+ schema:
+ oneOf:
+ - type: string
+ - type: object
+ title: NotGiven
+ description: >-
+ A sentinel singleton class used to distinguish omitted keyword arguments
+ from those passed in with the value None (which may have different
+ behavior).
+
+ For example:
+
+
+ ```py
+
+ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
+ ...
+
+
+
+ get(timeout=1) # 1s timeout
+
+ get(timeout=None) # No timeout
+
+ get() # Default timeout behavior, which may not be statically known
+ at the method definition.
+
+ ```
+ - name: include
+ in: query
+ description: >-
+ Specify additional output data to include in the response.
+ required: false
+ schema:
+ oneOf:
+ - type: array
+ items:
+ type: string
+ enum:
+ - code_interpreter_call.outputs
+ - computer_call_output.output.image_url
+ - file_search_call.results
+ - message.input_image.image_url
+ - message.output_text.logprobs
+ - reasoning.encrypted_content
+ - type: object
+ title: NotGiven
+ description: >-
+ A sentinel singleton class used to distinguish omitted keyword arguments
+ from those passed in with the value None (which may have different
+ behavior).
+
+ For example:
+
+
+ ```py
+
+ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
+ ...
+
+
+
+ get(timeout=1) # 1s timeout
+
+ get(timeout=None) # No timeout
+
+ get() # Default timeout behavior, which may not be statically known
+ at the method definition.
+
+ ```
+ - name: limit
+ in: query
+ description: >-
+ A limit on the number of objects to be returned (1-100, default 20).
+ required: false
+ schema:
+ oneOf:
+ - type: integer
+ - type: object
+ title: NotGiven
+ description: >-
+ A sentinel singleton class used to distinguish omitted keyword arguments
+ from those passed in with the value None (which may have different
+ behavior).
+
+ For example:
+
+
+ ```py
+
+ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
+ ...
+
+
+
+ get(timeout=1) # 1s timeout
+
+ get(timeout=None) # No timeout
+
+ get() # Default timeout behavior, which may not be statically known
+ at the method definition.
+
+ ```
+ - name: order
+ in: query
+ description: >-
+ The order to return items in (asc or desc, default desc).
+ required: false
+ schema:
+ oneOf:
+ - type: string
+ enum:
+ - asc
+ - desc
+ - type: object
+ title: NotGiven
+ description: >-
+ A sentinel singleton class used to distinguish omitted keyword arguments
+ from those passed in with the value None (which may have different
+ behavior).
+
+ For example:
+
+
+ ```py
+
+ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
+ ...
+
+
+
+ get(timeout=1) # 1s timeout
+
+ get(timeout=None) # No timeout
+
+ get() # Default timeout behavior, which may not be statically known
+ at the method definition.
+
+ ```
deprecated: false
- /v1/files/{file_id}/content:
- get:
+ post:
responses:
'200':
- description: >-
- The raw file content as a binary response.
+ description: List of created items.
content:
application/json:
schema:
- $ref: '#/components/schemas/Response'
+ $ref: '#/components/schemas/ConversationItemList'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -404,30 +491,32 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
- summary: >-
- Returns the contents of the specified file.
- description: >-
- Returns the contents of the specified file.
+ - Conversations
+ summary: Create items in the conversation.
+ description: Create items in the conversation.
parameters:
- - name: file_id
+ - name: conversation_id
in: path
- description: >-
- The ID of the file to use for this request.
+ description: The conversation identifier.
required: true
schema:
type: string
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AddItemsRequest'
+ required: true
deprecated: false
- /v1/health:
+ /v1/conversations/{conversation_id}/items/{item_id}:
get:
responses:
'200':
- description: >-
- Health information indicating if the service is operational.
+ description: The conversation item.
content:
application/json:
schema:
- $ref: '#/components/schemas/HealthInfo'
+ $ref: '#/components/schemas/ConversationItem'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -439,23 +528,31 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Inspect
- summary: >-
- Get the current health status of the service.
- description: >-
- Get the current health status of the service.
- parameters: []
+ - Conversations
+ summary: Retrieve a conversation item.
+ description: Retrieve a conversation item.
+ parameters:
+ - name: conversation_id
+ in: path
+ description: The conversation identifier.
+ required: true
+ schema:
+ type: string
+ - name: item_id
+ in: path
+ description: The item identifier.
+ required: true
+ schema:
+ type: string
deprecated: false
- /v1/inspect/routes:
- get:
+ delete:
responses:
'200':
- description: >-
- Response containing information about all available routes.
+ description: The deleted item resource.
content:
application/json:
schema:
- $ref: '#/components/schemas/ListRoutesResponse'
+ $ref: '#/components/schemas/ConversationItemDeletedResource'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -467,22 +564,33 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Inspect
- summary: >-
- List all available API routes with their methods and implementing providers.
- description: >-
- List all available API routes with their methods and implementing providers.
- parameters: []
+ - Conversations
+ summary: Delete a conversation item.
+ description: Delete a conversation item.
+ parameters:
+ - name: conversation_id
+ in: path
+ description: The conversation identifier.
+ required: true
+ schema:
+ type: string
+ - name: item_id
+ in: path
+ description: The item identifier.
+ required: true
+ schema:
+ type: string
deprecated: false
- /v1/models:
- get:
+ /v1/embeddings:
+ post:
responses:
'200':
- description: A ListModelsResponse.
- content:
+ description: >-
+ An OpenAIEmbeddingsResponse containing the embeddings.
+ content:
application/json:
schema:
- $ref: '#/components/schemas/ListModelsResponse'
+ $ref: '#/components/schemas/OpenAIEmbeddingsResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -494,19 +602,91 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Models
- summary: List all models.
- description: List all models.
+ - Inference
+ summary: >-
+ Generate OpenAI-compatible embeddings for the given input using the specified
+ model.
+ description: >-
+ Generate OpenAI-compatible embeddings for the given input using the specified
+ model.
parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/OpenaiEmbeddingsRequest'
+ required: true
+ deprecated: false
+ /v1/files:
+ get:
+ responses:
+ '200':
+ description: >-
+ An ListOpenAIFileResponse containing the list of files.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ListOpenAIFileResponse'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Files
+ summary: >-
+ Returns a list of files that belong to the user's organization.
+ description: >-
+ Returns a list of files that belong to the user's organization.
+ parameters:
+ - name: after
+ in: query
+ description: >-
+ A cursor for use in pagination. `after` is an object ID that defines your
+ place in the list. For instance, if you make a list request and receive
+ 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo
+ in order to fetch the next page of the list.
+ required: false
+ schema:
+ type: string
+ - name: limit
+ in: query
+ description: >-
+ A limit on the number of objects to be returned. Limit can range between
+ 1 and 10,000, and the default is 10,000.
+ required: false
+ schema:
+ type: integer
+ - name: order
+ in: query
+ description: >-
+ Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+ required: false
+ schema:
+ $ref: '#/components/schemas/Order'
+ - name: purpose
+ in: query
+ description: >-
+ Only return files with the given purpose.
+ required: false
+ schema:
+ $ref: '#/components/schemas/OpenAIFilePurpose'
deprecated: false
post:
responses:
'200':
- description: A Model.
+ description: >-
+ An OpenAIFileObject representing the uploaded file.
content:
application/json:
schema:
- $ref: '#/components/schemas/Model'
+ $ref: '#/components/schemas/OpenAIFileObject'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -518,26 +698,48 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Models
- summary: Register a model.
- description: Register a model.
+ - Files
+ summary: >-
+ Upload a file that can be used across various endpoints.
+ description: >-
+ Upload a file that can be used across various endpoints.
+
+ The file upload should be a multipart form request with:
+
+ - file: The File object (not file name) to be uploaded.
+
+ - purpose: The intended purpose of the uploaded file.
+
+ - expires_after: Optional form values describing expiration for the file.
parameters: []
requestBody:
content:
- application/json:
+ multipart/form-data:
schema:
- $ref: '#/components/schemas/RegisterModelRequest'
+ type: object
+ properties:
+ file:
+ type: string
+ format: binary
+ purpose:
+ $ref: '#/components/schemas/OpenAIFilePurpose'
+ expires_after:
+ $ref: '#/components/schemas/ExpiresAfter'
+ required:
+ - file
+ - purpose
required: true
deprecated: false
- /v1/models/{model_id}:
+ /v1/files/{file_id}:
get:
responses:
'200':
- description: A Model.
+ description: >-
+ An OpenAIFileObject containing file information.
content:
application/json:
schema:
- $ref: '#/components/schemas/Model'
+ $ref: '#/components/schemas/OpenAIFileObject'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -549,13 +751,16 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Models
- summary: Get a model by its identifier.
- description: Get a model by its identifier.
+ - Files
+ summary: >-
+ Returns information about a specific file.
+ description: >-
+ Returns information about a specific file.
parameters:
- - name: model_id
+ - name: file_id
in: path
- description: The identifier of the model to get.
+ description: >-
+ The ID of the file to use for this request.
required: true
schema:
type: string
@@ -563,7 +768,12 @@ paths:
delete:
responses:
'200':
- description: OK
+ description: >-
+ An OpenAIFileDeleteResponse indicating successful deletion.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/OpenAIFileDeleteResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -575,27 +785,28 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Models
- summary: Unregister a model.
- description: Unregister a model.
+ - Files
+ summary: Delete a file.
+ description: Delete a file.
parameters:
- - name: model_id
+ - name: file_id
in: path
description: >-
- The identifier of the model to unregister.
+ The ID of the file to use for this request.
required: true
schema:
type: string
deprecated: false
- /v1/moderations:
- post:
+ /v1/files/{file_id}/content:
+ get:
responses:
'200':
- description: A moderation object.
+ description: >-
+ The raw file content as a binary response.
content:
application/json:
schema:
- $ref: '#/components/schemas/ModerationObject'
+ $ref: '#/components/schemas/Response'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -607,29 +818,30 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Safety
+ - Files
summary: >-
- Classifies if text and/or image inputs are potentially harmful.
+ Returns the contents of the specified file.
description: >-
- Classifies if text and/or image inputs are potentially harmful.
- parameters: []
- requestBody:
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/RunModerationRequest'
- required: true
+ Returns the contents of the specified file.
+ parameters:
+ - name: file_id
+ in: path
+ description: >-
+ The ID of the file to use for this request.
+ required: true
+ schema:
+ type: string
deprecated: false
- /v1/prompts:
+ /v1/health:
get:
responses:
'200':
description: >-
- A ListPromptsResponse containing all prompts.
+ Health information indicating if the service is operational.
content:
application/json:
schema:
- $ref: '#/components/schemas/ListPromptsResponse'
+ $ref: '#/components/schemas/HealthInfo'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -641,19 +853,23 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Prompts
- summary: List all prompts.
- description: List all prompts.
+ - Inspect
+ summary: >-
+ Get the current health status of the service.
+ description: >-
+ Get the current health status of the service.
parameters: []
deprecated: false
- post:
+ /v1/inspect/routes:
+ get:
responses:
'200':
- description: The created Prompt resource.
+ description: >-
+ Response containing information about all available routes.
content:
application/json:
schema:
- $ref: '#/components/schemas/Prompt'
+ $ref: '#/components/schemas/ListRoutesResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -665,26 +881,22 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Prompts
- summary: Create a new prompt.
- description: Create a new prompt.
+ - Inspect
+ summary: >-
+ List all available API routes with their methods and implementing providers.
+ description: >-
+ List all available API routes with their methods and implementing providers.
parameters: []
- requestBody:
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/CreatePromptRequest'
- required: true
deprecated: false
- /v1/prompts/{prompt_id}:
+ /v1/models:
get:
responses:
'200':
- description: A Prompt resource.
+ description: A ListModelsResponse.
content:
application/json:
schema:
- $ref: '#/components/schemas/Prompt'
+ $ref: '#/components/schemas/ListModelsResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -696,35 +908,19 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Prompts
- summary: >-
- Get a prompt by its identifier and optional version.
- description: >-
- Get a prompt by its identifier and optional version.
- parameters:
- - name: prompt_id
- in: path
- description: The identifier of the prompt to get.
- required: true
- schema:
- type: string
- - name: version
- in: query
- description: >-
- The version of the prompt to get (defaults to latest).
- required: false
- schema:
- type: integer
+ - Models
+ summary: List all models.
+ description: List all models.
+ parameters: []
deprecated: false
post:
responses:
'200':
- description: >-
- The updated Prompt resource with incremented version.
+ description: A Model.
content:
application/json:
schema:
- $ref: '#/components/schemas/Prompt'
+ $ref: '#/components/schemas/Model'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -736,7 +932,225 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Prompts
+ - Models
+ summary: Register a model.
+ description: Register a model.
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RegisterModelRequest'
+ required: true
+ deprecated: false
+ /v1/models/{model_id}:
+ get:
+ responses:
+ '200':
+ description: A Model.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Model'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Models
+ summary: Get a model by its identifier.
+ description: Get a model by its identifier.
+ parameters:
+ - name: model_id
+ in: path
+ description: The identifier of the model to get.
+ required: true
+ schema:
+ type: string
+ deprecated: false
+ delete:
+ responses:
+ '200':
+ description: OK
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Models
+ summary: Unregister a model.
+ description: Unregister a model.
+ parameters:
+ - name: model_id
+ in: path
+ description: >-
+ The identifier of the model to unregister.
+ required: true
+ schema:
+ type: string
+ deprecated: false
+ /v1/moderations:
+ post:
+ responses:
+ '200':
+ description: A moderation object.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ModerationObject'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Safety
+ summary: >-
+ Classifies if text and/or image inputs are potentially harmful.
+ description: >-
+ Classifies if text and/or image inputs are potentially harmful.
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RunModerationRequest'
+ required: true
+ deprecated: false
+ /v1/prompts:
+ get:
+ responses:
+ '200':
+ description: >-
+ A ListPromptsResponse containing all prompts.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ListPromptsResponse'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Prompts
+ summary: List all prompts.
+ description: List all prompts.
+ parameters: []
+ deprecated: false
+ post:
+ responses:
+ '200':
+ description: The created Prompt resource.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Prompt'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Prompts
+ summary: Create a new prompt.
+ description: Create a new prompt.
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CreatePromptRequest'
+ required: true
+ deprecated: false
+ /v1/prompts/{prompt_id}:
+ get:
+ responses:
+ '200':
+ description: A Prompt resource.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Prompt'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Prompts
+ summary: >-
+ Get a prompt by its identifier and optional version.
+ description: >-
+ Get a prompt by its identifier and optional version.
+ parameters:
+ - name: prompt_id
+ in: path
+ description: The identifier of the prompt to get.
+ required: true
+ schema:
+ type: string
+ - name: version
+ in: query
+ description: >-
+ The version of the prompt to get (defaults to latest).
+ required: false
+ schema:
+ type: integer
+ deprecated: false
+ post:
+ responses:
+ '200':
+ description: >-
+ The updated Prompt resource with incremented version.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Prompt'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Prompts
summary: >-
Update an existing prompt (increments version).
description: >-
@@ -3753,690 +4167,778 @@ components:
title: OpenAICompletionChoice
description: >-
A choice from an OpenAI-compatible completion response.
- OpenaiEmbeddingsRequest:
+ ConversationItem:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseMessage'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
+ discriminator:
+ propertyName: type
+ mapping:
+ message: '#/components/schemas/OpenAIResponseMessage'
+ function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
+ mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
+ OpenAIResponseAnnotationCitation:
type: object
properties:
- model:
+ type:
type: string
+ const: url_citation
+ default: url_citation
description: >-
- The identifier of the model to use. The model must be an embedding model
- registered with Llama Stack and available via the /models endpoint.
- input:
- oneOf:
- - type: string
- - type: array
- items:
- type: string
+ Annotation type identifier, always "url_citation"
+ end_index:
+ type: integer
description: >-
- Input text to embed, encoded as a string or array of strings. To embed
- multiple inputs in a single request, pass an array of strings.
- encoding_format:
- type: string
- description: >-
- (Optional) The format to return the embeddings in. Can be either "float"
- or "base64". Defaults to "float".
- dimensions:
+ End position of the citation span in the content
+ start_index:
type: integer
description: >-
- (Optional) The number of dimensions the resulting output embeddings should
- have. Only supported in text-embedding-3 and later models.
- user:
+ Start position of the citation span in the content
+ title:
type: string
- description: >-
- (Optional) A unique identifier representing your end-user, which can help
- OpenAI to monitor and detect abuse.
+ description: Title of the referenced web resource
+ url:
+ type: string
+ description: URL of the referenced web resource
additionalProperties: false
required:
- - model
- - input
- title: OpenaiEmbeddingsRequest
- OpenAIEmbeddingData:
+ - type
+ - end_index
+ - start_index
+ - title
+ - url
+ title: OpenAIResponseAnnotationCitation
+ description: >-
+ URL citation annotation for referencing external web resources.
+ "OpenAIResponseAnnotationContainerFileCitation":
type: object
properties:
- object:
+ type:
type: string
- const: embedding
- default: embedding
- description: >-
- The object type, which will be "embedding"
- embedding:
- oneOf:
- - type: array
- items:
- type: number
- - type: string
+ const: container_file_citation
+ default: container_file_citation
+ container_id:
+ type: string
+ end_index:
+ type: integer
+ file_id:
+ type: string
+ filename:
+ type: string
+ start_index:
+ type: integer
+ additionalProperties: false
+ required:
+ - type
+ - container_id
+ - end_index
+ - file_id
+ - filename
+ - start_index
+ title: >-
+ OpenAIResponseAnnotationContainerFileCitation
+ OpenAIResponseAnnotationFileCitation:
+ type: object
+ properties:
+ type:
+ type: string
+ const: file_citation
+ default: file_citation
description: >-
- The embedding vector as a list of floats (when encoding_format="float")
- or as a base64-encoded string (when encoding_format="base64")
+ Annotation type identifier, always "file_citation"
+ file_id:
+ type: string
+ description: Unique identifier of the referenced file
+ filename:
+ type: string
+ description: Name of the referenced file
index:
type: integer
description: >-
- The index of the embedding in the input list
+ Position index of the citation within the content
additionalProperties: false
required:
- - object
- - embedding
+ - type
+ - file_id
+ - filename
- index
- title: OpenAIEmbeddingData
+ title: OpenAIResponseAnnotationFileCitation
description: >-
- A single embedding data object from an OpenAI-compatible embeddings response.
- OpenAIEmbeddingUsage:
+ File citation annotation for referencing specific files in response content.
+ OpenAIResponseAnnotationFilePath:
type: object
properties:
- prompt_tokens:
- type: integer
- description: The number of tokens in the input
- total_tokens:
+ type:
+ type: string
+ const: file_path
+ default: file_path
+ file_id:
+ type: string
+ index:
type: integer
- description: The total number of tokens used
additionalProperties: false
required:
- - prompt_tokens
- - total_tokens
- title: OpenAIEmbeddingUsage
- description: >-
- Usage information for an OpenAI-compatible embeddings response.
- OpenAIEmbeddingsResponse:
+ - type
+ - file_id
+ - index
+ title: OpenAIResponseAnnotationFilePath
+ OpenAIResponseAnnotations:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
+ - $ref: '#/components/schemas/OpenAIResponseAnnotationCitation'
+ - $ref: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
+ - $ref: '#/components/schemas/OpenAIResponseAnnotationFilePath'
+ discriminator:
+ propertyName: type
+ mapping:
+ file_citation: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
+ url_citation: '#/components/schemas/OpenAIResponseAnnotationCitation'
+ container_file_citation: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
+ file_path: '#/components/schemas/OpenAIResponseAnnotationFilePath'
+ OpenAIResponseInputMessageContent:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
+ - $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
+ discriminator:
+ propertyName: type
+ mapping:
+ input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
+ input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
+ OpenAIResponseInputMessageContentImage:
type: object
properties:
- object:
- type: string
- const: list
- default: list
- description: The object type, which will be "list"
- data:
- type: array
- items:
- $ref: '#/components/schemas/OpenAIEmbeddingData'
- description: List of embedding data objects
- model:
+ detail:
+ oneOf:
+ - type: string
+ const: low
+ - type: string
+ const: high
+ - type: string
+ const: auto
+ default: auto
+ description: >-
+ Level of detail for image processing, can be "low", "high", or "auto"
+ type:
type: string
+ const: input_image
+ default: input_image
description: >-
- The model that was used to generate the embeddings
- usage:
- $ref: '#/components/schemas/OpenAIEmbeddingUsage'
- description: Usage information
+ Content type identifier, always "input_image"
+ image_url:
+ type: string
+ description: (Optional) URL of the image content
additionalProperties: false
required:
- - object
- - data
- - model
- - usage
- title: OpenAIEmbeddingsResponse
- description: >-
- Response from an OpenAI-compatible embeddings request.
- OpenAIFilePurpose:
- type: string
- enum:
- - assistants
- - batch
- title: OpenAIFilePurpose
+ - detail
+ - type
+ title: OpenAIResponseInputMessageContentImage
description: >-
- Valid purpose values for OpenAI Files API.
- ListOpenAIFileResponse:
+ Image content for input messages in OpenAI response format.
+ OpenAIResponseInputMessageContentText:
type: object
properties:
- data:
- type: array
- items:
- $ref: '#/components/schemas/OpenAIFileObject'
- description: List of file objects
- has_more:
- type: boolean
- description: >-
- Whether there are more files available beyond this page
- first_id:
+ text:
type: string
- description: >-
- ID of the first file in the list for pagination
- last_id:
+ description: The text content of the input message
+ type:
type: string
+ const: input_text
+ default: input_text
description: >-
- ID of the last file in the list for pagination
- object:
- type: string
- const: list
- default: list
- description: The object type, which is always "list"
+ Content type identifier, always "input_text"
additionalProperties: false
required:
- - data
- - has_more
- - first_id
- - last_id
- - object
- title: ListOpenAIFileResponse
+ - text
+ - type
+ title: OpenAIResponseInputMessageContentText
description: >-
- Response for listing files in OpenAI Files API.
- OpenAIFileObject:
+ Text content for input messages in OpenAI response format.
+ OpenAIResponseMessage:
type: object
properties:
- object:
+ content:
+ oneOf:
+ - type: string
+ - type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseInputMessageContent'
+ - type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseOutputMessageContent'
+ role:
+ oneOf:
+ - type: string
+ const: system
+ - type: string
+ const: developer
+ - type: string
+ const: user
+ - type: string
+ const: assistant
+ type:
type: string
- const: file
- default: file
- description: The object type, which is always "file"
+ const: message
+ default: message
id:
type: string
- description: >-
- The file identifier, which can be referenced in the API endpoints
- bytes:
- type: integer
- description: The size of the file, in bytes
- created_at:
- type: integer
- description: >-
- The Unix timestamp (in seconds) for when the file was created
- expires_at:
- type: integer
- description: >-
- The Unix timestamp (in seconds) for when the file expires
- filename:
- type: string
- description: The name of the file
- purpose:
+ status:
type: string
- enum:
- - assistants
- - batch
- description: The intended purpose of the file
additionalProperties: false
required:
- - object
- - id
- - bytes
- - created_at
- - expires_at
- - filename
- - purpose
- title: OpenAIFileObject
+ - content
+ - role
+ - type
+ title: OpenAIResponseMessage
description: >-
- OpenAI File object as defined in the OpenAI Files API.
- ExpiresAfter:
+ Corresponds to the various Message types in the Responses API. They are all
+ under one type because the Responses API gives them all the same "type" value,
+ and there is no way to tell them apart in certain scenarios.
+ OpenAIResponseOutputMessageContent:
type: object
properties:
- anchor:
+ text:
type: string
- const: created_at
- seconds:
- type: integer
+ type:
+ type: string
+ const: output_text
+ default: output_text
+ annotations:
+ type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseAnnotations'
additionalProperties: false
required:
- - anchor
- - seconds
- title: ExpiresAfter
- description: >-
- Control expiration of uploaded files.
-
- Params:
- - anchor, must be "created_at"
- - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
- OpenAIFileDeleteResponse:
+ - text
+ - type
+ - annotations
+ title: >-
+ OpenAIResponseOutputMessageContentOutputText
+ "OpenAIResponseOutputMessageFileSearchToolCall":
type: object
properties:
id:
type: string
- description: The file identifier that was deleted
- object:
+ description: Unique identifier for this tool call
+ queries:
+ type: array
+ items:
+ type: string
+ description: List of search queries executed
+ status:
type: string
- const: file
- default: file
- description: The object type, which is always "file"
- deleted:
- type: boolean
description: >-
- Whether the file was successfully deleted
- additionalProperties: false
- required:
- - id
- - object
- - deleted
- title: OpenAIFileDeleteResponse
- description: >-
- Response for deleting a file in OpenAI Files API.
- Response:
- type: object
- title: Response
- HealthInfo:
- type: object
- properties:
- status:
+ Current status of the file search operation
+ type:
type: string
- enum:
- - OK
- - Error
- - Not Implemented
- description: Current health status of the service
+ const: file_search_call
+ default: file_search_call
+ description: >-
+ Tool call type identifier, always "file_search_call"
+ results:
+ type: array
+ items:
+ type: object
+ properties:
+ attributes:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: >-
+ (Optional) Key-value attributes associated with the file
+ file_id:
+ type: string
+ description: >-
+ Unique identifier of the file containing the result
+ filename:
+ type: string
+ description: Name of the file containing the result
+ score:
+ type: number
+ description: >-
+ Relevance score for this search result (between 0 and 1)
+ text:
+ type: string
+ description: Text content of the search result
+ additionalProperties: false
+ required:
+ - attributes
+ - file_id
+ - filename
+ - score
+ - text
+ title: >-
+ OpenAIResponseOutputMessageFileSearchToolCallResults
+ description: >-
+ Search results returned by the file search operation.
+ description: >-
+ (Optional) Search results returned by the file search operation
additionalProperties: false
required:
+ - id
+ - queries
- status
- title: HealthInfo
+ - type
+ title: >-
+ OpenAIResponseOutputMessageFileSearchToolCall
description: >-
- Health status information for the service.
- RouteInfo:
+ File search tool call output message for OpenAI responses.
+ "OpenAIResponseOutputMessageFunctionToolCall":
type: object
properties:
- route:
+ call_id:
type: string
- description: The API endpoint path
- method:
+ description: Unique identifier for the function call
+ name:
+ type: string
+ description: Name of the function being called
+ arguments:
type: string
- description: HTTP method for the route
- provider_types:
- type: array
- items:
- type: string
description: >-
- List of provider types that implement this route
- additionalProperties: false
- required:
- - route
- - method
- - provider_types
- title: RouteInfo
- description: >-
- Information about an API route including its path, method, and implementing
- providers.
- ListRoutesResponse:
- type: object
- properties:
- data:
- type: array
- items:
- $ref: '#/components/schemas/RouteInfo'
+ JSON string containing the function arguments
+ type:
+ type: string
+ const: function_call
+ default: function_call
description: >-
- List of available route information objects
+ Tool call type identifier, always "function_call"
+ id:
+ type: string
+ description: >-
+ (Optional) Additional identifier for the tool call
+ status:
+ type: string
+ description: >-
+ (Optional) Current status of the function call execution
additionalProperties: false
required:
- - data
- title: ListRoutesResponse
+ - call_id
+ - name
+ - arguments
+ - type
+ title: >-
+ OpenAIResponseOutputMessageFunctionToolCall
description: >-
- Response containing a list of all available API routes.
- Model:
+ Function tool call output message for OpenAI responses.
+ OpenAIResponseOutputMessageMCPCall:
type: object
properties:
- identifier:
+ id:
type: string
+ description: Unique identifier for this MCP call
+ type:
+ type: string
+ const: mcp_call
+ default: mcp_call
description: >-
- Unique identifier for this resource in llama stack
- provider_resource_id:
+ Tool call type identifier, always "mcp_call"
+ arguments:
type: string
description: >-
- Unique identifier for this resource in the provider
- provider_id:
+ JSON string containing the MCP call arguments
+ name:
+ type: string
+ description: Name of the MCP method being called
+ server_label:
type: string
description: >-
- ID of the provider that owns this resource
- type:
+ Label identifying the MCP server handling the call
+ error:
type: string
- enum:
- - model
- - shield
- - vector_db
- - dataset
- - scoring_function
- - benchmark
- - tool
- - tool_group
- - prompt
- const: model
- default: model
description: >-
- The resource type, always 'model' for model resources
- metadata:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: Any additional metadata for this model
- model_type:
- $ref: '#/components/schemas/ModelType'
- default: llm
+ (Optional) Error message if the MCP call failed
+ output:
+ type: string
description: >-
- The type of model (LLM or embedding model)
+ (Optional) Output result from the successful MCP call
additionalProperties: false
required:
- - identifier
- - provider_id
+ - id
- type
- - metadata
- - model_type
- title: Model
- description: >-
- A model resource representing an AI model registered in Llama Stack.
- ModelType:
- type: string
- enum:
- - llm
- - embedding
- title: ModelType
+ - arguments
+ - name
+ - server_label
+ title: OpenAIResponseOutputMessageMCPCall
description: >-
- Enumeration of supported model types in Llama Stack.
- ListModelsResponse:
+ Model Context Protocol (MCP) call output message for OpenAI responses.
+ OpenAIResponseOutputMessageMCPListTools:
type: object
properties:
- data:
+ id:
+ type: string
+ description: >-
+ Unique identifier for this MCP list tools operation
+ type:
+ type: string
+ const: mcp_list_tools
+ default: mcp_list_tools
+ description: >-
+ Tool call type identifier, always "mcp_list_tools"
+ server_label:
+ type: string
+ description: >-
+ Label identifying the MCP server providing the tools
+ tools:
type: array
items:
- $ref: '#/components/schemas/Model'
+ type: object
+ properties:
+ input_schema:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: >-
+ JSON schema defining the tool's input parameters
+ name:
+ type: string
+ description: Name of the tool
+ description:
+ type: string
+ description: >-
+ (Optional) Description of what the tool does
+ additionalProperties: false
+ required:
+ - input_schema
+ - name
+ title: MCPListToolsTool
+ description: >-
+ Tool definition returned by MCP list tools operation.
+ description: >-
+ List of available tools provided by the MCP server
additionalProperties: false
required:
- - data
- title: ListModelsResponse
- RegisterModelRequest:
+ - id
+ - type
+ - server_label
+ - tools
+ title: OpenAIResponseOutputMessageMCPListTools
+ description: >-
+ MCP list tools output message containing available tools from an MCP server.
+ "OpenAIResponseOutputMessageWebSearchToolCall":
type: object
properties:
- model_id:
+ id:
type: string
- description: The identifier of the model to register.
- provider_model_id:
+ description: Unique identifier for this tool call
+ status:
type: string
description: >-
- The identifier of the model in the provider.
- provider_id:
+ Current status of the web search operation
+ type:
type: string
- description: The identifier of the provider.
- metadata:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: Any additional metadata for this model.
- model_type:
- $ref: '#/components/schemas/ModelType'
- description: The type of model to register.
+ const: web_search_call
+ default: web_search_call
+ description: >-
+ Tool call type identifier, always "web_search_call"
additionalProperties: false
required:
- - model_id
- title: RegisterModelRequest
- RunModerationRequest:
+ - id
+ - status
+ - type
+ title: >-
+ OpenAIResponseOutputMessageWebSearchToolCall
+ description: >-
+ Web search tool call output message for OpenAI responses.
+ CreateConversationRequest:
type: object
properties:
- input:
- oneOf:
- - type: string
- - type: array
- items:
- type: string
+ items:
+ type: array
+ items:
+ $ref: '#/components/schemas/ConversationItem'
description: >-
- Input (or inputs) to classify. Can be a single string, an array of strings,
- or an array of multi-modal input objects similar to other models.
- model:
- type: string
+ Initial items to include in the conversation context.
+ metadata:
+ type: object
+ additionalProperties:
+ type: string
description: >-
- The content moderation model you would like to use.
+ Set of key-value pairs that can be attached to an object.
additionalProperties: false
- required:
- - input
- - model
- title: RunModerationRequest
- ModerationObject:
+ title: CreateConversationRequest
+ Conversation:
type: object
properties:
id:
type: string
- description: >-
- The unique identifier for the moderation request.
- model:
+ object:
type: string
- description: >-
- The model used to generate the moderation results.
- results:
+ const: conversation
+ default: conversation
+ created_at:
+ type: integer
+ metadata:
+ type: object
+ additionalProperties:
+ type: string
+ items:
type: array
items:
- $ref: '#/components/schemas/ModerationObjectResults'
- description: A list of moderation objects
+ type: object
+ title: dict
+        description: >-
+          Initial items included in the conversation context.
additionalProperties: false
required:
- id
- - model
- - results
- title: ModerationObject
- description: A moderation object.
- ModerationObjectResults:
+ - object
+ - created_at
+ title: Conversation
+ description: OpenAI-compatible conversation object.
+ UpdateConversationRequest:
type: object
properties:
- flagged:
- type: boolean
- description: >-
- Whether any of the below categories are flagged.
- categories:
- type: object
- additionalProperties:
- type: boolean
- description: >-
- A list of the categories, and whether they are flagged or not.
- category_applied_input_types:
- type: object
- additionalProperties:
- type: array
- items:
- type: string
- description: >-
- A list of the categories along with the input type(s) that the score applies
- to.
- category_scores:
- type: object
- additionalProperties:
- type: number
- description: >-
- A list of the categories along with their scores as predicted by model.
- user_message:
- type: string
metadata:
type: object
additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
+ type: string
+ description: >-
+ Set of key-value pairs that can be attached to an object.
additionalProperties: false
required:
- - flagged
- metadata
- title: ModerationObjectResults
- description: A moderation object.
- Prompt:
+ title: UpdateConversationRequest
+ ConversationDeletedResource:
type: object
properties:
- prompt:
+ id:
type: string
- description: >-
- The system prompt text with variable placeholders. Variables are only
- supported when using the Responses API.
- version:
- type: integer
- description: >-
- Version (integer starting at 1, incremented on save)
- prompt_id:
+ object:
type: string
- description: >-
- Unique identifier formatted as 'pmpt_<48-digit-hash>'
- variables:
- type: array
- items:
- type: string
- description: >-
- List of prompt variable names that can be used in the prompt template
- is_default:
+ default: conversation.deleted
+ deleted:
type: boolean
- default: false
- description: >-
- Boolean indicating whether this version is the default version for this
- prompt
+ default: true
additionalProperties: false
required:
- - version
- - prompt_id
- - variables
- - is_default
- title: Prompt
- description: >-
- A prompt resource representing a stored OpenAI Compatible prompt template
- in Llama Stack.
- ListPromptsResponse:
+ - id
+ - object
+ - deleted
+ title: ConversationDeletedResource
+ description: Response for deleted conversation.
+ ConversationItemList:
type: object
properties:
+ object:
+ type: string
+ default: list
data:
type: array
items:
- $ref: '#/components/schemas/Prompt'
+ $ref: '#/components/schemas/ConversationItem'
+ first_id:
+ type: string
+ last_id:
+ type: string
+ has_more:
+ type: boolean
+ default: false
additionalProperties: false
required:
+ - object
- data
- title: ListPromptsResponse
- description: Response model to list prompts.
- CreatePromptRequest:
+ - has_more
+ title: ConversationItemList
+ description: >-
+ List of conversation items with pagination.
+ AddItemsRequest:
type: object
properties:
- prompt:
- type: string
- description: >-
- The prompt text content with variable placeholders.
- variables:
+ items:
type: array
items:
- type: string
+ $ref: '#/components/schemas/ConversationItem'
description: >-
- List of variable names that can be used in the prompt template.
+ Items to include in the conversation context.
additionalProperties: false
required:
- - prompt
- title: CreatePromptRequest
- UpdatePromptRequest:
+ - items
+ title: AddItemsRequest
+ ConversationItemDeletedResource:
type: object
properties:
- prompt:
+ id:
type: string
- description: The updated prompt text content.
- version:
- type: integer
- description: >-
- The current version of the prompt being updated.
- variables:
- type: array
- items:
- type: string
- description: >-
- Updated list of variable names that can be used in the prompt template.
- set_as_default:
+ object:
+ type: string
+ default: conversation.item.deleted
+ deleted:
type: boolean
- description: >-
- Set the new version as the default (default=True).
+ default: true
additionalProperties: false
required:
- - prompt
- - version
- - set_as_default
- title: UpdatePromptRequest
- SetDefaultVersionRequest:
+ - id
+ - object
+ - deleted
+ title: ConversationItemDeletedResource
+ description: Response for deleted conversation item.
+ OpenaiEmbeddingsRequest:
type: object
properties:
- version:
+ model:
+ type: string
+ description: >-
+ The identifier of the model to use. The model must be an embedding model
+ registered with Llama Stack and available via the /models endpoint.
+ input:
+ oneOf:
+ - type: string
+ - type: array
+ items:
+ type: string
+ description: >-
+ Input text to embed, encoded as a string or array of strings. To embed
+ multiple inputs in a single request, pass an array of strings.
+ encoding_format:
+ type: string
+ description: >-
+ (Optional) The format to return the embeddings in. Can be either "float"
+ or "base64". Defaults to "float".
+ dimensions:
type: integer
- description: The version to set as default.
+ description: >-
+ (Optional) The number of dimensions the resulting output embeddings should
+ have. Only supported in text-embedding-3 and later models.
+ user:
+ type: string
+ description: >-
+ (Optional) A unique identifier representing your end-user, which can help
+ OpenAI to monitor and detect abuse.
additionalProperties: false
required:
- - version
- title: SetDefaultVersionRequest
- ProviderInfo:
+ - model
+ - input
+ title: OpenaiEmbeddingsRequest
+ OpenAIEmbeddingData:
type: object
properties:
- api:
- type: string
- description: The API name this provider implements
- provider_id:
- type: string
- description: Unique identifier for the provider
- provider_type:
+ object:
type: string
- description: The type of provider implementation
- config:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
+ const: embedding
+ default: embedding
description: >-
- Configuration parameters for the provider
- health:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: Current health status of the provider
+ The object type, which will be "embedding"
+ embedding:
+ oneOf:
+ - type: array
+ items:
+ type: number
+ - type: string
+ description: >-
+ The embedding vector as a list of floats (when encoding_format="float")
+ or as a base64-encoded string (when encoding_format="base64")
+ index:
+ type: integer
+ description: >-
+ The index of the embedding in the input list
additionalProperties: false
required:
- - api
- - provider_id
- - provider_type
- - config
- - health
- title: ProviderInfo
+ - object
+ - embedding
+ - index
+ title: OpenAIEmbeddingData
description: >-
- Information about a registered provider including its configuration and health
- status.
- ListProvidersResponse:
+ A single embedding data object from an OpenAI-compatible embeddings response.
+ OpenAIEmbeddingUsage:
+ type: object
+ properties:
+ prompt_tokens:
+ type: integer
+ description: The number of tokens in the input
+ total_tokens:
+ type: integer
+ description: The total number of tokens used
+ additionalProperties: false
+ required:
+ - prompt_tokens
+ - total_tokens
+ title: OpenAIEmbeddingUsage
+ description: >-
+ Usage information for an OpenAI-compatible embeddings response.
+ OpenAIEmbeddingsResponse:
type: object
properties:
+ object:
+ type: string
+ const: list
+ default: list
+ description: The object type, which will be "list"
data:
type: array
items:
- $ref: '#/components/schemas/ProviderInfo'
- description: List of provider information objects
+ $ref: '#/components/schemas/OpenAIEmbeddingData'
+ description: List of embedding data objects
+ model:
+ type: string
+ description: >-
+ The model that was used to generate the embeddings
+ usage:
+ $ref: '#/components/schemas/OpenAIEmbeddingUsage'
+ description: Usage information
additionalProperties: false
required:
+ - object
- data
- title: ListProvidersResponse
+ - model
+ - usage
+ title: OpenAIEmbeddingsResponse
description: >-
- Response containing a list of all available providers.
- ListOpenAIResponseObject:
+ Response from an OpenAI-compatible embeddings request.
+ OpenAIFilePurpose:
+ type: string
+ enum:
+ - assistants
+ - batch
+ title: OpenAIFilePurpose
+ description: >-
+ Valid purpose values for OpenAI Files API.
+ ListOpenAIFileResponse:
type: object
properties:
data:
type: array
items:
- $ref: '#/components/schemas/OpenAIResponseObjectWithInput'
- description: >-
- List of response objects with their input context
+ $ref: '#/components/schemas/OpenAIFileObject'
+ description: List of file objects
has_more:
type: boolean
description: >-
- Whether there are more results available beyond this page
+ Whether there are more files available beyond this page
first_id:
type: string
description: >-
- Identifier of the first item in this page
+ ID of the first file in the list for pagination
last_id:
type: string
- description: Identifier of the last item in this page
+ description: >-
+ ID of the last file in the list for pagination
object:
type: string
const: list
default: list
- description: Object type identifier, always "list"
+ description: The object type, which is always "list"
additionalProperties: false
required:
- data
@@ -4444,678 +4946,744 @@ components:
- first_id
- last_id
- object
- title: ListOpenAIResponseObject
+ title: ListOpenAIFileResponse
description: >-
- Paginated list of OpenAI response objects with navigation metadata.
- OpenAIResponseAnnotationCitation:
+ Response for listing files in OpenAI Files API.
+ OpenAIFileObject:
type: object
properties:
- type:
+ object:
+ type: string
+ const: file
+ default: file
+ description: The object type, which is always "file"
+ id:
type: string
- const: url_citation
- default: url_citation
description: >-
- Annotation type identifier, always "url_citation"
- end_index:
+ The file identifier, which can be referenced in the API endpoints
+ bytes:
+ type: integer
+ description: The size of the file, in bytes
+ created_at:
type: integer
description: >-
- End position of the citation span in the content
- start_index:
+ The Unix timestamp (in seconds) for when the file was created
+ expires_at:
type: integer
description: >-
- Start position of the citation span in the content
- title:
+ The Unix timestamp (in seconds) for when the file expires
+ filename:
type: string
- description: Title of the referenced web resource
- url:
+ description: The name of the file
+ purpose:
type: string
- description: URL of the referenced web resource
+ enum:
+ - assistants
+ - batch
+ description: The intended purpose of the file
additionalProperties: false
required:
- - type
- - end_index
- - start_index
- - title
- - url
- title: OpenAIResponseAnnotationCitation
+ - object
+ - id
+ - bytes
+ - created_at
+ - expires_at
+ - filename
+ - purpose
+ title: OpenAIFileObject
description: >-
- URL citation annotation for referencing external web resources.
- "OpenAIResponseAnnotationContainerFileCitation":
+ OpenAI File object as defined in the OpenAI Files API.
+ ExpiresAfter:
type: object
properties:
- type:
- type: string
- const: container_file_citation
- default: container_file_citation
- container_id:
- type: string
- end_index:
- type: integer
- file_id:
- type: string
- filename:
+ anchor:
type: string
- start_index:
+ const: created_at
+ seconds:
type: integer
additionalProperties: false
required:
- - type
- - container_id
- - end_index
- - file_id
- - filename
- - start_index
- title: >-
- OpenAIResponseAnnotationContainerFileCitation
- OpenAIResponseAnnotationFileCitation:
+ - anchor
+ - seconds
+ title: ExpiresAfter
+ description: >-
+ Control expiration of uploaded files.
+
+ Params:
+ - anchor, must be "created_at"
+ - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
+ OpenAIFileDeleteResponse:
type: object
properties:
- type:
- type: string
- const: file_citation
- default: file_citation
- description: >-
- Annotation type identifier, always "file_citation"
- file_id:
+ id:
type: string
- description: Unique identifier of the referenced file
- filename:
+ description: The file identifier that was deleted
+ object:
type: string
- description: Name of the referenced file
- index:
- type: integer
+ const: file
+ default: file
+ description: The object type, which is always "file"
+ deleted:
+ type: boolean
description: >-
- Position index of the citation within the content
+ Whether the file was successfully deleted
additionalProperties: false
required:
- - type
- - file_id
- - filename
- - index
- title: OpenAIResponseAnnotationFileCitation
+ - id
+ - object
+ - deleted
+ title: OpenAIFileDeleteResponse
description: >-
- File citation annotation for referencing specific files in response content.
- OpenAIResponseAnnotationFilePath:
+ Response for deleting a file in OpenAI Files API.
+ Response:
+ type: object
+ title: Response
+ HealthInfo:
type: object
properties:
- type:
- type: string
- const: file_path
- default: file_path
- file_id:
+ status:
type: string
- index:
- type: integer
+ enum:
+ - OK
+ - Error
+ - Not Implemented
+ description: Current health status of the service
additionalProperties: false
required:
- - type
- - file_id
- - index
- title: OpenAIResponseAnnotationFilePath
- OpenAIResponseAnnotations:
- oneOf:
- - $ref: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
- - $ref: '#/components/schemas/OpenAIResponseAnnotationCitation'
- - $ref: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
- - $ref: '#/components/schemas/OpenAIResponseAnnotationFilePath'
- discriminator:
- propertyName: type
- mapping:
- file_citation: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
- url_citation: '#/components/schemas/OpenAIResponseAnnotationCitation'
- container_file_citation: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
- file_path: '#/components/schemas/OpenAIResponseAnnotationFilePath'
- OpenAIResponseError:
+ - status
+ title: HealthInfo
+ description: >-
+ Health status information for the service.
+ RouteInfo:
type: object
properties:
- code:
+ route:
type: string
- description: >-
- Error code identifying the type of failure
- message:
+ description: The API endpoint path
+ method:
type: string
+ description: HTTP method for the route
+ provider_types:
+ type: array
+ items:
+ type: string
description: >-
- Human-readable error message describing the failure
+ List of provider types that implement this route
additionalProperties: false
required:
- - code
- - message
- title: OpenAIResponseError
+ - route
+ - method
+ - provider_types
+ title: RouteInfo
description: >-
- Error details for failed OpenAI response requests.
- OpenAIResponseInput:
- oneOf:
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
- - $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
- - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
- - $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
- - $ref: '#/components/schemas/OpenAIResponseMessage'
- "OpenAIResponseInputFunctionToolCallOutput":
+ Information about an API route including its path, method, and implementing
+ providers.
+ ListRoutesResponse:
type: object
properties:
- call_id:
- type: string
- output:
- type: string
- type:
- type: string
- const: function_call_output
- default: function_call_output
- id:
- type: string
- status:
- type: string
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/RouteInfo'
+ description: >-
+ List of available route information objects
additionalProperties: false
required:
- - call_id
- - output
- - type
- title: >-
- OpenAIResponseInputFunctionToolCallOutput
+ - data
+ title: ListRoutesResponse
description: >-
- This represents the output of a function call that gets passed back to the
- model.
- OpenAIResponseInputMessageContent:
- oneOf:
- - $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
- - $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
- discriminator:
- propertyName: type
- mapping:
- input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
- input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
- OpenAIResponseInputMessageContentImage:
+ Response containing a list of all available API routes.
+ Model:
type: object
properties:
- detail:
- oneOf:
- - type: string
- const: low
- - type: string
- const: high
- - type: string
- const: auto
- default: auto
- description: >-
- Level of detail for image processing, can be "low", "high", or "auto"
- type:
+ identifier:
type: string
- const: input_image
- default: input_image
description: >-
- Content type identifier, always "input_image"
- image_url:
+ Unique identifier for this resource in llama stack
+ provider_resource_id:
type: string
- description: (Optional) URL of the image content
- additionalProperties: false
- required:
- - detail
- - type
- title: OpenAIResponseInputMessageContentImage
- description: >-
- Image content for input messages in OpenAI response format.
- OpenAIResponseInputMessageContentText:
- type: object
- properties:
- text:
+ description: >-
+ Unique identifier for this resource in the provider
+ provider_id:
type: string
- description: The text content of the input message
+ description: >-
+ ID of the provider that owns this resource
type:
type: string
- const: input_text
- default: input_text
- description: >-
- Content type identifier, always "input_text"
- additionalProperties: false
- required:
- - text
- - type
- title: OpenAIResponseInputMessageContentText
+ enum:
+ - model
+ - shield
+ - vector_db
+ - dataset
+ - scoring_function
+ - benchmark
+ - tool
+ - tool_group
+ - prompt
+ const: model
+ default: model
+ description: >-
+ The resource type, always 'model' for model resources
+ metadata:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: Any additional metadata for this model
+ model_type:
+ $ref: '#/components/schemas/ModelType'
+ default: llm
+ description: >-
+ The type of model (LLM or embedding model)
+ additionalProperties: false
+ required:
+ - identifier
+ - provider_id
+ - type
+ - metadata
+ - model_type
+ title: Model
description: >-
- Text content for input messages in OpenAI response format.
- OpenAIResponseMCPApprovalRequest:
+ A model resource representing an AI model registered in Llama Stack.
+ ModelType:
+ type: string
+ enum:
+ - llm
+ - embedding
+ title: ModelType
+ description: >-
+ Enumeration of supported model types in Llama Stack.
+ ListModelsResponse:
type: object
properties:
- arguments:
- type: string
- id:
- type: string
- name:
- type: string
- server_label:
- type: string
- type:
- type: string
- const: mcp_approval_request
- default: mcp_approval_request
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Model'
additionalProperties: false
required:
- - arguments
- - id
- - name
- - server_label
- - type
- title: OpenAIResponseMCPApprovalRequest
- description: >-
- A request for human approval of a tool invocation.
- OpenAIResponseMCPApprovalResponse:
+ - data
+ title: ListModelsResponse
+ RegisterModelRequest:
type: object
properties:
- approval_request_id:
- type: string
- approve:
- type: boolean
- type:
+ model_id:
type: string
- const: mcp_approval_response
- default: mcp_approval_response
- id:
+ description: The identifier of the model to register.
+ provider_model_id:
type: string
- reason:
+ description: >-
+ The identifier of the model in the provider.
+ provider_id:
type: string
+ description: The identifier of the provider.
+ metadata:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: Any additional metadata for this model.
+ model_type:
+ $ref: '#/components/schemas/ModelType'
+ description: The type of model to register.
additionalProperties: false
required:
- - approval_request_id
- - approve
- - type
- title: OpenAIResponseMCPApprovalResponse
- description: A response to an MCP approval request.
- OpenAIResponseMessage:
+ - model_id
+ title: RegisterModelRequest
+ RunModerationRequest:
type: object
properties:
- content:
+ input:
oneOf:
- type: string
- type: array
items:
- $ref: '#/components/schemas/OpenAIResponseInputMessageContent'
- - type: array
- items:
- $ref: '#/components/schemas/OpenAIResponseOutputMessageContent'
- role:
- oneOf:
- - type: string
- const: system
- - type: string
- const: developer
- - type: string
- const: user
- - type: string
- const: assistant
- type:
- type: string
- const: message
- default: message
- id:
- type: string
- status:
+ type: string
+ description: >-
+ Input (or inputs) to classify. Can be a single string, an array of strings,
+ or an array of multi-modal input objects similar to other models.
+ model:
type: string
+ description: >-
+ The content moderation model you would like to use.
additionalProperties: false
required:
- - content
- - role
- - type
- title: OpenAIResponseMessage
- description: >-
- Corresponds to the various Message types in the Responses API. They are all
- under one type because the Responses API gives them all the same "type" value,
- and there is no way to tell them apart in certain scenarios.
- OpenAIResponseObjectWithInput:
+ - input
+ - model
+ title: RunModerationRequest
+ ModerationObject:
type: object
properties:
- created_at:
- type: integer
- description: >-
- Unix timestamp when the response was created
- error:
- $ref: '#/components/schemas/OpenAIResponseError'
- description: >-
- (Optional) Error details if the response generation failed
id:
type: string
- description: Unique identifier for this response
+ description: >-
+ The unique identifier for the moderation request.
model:
type: string
- description: Model identifier used for generation
- object:
- type: string
- const: response
- default: response
description: >-
- Object type identifier, always "response"
- output:
+ The model used to generate the moderation results.
+ results:
type: array
items:
- $ref: '#/components/schemas/OpenAIResponseOutput'
- description: >-
- List of generated output items (messages, tool calls, etc.)
- parallel_tool_calls:
+ $ref: '#/components/schemas/ModerationObjectResults'
+ description: A list of moderation objects
+ additionalProperties: false
+ required:
+ - id
+ - model
+ - results
+ title: ModerationObject
+ description: A moderation object.
+ ModerationObjectResults:
+ type: object
+ properties:
+ flagged:
type: boolean
- default: false
- description: >-
- Whether tool calls can be executed in parallel
- previous_response_id:
- type: string
- description: >-
- (Optional) ID of the previous response in a conversation
- status:
- type: string
description: >-
- Current status of the response generation
- temperature:
- type: number
+ Whether any of the below categories are flagged.
+ categories:
+ type: object
+ additionalProperties:
+ type: boolean
description: >-
- (Optional) Sampling temperature used for generation
- text:
- $ref: '#/components/schemas/OpenAIResponseText'
+ A list of the categories, and whether they are flagged or not.
+ category_applied_input_types:
+ type: object
+ additionalProperties:
+ type: array
+ items:
+ type: string
description: >-
- Text formatting configuration for the response
- top_p:
- type: number
+ A list of the categories along with the input type(s) that the score applies
+ to.
+ category_scores:
+ type: object
+ additionalProperties:
+ type: number
description: >-
- (Optional) Nucleus sampling parameter used for generation
- truncation:
+ A list of the categories along with their scores as predicted by the model.
+ user_message:
type: string
- description: >-
- (Optional) Truncation strategy applied to the response
- input:
- type: array
- items:
- $ref: '#/components/schemas/OpenAIResponseInput'
- description: >-
- List of input items that led to this response
+ metadata:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
additionalProperties: false
required:
- - created_at
- - id
- - model
- - object
- - output
- - parallel_tool_calls
- - status
- - text
- - input
- title: OpenAIResponseObjectWithInput
- description: >-
- OpenAI response object extended with input context information.
- OpenAIResponseOutput:
- oneOf:
- - $ref: '#/components/schemas/OpenAIResponseMessage'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
- - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
- discriminator:
- propertyName: type
- mapping:
- message: '#/components/schemas/OpenAIResponseMessage'
- web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
- file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
- function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
- mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
- mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
- mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
- OpenAIResponseOutputMessageContent:
+ - flagged
+ - metadata
+ title: ModerationObjectResults
+ description: A moderation result object for a single input.
+ Prompt:
type: object
properties:
- text:
+ prompt:
type: string
- type:
+ description: >-
+ The system prompt text with variable placeholders. Variables are only
+ supported when using the Responses API.
+ version:
+ type: integer
+ description: >-
+ Version (integer starting at 1, incremented on save)
+ prompt_id:
type: string
- const: output_text
- default: output_text
- annotations:
+ description: >-
+ Unique identifier formatted as 'pmpt_<48-digit-hash>'
+ variables:
type: array
items:
- $ref: '#/components/schemas/OpenAIResponseAnnotations'
+ type: string
+ description: >-
+ List of prompt variable names that can be used in the prompt template
+ is_default:
+ type: boolean
+ default: false
+ description: >-
+ Boolean indicating whether this version is the default version for this
+ prompt
additionalProperties: false
required:
- - text
- - type
- - annotations
- title: >-
- OpenAIResponseOutputMessageContentOutputText
- "OpenAIResponseOutputMessageFileSearchToolCall":
+ - version
+ - prompt_id
+ - variables
+ - is_default
+ title: Prompt
+ description: >-
+ A prompt resource representing a stored OpenAI Compatible prompt template
+ in Llama Stack.
+ ListPromptsResponse:
type: object
properties:
- id:
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Prompt'
+ additionalProperties: false
+ required:
+ - data
+ title: ListPromptsResponse
+ description: Response model to list prompts.
+ CreatePromptRequest:
+ type: object
+ properties:
+ prompt:
type: string
- description: Unique identifier for this tool call
- queries:
+ description: >-
+ The prompt text content with variable placeholders.
+ variables:
type: array
items:
type: string
- description: List of search queries executed
- status:
+ description: >-
+ List of variable names that can be used in the prompt template.
+ additionalProperties: false
+ required:
+ - prompt
+ title: CreatePromptRequest
+ UpdatePromptRequest:
+ type: object
+ properties:
+ prompt:
type: string
+ description: The updated prompt text content.
+ version:
+ type: integer
description: >-
- Current status of the file search operation
- type:
+ The current version of the prompt being updated.
+ variables:
+ type: array
+ items:
+ type: string
+ description: >-
+ Updated list of variable names that can be used in the prompt template.
+ set_as_default:
+ type: boolean
+ description: >-
+ Set the new version as the default (default=True).
+ additionalProperties: false
+ required:
+ - prompt
+ - version
+ - set_as_default
+ title: UpdatePromptRequest
+ SetDefaultVersionRequest:
+ type: object
+ properties:
+ version:
+ type: integer
+ description: The version to set as default.
+ additionalProperties: false
+ required:
+ - version
+ title: SetDefaultVersionRequest
+ ProviderInfo:
+ type: object
+ properties:
+ api:
type: string
- const: file_search_call
- default: file_search_call
+ description: The API name this provider implements
+ provider_id:
+ type: string
+ description: Unique identifier for the provider
+ provider_type:
+ type: string
+ description: The type of provider implementation
+ config:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
description: >-
- Tool call type identifier, always "file_search_call"
- results:
+ Configuration parameters for the provider
+ health:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: Current health status of the provider
+ additionalProperties: false
+ required:
+ - api
+ - provider_id
+ - provider_type
+ - config
+ - health
+ title: ProviderInfo
+ description: >-
+ Information about a registered provider including its configuration and health
+ status.
+ ListProvidersResponse:
+ type: object
+ properties:
+ data:
type: array
items:
- type: object
- properties:
- attributes:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: >-
- (Optional) Key-value attributes associated with the file
- file_id:
- type: string
- description: >-
- Unique identifier of the file containing the result
- filename:
- type: string
- description: Name of the file containing the result
- score:
- type: number
- description: >-
- Relevance score for this search result (between 0 and 1)
- text:
- type: string
- description: Text content of the search result
- additionalProperties: false
- required:
- - attributes
- - file_id
- - filename
- - score
- - text
- title: >-
- OpenAIResponseOutputMessageFileSearchToolCallResults
- description: >-
- Search results returned by the file search operation.
+ $ref: '#/components/schemas/ProviderInfo'
+ description: List of provider information objects
+ additionalProperties: false
+ required:
+ - data
+ title: ListProvidersResponse
+ description: >-
+ Response containing a list of all available providers.
+ ListOpenAIResponseObject:
+ type: object
+ properties:
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseObjectWithInput'
+ description: >-
+ List of response objects with their input context
+ has_more:
+ type: boolean
+ description: >-
+ Whether there are more results available beyond this page
+ first_id:
+ type: string
+ description: >-
+ Identifier of the first item in this page
+ last_id:
+ type: string
+ description: Identifier of the last item in this page
+ object:
+ type: string
+ const: list
+ default: list
+ description: Object type identifier, always "list"
+ additionalProperties: false
+ required:
+ - data
+ - has_more
+ - first_id
+ - last_id
+ - object
+ title: ListOpenAIResponseObject
+ description: >-
+ Paginated list of OpenAI response objects with navigation metadata.
+ OpenAIResponseError:
+ type: object
+ properties:
+ code:
+ type: string
description: >-
- (Optional) Search results returned by the file search operation
+ Error code identifying the type of failure
+ message:
+ type: string
+ description: >-
+ Human-readable error message describing the failure
additionalProperties: false
required:
- - id
- - queries
- - status
- - type
- title: >-
- OpenAIResponseOutputMessageFileSearchToolCall
+ - code
+ - message
+ title: OpenAIResponseError
description: >-
- File search tool call output message for OpenAI responses.
- "OpenAIResponseOutputMessageFunctionToolCall":
+ Error details for failed OpenAI response requests.
+ OpenAIResponseInput:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
+ - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
+ - $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
+ - $ref: '#/components/schemas/OpenAIResponseMessage'
+ "OpenAIResponseInputFunctionToolCallOutput":
type: object
properties:
call_id:
type: string
- description: Unique identifier for the function call
- name:
- type: string
- description: Name of the function being called
- arguments:
+ output:
type: string
- description: >-
- JSON string containing the function arguments
type:
type: string
- const: function_call
- default: function_call
- description: >-
- Tool call type identifier, always "function_call"
+ const: function_call_output
+ default: function_call_output
id:
type: string
- description: >-
- (Optional) Additional identifier for the tool call
status:
type: string
- description: >-
- (Optional) Current status of the function call execution
additionalProperties: false
required:
- call_id
- - name
- - arguments
+ - output
- type
title: >-
- OpenAIResponseOutputMessageFunctionToolCall
+ OpenAIResponseInputFunctionToolCallOutput
description: >-
- Function tool call output message for OpenAI responses.
- OpenAIResponseOutputMessageMCPCall:
+ This represents the output of a function call that gets passed back to the
+ model.
+ OpenAIResponseMCPApprovalRequest:
type: object
properties:
- id:
- type: string
- description: Unique identifier for this MCP call
- type:
- type: string
- const: mcp_call
- default: mcp_call
- description: >-
- Tool call type identifier, always "mcp_call"
arguments:
type: string
- description: >-
- JSON string containing the MCP call arguments
+ id:
+ type: string
name:
type: string
- description: Name of the MCP method being called
server_label:
type: string
- description: >-
- Label identifying the MCP server handling the call
- error:
- type: string
- description: >-
- (Optional) Error message if the MCP call failed
- output:
+ type:
type: string
- description: >-
- (Optional) Output result from the successful MCP call
+ const: mcp_approval_request
+ default: mcp_approval_request
additionalProperties: false
required:
- - id
- - type
- arguments
+ - id
- name
- server_label
- title: OpenAIResponseOutputMessageMCPCall
+ - type
+ title: OpenAIResponseMCPApprovalRequest
description: >-
- Model Context Protocol (MCP) call output message for OpenAI responses.
- OpenAIResponseOutputMessageMCPListTools:
+ A request for human approval of a tool invocation.
+ OpenAIResponseMCPApprovalResponse:
type: object
properties:
- id:
+ approval_request_id:
type: string
- description: >-
- Unique identifier for this MCP list tools operation
+ approve:
+ type: boolean
type:
type: string
- const: mcp_list_tools
- default: mcp_list_tools
- description: >-
- Tool call type identifier, always "mcp_list_tools"
- server_label:
+ const: mcp_approval_response
+ default: mcp_approval_response
+ id:
+ type: string
+ reason:
type: string
- description: >-
- Label identifying the MCP server providing the tools
- tools:
- type: array
- items:
- type: object
- properties:
- input_schema:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: >-
- JSON schema defining the tool's input parameters
- name:
- type: string
- description: Name of the tool
- description:
- type: string
- description: >-
- (Optional) Description of what the tool does
- additionalProperties: false
- required:
- - input_schema
- - name
- title: MCPListToolsTool
- description: >-
- Tool definition returned by MCP list tools operation.
- description: >-
- List of available tools provided by the MCP server
additionalProperties: false
required:
- - id
+ - approval_request_id
+ - approve
- type
- - server_label
- - tools
- title: OpenAIResponseOutputMessageMCPListTools
- description: >-
- MCP list tools output message containing available tools from an MCP server.
- "OpenAIResponseOutputMessageWebSearchToolCall":
+ title: OpenAIResponseMCPApprovalResponse
+ description: A response to an MCP approval request.
+ OpenAIResponseObjectWithInput:
type: object
properties:
+ created_at:
+ type: integer
+ description: >-
+ Unix timestamp when the response was created
+ error:
+ $ref: '#/components/schemas/OpenAIResponseError'
+ description: >-
+ (Optional) Error details if the response generation failed
id:
type: string
- description: Unique identifier for this tool call
+ description: Unique identifier for this response
+ model:
+ type: string
+ description: Model identifier used for generation
+ object:
+ type: string
+ const: response
+ default: response
+ description: >-
+ Object type identifier, always "response"
+ output:
+ type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseOutput'
+ description: >-
+ List of generated output items (messages, tool calls, etc.)
+ parallel_tool_calls:
+ type: boolean
+ default: false
+ description: >-
+ Whether tool calls can be executed in parallel
+ previous_response_id:
+ type: string
+ description: >-
+ (Optional) ID of the previous response in a conversation
status:
type: string
description: >-
- Current status of the web search operation
- type:
+ Current status of the response generation
+ temperature:
+ type: number
+ description: >-
+ (Optional) Sampling temperature used for generation
+ text:
+ $ref: '#/components/schemas/OpenAIResponseText'
+ description: >-
+ Text formatting configuration for the response
+ top_p:
+ type: number
+ description: >-
+ (Optional) Nucleus sampling parameter used for generation
+ truncation:
type: string
- const: web_search_call
- default: web_search_call
description: >-
- Tool call type identifier, always "web_search_call"
+ (Optional) Truncation strategy applied to the response
+ input:
+ type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseInput'
+ description: >-
+ List of input items that led to this response
additionalProperties: false
required:
+ - created_at
- id
+ - model
+ - object
+ - output
+ - parallel_tool_calls
- status
- - type
- title: >-
- OpenAIResponseOutputMessageWebSearchToolCall
+ - text
+ - input
+ title: OpenAIResponseObjectWithInput
description: >-
- Web search tool call output message for OpenAI responses.
+ OpenAI response object extended with input context information.
+ OpenAIResponseOutput:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseMessage'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
+ - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
+ discriminator:
+ propertyName: type
+ mapping:
+ message: '#/components/schemas/OpenAIResponseMessage'
+ web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
+ mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
+ mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
OpenAIResponseText:
type: object
properties:
@@ -9291,6 +9859,10 @@ tags:
- `background`
x-displayName: Agents
+ - name: Conversations
+ description: ''
+ x-displayName: >-
+ Protocol for conversation management operations.
- name: Files
description: ''
- name: Inference
@@ -9341,6 +9913,7 @@ x-tagGroups:
- name: Operations
tags:
- Agents
+ - Conversations
- Files
- Inference
- Inspect
diff --git a/docs/static/stainless-llama-stack-spec.html b/docs/static/stainless-llama-stack-spec.html
index f921d2c292..f037f24c35 100644
--- a/docs/static/stainless-llama-stack-spec.html
+++ b/docs/static/stainless-llama-stack-spec.html
@@ -252,15 +252,15 @@
"deprecated": false
}
},
- "/v1/embeddings": {
+ "/v1/conversations": {
"post": {
"responses": {
"200": {
- "description": "An OpenAIEmbeddingsResponse containing the embeddings.",
+ "description": "The created conversation object.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIEmbeddingsResponse"
+ "$ref": "#/components/schemas/Conversation"
}
}
}
@@ -279,16 +279,16 @@
}
},
"tags": [
- "Inference"
+ "Conversations"
],
- "summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
- "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
+ "summary": "Create a conversation.",
+ "description": "Create a conversation.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenaiEmbeddingsRequest"
+ "$ref": "#/components/schemas/CreateConversationRequest"
}
}
},
@@ -297,15 +297,15 @@
"deprecated": false
}
},
- "/v1/files": {
+ "/v1/conversations/{conversation_id}": {
"get": {
"responses": {
"200": {
- "description": "An ListOpenAIFileResponse containing the list of files.",
+ "description": "The conversation object.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenAIFileResponse"
+ "$ref": "#/components/schemas/Conversation"
}
}
}
@@ -324,46 +324,19 @@
}
},
"tags": [
- "Files"
+ "Conversations"
],
- "summary": "Returns a list of files that belong to the user's organization.",
- "description": "Returns a list of files that belong to the user's organization.",
+ "summary": "Get a conversation with the given ID.",
+ "description": "Get a conversation with the given ID.",
"parameters": [
{
- "name": "after",
- "in": "query",
- "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.",
- "required": false,
+ "name": "conversation_id",
+ "in": "path",
+ "description": "The conversation identifier.",
+ "required": true,
"schema": {
"type": "string"
}
- },
- {
- "name": "limit",
- "in": "query",
- "description": "A limit on the number of objects to be returned. Limit can range between 1 and 10,000, and the default is 10,000.",
- "required": false,
- "schema": {
- "type": "integer"
- }
- },
- {
- "name": "order",
- "in": "query",
- "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/Order"
- }
- },
- {
- "name": "purpose",
- "in": "query",
- "description": "Only return files with the given purpose.",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/OpenAIFilePurpose"
- }
}
],
"deprecated": false
@@ -371,11 +344,11 @@
"post": {
"responses": {
"200": {
- "description": "An OpenAIFileObject representing the uploaded file.",
+ "description": "The updated conversation object.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIFileObject"
+ "$ref": "#/components/schemas/Conversation"
}
}
}
@@ -394,49 +367,41 @@
}
},
"tags": [
- "Files"
+ "Conversations"
+ ],
+ "summary": "Update a conversation's metadata with the given ID.",
+ "description": "Update a conversation's metadata with the given ID.",
+ "parameters": [
+ {
+ "name": "conversation_id",
+ "in": "path",
+ "description": "The conversation identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
],
- "summary": "Upload a file that can be used across various endpoints.",
- "description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
- "parameters": [],
"requestBody": {
"content": {
- "multipart/form-data": {
+ "application/json": {
"schema": {
- "type": "object",
- "properties": {
- "file": {
- "type": "string",
- "format": "binary"
- },
- "purpose": {
- "$ref": "#/components/schemas/OpenAIFilePurpose"
- },
- "expires_after": {
- "$ref": "#/components/schemas/ExpiresAfter"
- }
- },
- "required": [
- "file",
- "purpose"
- ]
+ "$ref": "#/components/schemas/UpdateConversationRequest"
}
}
},
"required": true
},
"deprecated": false
- }
- },
- "/v1/files/{file_id}": {
- "get": {
+ },
+ "delete": {
"responses": {
"200": {
- "description": "An OpenAIFileObject containing file information.",
+ "description": "The deleted conversation resource.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIFileObject"
+ "$ref": "#/components/schemas/ConversationDeletedResource"
}
}
}
@@ -455,15 +420,15 @@
}
},
"tags": [
- "Files"
+ "Conversations"
],
- "summary": "Returns information about a specific file.",
- "description": "Returns information about a specific file.",
+ "summary": "Delete a conversation with the given ID.",
+ "description": "Delete a conversation with the given ID.",
"parameters": [
{
- "name": "file_id",
+ "name": "conversation_id",
"in": "path",
- "description": "The ID of the file to use for this request.",
+ "description": "The conversation identifier.",
"required": true,
"schema": {
"type": "string"
@@ -471,15 +436,17 @@
}
],
"deprecated": false
- },
- "delete": {
+ }
+ },
+ "/v1/conversations/{conversation_id}/items": {
+ "get": {
"responses": {
"200": {
- "description": "An OpenAIFileDeleteResponse indicating successful deletion.",
+ "description": "List of conversation items.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIFileDeleteResponse"
+ "$ref": "#/components/schemas/ConversationItemList"
}
}
}
@@ -498,33 +465,118 @@
}
},
"tags": [
- "Files"
+ "Conversations"
],
- "summary": "Delete a file.",
- "description": "Delete a file.",
+ "summary": "List items in the conversation.",
+ "description": "List items in the conversation.",
"parameters": [
{
- "name": "file_id",
+ "name": "conversation_id",
"in": "path",
- "description": "The ID of the file to use for this request.",
+ "description": "The conversation identifier.",
"required": true,
"schema": {
"type": "string"
}
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "An item ID to list items after, used in pagination.",
+ "required": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+ "title": "NotGiven",
+ "description": "A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).\nFor example:\n\n```py\ndef get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...\n\n\nget(timeout=1) # 1s timeout\nget(timeout=None) # No timeout\nget() # Default timeout behavior, which may not be statically known at the method definition.\n```"
+ }
+ ]
+ }
+ },
+ {
+ "name": "include",
+ "in": "query",
+ "description": "Specify additional output data to include in the response.",
+ "required": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content"
+ ]
+ }
+ },
+ {
+ "type": "object",
+ "title": "NotGiven",
+ "description": "A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).\nFor example:\n\n```py\ndef get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...\n\n\nget(timeout=1) # 1s timeout\nget(timeout=None) # No timeout\nget() # Default timeout behavior, which may not be statically known at the method definition.\n```"
+ }
+ ]
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "A limit on the number of objects to be returned (1-100, default 20).",
+ "required": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "object",
+ "title": "NotGiven",
+ "description": "A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).\nFor example:\n\n```py\ndef get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...\n\n\nget(timeout=1) # 1s timeout\nget(timeout=None) # No timeout\nget() # Default timeout behavior, which may not be statically known at the method definition.\n```"
+ }
+ ]
+ }
+ },
+ {
+ "name": "order",
+ "in": "query",
+ "description": "The order to return items in (asc or desc, default desc).",
+ "required": true,
+ "schema": {
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "asc",
+ "desc"
+ ]
+ },
+ {
+ "type": "object",
+ "title": "NotGiven",
+ "description": "A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).\nFor example:\n\n```py\ndef get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...\n\n\nget(timeout=1) # 1s timeout\nget(timeout=None) # No timeout\nget() # Default timeout behavior, which may not be statically known at the method definition.\n```"
+ }
+ ]
+ }
}
],
"deprecated": false
- }
- },
- "/v1/files/{file_id}/content": {
- "get": {
+ },
+ "post": {
"responses": {
"200": {
- "description": "The raw file content as a binary response.",
+ "description": "List of created items.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Response"
+ "$ref": "#/components/schemas/ConversationItemList"
}
}
}
@@ -543,33 +595,43 @@
}
},
"tags": [
- "Files"
+ "Conversations"
],
- "summary": "Returns the contents of the specified file.",
- "description": "Returns the contents of the specified file.",
+ "summary": "Create items in the conversation.",
+ "description": "Create items in the conversation.",
"parameters": [
{
- "name": "file_id",
+ "name": "conversation_id",
"in": "path",
- "description": "The ID of the file to use for this request.",
+ "description": "The conversation identifier.",
"required": true,
"schema": {
"type": "string"
}
}
],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/AddItemsRequest"
+ }
+ }
+ },
+ "required": true
+ },
"deprecated": false
}
},
- "/v1/health": {
+ "/v1/conversations/{conversation_id}/items/{item_id}": {
"get": {
"responses": {
"200": {
- "description": "Health information indicating if the service is operational.",
+ "description": "The conversation item.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/HealthInfo"
+ "$ref": "#/components/schemas/ConversationItem"
}
}
}
@@ -588,23 +650,40 @@
}
},
"tags": [
- "Inspect"
+ "Conversations"
+ ],
+ "summary": "Retrieve a conversation item.",
+ "description": "Retrieve a conversation item.",
+ "parameters": [
+ {
+ "name": "conversation_id",
+ "in": "path",
+ "description": "The conversation identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "item_id",
+ "in": "path",
+ "description": "The item identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
],
- "summary": "Get the current health status of the service.",
- "description": "Get the current health status of the service.",
- "parameters": [],
"deprecated": false
- }
- },
- "/v1/inspect/routes": {
- "get": {
+ },
+ "delete": {
"responses": {
"200": {
- "description": "Response containing information about all available routes.",
+ "description": "The deleted item resource.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListRoutesResponse"
+ "$ref": "#/components/schemas/ConversationItemDeletedResource"
}
}
}
@@ -623,23 +702,42 @@
}
},
"tags": [
- "Inspect"
+ "Conversations"
+ ],
+ "summary": "Delete a conversation item.",
+ "description": "Delete a conversation item.",
+ "parameters": [
+ {
+ "name": "conversation_id",
+ "in": "path",
+ "description": "The conversation identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "item_id",
+ "in": "path",
+ "description": "The item identifier.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
],
- "summary": "List all available API routes with their methods and implementing providers.",
- "description": "List all available API routes with their methods and implementing providers.",
- "parameters": [],
"deprecated": false
}
},
- "/v1/models": {
- "get": {
+ "/v1/embeddings": {
+ "post": {
"responses": {
"200": {
- "description": "A ListModelsResponse.",
+ "description": "An OpenAIEmbeddingsResponse containing the embeddings.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListModelsResponse"
+ "$ref": "#/components/schemas/OpenAIEmbeddingsResponse"
}
}
}
@@ -658,21 +756,103 @@
}
},
"tags": [
- "Models"
+ "Inference"
],
- "summary": "List all models.",
- "description": "List all models.",
+ "summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
+ "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
"parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/OpenaiEmbeddingsRequest"
+ }
+ }
+ },
+ "required": true
+ },
+ "deprecated": false
+ }
+ },
+ "/v1/files": {
+ "get": {
+ "responses": {
+ "200": {
+ "description": "An ListOpenAIFileResponse containing the list of files.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListOpenAIFileResponse"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "Files"
+ ],
+ "summary": "Returns a list of files that belong to the user's organization.",
+ "description": "Returns a list of files that belong to the user's organization.",
+ "parameters": [
+ {
+ "name": "after",
+ "in": "query",
+ "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "A limit on the number of objects to be returned. Limit can range between 1 and 10,000, and the default is 10,000.",
+ "required": false,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "order",
+ "in": "query",
+ "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/Order"
+ }
+ },
+ {
+ "name": "purpose",
+ "in": "query",
+ "description": "Only return files with the given purpose.",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/OpenAIFilePurpose"
+ }
+ }
+ ],
"deprecated": false
},
"post": {
"responses": {
"200": {
- "description": "A Model.",
+ "description": "An OpenAIFileObject representing the uploaded file.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Model"
+ "$ref": "#/components/schemas/OpenAIFileObject"
}
}
}
@@ -691,16 +871,32 @@
}
},
"tags": [
- "Models"
+ "Files"
],
- "summary": "Register a model.",
- "description": "Register a model.",
+ "summary": "Upload a file that can be used across various endpoints.",
+ "description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"parameters": [],
"requestBody": {
"content": {
- "application/json": {
+ "multipart/form-data": {
"schema": {
- "$ref": "#/components/schemas/RegisterModelRequest"
+ "type": "object",
+ "properties": {
+ "file": {
+ "type": "string",
+ "format": "binary"
+ },
+ "purpose": {
+ "$ref": "#/components/schemas/OpenAIFilePurpose"
+ },
+ "expires_after": {
+ "$ref": "#/components/schemas/ExpiresAfter"
+ }
+ },
+ "required": [
+ "file",
+ "purpose"
+ ]
}
}
},
@@ -709,15 +905,15 @@
"deprecated": false
}
},
- "/v1/models/{model_id}": {
+ "/v1/files/{file_id}": {
"get": {
"responses": {
"200": {
- "description": "A Model.",
+ "description": "An OpenAIFileObject containing file information.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Model"
+ "$ref": "#/components/schemas/OpenAIFileObject"
}
}
}
@@ -736,15 +932,15 @@
}
},
"tags": [
- "Models"
+ "Files"
],
- "summary": "Get a model by its identifier.",
- "description": "Get a model by its identifier.",
+ "summary": "Returns information about a specific file.",
+ "description": "Returns information about a specific file.",
"parameters": [
{
- "name": "model_id",
+ "name": "file_id",
"in": "path",
- "description": "The identifier of the model to get.",
+ "description": "The ID of the file to use for this request.",
"required": true,
"schema": {
"type": "string"
@@ -756,7 +952,14 @@
"delete": {
"responses": {
"200": {
- "description": "OK"
+ "description": "An OpenAIFileDeleteResponse indicating successful deletion.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/OpenAIFileDeleteResponse"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -772,15 +975,15 @@
}
},
"tags": [
- "Models"
+ "Files"
],
- "summary": "Unregister a model.",
- "description": "Unregister a model.",
+ "summary": "Delete a file.",
+ "description": "Delete a file.",
"parameters": [
{
- "name": "model_id",
+ "name": "file_id",
"in": "path",
- "description": "The identifier of the model to unregister.",
+ "description": "The ID of the file to use for this request.",
"required": true,
"schema": {
"type": "string"
@@ -790,15 +993,15 @@
"deprecated": false
}
},
- "/v1/moderations": {
- "post": {
+ "/v1/files/{file_id}/content": {
+ "get": {
"responses": {
"200": {
- "description": "A moderation object.",
+ "description": "The raw file content as a binary response.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ModerationObject"
+ "$ref": "#/components/schemas/Response"
}
}
}
@@ -817,33 +1020,33 @@
}
},
"tags": [
- "Safety"
+ "Files"
],
- "summary": "Classifies if text and/or image inputs are potentially harmful.",
- "description": "Classifies if text and/or image inputs are potentially harmful.",
- "parameters": [],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/RunModerationRequest"
- }
+ "summary": "Returns the contents of the specified file.",
+ "description": "Returns the contents of the specified file.",
+ "parameters": [
+ {
+ "name": "file_id",
+ "in": "path",
+ "description": "The ID of the file to use for this request.",
+ "required": true,
+ "schema": {
+ "type": "string"
}
- },
- "required": true
- },
+ }
+ ],
"deprecated": false
}
},
- "/v1/prompts": {
+ "/v1/health": {
"get": {
"responses": {
"200": {
- "description": "A ListPromptsResponse containing all prompts.",
+ "description": "Health information indicating if the service is operational.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListPromptsResponse"
+ "$ref": "#/components/schemas/HealthInfo"
}
}
}
@@ -862,21 +1065,23 @@
}
},
"tags": [
- "Prompts"
+ "Inspect"
],
- "summary": "List all prompts.",
- "description": "List all prompts.",
+ "summary": "Get the current health status of the service.",
+ "description": "Get the current health status of the service.",
"parameters": [],
"deprecated": false
- },
- "post": {
+ }
+ },
+ "/v1/inspect/routes": {
+ "get": {
"responses": {
"200": {
- "description": "The created Prompt resource.",
+ "description": "Response containing information about all available routes.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Prompt"
+ "$ref": "#/components/schemas/ListRoutesResponse"
}
}
}
@@ -895,33 +1100,23 @@
}
},
"tags": [
- "Prompts"
+ "Inspect"
],
- "summary": "Create a new prompt.",
- "description": "Create a new prompt.",
+ "summary": "List all available API routes with their methods and implementing providers.",
+ "description": "List all available API routes with their methods and implementing providers.",
"parameters": [],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreatePromptRequest"
- }
- }
- },
- "required": true
- },
"deprecated": false
}
},
- "/v1/prompts/{prompt_id}": {
+ "/v1/models": {
"get": {
"responses": {
"200": {
- "description": "A Prompt resource.",
+ "description": "A ListModelsResponse.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Prompt"
+ "$ref": "#/components/schemas/ListModelsResponse"
}
}
}
@@ -940,40 +1135,21 @@
}
},
"tags": [
- "Prompts"
- ],
- "summary": "Get a prompt by its identifier and optional version.",
- "description": "Get a prompt by its identifier and optional version.",
- "parameters": [
- {
- "name": "prompt_id",
- "in": "path",
- "description": "The identifier of the prompt to get.",
- "required": true,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "version",
- "in": "query",
- "description": "The version of the prompt to get (defaults to latest).",
- "required": false,
- "schema": {
- "type": "integer"
- }
- }
+ "Models"
],
+ "summary": "List all models.",
+ "description": "List all models.",
+ "parameters": [],
"deprecated": false
},
"post": {
"responses": {
"200": {
- "description": "The updated Prompt resource with incremented version.",
+ "description": "A Model.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Prompt"
+ "$ref": "#/components/schemas/Model"
}
}
}
@@ -992,37 +1168,36 @@
}
},
"tags": [
- "Prompts"
- ],
- "summary": "Update an existing prompt (increments version).",
- "description": "Update an existing prompt (increments version).",
- "parameters": [
- {
- "name": "prompt_id",
- "in": "path",
- "description": "The identifier of the prompt to update.",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
+ "Models"
],
+ "summary": "Register a model.",
+ "description": "Register a model.",
+ "parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/UpdatePromptRequest"
+ "$ref": "#/components/schemas/RegisterModelRequest"
}
}
},
"required": true
},
"deprecated": false
- },
- "delete": {
+ }
+ },
+ "/v1/models/{model_id}": {
+ "get": {
"responses": {
"200": {
- "description": "OK"
+ "description": "A Model.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Model"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1038,15 +1213,15 @@
}
},
"tags": [
- "Prompts"
+ "Models"
],
- "summary": "Delete a prompt.",
- "description": "Delete a prompt.",
+ "summary": "Get a model by its identifier.",
+ "description": "Get a model by its identifier.",
"parameters": [
{
- "name": "prompt_id",
+ "name": "model_id",
"in": "path",
- "description": "The identifier of the prompt to delete.",
+ "description": "The identifier of the model to get.",
"required": true,
"schema": {
"type": "string"
@@ -1054,20 +1229,11 @@
}
],
"deprecated": false
- }
- },
- "/v1/prompts/{prompt_id}/set-default-version": {
- "post": {
+ },
+ "delete": {
"responses": {
"200": {
- "description": "The prompt with the specified version now set as default.",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Prompt"
- }
- }
- }
+ "description": "OK"
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1083,43 +1249,33 @@
}
},
"tags": [
- "Prompts"
+ "Models"
],
- "summary": "Set which version of a prompt should be the default in get_prompt (latest).",
- "description": "Set which version of a prompt should be the default in get_prompt (latest).",
+ "summary": "Unregister a model.",
+ "description": "Unregister a model.",
"parameters": [
{
- "name": "prompt_id",
+ "name": "model_id",
"in": "path",
- "description": "The identifier of the prompt.",
+ "description": "The identifier of the model to unregister.",
"required": true,
"schema": {
"type": "string"
}
}
],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/SetDefaultVersionRequest"
- }
- }
- },
- "required": true
- },
"deprecated": false
}
},
- "/v1/prompts/{prompt_id}/versions": {
- "get": {
+ "/v1/moderations": {
+ "post": {
"responses": {
"200": {
- "description": "A ListPromptsResponse containing all versions of the prompt.",
+ "description": "A moderation object.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListPromptsResponse"
+ "$ref": "#/components/schemas/ModerationObject"
}
}
}
@@ -1138,33 +1294,33 @@
}
},
"tags": [
- "Prompts"
+ "Safety"
],
- "summary": "List all versions of a specific prompt.",
- "description": "List all versions of a specific prompt.",
- "parameters": [
- {
- "name": "prompt_id",
- "in": "path",
- "description": "The identifier of the prompt to list versions for.",
- "required": true,
- "schema": {
- "type": "string"
+ "summary": "Classifies if text and/or image inputs are potentially harmful.",
+ "description": "Classifies if text and/or image inputs are potentially harmful.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/RunModerationRequest"
+ }
}
- }
- ],
+ },
+ "required": true
+ },
"deprecated": false
}
},
- "/v1/providers": {
+ "/v1/prompts": {
"get": {
"responses": {
"200": {
- "description": "A ListProvidersResponse containing information about all providers.",
+ "description": "A ListPromptsResponse containing all prompts.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListProvidersResponse"
+ "$ref": "#/components/schemas/ListPromptsResponse"
}
}
}
@@ -1183,23 +1339,21 @@
}
},
"tags": [
- "Providers"
+ "Prompts"
],
- "summary": "List all available providers.",
- "description": "List all available providers.",
+ "summary": "List all prompts.",
+ "description": "List all prompts.",
"parameters": [],
"deprecated": false
- }
- },
- "/v1/providers/{provider_id}": {
- "get": {
+ },
+ "post": {
"responses": {
"200": {
- "description": "A ProviderInfo object containing the provider's details.",
+ "description": "The created Prompt resource.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ProviderInfo"
+ "$ref": "#/components/schemas/Prompt"
}
}
}
@@ -1218,33 +1372,33 @@
}
},
"tags": [
- "Providers"
+ "Prompts"
],
- "summary": "Get detailed information about a specific provider.",
- "description": "Get detailed information about a specific provider.",
- "parameters": [
- {
- "name": "provider_id",
- "in": "path",
- "description": "The ID of the provider to inspect.",
- "required": true,
- "schema": {
- "type": "string"
+ "summary": "Create a new prompt.",
+ "description": "Create a new prompt.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CreatePromptRequest"
+ }
}
- }
- ],
+ },
+ "required": true
+ },
"deprecated": false
}
},
- "/v1/responses": {
+ "/v1/prompts/{prompt_id}": {
"get": {
"responses": {
"200": {
- "description": "A ListOpenAIResponseObject.",
+ "description": "A Prompt resource.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenAIResponseObject"
+ "$ref": "#/components/schemas/Prompt"
}
}
}
@@ -1263,46 +1417,28 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
],
- "summary": "List all OpenAI responses.",
- "description": "List all OpenAI responses.",
+ "summary": "Get a prompt by its identifier and optional version.",
+ "description": "Get a prompt by its identifier and optional version.",
"parameters": [
{
- "name": "after",
- "in": "query",
- "description": "The ID of the last response to return.",
- "required": false,
+ "name": "prompt_id",
+ "in": "path",
+ "description": "The identifier of the prompt to get.",
+ "required": true,
"schema": {
"type": "string"
}
},
{
- "name": "limit",
+ "name": "version",
"in": "query",
- "description": "The number of responses to return.",
+ "description": "The version of the prompt to get (defaults to latest).",
"required": false,
"schema": {
"type": "integer"
}
- },
- {
- "name": "model",
- "in": "query",
- "description": "The model to filter responses by.",
- "required": false,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "order",
- "in": "query",
- "description": "The order to sort responses by when sorted by created_at ('asc' or 'desc').",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/Order"
- }
}
],
"deprecated": false
@@ -1310,11 +1446,11 @@
"post": {
"responses": {
"200": {
- "description": "A ListOpenAIResponseObject.",
+ "description": "The updated Prompt resource with incremented version.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenAIResponseObject"
+ "$ref": "#/components/schemas/Prompt"
}
}
}
@@ -1333,36 +1469,37 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
+ ],
+ "summary": "Update an existing prompt (increments version).",
+ "description": "Update an existing prompt (increments version).",
+ "parameters": [
+ {
+ "name": "prompt_id",
+ "in": "path",
+ "description": "The identifier of the prompt to update.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
],
- "summary": "List all OpenAI responses.",
- "description": "List all OpenAI responses.",
- "parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenaiResponsesRequest"
+ "$ref": "#/components/schemas/UpdatePromptRequest"
}
}
},
"required": true
},
"deprecated": false
- }
- },
- "/v1/responses/{response_id}": {
- "get": {
+ },
+ "delete": {
"responses": {
"200": {
- "description": "An OpenAIResponseObject.",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/OpenAIResponseObject"
- }
- }
- }
+ "description": "OK"
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1378,15 +1515,15 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
],
- "summary": "Retrieve an OpenAI response by its ID.",
- "description": "Retrieve an OpenAI response by its ID.",
+ "summary": "Delete a prompt.",
+ "description": "Delete a prompt.",
"parameters": [
{
- "name": "response_id",
+ "name": "prompt_id",
"in": "path",
- "description": "The ID of the OpenAI response to retrieve.",
+ "description": "The identifier of the prompt to delete.",
"required": true,
"schema": {
"type": "string"
@@ -1394,15 +1531,17 @@
}
],
"deprecated": false
- },
- "delete": {
+ }
+ },
+ "/v1/prompts/{prompt_id}/set-default-version": {
+ "post": {
"responses": {
"200": {
- "description": "An OpenAIDeleteResponseObject",
+ "description": "The prompt with the specified version now set as default.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIDeleteResponseObject"
+ "$ref": "#/components/schemas/Prompt"
}
}
}
@@ -1421,33 +1560,43 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
],
- "summary": "Delete an OpenAI response by its ID.",
- "description": "Delete an OpenAI response by its ID.",
+ "summary": "Set which version of a prompt should be the default in get_prompt (latest).",
+ "description": "Set which version of a prompt should be the default in get_prompt (latest).",
"parameters": [
{
- "name": "response_id",
+ "name": "prompt_id",
"in": "path",
- "description": "The ID of the OpenAI response to delete.",
+ "description": "The identifier of the prompt.",
"required": true,
"schema": {
"type": "string"
}
}
],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/SetDefaultVersionRequest"
+ }
+ }
+ },
+ "required": true
+ },
"deprecated": false
}
},
- "/v1/responses/{response_id}/input_items": {
+ "/v1/prompts/{prompt_id}/versions": {
"get": {
"responses": {
"200": {
- "description": "An ListOpenAIResponseInputItem.",
+ "description": "A ListPromptsResponse containing all versions of the prompt.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListOpenAIResponseInputItem"
+ "$ref": "#/components/schemas/ListPromptsResponse"
}
}
}
@@ -1466,81 +1615,68 @@
}
},
"tags": [
- "Agents"
+ "Prompts"
],
- "summary": "List input items for a given OpenAI response.",
- "description": "List input items for a given OpenAI response.",
+ "summary": "List all versions of a specific prompt.",
+ "description": "List all versions of a specific prompt.",
"parameters": [
{
- "name": "response_id",
+ "name": "prompt_id",
"in": "path",
- "description": "The ID of the response to retrieve input items for.",
+ "description": "The identifier of the prompt to list versions for.",
"required": true,
"schema": {
"type": "string"
}
- },
- {
- "name": "after",
- "in": "query",
- "description": "An item ID to list items after, used for pagination.",
- "required": false,
- "schema": {
- "type": "string"
+ }
+ ],
+ "deprecated": false
+ }
+ },
+ "/v1/providers": {
+ "get": {
+ "responses": {
+ "200": {
+ "description": "A ListProvidersResponse containing information about all providers.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListProvidersResponse"
+ }
+ }
}
},
- {
- "name": "before",
- "in": "query",
- "description": "An item ID to list items before, used for pagination.",
- "required": false,
- "schema": {
- "type": "string"
- }
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
},
- {
- "name": "include",
- "in": "query",
- "description": "Additional fields to include in the response.",
- "required": false,
- "schema": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
},
- {
- "name": "limit",
- "in": "query",
- "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.",
- "required": false,
- "schema": {
- "type": "integer"
- }
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
},
- {
- "name": "order",
- "in": "query",
- "description": "The order to return the input items in. Default is desc.",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/Order"
- }
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
}
+ },
+ "tags": [
+ "Providers"
],
+ "summary": "List all available providers.",
+ "description": "List all available providers.",
+ "parameters": [],
"deprecated": false
}
},
- "/v1/safety/run-shield": {
- "post": {
+ "/v1/providers/{provider_id}": {
+ "get": {
"responses": {
"200": {
- "description": "A RunShieldResponse.",
+ "description": "A ProviderInfo object containing the provider's details.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/RunShieldResponse"
+ "$ref": "#/components/schemas/ProviderInfo"
}
}
}
@@ -1559,33 +1695,33 @@
}
},
"tags": [
- "Safety"
+ "Providers"
],
- "summary": "Run a shield.",
- "description": "Run a shield.",
- "parameters": [],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/RunShieldRequest"
- }
+ "summary": "Get detailed information about a specific provider.",
+ "description": "Get detailed information about a specific provider.",
+ "parameters": [
+ {
+ "name": "provider_id",
+ "in": "path",
+ "description": "The ID of the provider to inspect.",
+ "required": true,
+ "schema": {
+ "type": "string"
}
- },
- "required": true
- },
+ }
+ ],
"deprecated": false
}
},
- "/v1/scoring-functions": {
+ "/v1/responses": {
"get": {
"responses": {
"200": {
- "description": "A ListScoringFunctionsResponse.",
+ "description": "A ListOpenAIResponseObject.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListScoringFunctionsResponse"
+ "$ref": "#/components/schemas/ListOpenAIResponseObject"
}
}
}
@@ -1604,17 +1740,61 @@
}
},
"tags": [
- "ScoringFunctions"
+ "Agents"
+ ],
+ "summary": "List all OpenAI responses.",
+ "description": "List all OpenAI responses.",
+ "parameters": [
+ {
+ "name": "after",
+ "in": "query",
+ "description": "The ID of the last response to return.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "The number of responses to return.",
+ "required": false,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "model",
+ "in": "query",
+ "description": "The model to filter responses by.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "order",
+ "in": "query",
+ "description": "The order to sort responses by when sorted by created_at ('asc' or 'desc').",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/Order"
+ }
+ }
],
- "summary": "List all scoring functions.",
- "description": "List all scoring functions.",
- "parameters": [],
"deprecated": false
},
"post": {
"responses": {
"200": {
- "description": "OK"
+ "description": "A ListOpenAIResponseObject.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListOpenAIResponseObject"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1630,16 +1810,16 @@
}
},
"tags": [
- "ScoringFunctions"
+ "Agents"
],
- "summary": "Register a scoring function.",
- "description": "Register a scoring function.",
+ "summary": "List all OpenAI responses.",
+ "description": "List all OpenAI responses.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/RegisterScoringFunctionRequest"
+ "$ref": "#/components/schemas/ListOpenaiResponsesRequest"
}
}
},
@@ -1648,15 +1828,15 @@
"deprecated": false
}
},
- "/v1/scoring-functions/{scoring_fn_id}": {
+ "/v1/responses/{response_id}": {
"get": {
"responses": {
"200": {
- "description": "A ScoringFn.",
+ "description": "An OpenAIResponseObject.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ScoringFn"
+ "$ref": "#/components/schemas/OpenAIResponseObject"
}
}
}
@@ -1675,15 +1855,15 @@
}
},
"tags": [
- "ScoringFunctions"
+ "Agents"
],
- "summary": "Get a scoring function by its ID.",
- "description": "Get a scoring function by its ID.",
+ "summary": "Retrieve an OpenAI response by its ID.",
+ "description": "Retrieve an OpenAI response by its ID.",
"parameters": [
{
- "name": "scoring_fn_id",
+ "name": "response_id",
"in": "path",
- "description": "The ID of the scoring function to get.",
+ "description": "The ID of the OpenAI response to retrieve.",
"required": true,
"schema": {
"type": "string"
@@ -1695,7 +1875,14 @@
"delete": {
"responses": {
"200": {
- "description": "OK"
+ "description": "An OpenAIDeleteResponseObject.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/OpenAIDeleteResponseObject"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1711,15 +1898,15 @@
}
},
"tags": [
- "ScoringFunctions"
+ "Agents"
],
- "summary": "Unregister a scoring function.",
- "description": "Unregister a scoring function.",
+ "summary": "Delete an OpenAI response by its ID.",
+ "description": "Delete an OpenAI response by its ID.",
"parameters": [
{
- "name": "scoring_fn_id",
+ "name": "response_id",
"in": "path",
- "description": "The ID of the scoring function to unregister.",
+ "description": "The ID of the OpenAI response to delete.",
"required": true,
"schema": {
"type": "string"
@@ -1729,15 +1916,15 @@
"deprecated": false
}
},
- "/v1/scoring/score": {
- "post": {
+ "/v1/responses/{response_id}/input_items": {
+ "get": {
"responses": {
"200": {
- "description": "A ScoreResponse object containing rows and aggregated results.",
+ "description": "A ListOpenAIResponseInputItem.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ScoreResponse"
+ "$ref": "#/components/schemas/ListOpenAIResponseInputItem"
}
}
}
@@ -1756,33 +1943,81 @@
}
},
"tags": [
- "Scoring"
+ "Agents"
],
- "summary": "Score a list of rows.",
- "description": "Score a list of rows.",
- "parameters": [],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ScoreRequest"
+ "summary": "List input items for a given OpenAI response.",
+ "description": "List input items for a given OpenAI response.",
+ "parameters": [
+ {
+ "name": "response_id",
+ "in": "path",
+ "description": "The ID of the response to retrieve input items for.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "An item ID to list items after, used for pagination.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "An item ID to list items before, used for pagination.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "include",
+ "in": "query",
+ "description": "Additional fields to include in the response.",
+ "required": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string"
}
}
},
- "required": true
- },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.",
+ "required": false,
+ "schema": {
+ "type": "integer"
+ }
+ },
+ {
+ "name": "order",
+ "in": "query",
+ "description": "The order to return the input items in. Default is desc.",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/Order"
+ }
+ }
+ ],
"deprecated": false
}
},
- "/v1/scoring/score-batch": {
+ "/v1/safety/run-shield": {
"post": {
"responses": {
"200": {
- "description": "A ScoreBatchResponse.",
+ "description": "A RunShieldResponse.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ScoreBatchResponse"
+ "$ref": "#/components/schemas/RunShieldResponse"
}
}
}
@@ -1801,16 +2036,16 @@
}
},
"tags": [
- "Scoring"
+ "Safety"
],
- "summary": "Score a batch of rows.",
- "description": "Score a batch of rows.",
+ "summary": "Run a shield.",
+ "description": "Run a shield.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ScoreBatchRequest"
+ "$ref": "#/components/schemas/RunShieldRequest"
}
}
},
@@ -1819,15 +2054,15 @@
"deprecated": false
}
},
- "/v1/shields": {
+ "/v1/scoring-functions": {
"get": {
"responses": {
"200": {
- "description": "A ListShieldsResponse.",
+ "description": "A ListScoringFunctionsResponse.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListShieldsResponse"
+ "$ref": "#/components/schemas/ListScoringFunctionsResponse"
}
}
}
@@ -1846,24 +2081,17 @@
}
},
"tags": [
- "Shields"
+ "ScoringFunctions"
],
- "summary": "List all shields.",
- "description": "List all shields.",
+ "summary": "List all scoring functions.",
+ "description": "List all scoring functions.",
"parameters": [],
"deprecated": false
},
"post": {
"responses": {
"200": {
- "description": "A Shield.",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Shield"
- }
- }
- }
+ "description": "OK"
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -1879,16 +2107,16 @@
}
},
"tags": [
- "Shields"
+ "ScoringFunctions"
],
- "summary": "Register a shield.",
- "description": "Register a shield.",
+ "summary": "Register a scoring function.",
+ "description": "Register a scoring function.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/RegisterShieldRequest"
+ "$ref": "#/components/schemas/RegisterScoringFunctionRequest"
}
}
},
@@ -1897,15 +2125,15 @@
"deprecated": false
}
},
- "/v1/shields/{identifier}": {
+ "/v1/scoring-functions/{scoring_fn_id}": {
"get": {
"responses": {
"200": {
- "description": "A Shield.",
+ "description": "A ScoringFn.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Shield"
+ "$ref": "#/components/schemas/ScoringFn"
}
}
}
@@ -1924,15 +2152,15 @@
}
},
"tags": [
- "Shields"
+ "ScoringFunctions"
],
- "summary": "Get a shield by its identifier.",
- "description": "Get a shield by its identifier.",
+ "summary": "Get a scoring function by its ID.",
+ "description": "Get a scoring function by its ID.",
"parameters": [
{
- "name": "identifier",
+ "name": "scoring_fn_id",
"in": "path",
- "description": "The identifier of the shield to get.",
+ "description": "The ID of the scoring function to get.",
"required": true,
"schema": {
"type": "string"
@@ -1960,15 +2188,15 @@
}
},
"tags": [
- "Shields"
+ "ScoringFunctions"
],
- "summary": "Unregister a shield.",
- "description": "Unregister a shield.",
+ "summary": "Unregister a scoring function.",
+ "description": "Unregister a scoring function.",
"parameters": [
{
- "name": "identifier",
+ "name": "scoring_fn_id",
"in": "path",
- "description": "The identifier of the shield to unregister.",
+ "description": "The ID of the scoring function to unregister.",
"required": true,
"schema": {
"type": "string"
@@ -1978,15 +2206,15 @@
"deprecated": false
}
},
- "/v1/synthetic-data-generation/generate": {
+ "/v1/scoring/score": {
"post": {
"responses": {
"200": {
- "description": "Response containing filtered synthetic data samples and optional statistics",
+ "description": "A ScoreResponse object containing rows and aggregated results.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/SyntheticDataGenerationResponse"
+ "$ref": "#/components/schemas/ScoreResponse"
}
}
}
@@ -2005,16 +2233,16 @@
}
},
"tags": [
- "SyntheticDataGeneration (Coming Soon)"
+ "Scoring"
],
- "summary": "Generate synthetic data based on input dialogs and apply filtering.",
- "description": "Generate synthetic data based on input dialogs and apply filtering.",
+ "summary": "Score a list of rows.",
+ "description": "Score a list of rows.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/SyntheticDataGenerateRequest"
+ "$ref": "#/components/schemas/ScoreRequest"
}
}
},
@@ -2023,11 +2251,18 @@
"deprecated": false
}
},
- "/v1/telemetry/events": {
+ "/v1/scoring/score-batch": {
"post": {
"responses": {
"200": {
- "description": "OK"
+ "description": "A ScoreBatchResponse.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ScoreBatchResponse"
+ }
+ }
+ }
},
"400": {
"$ref": "#/components/responses/BadRequest400"
@@ -2043,16 +2278,16 @@
}
},
"tags": [
- "Telemetry"
+ "Scoring"
],
- "summary": "Log an event.",
- "description": "Log an event.",
+ "summary": "Score a batch of rows.",
+ "description": "Score a batch of rows.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/LogEventRequest"
+ "$ref": "#/components/schemas/ScoreBatchRequest"
}
}
},
@@ -2061,15 +2296,48 @@
"deprecated": false
}
},
- "/v1/tool-runtime/invoke": {
+ "/v1/shields": {
+ "get": {
+ "responses": {
+ "200": {
+ "description": "A ListShieldsResponse.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListShieldsResponse"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "Shields"
+ ],
+ "summary": "List all shields.",
+ "description": "List all shields.",
+ "parameters": [],
+ "deprecated": false
+ },
"post": {
"responses": {
"200": {
- "description": "A ToolInvocationResult.",
+ "description": "A Shield.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ToolInvocationResult"
+ "$ref": "#/components/schemas/Shield"
}
}
}
@@ -2088,16 +2356,16 @@
}
},
"tags": [
- "ToolRuntime"
+ "Shields"
],
- "summary": "Run a tool with the given arguments.",
- "description": "Run a tool with the given arguments.",
+ "summary": "Register a shield.",
+ "description": "Register a shield.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/InvokeToolRequest"
+ "$ref": "#/components/schemas/RegisterShieldRequest"
}
}
},
@@ -2106,15 +2374,15 @@
"deprecated": false
}
},
- "/v1/tool-runtime/list-tools": {
+ "/v1/shields/{identifier}": {
"get": {
"responses": {
"200": {
- "description": "A ListToolDefsResponse.",
+ "description": "A Shield.",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ListToolDefsResponse"
+ "$ref": "#/components/schemas/Shield"
}
}
}
@@ -2133,32 +2401,241 @@
}
},
"tags": [
- "ToolRuntime"
+ "Shields"
],
- "summary": "List all tools in the runtime.",
- "description": "List all tools in the runtime.",
+ "summary": "Get a shield by its identifier.",
+ "description": "Get a shield by its identifier.",
"parameters": [
{
- "name": "tool_group_id",
- "in": "query",
- "description": "The ID of the tool group to list tools for.",
- "required": false,
+ "name": "identifier",
+ "in": "path",
+ "description": "The identifier of the shield to get.",
+ "required": true,
"schema": {
"type": "string"
}
- },
- {
- "name": "mcp_endpoint",
- "in": "query",
- "description": "The MCP endpoint to use for the tool group.",
- "required": false,
- "schema": {
- "$ref": "#/components/schemas/URL"
- }
}
],
"deprecated": false
- }
+ },
+ "delete": {
+ "responses": {
+ "200": {
+ "description": "OK"
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "Shields"
+ ],
+ "summary": "Unregister a shield.",
+ "description": "Unregister a shield.",
+ "parameters": [
+ {
+ "name": "identifier",
+ "in": "path",
+ "description": "The identifier of the shield to unregister.",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "deprecated": false
+ }
+ },
+ "/v1/synthetic-data-generation/generate": {
+ "post": {
+ "responses": {
+ "200": {
+ "description": "Response containing filtered synthetic data samples and optional statistics",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/SyntheticDataGenerationResponse"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "SyntheticDataGeneration (Coming Soon)"
+ ],
+ "summary": "Generate synthetic data based on input dialogs and apply filtering.",
+ "description": "Generate synthetic data based on input dialogs and apply filtering.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/SyntheticDataGenerateRequest"
+ }
+ }
+ },
+ "required": true
+ },
+ "deprecated": false
+ }
+ },
+ "/v1/telemetry/events": {
+ "post": {
+ "responses": {
+ "200": {
+ "description": "OK"
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "Telemetry"
+ ],
+ "summary": "Log an event.",
+ "description": "Log an event.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/LogEventRequest"
+ }
+ }
+ },
+ "required": true
+ },
+ "deprecated": false
+ }
+ },
+ "/v1/tool-runtime/invoke": {
+ "post": {
+ "responses": {
+ "200": {
+ "description": "A ToolInvocationResult.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ToolInvocationResult"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "ToolRuntime"
+ ],
+ "summary": "Run a tool with the given arguments.",
+ "description": "Run a tool with the given arguments.",
+ "parameters": [],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/InvokeToolRequest"
+ }
+ }
+ },
+ "required": true
+ },
+ "deprecated": false
+ }
+ },
+ "/v1/tool-runtime/list-tools": {
+ "get": {
+ "responses": {
+ "200": {
+ "description": "A ListToolDefsResponse.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ListToolDefsResponse"
+ }
+ }
+ }
+ },
+ "400": {
+ "$ref": "#/components/responses/BadRequest400"
+ },
+ "429": {
+ "$ref": "#/components/responses/TooManyRequests429"
+ },
+ "500": {
+ "$ref": "#/components/responses/InternalServerError500"
+ },
+ "default": {
+ "$ref": "#/components/responses/DefaultError"
+ }
+ },
+ "tags": [
+ "ToolRuntime"
+ ],
+ "summary": "List all tools in the runtime.",
+ "description": "List all tools in the runtime.",
+ "parameters": [
+ {
+ "name": "tool_group_id",
+ "in": "query",
+ "description": "The ID of the tool group to list tools for.",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "mcp_endpoint",
+ "in": "query",
+ "description": "The MCP endpoint to use for the tool group.",
+ "required": false,
+ "schema": {
+ "$ref": "#/components/schemas/URL"
+ }
+ }
+ ],
+ "deprecated": false
+ }
},
"/v1/tool-runtime/rag-tool/insert": {
"post": {
@@ -7115,877 +7592,988 @@
"title": "OpenAICompletionChoice",
"description": "A choice from an OpenAI-compatible completion response."
},
- "OpenaiEmbeddingsRequest": {
- "type": "object",
- "properties": {
- "model": {
- "type": "string",
- "description": "The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint."
+ "ConversationItem": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMessage"
},
- "input": {
- "oneOf": [
- {
- "type": "string"
- },
- {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- ],
- "description": "Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings."
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
},
- "encoding_format": {
- "type": "string",
- "description": "(Optional) The format to return the embeddings in. Can be either \"float\" or \"base64\". Defaults to \"float\"."
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
},
- "dimensions": {
- "type": "integer",
- "description": "(Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models."
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
},
- "user": {
- "type": "string",
- "description": "(Optional) A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse."
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools"
}
- },
- "additionalProperties": false,
- "required": [
- "model",
- "input"
],
- "title": "OpenaiEmbeddingsRequest"
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "message": "#/components/schemas/OpenAIResponseMessage",
+ "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall",
+ "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall",
+ "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall",
+ "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall",
+ "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools"
+ }
+ }
},
- "OpenAIEmbeddingData": {
+ "OpenAIResponseAnnotationCitation": {
"type": "object",
"properties": {
- "object": {
+ "type": {
"type": "string",
- "const": "embedding",
- "default": "embedding",
- "description": "The object type, which will be \"embedding\""
- },
- "embedding": {
- "oneOf": [
- {
- "type": "array",
- "items": {
- "type": "number"
- }
- },
- {
- "type": "string"
- }
- ],
- "description": "The embedding vector as a list of floats (when encoding_format=\"float\") or as a base64-encoded string (when encoding_format=\"base64\")"
+ "const": "url_citation",
+ "default": "url_citation",
+ "description": "Annotation type identifier, always \"url_citation\""
},
- "index": {
- "type": "integer",
- "description": "The index of the embedding in the input list"
- }
- },
- "additionalProperties": false,
- "required": [
- "object",
- "embedding",
- "index"
- ],
- "title": "OpenAIEmbeddingData",
- "description": "A single embedding data object from an OpenAI-compatible embeddings response."
- },
- "OpenAIEmbeddingUsage": {
- "type": "object",
- "properties": {
- "prompt_tokens": {
+ "end_index": {
"type": "integer",
- "description": "The number of tokens in the input"
+ "description": "End position of the citation span in the content"
},
- "total_tokens": {
+ "start_index": {
"type": "integer",
- "description": "The total number of tokens used"
- }
- },
- "additionalProperties": false,
- "required": [
- "prompt_tokens",
- "total_tokens"
- ],
- "title": "OpenAIEmbeddingUsage",
- "description": "Usage information for an OpenAI-compatible embeddings response."
- },
- "OpenAIEmbeddingsResponse": {
- "type": "object",
- "properties": {
- "object": {
- "type": "string",
- "const": "list",
- "default": "list",
- "description": "The object type, which will be \"list\""
- },
- "data": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIEmbeddingData"
- },
- "description": "List of embedding data objects"
+ "description": "Start position of the citation span in the content"
},
- "model": {
+ "title": {
"type": "string",
- "description": "The model that was used to generate the embeddings"
+ "description": "Title of the referenced web resource"
},
- "usage": {
- "$ref": "#/components/schemas/OpenAIEmbeddingUsage",
- "description": "Usage information"
+ "url": {
+ "type": "string",
+ "description": "URL of the referenced web resource"
}
},
"additionalProperties": false,
"required": [
- "object",
- "data",
- "model",
- "usage"
- ],
- "title": "OpenAIEmbeddingsResponse",
- "description": "Response from an OpenAI-compatible embeddings request."
- },
- "OpenAIFilePurpose": {
- "type": "string",
- "enum": [
- "assistants",
- "batch"
+ "type",
+ "end_index",
+ "start_index",
+ "title",
+ "url"
],
- "title": "OpenAIFilePurpose",
- "description": "Valid purpose values for OpenAI Files API."
+ "title": "OpenAIResponseAnnotationCitation",
+ "description": "URL citation annotation for referencing external web resources."
},
- "ListOpenAIFileResponse": {
+ "OpenAIResponseAnnotationContainerFileCitation": {
"type": "object",
"properties": {
- "data": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIFileObject"
- },
- "description": "List of file objects"
+ "type": {
+ "type": "string",
+ "const": "container_file_citation",
+ "default": "container_file_citation"
},
- "has_more": {
- "type": "boolean",
- "description": "Whether there are more files available beyond this page"
+ "container_id": {
+ "type": "string"
},
- "first_id": {
- "type": "string",
- "description": "ID of the first file in the list for pagination"
+ "end_index": {
+ "type": "integer"
},
- "last_id": {
- "type": "string",
- "description": "ID of the last file in the list for pagination"
+ "file_id": {
+ "type": "string"
},
- "object": {
- "type": "string",
- "const": "list",
- "default": "list",
- "description": "The object type, which is always \"list\""
+ "filename": {
+ "type": "string"
+ },
+ "start_index": {
+ "type": "integer"
}
},
"additionalProperties": false,
"required": [
- "data",
- "has_more",
- "first_id",
- "last_id",
- "object"
+ "type",
+ "container_id",
+ "end_index",
+ "file_id",
+ "filename",
+ "start_index"
],
- "title": "ListOpenAIFileResponse",
- "description": "Response for listing files in OpenAI Files API."
+ "title": "OpenAIResponseAnnotationContainerFileCitation"
},
- "OpenAIFileObject": {
+ "OpenAIResponseAnnotationFileCitation": {
"type": "object",
"properties": {
- "object": {
+ "type": {
"type": "string",
- "const": "file",
- "default": "file",
- "description": "The object type, which is always \"file\""
+ "const": "file_citation",
+ "default": "file_citation",
+ "description": "Annotation type identifier, always \"file_citation\""
},
- "id": {
+ "file_id": {
"type": "string",
- "description": "The file identifier, which can be referenced in the API endpoints"
- },
- "bytes": {
- "type": "integer",
- "description": "The size of the file, in bytes"
- },
- "created_at": {
- "type": "integer",
- "description": "The Unix timestamp (in seconds) for when the file was created"
- },
- "expires_at": {
- "type": "integer",
- "description": "The Unix timestamp (in seconds) for when the file expires"
+ "description": "Unique identifier of the referenced file"
},
"filename": {
"type": "string",
- "description": "The name of the file"
+ "description": "Name of the referenced file"
},
- "purpose": {
- "type": "string",
- "enum": [
- "assistants",
- "batch"
- ],
- "description": "The intended purpose of the file"
+ "index": {
+ "type": "integer",
+ "description": "Position index of the citation within the content"
}
},
"additionalProperties": false,
"required": [
- "object",
- "id",
- "bytes",
- "created_at",
- "expires_at",
+ "type",
+ "file_id",
"filename",
- "purpose"
+ "index"
],
- "title": "OpenAIFileObject",
- "description": "OpenAI File object as defined in the OpenAI Files API."
+ "title": "OpenAIResponseAnnotationFileCitation",
+ "description": "File citation annotation for referencing specific files in response content."
},
- "ExpiresAfter": {
+ "OpenAIResponseAnnotationFilePath": {
"type": "object",
"properties": {
- "anchor": {
+ "type": {
"type": "string",
- "const": "created_at"
+ "const": "file_path",
+ "default": "file_path"
},
- "seconds": {
+ "file_id": {
+ "type": "string"
+ },
+ "index": {
"type": "integer"
}
},
"additionalProperties": false,
"required": [
- "anchor",
- "seconds"
+ "type",
+ "file_id",
+ "index"
],
- "title": "ExpiresAfter",
- "description": "Control expiration of uploaded files.\nParams:\n - anchor, must be \"created_at\"\n - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)"
+ "title": "OpenAIResponseAnnotationFilePath"
},
- "OpenAIFileDeleteResponse": {
- "type": "object",
- "properties": {
- "id": {
- "type": "string",
- "description": "The file identifier that was deleted"
- },
- "object": {
+ "OpenAIResponseAnnotations": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseAnnotationFileCitation"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseAnnotationCitation"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseAnnotationFilePath"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "file_citation": "#/components/schemas/OpenAIResponseAnnotationFileCitation",
+ "url_citation": "#/components/schemas/OpenAIResponseAnnotationCitation",
+ "container_file_citation": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation",
+ "file_path": "#/components/schemas/OpenAIResponseAnnotationFilePath"
+ }
+ }
+ },
+ "OpenAIResponseInputMessageContent": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText",
+ "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage"
+ }
+ }
+ },
+ "OpenAIResponseInputMessageContentImage": {
+ "type": "object",
+ "properties": {
+ "detail": {
+ "oneOf": [
+ {
+ "type": "string",
+ "const": "low"
+ },
+ {
+ "type": "string",
+ "const": "high"
+ },
+ {
+ "type": "string",
+ "const": "auto"
+ }
+ ],
+ "default": "auto",
+ "description": "Level of detail for image processing, can be \"low\", \"high\", or \"auto\""
+ },
+ "type": {
"type": "string",
- "const": "file",
- "default": "file",
- "description": "The object type, which is always \"file\""
+ "const": "input_image",
+ "default": "input_image",
+ "description": "Content type identifier, always \"input_image\""
},
- "deleted": {
- "type": "boolean",
- "description": "Whether the file was successfully deleted"
+ "image_url": {
+ "type": "string",
+ "description": "(Optional) URL of the image content"
}
},
"additionalProperties": false,
"required": [
- "id",
- "object",
- "deleted"
+ "detail",
+ "type"
],
- "title": "OpenAIFileDeleteResponse",
- "description": "Response for deleting a file in OpenAI Files API."
- },
- "Response": {
- "type": "object",
- "title": "Response"
+ "title": "OpenAIResponseInputMessageContentImage",
+ "description": "Image content for input messages in OpenAI response format."
},
- "HealthInfo": {
+ "OpenAIResponseInputMessageContentText": {
"type": "object",
"properties": {
- "status": {
+ "text": {
"type": "string",
- "enum": [
- "OK",
- "Error",
- "Not Implemented"
- ],
- "description": "Current health status of the service"
+ "description": "The text content of the input message"
+ },
+ "type": {
+ "type": "string",
+ "const": "input_text",
+ "default": "input_text",
+ "description": "Content type identifier, always \"input_text\""
}
},
"additionalProperties": false,
"required": [
- "status"
+ "text",
+ "type"
],
- "title": "HealthInfo",
- "description": "Health status information for the service."
+ "title": "OpenAIResponseInputMessageContentText",
+ "description": "Text content for input messages in OpenAI response format."
},
- "RouteInfo": {
+ "OpenAIResponseMessage": {
"type": "object",
"properties": {
- "route": {
- "type": "string",
- "description": "The API endpoint path"
+ "content": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseInputMessageContent"
+ }
+ },
+ {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageContent"
+ }
+ }
+ ]
},
- "method": {
+ "role": {
+ "oneOf": [
+ {
+ "type": "string",
+ "const": "system"
+ },
+ {
+ "type": "string",
+ "const": "developer"
+ },
+ {
+ "type": "string",
+ "const": "user"
+ },
+ {
+ "type": "string",
+ "const": "assistant"
+ }
+ ]
+ },
+ "type": {
"type": "string",
- "description": "HTTP method for the route"
+ "const": "message",
+ "default": "message"
},
- "provider_types": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "List of provider types that implement this route"
+ "id": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
}
},
"additionalProperties": false,
"required": [
- "route",
- "method",
- "provider_types"
+ "content",
+ "role",
+ "type"
],
- "title": "RouteInfo",
- "description": "Information about an API route including its path, method, and implementing providers."
+ "title": "OpenAIResponseMessage",
+ "description": "Corresponds to the various Message types in the Responses API. They are all under one type because the Responses API gives them all the same \"type\" value, and there is no way to tell them apart in certain scenarios."
},
- "ListRoutesResponse": {
+ "OpenAIResponseOutputMessageContent": {
"type": "object",
"properties": {
- "data": {
+ "text": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string",
+ "const": "output_text",
+ "default": "output_text"
+ },
+ "annotations": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/RouteInfo"
- },
- "description": "List of available route information objects"
+ "$ref": "#/components/schemas/OpenAIResponseAnnotations"
+ }
}
},
"additionalProperties": false,
"required": [
- "data"
+ "text",
+ "type",
+ "annotations"
],
- "title": "ListRoutesResponse",
- "description": "Response containing a list of all available API routes."
+ "title": "OpenAIResponseOutputMessageContentOutputText"
},
- "Model": {
+ "OpenAIResponseOutputMessageFileSearchToolCall": {
"type": "object",
"properties": {
- "identifier": {
+ "id": {
"type": "string",
- "description": "Unique identifier for this resource in llama stack"
+ "description": "Unique identifier for this tool call"
},
- "provider_resource_id": {
- "type": "string",
- "description": "Unique identifier for this resource in the provider"
+ "queries": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of search queries executed"
},
- "provider_id": {
+ "status": {
"type": "string",
- "description": "ID of the provider that owns this resource"
+ "description": "Current status of the file search operation"
},
"type": {
"type": "string",
- "enum": [
- "model",
- "shield",
- "vector_db",
- "dataset",
- "scoring_function",
- "benchmark",
- "tool",
- "tool_group",
- "prompt"
- ],
- "const": "model",
- "default": "model",
- "description": "The resource type, always 'model' for model resources"
+ "const": "file_search_call",
+ "default": "file_search_call",
+ "description": "Tool call type identifier, always \"file_search_call\""
},
- "metadata": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
+ "results": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "attributes": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "(Optional) Key-value attributes associated with the file"
},
- {
- "type": "array"
+ "file_id": {
+ "type": "string",
+ "description": "Unique identifier of the file containing the result"
},
- {
- "type": "object"
+ "filename": {
+ "type": "string",
+ "description": "Name of the file containing the result"
+ },
+ "score": {
+ "type": "number",
+ "description": "Relevance score for this search result (between 0 and 1)"
+ },
+ "text": {
+ "type": "string",
+ "description": "Text content of the search result"
}
- ]
+ },
+ "additionalProperties": false,
+ "required": [
+ "attributes",
+ "file_id",
+ "filename",
+ "score",
+ "text"
+ ],
+ "title": "OpenAIResponseOutputMessageFileSearchToolCallResults",
+ "description": "Search results returned by the file search operation."
},
- "description": "Any additional metadata for this model"
- },
- "model_type": {
- "$ref": "#/components/schemas/ModelType",
- "default": "llm",
- "description": "The type of model (LLM or embedding model)"
- }
- },
- "additionalProperties": false,
- "required": [
- "identifier",
- "provider_id",
- "type",
- "metadata",
- "model_type"
- ],
- "title": "Model",
- "description": "A model resource representing an AI model registered in Llama Stack."
- },
- "ModelType": {
- "type": "string",
- "enum": [
- "llm",
- "embedding"
- ],
- "title": "ModelType",
- "description": "Enumeration of supported model types in Llama Stack."
- },
- "ListModelsResponse": {
- "type": "object",
- "properties": {
- "data": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Model"
- }
+ "description": "(Optional) Search results returned by the file search operation"
}
},
"additionalProperties": false,
"required": [
- "data"
+ "id",
+ "queries",
+ "status",
+ "type"
],
- "title": "ListModelsResponse"
+ "title": "OpenAIResponseOutputMessageFileSearchToolCall",
+ "description": "File search tool call output message for OpenAI responses."
},
- "RegisterModelRequest": {
+ "OpenAIResponseOutputMessageFunctionToolCall": {
"type": "object",
"properties": {
- "model_id": {
+ "call_id": {
"type": "string",
- "description": "The identifier of the model to register."
+ "description": "Unique identifier for the function call"
},
- "provider_model_id": {
+ "name": {
"type": "string",
- "description": "The identifier of the model in the provider."
+ "description": "Name of the function being called"
},
- "provider_id": {
+ "arguments": {
"type": "string",
- "description": "The identifier of the provider."
+ "description": "JSON string containing the function arguments"
},
- "metadata": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
- }
- ]
- },
- "description": "Any additional metadata for this model."
+ "type": {
+ "type": "string",
+ "const": "function_call",
+ "default": "function_call",
+ "description": "Tool call type identifier, always \"function_call\""
},
- "model_type": {
- "$ref": "#/components/schemas/ModelType",
- "description": "The type of model to register."
- }
- },
- "additionalProperties": false,
- "required": [
- "model_id"
- ],
- "title": "RegisterModelRequest"
- },
- "RunModerationRequest": {
- "type": "object",
- "properties": {
- "input": {
- "oneOf": [
- {
- "type": "string"
- },
- {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- ],
- "description": "Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models."
+ "id": {
+ "type": "string",
+ "description": "(Optional) Additional identifier for the tool call"
},
- "model": {
+ "status": {
"type": "string",
- "description": "The content moderation model you would like to use."
+ "description": "(Optional) Current status of the function call execution"
}
},
"additionalProperties": false,
"required": [
- "input",
- "model"
+ "call_id",
+ "name",
+ "arguments",
+ "type"
],
- "title": "RunModerationRequest"
+ "title": "OpenAIResponseOutputMessageFunctionToolCall",
+ "description": "Function tool call output message for OpenAI responses."
},
- "ModerationObject": {
+ "OpenAIResponseOutputMessageMCPCall": {
"type": "object",
"properties": {
"id": {
"type": "string",
- "description": "The unique identifier for the moderation request."
+ "description": "Unique identifier for this MCP call"
},
- "model": {
+ "type": {
"type": "string",
- "description": "The model used to generate the moderation results."
+ "const": "mcp_call",
+ "default": "mcp_call",
+ "description": "Tool call type identifier, always \"mcp_call\""
},
- "results": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ModerationObjectResults"
- },
- "description": "A list of moderation objects"
+ "arguments": {
+ "type": "string",
+ "description": "JSON string containing the MCP call arguments"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the MCP method being called"
+ },
+ "server_label": {
+ "type": "string",
+ "description": "Label identifying the MCP server handling the call"
+ },
+ "error": {
+ "type": "string",
+ "description": "(Optional) Error message if the MCP call failed"
+ },
+ "output": {
+ "type": "string",
+ "description": "(Optional) Output result from the successful MCP call"
}
},
"additionalProperties": false,
"required": [
"id",
- "model",
- "results"
+ "type",
+ "arguments",
+ "name",
+ "server_label"
],
- "title": "ModerationObject",
- "description": "A moderation object."
+ "title": "OpenAIResponseOutputMessageMCPCall",
+ "description": "Model Context Protocol (MCP) call output message for OpenAI responses."
},
- "ModerationObjectResults": {
+ "OpenAIResponseOutputMessageMCPListTools": {
"type": "object",
"properties": {
- "flagged": {
- "type": "boolean",
- "description": "Whether any of the below categories are flagged."
- },
- "categories": {
- "type": "object",
- "additionalProperties": {
- "type": "boolean"
- },
- "description": "A list of the categories, and whether they are flagged or not."
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for this MCP list tools operation"
},
- "category_applied_input_types": {
- "type": "object",
- "additionalProperties": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "description": "A list of the categories along with the input type(s) that the score applies to."
- },
- "category_scores": {
- "type": "object",
- "additionalProperties": {
- "type": "number"
- },
- "description": "A list of the categories along with their scores as predicted by model."
+ "type": {
+ "type": "string",
+ "const": "mcp_list_tools",
+ "default": "mcp_list_tools",
+ "description": "Tool call type identifier, always \"mcp_list_tools\""
},
- "user_message": {
- "type": "string"
+ "server_label": {
+ "type": "string",
+ "description": "Label identifying the MCP server providing the tools"
},
- "metadata": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
+ "tools": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "input_schema": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "JSON schema defining the tool's input parameters"
},
- {
- "type": "array"
+ "name": {
+ "type": "string",
+ "description": "Name of the tool"
},
- {
- "type": "object"
+ "description": {
+ "type": "string",
+ "description": "(Optional) Description of what the tool does"
}
- ]
- }
+ },
+ "additionalProperties": false,
+ "required": [
+ "input_schema",
+ "name"
+ ],
+ "title": "MCPListToolsTool",
+ "description": "Tool definition returned by MCP list tools operation."
+ },
+ "description": "List of available tools provided by the MCP server"
}
},
"additionalProperties": false,
"required": [
- "flagged",
- "metadata"
+ "id",
+ "type",
+ "server_label",
+ "tools"
],
- "title": "ModerationObjectResults",
- "description": "A moderation object."
+ "title": "OpenAIResponseOutputMessageMCPListTools",
+ "description": "MCP list tools output message containing available tools from an MCP server."
},
- "Prompt": {
+ "OpenAIResponseOutputMessageWebSearchToolCall": {
"type": "object",
"properties": {
- "prompt": {
+ "id": {
"type": "string",
- "description": "The system prompt text with variable placeholders. Variables are only supported when using the Responses API."
- },
- "version": {
- "type": "integer",
- "description": "Version (integer starting at 1, incremented on save)"
+ "description": "Unique identifier for this tool call"
},
- "prompt_id": {
+ "status": {
"type": "string",
- "description": "Unique identifier formatted as 'pmpt_<48-digit-hash>'"
+ "description": "Current status of the web search operation"
},
- "variables": {
+ "type": {
+ "type": "string",
+ "const": "web_search_call",
+ "default": "web_search_call",
+ "description": "Tool call type identifier, always \"web_search_call\""
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "id",
+ "status",
+ "type"
+ ],
+ "title": "OpenAIResponseOutputMessageWebSearchToolCall",
+ "description": "Web search tool call output message for OpenAI responses."
+ },
+ "CreateConversationRequest": {
+ "type": "object",
+ "properties": {
+ "items": {
"type": "array",
"items": {
- "type": "string"
+ "$ref": "#/components/schemas/ConversationItem"
},
- "description": "List of prompt variable names that can be used in the prompt template"
+ "description": "Initial items to include in the conversation context."
},
- "is_default": {
- "type": "boolean",
- "default": false,
- "description": "Boolean indicating whether this version is the default version for this prompt"
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Set of key-value pairs that can be attached to an object."
}
},
"additionalProperties": false,
- "required": [
- "version",
- "prompt_id",
- "variables",
- "is_default"
- ],
- "title": "Prompt",
- "description": "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack."
+ "title": "CreateConversationRequest"
},
- "ListPromptsResponse": {
+ "Conversation": {
"type": "object",
"properties": {
- "data": {
+ "id": {
+ "type": "string"
+ },
+ "object": {
+ "type": "string",
+ "const": "conversation",
+ "default": "conversation"
+ },
+ "created_at": {
+ "type": "integer"
+ },
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "items": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/Prompt"
+            "type": "object",
+            "title": "ConversationItem",
+            "description": "A single item in the conversation context, represented as a free-form JSON object."
}
}
},
"additionalProperties": false,
"required": [
- "data"
+ "id",
+ "object",
+ "created_at"
],
- "title": "ListPromptsResponse",
- "description": "Response model to list prompts."
+ "title": "Conversation",
+ "description": "OpenAI-compatible conversation object."
},
- "CreatePromptRequest": {
+ "UpdateConversationRequest": {
"type": "object",
"properties": {
- "prompt": {
- "type": "string",
- "description": "The prompt text content with variable placeholders."
- },
- "variables": {
- "type": "array",
- "items": {
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
"type": "string"
},
- "description": "List of variable names that can be used in the prompt template."
+ "description": "Set of key-value pairs that can be attached to an object."
}
},
"additionalProperties": false,
"required": [
- "prompt"
+ "metadata"
],
- "title": "CreatePromptRequest"
+ "title": "UpdateConversationRequest"
},
- "UpdatePromptRequest": {
+ "ConversationDeletedResource": {
"type": "object",
"properties": {
- "prompt": {
+ "id": {
+ "type": "string"
+ },
+ "object": {
"type": "string",
- "description": "The updated prompt text content."
+ "default": "conversation.deleted"
},
- "version": {
- "type": "integer",
- "description": "The current version of the prompt being updated."
+ "deleted": {
+ "type": "boolean",
+ "default": true
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "id",
+ "object",
+ "deleted"
+ ],
+ "title": "ConversationDeletedResource",
+ "description": "Response for deleted conversation."
+ },
+ "ConversationItemList": {
+ "type": "object",
+ "properties": {
+ "object": {
+ "type": "string",
+ "default": "list"
},
- "variables": {
+ "data": {
"type": "array",
"items": {
- "type": "string"
- },
- "description": "Updated list of variable names that can be used in the prompt template."
+ "$ref": "#/components/schemas/ConversationItem"
+ }
},
- "set_as_default": {
+ "first_id": {
+ "type": "string"
+ },
+ "last_id": {
+ "type": "string"
+ },
+ "has_more": {
"type": "boolean",
- "description": "Set the new version as the default (default=True)."
+ "default": false
}
},
"additionalProperties": false,
"required": [
- "prompt",
- "version",
- "set_as_default"
+ "object",
+ "data",
+ "has_more"
],
- "title": "UpdatePromptRequest"
+ "title": "ConversationItemList",
+ "description": "List of conversation items with pagination."
},
- "SetDefaultVersionRequest": {
+ "AddItemsRequest": {
"type": "object",
"properties": {
- "version": {
- "type": "integer",
- "description": "The version to set as default."
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ConversationItem"
+ },
+ "description": "Items to include in the conversation context."
}
},
"additionalProperties": false,
"required": [
- "version"
+ "items"
],
- "title": "SetDefaultVersionRequest"
+ "title": "AddItemsRequest"
},
- "ProviderInfo": {
+ "ConversationItemDeletedResource": {
"type": "object",
"properties": {
- "api": {
- "type": "string",
- "description": "The API name this provider implements"
+ "id": {
+ "type": "string"
},
- "provider_id": {
+ "object": {
"type": "string",
- "description": "Unique identifier for the provider"
+ "default": "conversation.item.deleted"
},
- "provider_type": {
+ "deleted": {
+ "type": "boolean",
+ "default": true
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "id",
+ "object",
+ "deleted"
+ ],
+ "title": "ConversationItemDeletedResource",
+ "description": "Response for deleted conversation item."
+ },
+ "OpenaiEmbeddingsRequest": {
+ "type": "object",
+ "properties": {
+ "model": {
"type": "string",
- "description": "The type of provider implementation"
+ "description": "The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint."
},
- "config": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
+ "input": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "items": {
"type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
}
- ]
- },
- "description": "Configuration parameters for the provider"
+ }
+ ],
+ "description": "Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings."
},
- "health": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
+ "encoding_format": {
+ "type": "string",
+ "description": "(Optional) The format to return the embeddings in. Can be either \"float\" or \"base64\". Defaults to \"float\"."
+ },
+ "dimensions": {
+ "type": "integer",
+ "description": "(Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models."
+ },
+ "user": {
+ "type": "string",
+ "description": "(Optional) A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse."
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "model",
+ "input"
+ ],
+ "title": "OpenaiEmbeddingsRequest"
+ },
+ "OpenAIEmbeddingData": {
+ "type": "object",
+ "properties": {
+ "object": {
+ "type": "string",
+ "const": "embedding",
+ "default": "embedding",
+ "description": "The object type, which will be \"embedding\""
+ },
+ "embedding": {
+ "oneOf": [
+ {
+ "type": "array",
+ "items": {
"type": "number"
- },
- {
- "type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
}
- ]
- },
- "description": "Current health status of the provider"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "description": "The embedding vector as a list of floats (when encoding_format=\"float\") or as a base64-encoded string (when encoding_format=\"base64\")"
+ },
+ "index": {
+ "type": "integer",
+ "description": "The index of the embedding in the input list"
}
},
"additionalProperties": false,
"required": [
- "api",
- "provider_id",
- "provider_type",
- "config",
- "health"
+ "object",
+ "embedding",
+ "index"
],
- "title": "ProviderInfo",
- "description": "Information about a registered provider including its configuration and health status."
+ "title": "OpenAIEmbeddingData",
+ "description": "A single embedding data object from an OpenAI-compatible embeddings response."
},
- "ListProvidersResponse": {
+ "OpenAIEmbeddingUsage": {
+ "type": "object",
+ "properties": {
+ "prompt_tokens": {
+ "type": "integer",
+ "description": "The number of tokens in the input"
+ },
+ "total_tokens": {
+ "type": "integer",
+ "description": "The total number of tokens used"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "prompt_tokens",
+ "total_tokens"
+ ],
+ "title": "OpenAIEmbeddingUsage",
+ "description": "Usage information for an OpenAI-compatible embeddings response."
+ },
+ "OpenAIEmbeddingsResponse": {
"type": "object",
"properties": {
+ "object": {
+ "type": "string",
+ "const": "list",
+ "default": "list",
+ "description": "The object type, which will be \"list\""
+ },
"data": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/ProviderInfo"
+ "$ref": "#/components/schemas/OpenAIEmbeddingData"
},
- "description": "List of provider information objects"
+ "description": "List of embedding data objects"
+ },
+ "model": {
+ "type": "string",
+ "description": "The model that was used to generate the embeddings"
+ },
+ "usage": {
+ "$ref": "#/components/schemas/OpenAIEmbeddingUsage",
+ "description": "Usage information"
}
},
"additionalProperties": false,
"required": [
- "data"
+ "object",
+ "data",
+ "model",
+ "usage"
],
- "title": "ListProvidersResponse",
- "description": "Response containing a list of all available providers."
+ "title": "OpenAIEmbeddingsResponse",
+ "description": "Response from an OpenAI-compatible embeddings request."
},
- "ListOpenAIResponseObject": {
+ "OpenAIFilePurpose": {
+ "type": "string",
+ "enum": [
+ "assistants",
+ "batch"
+ ],
+ "title": "OpenAIFilePurpose",
+ "description": "Valid purpose values for OpenAI Files API."
+ },
+ "ListOpenAIFileResponse": {
"type": "object",
"properties": {
"data": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseObjectWithInput"
+ "$ref": "#/components/schemas/OpenAIFileObject"
},
- "description": "List of response objects with their input context"
+ "description": "List of file objects"
},
"has_more": {
"type": "boolean",
- "description": "Whether there are more results available beyond this page"
+ "description": "Whether there are more files available beyond this page"
},
"first_id": {
"type": "string",
- "description": "Identifier of the first item in this page"
+ "description": "ID of the first file in the list for pagination"
},
"last_id": {
"type": "string",
- "description": "Identifier of the last item in this page"
+ "description": "ID of the last file in the list for pagination"
},
"object": {
"type": "string",
"const": "list",
"default": "list",
- "description": "Object type identifier, always \"list\""
+ "description": "The object type, which is always \"list\""
}
},
"additionalProperties": false,
@@ -7996,381 +8584,332 @@
"last_id",
"object"
],
- "title": "ListOpenAIResponseObject",
- "description": "Paginated list of OpenAI response objects with navigation metadata."
+ "title": "ListOpenAIFileResponse",
+ "description": "Response for listing files in OpenAI Files API."
},
- "OpenAIResponseAnnotationCitation": {
+ "OpenAIFileObject": {
"type": "object",
"properties": {
- "type": {
+ "object": {
"type": "string",
- "const": "url_citation",
- "default": "url_citation",
- "description": "Annotation type identifier, always \"url_citation\""
+ "const": "file",
+ "default": "file",
+ "description": "The object type, which is always \"file\""
},
- "end_index": {
+ "id": {
+ "type": "string",
+ "description": "The file identifier, which can be referenced in the API endpoints"
+ },
+ "bytes": {
"type": "integer",
- "description": "End position of the citation span in the content"
+ "description": "The size of the file, in bytes"
},
- "start_index": {
+ "created_at": {
"type": "integer",
- "description": "Start position of the citation span in the content"
+ "description": "The Unix timestamp (in seconds) for when the file was created"
},
- "title": {
+ "expires_at": {
+ "type": "integer",
+ "description": "The Unix timestamp (in seconds) for when the file expires"
+ },
+ "filename": {
"type": "string",
- "description": "Title of the referenced web resource"
+ "description": "The name of the file"
},
- "url": {
+ "purpose": {
"type": "string",
- "description": "URL of the referenced web resource"
+ "enum": [
+ "assistants",
+ "batch"
+ ],
+ "description": "The intended purpose of the file"
}
},
"additionalProperties": false,
"required": [
- "type",
- "end_index",
- "start_index",
- "title",
- "url"
+ "object",
+ "id",
+ "bytes",
+ "created_at",
+ "expires_at",
+ "filename",
+ "purpose"
],
- "title": "OpenAIResponseAnnotationCitation",
- "description": "URL citation annotation for referencing external web resources."
+ "title": "OpenAIFileObject",
+ "description": "OpenAI File object as defined in the OpenAI Files API."
},
- "OpenAIResponseAnnotationContainerFileCitation": {
+ "ExpiresAfter": {
"type": "object",
"properties": {
- "type": {
+ "anchor": {
"type": "string",
- "const": "container_file_citation",
- "default": "container_file_citation"
- },
- "container_id": {
- "type": "string"
- },
- "end_index": {
- "type": "integer"
- },
- "file_id": {
- "type": "string"
- },
- "filename": {
- "type": "string"
+ "const": "created_at"
},
- "start_index": {
+ "seconds": {
"type": "integer"
}
},
"additionalProperties": false,
"required": [
- "type",
- "container_id",
- "end_index",
- "file_id",
- "filename",
- "start_index"
+ "anchor",
+ "seconds"
],
- "title": "OpenAIResponseAnnotationContainerFileCitation"
+ "title": "ExpiresAfter",
+ "description": "Control expiration of uploaded files.\nParams:\n - anchor, must be \"created_at\"\n - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)"
},
- "OpenAIResponseAnnotationFileCitation": {
+ "OpenAIFileDeleteResponse": {
"type": "object",
"properties": {
- "type": {
- "type": "string",
- "const": "file_citation",
- "default": "file_citation",
- "description": "Annotation type identifier, always \"file_citation\""
- },
- "file_id": {
+ "id": {
"type": "string",
- "description": "Unique identifier of the referenced file"
+ "description": "The file identifier that was deleted"
},
- "filename": {
+ "object": {
"type": "string",
- "description": "Name of the referenced file"
+ "const": "file",
+ "default": "file",
+ "description": "The object type, which is always \"file\""
},
- "index": {
- "type": "integer",
- "description": "Position index of the citation within the content"
+ "deleted": {
+ "type": "boolean",
+ "description": "Whether the file was successfully deleted"
}
},
"additionalProperties": false,
"required": [
- "type",
- "file_id",
- "filename",
- "index"
+ "id",
+ "object",
+ "deleted"
],
- "title": "OpenAIResponseAnnotationFileCitation",
- "description": "File citation annotation for referencing specific files in response content."
+ "title": "OpenAIFileDeleteResponse",
+ "description": "Response for deleting a file in OpenAI Files API."
},
- "OpenAIResponseAnnotationFilePath": {
+ "Response": {
+ "type": "object",
+ "title": "Response"
+ },
+ "HealthInfo": {
"type": "object",
"properties": {
- "type": {
+ "status": {
"type": "string",
- "const": "file_path",
- "default": "file_path"
- },
- "file_id": {
- "type": "string"
- },
- "index": {
- "type": "integer"
+ "enum": [
+ "OK",
+ "Error",
+ "Not Implemented"
+ ],
+ "description": "Current health status of the service"
}
},
"additionalProperties": false,
"required": [
- "type",
- "file_id",
- "index"
- ],
- "title": "OpenAIResponseAnnotationFilePath"
- },
- "OpenAIResponseAnnotations": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/OpenAIResponseAnnotationFileCitation"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseAnnotationCitation"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseAnnotationFilePath"
- }
+ "status"
],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "file_citation": "#/components/schemas/OpenAIResponseAnnotationFileCitation",
- "url_citation": "#/components/schemas/OpenAIResponseAnnotationCitation",
- "container_file_citation": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation",
- "file_path": "#/components/schemas/OpenAIResponseAnnotationFilePath"
- }
- }
+ "title": "HealthInfo",
+ "description": "Health status information for the service."
},
- "OpenAIResponseError": {
+ "RouteInfo": {
"type": "object",
"properties": {
- "code": {
+ "route": {
"type": "string",
- "description": "Error code identifying the type of failure"
+ "description": "The API endpoint path"
},
- "message": {
+ "method": {
"type": "string",
- "description": "Human-readable error message describing the failure"
+ "description": "HTTP method for the route"
+ },
+ "provider_types": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of provider types that implement this route"
}
},
"additionalProperties": false,
"required": [
- "code",
- "message"
+ "route",
+ "method",
+ "provider_types"
],
- "title": "OpenAIResponseError",
- "description": "Error details for failed OpenAI response requests."
- },
- "OpenAIResponseInput": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseMessage"
- }
- ]
+ "title": "RouteInfo",
+ "description": "Information about an API route including its path, method, and implementing providers."
},
- "OpenAIResponseInputFunctionToolCallOutput": {
+ "ListRoutesResponse": {
"type": "object",
"properties": {
- "call_id": {
- "type": "string"
- },
- "output": {
- "type": "string"
- },
- "type": {
- "type": "string",
- "const": "function_call_output",
- "default": "function_call_output"
- },
- "id": {
- "type": "string"
- },
- "status": {
- "type": "string"
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/RouteInfo"
+ },
+ "description": "List of available route information objects"
}
},
"additionalProperties": false,
"required": [
- "call_id",
- "output",
- "type"
- ],
- "title": "OpenAIResponseInputFunctionToolCallOutput",
- "description": "This represents the output of a function call that gets passed back to the model."
- },
- "OpenAIResponseInputMessageContent": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage"
- }
+ "data"
],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText",
- "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage"
- }
- }
+ "title": "ListRoutesResponse",
+ "description": "Response containing a list of all available API routes."
},
- "OpenAIResponseInputMessageContentImage": {
+ "Model": {
"type": "object",
"properties": {
- "detail": {
- "oneOf": [
- {
- "type": "string",
- "const": "low"
- },
- {
- "type": "string",
- "const": "high"
- },
- {
- "type": "string",
- "const": "auto"
- }
- ],
- "default": "auto",
- "description": "Level of detail for image processing, can be \"low\", \"high\", or \"auto\""
+ "identifier": {
+ "type": "string",
+ "description": "Unique identifier for this resource in llama stack"
},
- "type": {
+ "provider_resource_id": {
"type": "string",
- "const": "input_image",
- "default": "input_image",
- "description": "Content type identifier, always \"input_image\""
+ "description": "Unique identifier for this resource in the provider"
},
- "image_url": {
- "type": "string",
- "description": "(Optional) URL of the image content"
- }
- },
- "additionalProperties": false,
- "required": [
- "detail",
- "type"
- ],
- "title": "OpenAIResponseInputMessageContentImage",
- "description": "Image content for input messages in OpenAI response format."
- },
- "OpenAIResponseInputMessageContentText": {
- "type": "object",
- "properties": {
- "text": {
+ "provider_id": {
"type": "string",
- "description": "The text content of the input message"
+ "description": "ID of the provider that owns this resource"
},
"type": {
"type": "string",
- "const": "input_text",
- "default": "input_text",
- "description": "Content type identifier, always \"input_text\""
+ "enum": [
+ "model",
+ "shield",
+ "vector_db",
+ "dataset",
+ "scoring_function",
+ "benchmark",
+ "tool",
+ "tool_group",
+ "prompt"
+ ],
+ "const": "model",
+ "default": "model",
+ "description": "The resource type, always 'model' for model resources"
+ },
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "Any additional metadata for this model"
+ },
+ "model_type": {
+ "$ref": "#/components/schemas/ModelType",
+ "default": "llm",
+ "description": "The type of model (LLM or embedding model)"
}
},
"additionalProperties": false,
"required": [
- "text",
- "type"
+ "identifier",
+ "provider_id",
+ "type",
+ "metadata",
+ "model_type"
],
- "title": "OpenAIResponseInputMessageContentText",
- "description": "Text content for input messages in OpenAI response format."
+ "title": "Model",
+ "description": "A model resource representing an AI model registered in Llama Stack."
},
- "OpenAIResponseMCPApprovalRequest": {
+ "ModelType": {
+ "type": "string",
+ "enum": [
+ "llm",
+ "embedding"
+ ],
+ "title": "ModelType",
+ "description": "Enumeration of supported model types in Llama Stack."
+ },
+ "ListModelsResponse": {
"type": "object",
"properties": {
- "arguments": {
- "type": "string"
- },
- "id": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "server_label": {
- "type": "string"
- },
- "type": {
- "type": "string",
- "const": "mcp_approval_request",
- "default": "mcp_approval_request"
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Model"
+ }
}
},
"additionalProperties": false,
"required": [
- "arguments",
- "id",
- "name",
- "server_label",
- "type"
+ "data"
],
- "title": "OpenAIResponseMCPApprovalRequest",
- "description": "A request for human approval of a tool invocation."
+ "title": "ListModelsResponse"
},
- "OpenAIResponseMCPApprovalResponse": {
+ "RegisterModelRequest": {
"type": "object",
"properties": {
- "approval_request_id": {
- "type": "string"
+ "model_id": {
+ "type": "string",
+ "description": "The identifier of the model to register."
},
- "approve": {
- "type": "boolean"
+ "provider_model_id": {
+ "type": "string",
+ "description": "The identifier of the model in the provider."
},
- "type": {
+ "provider_id": {
"type": "string",
- "const": "mcp_approval_response",
- "default": "mcp_approval_response"
+ "description": "The identifier of the provider."
},
- "id": {
- "type": "string"
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "Any additional metadata for this model."
},
- "reason": {
- "type": "string"
+ "model_type": {
+ "$ref": "#/components/schemas/ModelType",
+ "description": "The type of model to register."
}
},
"additionalProperties": false,
"required": [
- "approval_request_id",
- "approve",
- "type"
+ "model_id"
],
- "title": "OpenAIResponseMCPApprovalResponse",
- "description": "A response to an MCP approval request."
+ "title": "RegisterModelRequest"
},
- "OpenAIResponseMessage": {
+ "RunModerationRequest": {
"type": "object",
"properties": {
- "content": {
+ "input": {
"oneOf": [
{
"type": "string"
@@ -8378,490 +8917,635 @@
{
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseInputMessageContent"
- }
- },
- {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageContent"
+ "type": "string"
}
}
- ]
- },
- "role": {
- "oneOf": [
- {
- "type": "string",
- "const": "system"
- },
- {
- "type": "string",
- "const": "developer"
- },
- {
- "type": "string",
- "const": "user"
- },
- {
- "type": "string",
- "const": "assistant"
- }
- ]
+ ],
+ "description": "Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models."
},
- "type": {
+ "model": {
"type": "string",
- "const": "message",
- "default": "message"
- },
- "id": {
- "type": "string"
- },
- "status": {
- "type": "string"
+ "description": "The content moderation model you would like to use."
}
},
"additionalProperties": false,
"required": [
- "content",
- "role",
- "type"
+ "input",
+ "model"
],
- "title": "OpenAIResponseMessage",
- "description": "Corresponds to the various Message types in the Responses API. They are all under one type because the Responses API gives them all the same \"type\" value, and there is no way to tell them apart in certain scenarios."
+ "title": "RunModerationRequest"
},
- "OpenAIResponseObjectWithInput": {
+ "ModerationObject": {
"type": "object",
"properties": {
- "created_at": {
- "type": "integer",
- "description": "Unix timestamp when the response was created"
- },
- "error": {
- "$ref": "#/components/schemas/OpenAIResponseError",
- "description": "(Optional) Error details if the response generation failed"
- },
"id": {
"type": "string",
- "description": "Unique identifier for this response"
+ "description": "The unique identifier for the moderation request."
},
"model": {
"type": "string",
- "description": "Model identifier used for generation"
- },
- "object": {
- "type": "string",
- "const": "response",
- "default": "response",
- "description": "Object type identifier, always \"response\""
+ "description": "The model used to generate the moderation results."
},
- "output": {
+ "results": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseOutput"
+ "$ref": "#/components/schemas/ModerationObjectResults"
},
- "description": "List of generated output items (messages, tool calls, etc.)"
- },
- "parallel_tool_calls": {
+ "description": "A list of moderation result objects, one for each input"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "id",
+ "model",
+ "results"
+ ],
+ "title": "ModerationObject",
+ "description": "A moderation object."
+ },
+ "ModerationObjectResults": {
+ "type": "object",
+ "properties": {
+ "flagged": {
"type": "boolean",
- "default": false,
- "description": "Whether tool calls can be executed in parallel"
+ "description": "Whether any of the below categories are flagged."
},
- "previous_response_id": {
- "type": "string",
- "description": "(Optional) ID of the previous response in a conversation"
+ "categories": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "boolean"
+ },
+ "description": "A list of the categories, and whether they are flagged or not."
},
- "status": {
- "type": "string",
- "description": "Current status of the response generation"
+ "category_applied_input_types": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "description": "A list of the categories along with the input type(s) that the score applies to."
},
- "temperature": {
- "type": "number",
- "description": "(Optional) Sampling temperature used for generation"
+ "category_scores": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "number"
+ },
+ "description": "A list of the categories along with their scores as predicted by model."
},
- "text": {
- "$ref": "#/components/schemas/OpenAIResponseText",
- "description": "Text formatting configuration for the response"
+ "user_message": {
+ "type": "string"
},
- "top_p": {
- "type": "number",
- "description": "(Optional) Nucleus sampling parameter used for generation"
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ }
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "flagged",
+ "metadata"
+ ],
+ "title": "ModerationObjectResults",
+ "description": "A moderation result object for a single input."
+ },
+ "Prompt": {
+ "type": "object",
+ "properties": {
+ "prompt": {
+ "type": "string",
+ "description": "The system prompt text with variable placeholders. Variables are only supported when using the Responses API."
},
- "truncation": {
+ "version": {
+ "type": "integer",
+ "description": "Version (integer starting at 1, incremented on save)"
+ },
+ "prompt_id": {
"type": "string",
- "description": "(Optional) Truncation strategy applied to the response"
+ "description": "Unique identifier formatted as 'pmpt_<48-digit-hash>'"
},
- "input": {
+ "variables": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseInput"
+ "type": "string"
},
- "description": "List of input items that led to this response"
+ "description": "List of prompt variable names that can be used in the prompt template"
+ },
+ "is_default": {
+ "type": "boolean",
+ "default": false,
+ "description": "Boolean indicating whether this version is the default version for this prompt"
}
},
"additionalProperties": false,
"required": [
- "created_at",
- "id",
- "model",
- "object",
- "output",
- "parallel_tool_calls",
- "status",
- "text",
- "input"
+ "version",
+ "prompt_id",
+ "variables",
+ "is_default"
],
- "title": "OpenAIResponseObjectWithInput",
- "description": "OpenAI response object extended with input context information."
+ "title": "Prompt",
+ "description": "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack."
},
- "OpenAIResponseOutput": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/OpenAIResponseMessage"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools"
- },
- {
- "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
+ "ListPromptsResponse": {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Prompt"
+ }
}
+ },
+ "additionalProperties": false,
+ "required": [
+ "data"
],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "message": "#/components/schemas/OpenAIResponseMessage",
- "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall",
- "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall",
- "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall",
- "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall",
- "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools",
- "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
- }
- }
+ "title": "ListPromptsResponse",
+ "description": "Response model to list prompts."
},
- "OpenAIResponseOutputMessageContent": {
+ "CreatePromptRequest": {
"type": "object",
"properties": {
- "text": {
- "type": "string"
- },
- "type": {
+ "prompt": {
"type": "string",
- "const": "output_text",
- "default": "output_text"
+ "description": "The prompt text content with variable placeholders."
},
- "annotations": {
+ "variables": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/OpenAIResponseAnnotations"
- }
+ "type": "string"
+ },
+ "description": "List of variable names that can be used in the prompt template."
}
},
"additionalProperties": false,
"required": [
- "text",
- "type",
- "annotations"
+ "prompt"
],
- "title": "OpenAIResponseOutputMessageContentOutputText"
+ "title": "CreatePromptRequest"
},
- "OpenAIResponseOutputMessageFileSearchToolCall": {
+ "UpdatePromptRequest": {
"type": "object",
"properties": {
- "id": {
+ "prompt": {
"type": "string",
- "description": "Unique identifier for this tool call"
+ "description": "The updated prompt text content."
},
- "queries": {
+ "version": {
+ "type": "integer",
+ "description": "The current version of the prompt being updated."
+ },
+ "variables": {
"type": "array",
"items": {
"type": "string"
},
- "description": "List of search queries executed"
+ "description": "Updated list of variable names that can be used in the prompt template."
},
- "status": {
+ "set_as_default": {
+ "type": "boolean",
+ "description": "Set the new version as the default (default=True)."
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "prompt",
+ "version",
+ "set_as_default"
+ ],
+ "title": "UpdatePromptRequest"
+ },
+ "SetDefaultVersionRequest": {
+ "type": "object",
+ "properties": {
+ "version": {
+ "type": "integer",
+ "description": "The version to set as default."
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "version"
+ ],
+ "title": "SetDefaultVersionRequest"
+ },
+ "ProviderInfo": {
+ "type": "object",
+ "properties": {
+ "api": {
"type": "string",
- "description": "Current status of the file search operation"
+ "description": "The API name this provider implements"
},
- "type": {
+ "provider_id": {
"type": "string",
- "const": "file_search_call",
- "default": "file_search_call",
- "description": "Tool call type identifier, always \"file_search_call\""
+ "description": "Unique identifier for the provider"
},
- "results": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "attributes": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
- }
- ]
- },
- "description": "(Optional) Key-value attributes associated with the file"
+ "provider_type": {
+ "type": "string",
+ "description": "The type of provider implementation"
+ },
+ "config": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
},
- "file_id": {
- "type": "string",
- "description": "Unique identifier of the file containing the result"
+ {
+ "type": "boolean"
},
- "filename": {
- "type": "string",
- "description": "Name of the file containing the result"
+ {
+ "type": "number"
},
- "score": {
- "type": "number",
- "description": "Relevance score for this search result (between 0 and 1)"
+ {
+ "type": "string"
},
- "text": {
- "type": "string",
- "description": "Text content of the search result"
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
}
- },
- "additionalProperties": false,
- "required": [
- "attributes",
- "file_id",
- "filename",
- "score",
- "text"
- ],
- "title": "OpenAIResponseOutputMessageFileSearchToolCallResults",
- "description": "Search results returned by the file search operation."
+ ]
},
- "description": "(Optional) Search results returned by the file search operation"
+ "description": "Configuration parameters for the provider"
+ },
+ "health": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
+ },
+ "description": "Current health status of the provider"
}
},
"additionalProperties": false,
"required": [
- "id",
- "queries",
- "status",
- "type"
+ "api",
+ "provider_id",
+ "provider_type",
+ "config",
+ "health"
],
- "title": "OpenAIResponseOutputMessageFileSearchToolCall",
- "description": "File search tool call output message for OpenAI responses."
+ "title": "ProviderInfo",
+ "description": "Information about a registered provider including its configuration and health status."
},
- "OpenAIResponseOutputMessageFunctionToolCall": {
+ "ListProvidersResponse": {
"type": "object",
"properties": {
- "call_id": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ProviderInfo"
+ },
+ "description": "List of provider information objects"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "data"
+ ],
+ "title": "ListProvidersResponse",
+ "description": "Response containing a list of all available providers."
+ },
+ "ListOpenAIResponseObject": {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseObjectWithInput"
+ },
+ "description": "List of response objects with their input context"
+ },
+ "has_more": {
+ "type": "boolean",
+ "description": "Whether there are more results available beyond this page"
+ },
+ "first_id": {
"type": "string",
- "description": "Unique identifier for the function call"
+ "description": "Identifier of the first item in this page"
},
- "name": {
+ "last_id": {
"type": "string",
- "description": "Name of the function being called"
+ "description": "Identifier of the last item in this page"
},
- "arguments": {
+ "object": {
"type": "string",
- "description": "JSON string containing the function arguments"
+ "const": "list",
+ "default": "list",
+ "description": "Object type identifier, always \"list\""
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "data",
+ "has_more",
+ "first_id",
+ "last_id",
+ "object"
+ ],
+ "title": "ListOpenAIResponseObject",
+ "description": "Paginated list of OpenAI response objects with navigation metadata."
+ },
+ "OpenAIResponseError": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "Error code identifying the type of failure"
+ },
+ "message": {
+ "type": "string",
+ "description": "Human-readable error message describing the failure"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "code",
+ "message"
+ ],
+ "title": "OpenAIResponseError",
+ "description": "Error details for failed OpenAI response requests."
+ },
+ "OpenAIResponseInput": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMessage"
+ }
+ ]
+ },
+ "OpenAIResponseInputFunctionToolCallOutput": {
+ "type": "object",
+ "properties": {
+ "call_id": {
+ "type": "string"
+ },
+ "output": {
+ "type": "string"
},
"type": {
"type": "string",
- "const": "function_call",
- "default": "function_call",
- "description": "Tool call type identifier, always \"function_call\""
+ "const": "function_call_output",
+ "default": "function_call_output"
},
"id": {
- "type": "string",
- "description": "(Optional) Additional identifier for the tool call"
+ "type": "string"
},
"status": {
- "type": "string",
- "description": "(Optional) Current status of the function call execution"
+ "type": "string"
}
},
"additionalProperties": false,
"required": [
"call_id",
- "name",
- "arguments",
+ "output",
"type"
],
- "title": "OpenAIResponseOutputMessageFunctionToolCall",
- "description": "Function tool call output message for OpenAI responses."
+ "title": "OpenAIResponseInputFunctionToolCallOutput",
+ "description": "This represents the output of a function call that gets passed back to the model."
},
- "OpenAIResponseOutputMessageMCPCall": {
+ "OpenAIResponseMCPApprovalRequest": {
"type": "object",
"properties": {
- "id": {
- "type": "string",
- "description": "Unique identifier for this MCP call"
- },
- "type": {
- "type": "string",
- "const": "mcp_call",
- "default": "mcp_call",
- "description": "Tool call type identifier, always \"mcp_call\""
- },
"arguments": {
- "type": "string",
- "description": "JSON string containing the MCP call arguments"
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
},
"name": {
- "type": "string",
- "description": "Name of the MCP method being called"
+ "type": "string"
},
"server_label": {
- "type": "string",
- "description": "Label identifying the MCP server handling the call"
- },
- "error": {
- "type": "string",
- "description": "(Optional) Error message if the MCP call failed"
+ "type": "string"
},
- "output": {
+ "type": {
"type": "string",
- "description": "(Optional) Output result from the successful MCP call"
+ "const": "mcp_approval_request",
+ "default": "mcp_approval_request"
}
},
"additionalProperties": false,
"required": [
- "id",
- "type",
"arguments",
+ "id",
"name",
- "server_label"
+ "server_label",
+ "type"
],
- "title": "OpenAIResponseOutputMessageMCPCall",
- "description": "Model Context Protocol (MCP) call output message for OpenAI responses."
+ "title": "OpenAIResponseMCPApprovalRequest",
+ "description": "A request for human approval of a tool invocation."
},
- "OpenAIResponseOutputMessageMCPListTools": {
+ "OpenAIResponseMCPApprovalResponse": {
"type": "object",
"properties": {
- "id": {
- "type": "string",
- "description": "Unique identifier for this MCP list tools operation"
+ "approval_request_id": {
+ "type": "string"
+ },
+ "approve": {
+ "type": "boolean"
},
"type": {
"type": "string",
- "const": "mcp_list_tools",
- "default": "mcp_list_tools",
- "description": "Tool call type identifier, always \"mcp_list_tools\""
+ "const": "mcp_approval_response",
+ "default": "mcp_approval_response"
},
- "server_label": {
- "type": "string",
- "description": "Label identifying the MCP server providing the tools"
+ "id": {
+ "type": "string"
},
- "tools": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "input_schema": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
- }
- ]
- },
- "description": "JSON schema defining the tool's input parameters"
- },
- "name": {
- "type": "string",
- "description": "Name of the tool"
- },
- "description": {
- "type": "string",
- "description": "(Optional) Description of what the tool does"
- }
- },
- "additionalProperties": false,
- "required": [
- "input_schema",
- "name"
- ],
- "title": "MCPListToolsTool",
- "description": "Tool definition returned by MCP list tools operation."
- },
- "description": "List of available tools provided by the MCP server"
+ "reason": {
+ "type": "string"
}
},
"additionalProperties": false,
"required": [
- "id",
- "type",
- "server_label",
- "tools"
+ "approval_request_id",
+ "approve",
+ "type"
],
- "title": "OpenAIResponseOutputMessageMCPListTools",
- "description": "MCP list tools output message containing available tools from an MCP server."
+ "title": "OpenAIResponseMCPApprovalResponse",
+ "description": "A response to an MCP approval request."
},
- "OpenAIResponseOutputMessageWebSearchToolCall": {
+ "OpenAIResponseObjectWithInput": {
"type": "object",
"properties": {
+ "created_at": {
+ "type": "integer",
+ "description": "Unix timestamp when the response was created"
+ },
+ "error": {
+ "$ref": "#/components/schemas/OpenAIResponseError",
+ "description": "(Optional) Error details if the response generation failed"
+ },
"id": {
"type": "string",
- "description": "Unique identifier for this tool call"
+ "description": "Unique identifier for this response"
+ },
+ "model": {
+ "type": "string",
+ "description": "Model identifier used for generation"
+ },
+ "object": {
+ "type": "string",
+ "const": "response",
+ "default": "response",
+ "description": "Object type identifier, always \"response\""
+ },
+ "output": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseOutput"
+ },
+ "description": "List of generated output items (messages, tool calls, etc.)"
+ },
+ "parallel_tool_calls": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether tool calls can be executed in parallel"
+ },
+ "previous_response_id": {
+ "type": "string",
+ "description": "(Optional) ID of the previous response in a conversation"
},
"status": {
"type": "string",
- "description": "Current status of the web search operation"
+ "description": "Current status of the response generation"
},
- "type": {
+ "temperature": {
+ "type": "number",
+ "description": "(Optional) Sampling temperature used for generation"
+ },
+ "text": {
+ "$ref": "#/components/schemas/OpenAIResponseText",
+ "description": "Text formatting configuration for the response"
+ },
+ "top_p": {
+ "type": "number",
+ "description": "(Optional) Nucleus sampling parameter used for generation"
+ },
+ "truncation": {
"type": "string",
- "const": "web_search_call",
- "default": "web_search_call",
- "description": "Tool call type identifier, always \"web_search_call\""
+ "description": "(Optional) Truncation strategy applied to the response"
+ },
+ "input": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIResponseInput"
+ },
+ "description": "List of input items that led to this response"
}
},
"additionalProperties": false,
"required": [
+ "created_at",
"id",
+ "model",
+ "object",
+ "output",
+ "parallel_tool_calls",
"status",
- "type"
+ "text",
+ "input"
],
- "title": "OpenAIResponseOutputMessageWebSearchToolCall",
- "description": "Web search tool call output message for OpenAI responses."
+ "title": "OpenAIResponseObjectWithInput",
+ "description": "OpenAI response object extended with input context information."
+ },
+ "OpenAIResponseOutput": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMessage"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "message": "#/components/schemas/OpenAIResponseMessage",
+ "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall",
+ "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall",
+ "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall",
+ "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall",
+ "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools",
+ "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest"
+ }
+ }
},
"OpenAIResponseText": {
"type": "object",
@@ -17957,6 +18641,11 @@
"name": "Benchmarks",
"description": ""
},
+ {
+ "name": "Conversations",
+ "description": "",
+ "x-displayName": "Protocol for conversation management operations."
+ },
{
"name": "DatasetIO",
"description": ""
@@ -18048,6 +18737,7 @@
"tags": [
"Agents",
"Benchmarks",
+ "Conversations",
"DatasetIO",
"Datasets",
"Eval",
diff --git a/docs/static/stainless-llama-stack-spec.yaml b/docs/static/stainless-llama-stack-spec.yaml
index cb43b313b9..a3ad454873 100644
--- a/docs/static/stainless-llama-stack-spec.yaml
+++ b/docs/static/stainless-llama-stack-spec.yaml
@@ -170,16 +170,15 @@ paths:
$ref: '#/components/schemas/OpenaiCompletionRequest'
required: true
deprecated: false
- /v1/embeddings:
+ /v1/conversations:
post:
responses:
'200':
- description: >-
- An OpenAIEmbeddingsResponse containing the embeddings.
+ description: The created conversation object.
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenAIEmbeddingsResponse'
+ $ref: '#/components/schemas/Conversation'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -191,31 +190,26 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Inference
- summary: >-
- Generate OpenAI-compatible embeddings for the given input using the specified
- model.
- description: >-
- Generate OpenAI-compatible embeddings for the given input using the specified
- model.
+ - Conversations
+ summary: Create a conversation.
+ description: Create a conversation.
parameters: []
requestBody:
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenaiEmbeddingsRequest'
+ $ref: '#/components/schemas/CreateConversationRequest'
required: true
deprecated: false
- /v1/files:
+ /v1/conversations/{conversation_id}:
get:
responses:
'200':
- description: >-
- An ListOpenAIFileResponse containing the list of files.
+ description: The conversation object.
content:
application/json:
schema:
- $ref: '#/components/schemas/ListOpenAIFileResponse'
+ $ref: '#/components/schemas/Conversation'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -227,55 +221,25 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
- summary: >-
- Returns a list of files that belong to the user's organization.
- description: >-
- Returns a list of files that belong to the user's organization.
+ - Conversations
+ summary: Get a conversation with the given ID.
+ description: Get a conversation with the given ID.
parameters:
- - name: after
- in: query
- description: >-
- A cursor for use in pagination. `after` is an object ID that defines your
- place in the list. For instance, if you make a list request and receive
- 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo
- in order to fetch the next page of the list.
- required: false
+ - name: conversation_id
+ in: path
+ description: The conversation identifier.
+ required: true
schema:
type: string
- - name: limit
- in: query
- description: >-
- A limit on the number of objects to be returned. Limit can range between
- 1 and 10,000, and the default is 10,000.
- required: false
- schema:
- type: integer
- - name: order
- in: query
- description: >-
- Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
- required: false
- schema:
- $ref: '#/components/schemas/Order'
- - name: purpose
- in: query
- description: >-
- Only return files with the given purpose.
- required: false
- schema:
- $ref: '#/components/schemas/OpenAIFilePurpose'
deprecated: false
post:
responses:
'200':
- description: >-
- An OpenAIFileObject representing the uploaded file.
+ description: The updated conversation object.
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenAIFileObject'
+ $ref: '#/components/schemas/Conversation'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -287,48 +251,33 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
+ - Conversations
summary: >-
- Upload a file that can be used across various endpoints.
+ Update a conversation's metadata with the given ID.
description: >-
- Upload a file that can be used across various endpoints.
-
- The file upload should be a multipart form request with:
-
- - file: The File object (not file name) to be uploaded.
-
- - purpose: The intended purpose of the uploaded file.
-
- - expires_after: Optional form values describing expiration for the file.
- parameters: []
+ Update a conversation's metadata with the given ID.
+ parameters:
+ - name: conversation_id
+ in: path
+ description: The conversation identifier.
+ required: true
+ schema:
+ type: string
requestBody:
content:
- multipart/form-data:
+ application/json:
schema:
- type: object
- properties:
- file:
- type: string
- format: binary
- purpose:
- $ref: '#/components/schemas/OpenAIFilePurpose'
- expires_after:
- $ref: '#/components/schemas/ExpiresAfter'
- required:
- - file
- - purpose
+ $ref: '#/components/schemas/UpdateConversationRequest'
required: true
deprecated: false
- /v1/files/{file_id}:
- get:
+ delete:
responses:
'200':
- description: >-
- An OpenAIFileObject containing file information.
+ description: The deleted conversation resource.
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenAIFileObject'
+ $ref: '#/components/schemas/ConversationDeletedResource'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -340,29 +289,26 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
- summary: >-
- Returns information about a specific file.
- description: >-
- Returns information about a specific file.
+ - Conversations
+ summary: Delete a conversation with the given ID.
+ description: Delete a conversation with the given ID.
parameters:
- - name: file_id
+ - name: conversation_id
in: path
- description: >-
- The ID of the file to use for this request.
+ description: The conversation identifier.
required: true
schema:
type: string
deprecated: false
- delete:
+ /v1/conversations/{conversation_id}/items:
+ get:
responses:
'200':
- description: >-
- An OpenAIFileDeleteResponse indicating successful deletion.
+ description: List of conversation items.
content:
application/json:
schema:
- $ref: '#/components/schemas/OpenAIFileDeleteResponse'
+ $ref: '#/components/schemas/ConversationItemList'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -374,28 +320,169 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
- summary: Delete a file.
- description: Delete a file.
+ - Conversations
+ summary: List items in the conversation.
+ description: List items in the conversation.
parameters:
- - name: file_id
+ - name: conversation_id
in: path
- description: >-
- The ID of the file to use for this request.
+ description: The conversation identifier.
required: true
schema:
type: string
+ - name: after
+ in: query
+ description: >-
+ An item ID to list items after, used in pagination.
+ required: true
+ schema:
+ oneOf:
+ - type: string
+ - type: object
+ title: NotGiven
+ description: >-
+ A sentinel singleton class used to distinguish omitted keyword arguments
+ from those passed in with the value None (which may have different
+ behavior).
+
+ For example:
+
+
+ ```py
+
+ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
+ ...
+
+
+
+ get(timeout=1) # 1s timeout
+
+ get(timeout=None) # No timeout
+
+ get() # Default timeout behavior, which may not be statically known
+ at the method definition.
+
+ ```
+ - name: include
+ in: query
+ description: >-
+ Specify additional output data to include in the response.
+ required: true
+ schema:
+ oneOf:
+ - type: array
+ items:
+ type: string
+ enum:
+ - code_interpreter_call.outputs
+ - computer_call_output.output.image_url
+ - file_search_call.results
+ - message.input_image.image_url
+ - message.output_text.logprobs
+ - reasoning.encrypted_content
+ - type: object
+ title: NotGiven
+ description: >-
+ A sentinel singleton class used to distinguish omitted keyword arguments
+ from those passed in with the value None (which may have different
+ behavior).
+
+ For example:
+
+
+ ```py
+
+ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
+ ...
+
+
+
+ get(timeout=1) # 1s timeout
+
+ get(timeout=None) # No timeout
+
+ get() # Default timeout behavior, which may not be statically known
+ at the method definition.
+
+ ```
+ - name: limit
+ in: query
+ description: >-
+ A limit on the number of objects to be returned (1-100, default 20).
+ required: true
+ schema:
+ oneOf:
+ - type: integer
+ - type: object
+ title: NotGiven
+ description: >-
+ A sentinel singleton class used to distinguish omitted keyword arguments
+ from those passed in with the value None (which may have different
+ behavior).
+
+ For example:
+
+
+ ```py
+
+ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
+ ...
+
+
+
+ get(timeout=1) # 1s timeout
+
+ get(timeout=None) # No timeout
+
+ get() # Default timeout behavior, which may not be statically known
+ at the method definition.
+
+ ```
+ - name: order
+ in: query
+ description: >-
+ The order to return items in (asc or desc, default desc).
+ required: true
+ schema:
+ oneOf:
+ - type: string
+ enum:
+ - asc
+ - desc
+ - type: object
+ title: NotGiven
+ description: >-
+ A sentinel singleton class used to distinguish omitted keyword arguments
+ from those passed in with the value None (which may have different
+ behavior).
+
+ For example:
+
+
+ ```py
+
+ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
+ ...
+
+
+
+ get(timeout=1) # 1s timeout
+
+ get(timeout=None) # No timeout
+
+ get() # Default timeout behavior, which may not be statically known
+ at the method definition.
+
+ ```
deprecated: false
- /v1/files/{file_id}/content:
- get:
+ post:
responses:
'200':
- description: >-
- The raw file content as a binary response.
+ description: List of created items.
content:
application/json:
schema:
- $ref: '#/components/schemas/Response'
+ $ref: '#/components/schemas/ConversationItemList'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -407,30 +494,32 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Files
- summary: >-
- Returns the contents of the specified file.
- description: >-
- Returns the contents of the specified file.
+ - Conversations
+ summary: Create items in the conversation.
+ description: Create items in the conversation.
parameters:
- - name: file_id
+ - name: conversation_id
in: path
- description: >-
- The ID of the file to use for this request.
+ description: The conversation identifier.
required: true
schema:
type: string
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/AddItemsRequest'
+ required: true
deprecated: false
- /v1/health:
+ /v1/conversations/{conversation_id}/items/{item_id}:
get:
responses:
'200':
- description: >-
- Health information indicating if the service is operational.
+ description: The conversation item.
content:
application/json:
schema:
- $ref: '#/components/schemas/HealthInfo'
+ $ref: '#/components/schemas/ConversationItem'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -442,23 +531,31 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Inspect
- summary: >-
- Get the current health status of the service.
- description: >-
- Get the current health status of the service.
- parameters: []
+ - Conversations
+ summary: Retrieve a conversation item.
+ description: Retrieve a conversation item.
+ parameters:
+ - name: conversation_id
+ in: path
+ description: The conversation identifier.
+ required: true
+ schema:
+ type: string
+ - name: item_id
+ in: path
+ description: The item identifier.
+ required: true
+ schema:
+ type: string
deprecated: false
- /v1/inspect/routes:
- get:
+ delete:
responses:
'200':
- description: >-
- Response containing information about all available routes.
+ description: The deleted item resource.
content:
application/json:
schema:
- $ref: '#/components/schemas/ListRoutesResponse'
+ $ref: '#/components/schemas/ConversationItemDeletedResource'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -470,22 +567,33 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Inspect
- summary: >-
- List all available API routes with their methods and implementing providers.
- description: >-
- List all available API routes with their methods and implementing providers.
- parameters: []
+ - Conversations
+ summary: Delete a conversation item.
+ description: Delete a conversation item.
+ parameters:
+ - name: conversation_id
+ in: path
+ description: The conversation identifier.
+ required: true
+ schema:
+ type: string
+ - name: item_id
+ in: path
+ description: The item identifier.
+ required: true
+ schema:
+ type: string
deprecated: false
- /v1/models:
- get:
+ /v1/embeddings:
+ post:
responses:
'200':
- description: A ListModelsResponse.
+ description: >-
+ An OpenAIEmbeddingsResponse containing the embeddings.
content:
application/json:
schema:
- $ref: '#/components/schemas/ListModelsResponse'
+ $ref: '#/components/schemas/OpenAIEmbeddingsResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -497,19 +605,91 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Models
- summary: List all models.
- description: List all models.
+ - Inference
+ summary: >-
+ Generate OpenAI-compatible embeddings for the given input using the specified
+ model.
+ description: >-
+ Generate OpenAI-compatible embeddings for the given input using the specified
+ model.
parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/OpenaiEmbeddingsRequest'
+ required: true
+ deprecated: false
+ /v1/files:
+ get:
+ responses:
+ '200':
+ description: >-
+ An ListOpenAIFileResponse containing the list of files.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ListOpenAIFileResponse'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Files
+ summary: >-
+ Returns a list of files that belong to the user's organization.
+ description: >-
+ Returns a list of files that belong to the user's organization.
+ parameters:
+ - name: after
+ in: query
+ description: >-
+ A cursor for use in pagination. `after` is an object ID that defines your
+ place in the list. For instance, if you make a list request and receive
+ 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo
+ in order to fetch the next page of the list.
+ required: false
+ schema:
+ type: string
+ - name: limit
+ in: query
+ description: >-
+ A limit on the number of objects to be returned. Limit can range between
+ 1 and 10,000, and the default is 10,000.
+ required: false
+ schema:
+ type: integer
+ - name: order
+ in: query
+ description: >-
+ Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+ required: false
+ schema:
+ $ref: '#/components/schemas/Order'
+ - name: purpose
+ in: query
+ description: >-
+ Only return files with the given purpose.
+ required: false
+ schema:
+ $ref: '#/components/schemas/OpenAIFilePurpose'
deprecated: false
post:
responses:
'200':
- description: A Model.
+ description: >-
+ An OpenAIFileObject representing the uploaded file.
content:
application/json:
schema:
- $ref: '#/components/schemas/Model'
+ $ref: '#/components/schemas/OpenAIFileObject'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -521,26 +701,48 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Models
- summary: Register a model.
- description: Register a model.
+ - Files
+ summary: >-
+ Upload a file that can be used across various endpoints.
+ description: >-
+ Upload a file that can be used across various endpoints.
+
+ The file upload should be a multipart form request with:
+
+ - file: The File object (not file name) to be uploaded.
+
+ - purpose: The intended purpose of the uploaded file.
+
+ - expires_after: Optional form values describing expiration for the file.
parameters: []
requestBody:
content:
- application/json:
+ multipart/form-data:
schema:
- $ref: '#/components/schemas/RegisterModelRequest'
+ type: object
+ properties:
+ file:
+ type: string
+ format: binary
+ purpose:
+ $ref: '#/components/schemas/OpenAIFilePurpose'
+ expires_after:
+ $ref: '#/components/schemas/ExpiresAfter'
+ required:
+ - file
+ - purpose
required: true
deprecated: false
- /v1/models/{model_id}:
+ /v1/files/{file_id}:
get:
responses:
'200':
- description: A Model.
+ description: >-
+ An OpenAIFileObject containing file information.
content:
application/json:
schema:
- $ref: '#/components/schemas/Model'
+ $ref: '#/components/schemas/OpenAIFileObject'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -552,13 +754,16 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Models
- summary: Get a model by its identifier.
- description: Get a model by its identifier.
+ - Files
+ summary: >-
+ Returns information about a specific file.
+ description: >-
+ Returns information about a specific file.
parameters:
- - name: model_id
+ - name: file_id
in: path
- description: The identifier of the model to get.
+ description: >-
+ The ID of the file to use for this request.
required: true
schema:
type: string
@@ -566,7 +771,12 @@ paths:
delete:
responses:
'200':
- description: OK
+ description: >-
+ An OpenAIFileDeleteResponse indicating successful deletion.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/OpenAIFileDeleteResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -578,27 +788,28 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Models
- summary: Unregister a model.
- description: Unregister a model.
+ - Files
+ summary: Delete a file.
+ description: Delete a file.
parameters:
- - name: model_id
+ - name: file_id
in: path
description: >-
- The identifier of the model to unregister.
+ The ID of the file to use for this request.
required: true
schema:
type: string
deprecated: false
- /v1/moderations:
- post:
+ /v1/files/{file_id}/content:
+ get:
responses:
'200':
- description: A moderation object.
+ description: >-
+ The raw file content as a binary response.
content:
application/json:
schema:
- $ref: '#/components/schemas/ModerationObject'
+ $ref: '#/components/schemas/Response'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -610,29 +821,30 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Safety
+ - Files
summary: >-
- Classifies if text and/or image inputs are potentially harmful.
+ Returns the contents of the specified file.
description: >-
- Classifies if text and/or image inputs are potentially harmful.
- parameters: []
- requestBody:
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/RunModerationRequest'
- required: true
+ Returns the contents of the specified file.
+ parameters:
+ - name: file_id
+ in: path
+ description: >-
+ The ID of the file to use for this request.
+ required: true
+ schema:
+ type: string
deprecated: false
- /v1/prompts:
+ /v1/health:
get:
responses:
'200':
description: >-
- A ListPromptsResponse containing all prompts.
+ Health information indicating if the service is operational.
content:
application/json:
schema:
- $ref: '#/components/schemas/ListPromptsResponse'
+ $ref: '#/components/schemas/HealthInfo'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -644,19 +856,23 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Prompts
- summary: List all prompts.
- description: List all prompts.
+ - Inspect
+ summary: >-
+ Get the current health status of the service.
+ description: >-
+ Get the current health status of the service.
parameters: []
deprecated: false
- post:
+ /v1/inspect/routes:
+ get:
responses:
'200':
- description: The created Prompt resource.
+ description: >-
+ Response containing information about all available routes.
content:
application/json:
schema:
- $ref: '#/components/schemas/Prompt'
+ $ref: '#/components/schemas/ListRoutesResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -668,26 +884,22 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Prompts
- summary: Create a new prompt.
- description: Create a new prompt.
+ - Inspect
+ summary: >-
+ List all available API routes with their methods and implementing providers.
+ description: >-
+ List all available API routes with their methods and implementing providers.
parameters: []
- requestBody:
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/CreatePromptRequest'
- required: true
deprecated: false
- /v1/prompts/{prompt_id}:
+ /v1/models:
get:
responses:
'200':
- description: A Prompt resource.
+ description: A ListModelsResponse.
content:
application/json:
schema:
- $ref: '#/components/schemas/Prompt'
+ $ref: '#/components/schemas/ListModelsResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -699,35 +911,19 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Prompts
- summary: >-
- Get a prompt by its identifier and optional version.
- description: >-
- Get a prompt by its identifier and optional version.
- parameters:
- - name: prompt_id
- in: path
- description: The identifier of the prompt to get.
- required: true
- schema:
- type: string
- - name: version
- in: query
- description: >-
- The version of the prompt to get (defaults to latest).
- required: false
- schema:
- type: integer
+ - Models
+ summary: List all models.
+ description: List all models.
+ parameters: []
deprecated: false
post:
responses:
'200':
- description: >-
- The updated Prompt resource with incremented version.
+ description: A Model.
content:
application/json:
schema:
- $ref: '#/components/schemas/Prompt'
+ $ref: '#/components/schemas/Model'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@@ -739,8 +935,226 @@ paths:
default:
$ref: '#/components/responses/DefaultError'
tags:
- - Prompts
- summary: >-
+ - Models
+ summary: Register a model.
+ description: Register a model.
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RegisterModelRequest'
+ required: true
+ deprecated: false
+ /v1/models/{model_id}:
+ get:
+ responses:
+ '200':
+ description: A Model.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Model'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Models
+ summary: Get a model by its identifier.
+ description: Get a model by its identifier.
+ parameters:
+ - name: model_id
+ in: path
+ description: The identifier of the model to get.
+ required: true
+ schema:
+ type: string
+ deprecated: false
+ delete:
+ responses:
+ '200':
+ description: OK
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Models
+ summary: Unregister a model.
+ description: Unregister a model.
+ parameters:
+ - name: model_id
+ in: path
+ description: >-
+ The identifier of the model to unregister.
+ required: true
+ schema:
+ type: string
+ deprecated: false
+ /v1/moderations:
+ post:
+ responses:
+ '200':
+ description: A moderation object.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ModerationObject'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Safety
+ summary: >-
+ Classifies if text and/or image inputs are potentially harmful.
+ description: >-
+ Classifies if text and/or image inputs are potentially harmful.
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/RunModerationRequest'
+ required: true
+ deprecated: false
+ /v1/prompts:
+ get:
+ responses:
+ '200':
+ description: >-
+ A ListPromptsResponse containing all prompts.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ListPromptsResponse'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Prompts
+ summary: List all prompts.
+ description: List all prompts.
+ parameters: []
+ deprecated: false
+ post:
+ responses:
+ '200':
+ description: The created Prompt resource.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Prompt'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Prompts
+ summary: Create a new prompt.
+ description: Create a new prompt.
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CreatePromptRequest'
+ required: true
+ deprecated: false
+ /v1/prompts/{prompt_id}:
+ get:
+ responses:
+ '200':
+ description: A Prompt resource.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Prompt'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Prompts
+ summary: >-
+ Get a prompt by its identifier and optional version.
+ description: >-
+ Get a prompt by its identifier and optional version.
+ parameters:
+ - name: prompt_id
+ in: path
+ description: The identifier of the prompt to get.
+ required: true
+ schema:
+ type: string
+ - name: version
+ in: query
+ description: >-
+ The version of the prompt to get (defaults to latest).
+ required: false
+ schema:
+ type: integer
+ deprecated: false
+ post:
+ responses:
+ '200':
+ description: >-
+ The updated Prompt resource with incremented version.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Prompt'
+ '400':
+ $ref: '#/components/responses/BadRequest400'
+ '429':
+ $ref: >-
+ #/components/responses/TooManyRequests429
+ '500':
+ $ref: >-
+ #/components/responses/InternalServerError500
+ default:
+ $ref: '#/components/responses/DefaultError'
+ tags:
+ - Prompts
+ summary: >-
Update an existing prompt (increments version).
description: >-
Update an existing prompt (increments version).
@@ -5198,690 +5612,778 @@ components:
title: OpenAICompletionChoice
description: >-
A choice from an OpenAI-compatible completion response.
- OpenaiEmbeddingsRequest:
+ ConversationItem:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseMessage'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
+ discriminator:
+ propertyName: type
+ mapping:
+ message: '#/components/schemas/OpenAIResponseMessage'
+ function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
+ mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
+ OpenAIResponseAnnotationCitation:
type: object
properties:
- model:
+ type:
type: string
+ const: url_citation
+ default: url_citation
description: >-
- The identifier of the model to use. The model must be an embedding model
- registered with Llama Stack and available via the /models endpoint.
- input:
- oneOf:
- - type: string
- - type: array
- items:
- type: string
+ Annotation type identifier, always "url_citation"
+ end_index:
+ type: integer
description: >-
- Input text to embed, encoded as a string or array of strings. To embed
- multiple inputs in a single request, pass an array of strings.
- encoding_format:
- type: string
- description: >-
- (Optional) The format to return the embeddings in. Can be either "float"
- or "base64". Defaults to "float".
- dimensions:
+ End position of the citation span in the content
+ start_index:
type: integer
description: >-
- (Optional) The number of dimensions the resulting output embeddings should
- have. Only supported in text-embedding-3 and later models.
- user:
+ Start position of the citation span in the content
+ title:
type: string
- description: >-
- (Optional) A unique identifier representing your end-user, which can help
- OpenAI to monitor and detect abuse.
+ description: Title of the referenced web resource
+ url:
+ type: string
+ description: URL of the referenced web resource
additionalProperties: false
required:
- - model
- - input
- title: OpenaiEmbeddingsRequest
- OpenAIEmbeddingData:
+ - type
+ - end_index
+ - start_index
+ - title
+ - url
+ title: OpenAIResponseAnnotationCitation
+ description: >-
+ URL citation annotation for referencing external web resources.
+ "OpenAIResponseAnnotationContainerFileCitation":
type: object
properties:
- object:
+ type:
type: string
- const: embedding
- default: embedding
- description: >-
- The object type, which will be "embedding"
- embedding:
- oneOf:
- - type: array
- items:
- type: number
- - type: string
- description: >-
- The embedding vector as a list of floats (when encoding_format="float")
- or as a base64-encoded string (when encoding_format="base64")
- index:
+ const: container_file_citation
+ default: container_file_citation
+ container_id:
+ type: string
+ end_index:
+ type: integer
+ file_id:
+ type: string
+ filename:
+ type: string
+ start_index:
type: integer
- description: >-
- The index of the embedding in the input list
additionalProperties: false
required:
- - object
- - embedding
- - index
- title: OpenAIEmbeddingData
- description: >-
- A single embedding data object from an OpenAI-compatible embeddings response.
- OpenAIEmbeddingUsage:
+ - type
+ - container_id
+ - end_index
+ - file_id
+ - filename
+ - start_index
+ title: >-
+ OpenAIResponseAnnotationContainerFileCitation
+ OpenAIResponseAnnotationFileCitation:
type: object
properties:
- prompt_tokens:
- type: integer
- description: The number of tokens in the input
- total_tokens:
+ type:
+ type: string
+ const: file_citation
+ default: file_citation
+ description: >-
+ Annotation type identifier, always "file_citation"
+ file_id:
+ type: string
+ description: Unique identifier of the referenced file
+ filename:
+ type: string
+ description: Name of the referenced file
+ index:
type: integer
- description: The total number of tokens used
+ description: >-
+ Position index of the citation within the content
additionalProperties: false
required:
- - prompt_tokens
- - total_tokens
- title: OpenAIEmbeddingUsage
+ - type
+ - file_id
+ - filename
+ - index
+ title: OpenAIResponseAnnotationFileCitation
description: >-
- Usage information for an OpenAI-compatible embeddings response.
- OpenAIEmbeddingsResponse:
+ File citation annotation for referencing specific files in response content.
+ OpenAIResponseAnnotationFilePath:
type: object
properties:
- object:
+ type:
type: string
- const: list
- default: list
- description: The object type, which will be "list"
- data:
- type: array
- items:
- $ref: '#/components/schemas/OpenAIEmbeddingData'
- description: List of embedding data objects
- model:
+ const: file_path
+ default: file_path
+ file_id:
type: string
- description: >-
- The model that was used to generate the embeddings
- usage:
- $ref: '#/components/schemas/OpenAIEmbeddingUsage'
- description: Usage information
+ index:
+ type: integer
additionalProperties: false
required:
- - object
- - data
- - model
- - usage
- title: OpenAIEmbeddingsResponse
- description: >-
- Response from an OpenAI-compatible embeddings request.
- OpenAIFilePurpose:
- type: string
- enum:
- - assistants
- - batch
- title: OpenAIFilePurpose
- description: >-
- Valid purpose values for OpenAI Files API.
- ListOpenAIFileResponse:
+ - type
+ - file_id
+ - index
+ title: OpenAIResponseAnnotationFilePath
+ OpenAIResponseAnnotations:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
+ - $ref: '#/components/schemas/OpenAIResponseAnnotationCitation'
+ - $ref: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
+ - $ref: '#/components/schemas/OpenAIResponseAnnotationFilePath'
+ discriminator:
+ propertyName: type
+ mapping:
+ file_citation: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
+ url_citation: '#/components/schemas/OpenAIResponseAnnotationCitation'
+ container_file_citation: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
+ file_path: '#/components/schemas/OpenAIResponseAnnotationFilePath'
+ OpenAIResponseInputMessageContent:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
+ - $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
+ discriminator:
+ propertyName: type
+ mapping:
+ input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
+ input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
+ OpenAIResponseInputMessageContentImage:
type: object
properties:
- data:
- type: array
- items:
- $ref: '#/components/schemas/OpenAIFileObject'
- description: List of file objects
- has_more:
- type: boolean
- description: >-
- Whether there are more files available beyond this page
- first_id:
- type: string
+ detail:
+ oneOf:
+ - type: string
+ const: low
+ - type: string
+ const: high
+ - type: string
+ const: auto
+ default: auto
description: >-
- ID of the first file in the list for pagination
- last_id:
+ Level of detail for image processing, can be "low", "high", or "auto"
+ type:
type: string
+ const: input_image
+ default: input_image
description: >-
- ID of the last file in the list for pagination
- object:
+ Content type identifier, always "input_image"
+ image_url:
type: string
- const: list
- default: list
- description: The object type, which is always "list"
+ description: (Optional) URL of the image content
additionalProperties: false
required:
- - data
- - has_more
- - first_id
- - last_id
- - object
- title: ListOpenAIFileResponse
+ - detail
+ - type
+ title: OpenAIResponseInputMessageContentImage
description: >-
- Response for listing files in OpenAI Files API.
- OpenAIFileObject:
+ Image content for input messages in OpenAI response format.
+ OpenAIResponseInputMessageContentText:
type: object
properties:
- object:
+ text:
type: string
- const: file
- default: file
- description: The object type, which is always "file"
- id:
+ description: The text content of the input message
+ type:
type: string
+ const: input_text
+ default: input_text
description: >-
- The file identifier, which can be referenced in the API endpoints
- bytes:
- type: integer
- description: The size of the file, in bytes
- created_at:
- type: integer
- description: >-
- The Unix timestamp (in seconds) for when the file was created
- expires_at:
- type: integer
- description: >-
- The Unix timestamp (in seconds) for when the file expires
- filename:
- type: string
- description: The name of the file
- purpose:
- type: string
- enum:
- - assistants
- - batch
- description: The intended purpose of the file
+ Content type identifier, always "input_text"
additionalProperties: false
required:
- - object
- - id
- - bytes
- - created_at
- - expires_at
- - filename
- - purpose
- title: OpenAIFileObject
+ - text
+ - type
+ title: OpenAIResponseInputMessageContentText
description: >-
- OpenAI File object as defined in the OpenAI Files API.
- ExpiresAfter:
+ Text content for input messages in OpenAI response format.
+ OpenAIResponseMessage:
type: object
properties:
- anchor:
+ content:
+ oneOf:
+ - type: string
+ - type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseInputMessageContent'
+ - type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseOutputMessageContent'
+ role:
+ oneOf:
+ - type: string
+ const: system
+ - type: string
+ const: developer
+ - type: string
+ const: user
+ - type: string
+ const: assistant
+ type:
type: string
- const: created_at
- seconds:
- type: integer
- additionalProperties: false
- required:
- - anchor
- - seconds
- title: ExpiresAfter
- description: >-
- Control expiration of uploaded files.
-
- Params:
- - anchor, must be "created_at"
- - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
- OpenAIFileDeleteResponse:
- type: object
- properties:
+ const: message
+ default: message
id:
type: string
- description: The file identifier that was deleted
- object:
+ status:
type: string
- const: file
- default: file
- description: The object type, which is always "file"
- deleted:
- type: boolean
- description: >-
- Whether the file was successfully deleted
additionalProperties: false
required:
- - id
- - object
- - deleted
- title: OpenAIFileDeleteResponse
+ - content
+ - role
+ - type
+ title: OpenAIResponseMessage
description: >-
- Response for deleting a file in OpenAI Files API.
- Response:
- type: object
- title: Response
- HealthInfo:
+ Corresponds to the various Message types in the Responses API. They are all
+ under one type because the Responses API gives them all the same "type" value,
+ and there is no way to tell them apart in certain scenarios.
+ OpenAIResponseOutputMessageContent:
type: object
properties:
- status:
+ text:
type: string
- enum:
- - OK
- - Error
- - Not Implemented
- description: Current health status of the service
+ type:
+ type: string
+ const: output_text
+ default: output_text
+ annotations:
+ type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseAnnotations'
additionalProperties: false
required:
- - status
- title: HealthInfo
- description: >-
- Health status information for the service.
- RouteInfo:
+ - text
+ - type
+ - annotations
+ title: >-
+ OpenAIResponseOutputMessageContentOutputText
+ "OpenAIResponseOutputMessageFileSearchToolCall":
type: object
properties:
- route:
- type: string
- description: The API endpoint path
- method:
+ id:
type: string
- description: HTTP method for the route
- provider_types:
+ description: Unique identifier for this tool call
+ queries:
type: array
items:
type: string
+ description: List of search queries executed
+ status:
+ type: string
description: >-
- List of provider types that implement this route
- additionalProperties: false
- required:
- - route
- - method
- - provider_types
- title: RouteInfo
- description: >-
- Information about an API route including its path, method, and implementing
- providers.
- ListRoutesResponse:
- type: object
- properties:
- data:
+ Current status of the file search operation
+ type:
+ type: string
+ const: file_search_call
+ default: file_search_call
+ description: >-
+ Tool call type identifier, always "file_search_call"
+ results:
type: array
items:
- $ref: '#/components/schemas/RouteInfo'
+ type: object
+ properties:
+ attributes:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: >-
+ (Optional) Key-value attributes associated with the file
+ file_id:
+ type: string
+ description: >-
+ Unique identifier of the file containing the result
+ filename:
+ type: string
+ description: Name of the file containing the result
+ score:
+ type: number
+ description: >-
+ Relevance score for this search result (between 0 and 1)
+ text:
+ type: string
+ description: Text content of the search result
+ additionalProperties: false
+ required:
+ - attributes
+ - file_id
+ - filename
+ - score
+ - text
+ title: >-
+ OpenAIResponseOutputMessageFileSearchToolCallResults
+ description: >-
+ Search results returned by the file search operation.
description: >-
- List of available route information objects
+ (Optional) Search results returned by the file search operation
additionalProperties: false
required:
- - data
- title: ListRoutesResponse
+ - id
+ - queries
+ - status
+ - type
+ title: >-
+ OpenAIResponseOutputMessageFileSearchToolCall
description: >-
- Response containing a list of all available API routes.
- Model:
+ File search tool call output message for OpenAI responses.
+ "OpenAIResponseOutputMessageFunctionToolCall":
type: object
properties:
- identifier:
+ call_id:
type: string
- description: >-
- Unique identifier for this resource in llama stack
- provider_resource_id:
+ description: Unique identifier for the function call
+ name:
type: string
- description: >-
- Unique identifier for this resource in the provider
- provider_id:
+ description: Name of the function being called
+ arguments:
type: string
description: >-
- ID of the provider that owns this resource
+ JSON string containing the function arguments
type:
type: string
- enum:
- - model
- - shield
- - vector_db
- - dataset
- - scoring_function
- - benchmark
- - tool
- - tool_group
- - prompt
- const: model
- default: model
+ const: function_call
+ default: function_call
description: >-
- The resource type, always 'model' for model resources
- metadata:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: Any additional metadata for this model
- model_type:
- $ref: '#/components/schemas/ModelType'
- default: llm
+ Tool call type identifier, always "function_call"
+ id:
+ type: string
description: >-
- The type of model (LLM or embedding model)
+ (Optional) Additional identifier for the tool call
+ status:
+ type: string
+ description: >-
+ (Optional) Current status of the function call execution
additionalProperties: false
required:
- - identifier
- - provider_id
+ - call_id
+ - name
+ - arguments
- type
- - metadata
- - model_type
- title: Model
+ title: >-
+ OpenAIResponseOutputMessageFunctionToolCall
description: >-
- A model resource representing an AI model registered in Llama Stack.
- ModelType:
- type: string
- enum:
- - llm
- - embedding
- title: ModelType
- description: >-
- Enumeration of supported model types in Llama Stack.
- ListModelsResponse:
- type: object
- properties:
- data:
- type: array
- items:
- $ref: '#/components/schemas/Model'
- additionalProperties: false
- required:
- - data
- title: ListModelsResponse
- RegisterModelRequest:
+ Function tool call output message for OpenAI responses.
+ OpenAIResponseOutputMessageMCPCall:
type: object
properties:
- model_id:
+ id:
type: string
- description: The identifier of the model to register.
- provider_model_id:
+ description: Unique identifier for this MCP call
+ type:
type: string
+ const: mcp_call
+ default: mcp_call
description: >-
- The identifier of the model in the provider.
- provider_id:
+ Tool call type identifier, always "mcp_call"
+ arguments:
type: string
- description: The identifier of the provider.
- metadata:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: Any additional metadata for this model.
- model_type:
- $ref: '#/components/schemas/ModelType'
- description: The type of model to register.
+ description: >-
+ JSON string containing the MCP call arguments
+ name:
+ type: string
+ description: Name of the MCP method being called
+ server_label:
+ type: string
+ description: >-
+ Label identifying the MCP server handling the call
+ error:
+ type: string
+ description: >-
+ (Optional) Error message if the MCP call failed
+ output:
+ type: string
+ description: >-
+ (Optional) Output result from the successful MCP call
additionalProperties: false
required:
- - model_id
- title: RegisterModelRequest
- RunModerationRequest:
+ - id
+ - type
+ - arguments
+ - name
+ - server_label
+ title: OpenAIResponseOutputMessageMCPCall
+ description: >-
+ Model Context Protocol (MCP) call output message for OpenAI responses.
+ OpenAIResponseOutputMessageMCPListTools:
type: object
properties:
- input:
- oneOf:
- - type: string
- - type: array
- items:
- type: string
+ id:
+ type: string
description: >-
- Input (or inputs) to classify. Can be a single string, an array of strings,
- or an array of multi-modal input objects similar to other models.
- model:
+ Unique identifier for this MCP list tools operation
+ type:
type: string
+ const: mcp_list_tools
+ default: mcp_list_tools
description: >-
- The content moderation model you would like to use.
+ Tool call type identifier, always "mcp_list_tools"
+ server_label:
+ type: string
+ description: >-
+ Label identifying the MCP server providing the tools
+ tools:
+ type: array
+ items:
+ type: object
+ properties:
+ input_schema:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: >-
+ JSON schema defining the tool's input parameters
+ name:
+ type: string
+ description: Name of the tool
+ description:
+ type: string
+ description: >-
+ (Optional) Description of what the tool does
+ additionalProperties: false
+ required:
+ - input_schema
+ - name
+ title: MCPListToolsTool
+ description: >-
+ Tool definition returned by MCP list tools operation.
+ description: >-
+ List of available tools provided by the MCP server
additionalProperties: false
required:
- - input
- - model
- title: RunModerationRequest
- ModerationObject:
+ - id
+ - type
+ - server_label
+ - tools
+ title: OpenAIResponseOutputMessageMCPListTools
+ description: >-
+ MCP list tools output message containing available tools from an MCP server.
+ "OpenAIResponseOutputMessageWebSearchToolCall":
type: object
properties:
id:
+ type: string
+ description: Unique identifier for this tool call
+ status:
type: string
description: >-
- The unique identifier for the moderation request.
- model:
+ Current status of the web search operation
+ type:
type: string
+ const: web_search_call
+ default: web_search_call
description: >-
- The model used to generate the moderation results.
- results:
- type: array
- items:
- $ref: '#/components/schemas/ModerationObjectResults'
- description: A list of moderation objects
+ Tool call type identifier, always "web_search_call"
additionalProperties: false
required:
- id
- - model
- - results
- title: ModerationObject
- description: A moderation object.
- ModerationObjectResults:
+ - status
+ - type
+ title: >-
+ OpenAIResponseOutputMessageWebSearchToolCall
+ description: >-
+ Web search tool call output message for OpenAI responses.
+ CreateConversationRequest:
type: object
properties:
- flagged:
- type: boolean
- description: >-
- Whether any of the below categories are flagged.
- categories:
- type: object
- additionalProperties:
- type: boolean
+ items:
+ type: array
+ items:
+ $ref: '#/components/schemas/ConversationItem'
description: >-
- A list of the categories, and whether they are flagged or not.
- category_applied_input_types:
+ Initial items to include in the conversation context.
+ metadata:
type: object
additionalProperties:
- type: array
- items:
- type: string
+ type: string
description: >-
- A list of the categories along with the input type(s) that the score applies
- to.
- category_scores:
+ Set of key-value pairs that can be attached to an object.
+ additionalProperties: false
+ title: CreateConversationRequest
+ Conversation:
+ type: object
+ properties:
+ id:
+ type: string
+ object:
+ type: string
+ const: conversation
+ default: conversation
+ created_at:
+ type: integer
+ metadata:
type: object
additionalProperties:
- type: number
- description: >-
- A list of the categories along with their scores as predicted by model.
- user_message:
- type: string
+ type: string
+ items:
+ type: array
+ items:
+ type: object
+ title: dict
+ description: >-
+          List of items included in the conversation context. Each entry
+          is an arbitrary object; see the ConversationItem schema for the
+          structured message and tool-call variants used elsewhere in
+          this specification. The list may be empty for a conversation
+          created without any initial items.
+ additionalProperties: false
+ required:
+ - id
+ - object
+ - created_at
+ title: Conversation
+ description: OpenAI-compatible conversation object.
+ UpdateConversationRequest:
+ type: object
+ properties:
metadata:
type: object
additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
+ type: string
+ description: >-
+ Set of key-value pairs that can be attached to an object.
additionalProperties: false
required:
- - flagged
- metadata
- title: ModerationObjectResults
- description: A moderation object.
- Prompt:
+ title: UpdateConversationRequest
+ ConversationDeletedResource:
type: object
properties:
- prompt:
+ id:
type: string
- description: >-
- The system prompt text with variable placeholders. Variables are only
- supported when using the Responses API.
- version:
- type: integer
- description: >-
- Version (integer starting at 1, incremented on save)
- prompt_id:
+ object:
type: string
- description: >-
- Unique identifier formatted as 'pmpt_<48-digit-hash>'
- variables:
- type: array
- items:
- type: string
- description: >-
- List of prompt variable names that can be used in the prompt template
- is_default:
+ default: conversation.deleted
+ deleted:
type: boolean
- default: false
- description: >-
- Boolean indicating whether this version is the default version for this
- prompt
+ default: true
additionalProperties: false
required:
- - version
- - prompt_id
- - variables
- - is_default
- title: Prompt
- description: >-
- A prompt resource representing a stored OpenAI Compatible prompt template
- in Llama Stack.
- ListPromptsResponse:
+ - id
+ - object
+ - deleted
+ title: ConversationDeletedResource
+ description: Response for deleted conversation.
+ ConversationItemList:
type: object
properties:
+ object:
+ type: string
+ default: list
data:
type: array
items:
- $ref: '#/components/schemas/Prompt'
+ $ref: '#/components/schemas/ConversationItem'
+ first_id:
+ type: string
+ last_id:
+ type: string
+ has_more:
+ type: boolean
+ default: false
additionalProperties: false
required:
+ - object
- data
- title: ListPromptsResponse
- description: Response model to list prompts.
- CreatePromptRequest:
+ - has_more
+ title: ConversationItemList
+ description: >-
+ List of conversation items with pagination.
+ AddItemsRequest:
type: object
properties:
- prompt:
- type: string
- description: >-
- The prompt text content with variable placeholders.
- variables:
+ items:
type: array
items:
- type: string
+ $ref: '#/components/schemas/ConversationItem'
description: >-
- List of variable names that can be used in the prompt template.
+ Items to include in the conversation context.
additionalProperties: false
required:
- - prompt
- title: CreatePromptRequest
- UpdatePromptRequest:
+ - items
+ title: AddItemsRequest
+ ConversationItemDeletedResource:
type: object
properties:
- prompt:
+ id:
type: string
- description: The updated prompt text content.
- version:
- type: integer
- description: >-
- The current version of the prompt being updated.
- variables:
- type: array
- items:
- type: string
- description: >-
- Updated list of variable names that can be used in the prompt template.
- set_as_default:
+ object:
+ type: string
+ default: conversation.item.deleted
+ deleted:
type: boolean
- description: >-
- Set the new version as the default (default=True).
+ default: true
additionalProperties: false
required:
- - prompt
- - version
- - set_as_default
- title: UpdatePromptRequest
- SetDefaultVersionRequest:
+ - id
+ - object
+ - deleted
+ title: ConversationItemDeletedResource
+ description: Response for deleted conversation item.
+ OpenaiEmbeddingsRequest:
type: object
properties:
- version:
+ model:
+ type: string
+ description: >-
+ The identifier of the model to use. The model must be an embedding model
+ registered with Llama Stack and available via the /models endpoint.
+ input:
+ oneOf:
+ - type: string
+ - type: array
+ items:
+ type: string
+ description: >-
+ Input text to embed, encoded as a string or array of strings. To embed
+ multiple inputs in a single request, pass an array of strings.
+ encoding_format:
+ type: string
+ description: >-
+ (Optional) The format to return the embeddings in. Can be either "float"
+ or "base64". Defaults to "float".
+ dimensions:
type: integer
- description: The version to set as default.
+ description: >-
+ (Optional) The number of dimensions the resulting output embeddings should
+ have. Only supported in text-embedding-3 and later models.
+ user:
+ type: string
+ description: >-
+ (Optional) A unique identifier representing your end-user, which can help
+ OpenAI to monitor and detect abuse.
additionalProperties: false
required:
- - version
- title: SetDefaultVersionRequest
- ProviderInfo:
+ - model
+ - input
+ title: OpenaiEmbeddingsRequest
+ OpenAIEmbeddingData:
type: object
properties:
- api:
- type: string
- description: The API name this provider implements
- provider_id:
- type: string
- description: Unique identifier for the provider
- provider_type:
+ object:
type: string
- description: The type of provider implementation
- config:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
+ const: embedding
+ default: embedding
description: >-
- Configuration parameters for the provider
- health:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: Current health status of the provider
+ The object type, which will be "embedding"
+ embedding:
+ oneOf:
+ - type: array
+ items:
+ type: number
+ - type: string
+ description: >-
+ The embedding vector as a list of floats (when encoding_format="float")
+ or as a base64-encoded string (when encoding_format="base64")
+ index:
+ type: integer
+ description: >-
+ The index of the embedding in the input list
additionalProperties: false
required:
- - api
- - provider_id
- - provider_type
- - config
- - health
- title: ProviderInfo
+ - object
+ - embedding
+ - index
+ title: OpenAIEmbeddingData
description: >-
- Information about a registered provider including its configuration and health
- status.
- ListProvidersResponse:
+ A single embedding data object from an OpenAI-compatible embeddings response.
+ OpenAIEmbeddingUsage:
+ type: object
+ properties:
+ prompt_tokens:
+ type: integer
+ description: The number of tokens in the input
+ total_tokens:
+ type: integer
+ description: The total number of tokens used
+ additionalProperties: false
+ required:
+ - prompt_tokens
+ - total_tokens
+ title: OpenAIEmbeddingUsage
+ description: >-
+ Usage information for an OpenAI-compatible embeddings response.
+ OpenAIEmbeddingsResponse:
type: object
properties:
+ object:
+ type: string
+ const: list
+ default: list
+ description: The object type, which will be "list"
data:
type: array
items:
- $ref: '#/components/schemas/ProviderInfo'
- description: List of provider information objects
+ $ref: '#/components/schemas/OpenAIEmbeddingData'
+ description: List of embedding data objects
+ model:
+ type: string
+ description: >-
+ The model that was used to generate the embeddings
+ usage:
+ $ref: '#/components/schemas/OpenAIEmbeddingUsage'
+ description: Usage information
additionalProperties: false
required:
+ - object
- data
- title: ListProvidersResponse
+ - model
+ - usage
+ title: OpenAIEmbeddingsResponse
description: >-
- Response containing a list of all available providers.
- ListOpenAIResponseObject:
+ Response from an OpenAI-compatible embeddings request.
+ OpenAIFilePurpose:
+ type: string
+ enum:
+ - assistants
+ - batch
+ title: OpenAIFilePurpose
+ description: >-
+ Valid purpose values for OpenAI Files API.
+ ListOpenAIFileResponse:
type: object
properties:
data:
type: array
items:
- $ref: '#/components/schemas/OpenAIResponseObjectWithInput'
- description: >-
- List of response objects with their input context
+ $ref: '#/components/schemas/OpenAIFileObject'
+ description: List of file objects
has_more:
type: boolean
description: >-
- Whether there are more results available beyond this page
+ Whether there are more files available beyond this page
first_id:
type: string
description: >-
- Identifier of the first item in this page
+ ID of the first file in the list for pagination
last_id:
type: string
- description: Identifier of the last item in this page
+ description: >-
+ ID of the last file in the list for pagination
object:
type: string
const: list
default: list
- description: Object type identifier, always "list"
+ description: The object type, which is always "list"
additionalProperties: false
required:
- data
@@ -5889,678 +6391,744 @@ components:
- first_id
- last_id
- object
- title: ListOpenAIResponseObject
+ title: ListOpenAIFileResponse
description: >-
- Paginated list of OpenAI response objects with navigation metadata.
- OpenAIResponseAnnotationCitation:
+ Response for listing files in OpenAI Files API.
+ OpenAIFileObject:
type: object
properties:
- type:
+ object:
+ type: string
+ const: file
+ default: file
+ description: The object type, which is always "file"
+ id:
type: string
- const: url_citation
- default: url_citation
description: >-
- Annotation type identifier, always "url_citation"
- end_index:
+ The file identifier, which can be referenced in the API endpoints
+ bytes:
+ type: integer
+ description: The size of the file, in bytes
+ created_at:
type: integer
description: >-
- End position of the citation span in the content
- start_index:
+ The Unix timestamp (in seconds) for when the file was created
+ expires_at:
type: integer
description: >-
- Start position of the citation span in the content
- title:
+ The Unix timestamp (in seconds) for when the file expires
+ filename:
type: string
- description: Title of the referenced web resource
- url:
+ description: The name of the file
+ purpose:
type: string
- description: URL of the referenced web resource
+ enum:
+ - assistants
+ - batch
+ description: The intended purpose of the file
additionalProperties: false
required:
- - type
- - end_index
- - start_index
- - title
- - url
- title: OpenAIResponseAnnotationCitation
+ - object
+ - id
+ - bytes
+ - created_at
+ - expires_at
+ - filename
+ - purpose
+ title: OpenAIFileObject
description: >-
- URL citation annotation for referencing external web resources.
- "OpenAIResponseAnnotationContainerFileCitation":
+ OpenAI File object as defined in the OpenAI Files API.
+ ExpiresAfter:
type: object
properties:
- type:
- type: string
- const: container_file_citation
- default: container_file_citation
- container_id:
- type: string
- end_index:
- type: integer
- file_id:
- type: string
- filename:
+ anchor:
type: string
- start_index:
+ const: created_at
+ seconds:
type: integer
additionalProperties: false
required:
- - type
- - container_id
- - end_index
- - file_id
- - filename
- - start_index
- title: >-
- OpenAIResponseAnnotationContainerFileCitation
- OpenAIResponseAnnotationFileCitation:
+ - anchor
+ - seconds
+ title: ExpiresAfter
+ description: >-
+ Control expiration of uploaded files.
+
+ Params:
+ - anchor, must be "created_at"
+ - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
+ OpenAIFileDeleteResponse:
type: object
properties:
- type:
- type: string
- const: file_citation
- default: file_citation
- description: >-
- Annotation type identifier, always "file_citation"
- file_id:
+ id:
type: string
- description: Unique identifier of the referenced file
- filename:
+ description: The file identifier that was deleted
+ object:
type: string
- description: Name of the referenced file
- index:
- type: integer
+ const: file
+ default: file
+ description: The object type, which is always "file"
+ deleted:
+ type: boolean
description: >-
- Position index of the citation within the content
+ Whether the file was successfully deleted
additionalProperties: false
required:
- - type
- - file_id
- - filename
- - index
- title: OpenAIResponseAnnotationFileCitation
+ - id
+ - object
+ - deleted
+ title: OpenAIFileDeleteResponse
description: >-
- File citation annotation for referencing specific files in response content.
- OpenAIResponseAnnotationFilePath:
+ Response for deleting a file in OpenAI Files API.
+ Response:
+ type: object
+ title: Response
+ HealthInfo:
type: object
properties:
- type:
- type: string
- const: file_path
- default: file_path
- file_id:
+ status:
type: string
- index:
- type: integer
+ enum:
+ - OK
+ - Error
+ - Not Implemented
+ description: Current health status of the service
additionalProperties: false
required:
- - type
- - file_id
- - index
- title: OpenAIResponseAnnotationFilePath
- OpenAIResponseAnnotations:
- oneOf:
- - $ref: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
- - $ref: '#/components/schemas/OpenAIResponseAnnotationCitation'
- - $ref: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
- - $ref: '#/components/schemas/OpenAIResponseAnnotationFilePath'
- discriminator:
- propertyName: type
- mapping:
- file_citation: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
- url_citation: '#/components/schemas/OpenAIResponseAnnotationCitation'
- container_file_citation: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
- file_path: '#/components/schemas/OpenAIResponseAnnotationFilePath'
- OpenAIResponseError:
+ - status
+ title: HealthInfo
+ description: >-
+ Health status information for the service.
+ RouteInfo:
type: object
properties:
- code:
+ route:
type: string
- description: >-
- Error code identifying the type of failure
- message:
+ description: The API endpoint path
+ method:
type: string
+ description: HTTP method for the route
+ provider_types:
+ type: array
+ items:
+ type: string
description: >-
- Human-readable error message describing the failure
+ List of provider types that implement this route
additionalProperties: false
required:
- - code
- - message
- title: OpenAIResponseError
+ - route
+ - method
+ - provider_types
+ title: RouteInfo
description: >-
- Error details for failed OpenAI response requests.
- OpenAIResponseInput:
- oneOf:
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
- - $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
- - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
- - $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
- - $ref: '#/components/schemas/OpenAIResponseMessage'
- "OpenAIResponseInputFunctionToolCallOutput":
+ Information about an API route including its path, method, and implementing
+ providers.
+ ListRoutesResponse:
type: object
properties:
- call_id:
- type: string
- output:
- type: string
- type:
- type: string
- const: function_call_output
- default: function_call_output
- id:
- type: string
- status:
- type: string
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/RouteInfo'
+ description: >-
+ List of available route information objects
additionalProperties: false
required:
- - call_id
- - output
- - type
- title: >-
- OpenAIResponseInputFunctionToolCallOutput
+ - data
+ title: ListRoutesResponse
description: >-
- This represents the output of a function call that gets passed back to the
- model.
- OpenAIResponseInputMessageContent:
- oneOf:
- - $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
- - $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
- discriminator:
- propertyName: type
- mapping:
- input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
- input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
- OpenAIResponseInputMessageContentImage:
+ Response containing a list of all available API routes.
+ Model:
type: object
properties:
- detail:
- oneOf:
- - type: string
- const: low
- - type: string
- const: high
- - type: string
- const: auto
- default: auto
- description: >-
- Level of detail for image processing, can be "low", "high", or "auto"
- type:
+ identifier:
type: string
- const: input_image
- default: input_image
description: >-
- Content type identifier, always "input_image"
- image_url:
+ Unique identifier for this resource in llama stack
+ provider_resource_id:
type: string
- description: (Optional) URL of the image content
- additionalProperties: false
- required:
- - detail
- - type
- title: OpenAIResponseInputMessageContentImage
- description: >-
- Image content for input messages in OpenAI response format.
- OpenAIResponseInputMessageContentText:
- type: object
- properties:
- text:
+ description: >-
+ Unique identifier for this resource in the provider
+ provider_id:
type: string
- description: The text content of the input message
+ description: >-
+ ID of the provider that owns this resource
type:
type: string
- const: input_text
- default: input_text
+ enum:
+ - model
+ - shield
+ - vector_db
+ - dataset
+ - scoring_function
+ - benchmark
+ - tool
+ - tool_group
+ - prompt
+ const: model
+ default: model
description: >-
- Content type identifier, always "input_text"
+ The resource type, always 'model' for model resources
+ metadata:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: Any additional metadata for this model
+ model_type:
+ $ref: '#/components/schemas/ModelType'
+ default: llm
+ description: >-
+ The type of model (LLM or embedding model)
additionalProperties: false
required:
- - text
+ - identifier
+ - provider_id
- type
- title: OpenAIResponseInputMessageContentText
+ - metadata
+ - model_type
+ title: Model
description: >-
- Text content for input messages in OpenAI response format.
- OpenAIResponseMCPApprovalRequest:
+ A model resource representing an AI model registered in Llama Stack.
+ ModelType:
+ type: string
+ enum:
+ - llm
+ - embedding
+ title: ModelType
+ description: >-
+ Enumeration of supported model types in Llama Stack.
+ ListModelsResponse:
type: object
properties:
- arguments:
- type: string
- id:
- type: string
- name:
- type: string
- server_label:
- type: string
- type:
- type: string
- const: mcp_approval_request
- default: mcp_approval_request
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Model'
additionalProperties: false
required:
- - arguments
- - id
- - name
- - server_label
- - type
- title: OpenAIResponseMCPApprovalRequest
- description: >-
- A request for human approval of a tool invocation.
- OpenAIResponseMCPApprovalResponse:
+ - data
+ title: ListModelsResponse
+ RegisterModelRequest:
type: object
properties:
- approval_request_id:
- type: string
- approve:
- type: boolean
- type:
+ model_id:
type: string
- const: mcp_approval_response
- default: mcp_approval_response
- id:
+ description: The identifier of the model to register.
+ provider_model_id:
type: string
- reason:
+ description: >-
+ The identifier of the model in the provider.
+ provider_id:
type: string
+ description: The identifier of the provider.
+ metadata:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: Any additional metadata for this model.
+ model_type:
+ $ref: '#/components/schemas/ModelType'
+ description: The type of model to register.
additionalProperties: false
required:
- - approval_request_id
- - approve
- - type
- title: OpenAIResponseMCPApprovalResponse
- description: A response to an MCP approval request.
- OpenAIResponseMessage:
+ - model_id
+ title: RegisterModelRequest
+ RunModerationRequest:
type: object
properties:
- content:
+ input:
oneOf:
- type: string
- type: array
items:
- $ref: '#/components/schemas/OpenAIResponseInputMessageContent'
- - type: array
- items:
- $ref: '#/components/schemas/OpenAIResponseOutputMessageContent'
- role:
- oneOf:
- - type: string
- const: system
- - type: string
- const: developer
- - type: string
- const: user
- - type: string
- const: assistant
- type:
- type: string
- const: message
- default: message
- id:
- type: string
- status:
+ type: string
+ description: >-
+ Input (or inputs) to classify. Can be a single string, an array of strings,
+ or an array of multi-modal input objects similar to other models.
+ model:
type: string
+ description: >-
+ The content moderation model you would like to use.
additionalProperties: false
required:
- - content
- - role
- - type
- title: OpenAIResponseMessage
- description: >-
- Corresponds to the various Message types in the Responses API. They are all
- under one type because the Responses API gives them all the same "type" value,
- and there is no way to tell them apart in certain scenarios.
- OpenAIResponseObjectWithInput:
+ - input
+ - model
+ title: RunModerationRequest
+ ModerationObject:
type: object
properties:
- created_at:
- type: integer
- description: >-
- Unix timestamp when the response was created
- error:
- $ref: '#/components/schemas/OpenAIResponseError'
- description: >-
- (Optional) Error details if the response generation failed
id:
type: string
- description: Unique identifier for this response
+ description: >-
+ The unique identifier for the moderation request.
model:
type: string
- description: Model identifier used for generation
- object:
- type: string
- const: response
- default: response
description: >-
- Object type identifier, always "response"
- output:
+ The model used to generate the moderation results.
+ results:
type: array
items:
- $ref: '#/components/schemas/OpenAIResponseOutput'
- description: >-
- List of generated output items (messages, tool calls, etc.)
- parallel_tool_calls:
+ $ref: '#/components/schemas/ModerationObjectResults'
+ description: A list of moderation objects
+ additionalProperties: false
+ required:
+ - id
+ - model
+ - results
+ title: ModerationObject
+ description: A moderation object.
+ ModerationObjectResults:
+ type: object
+ properties:
+ flagged:
type: boolean
- default: false
- description: >-
- Whether tool calls can be executed in parallel
- previous_response_id:
- type: string
- description: >-
- (Optional) ID of the previous response in a conversation
- status:
- type: string
description: >-
- Current status of the response generation
- temperature:
- type: number
+ Whether any of the below categories are flagged.
+ categories:
+ type: object
+ additionalProperties:
+ type: boolean
description: >-
- (Optional) Sampling temperature used for generation
- text:
- $ref: '#/components/schemas/OpenAIResponseText'
+ A list of the categories, and whether they are flagged or not.
+ category_applied_input_types:
+ type: object
+ additionalProperties:
+ type: array
+ items:
+ type: string
description: >-
- Text formatting configuration for the response
- top_p:
- type: number
+ A list of the categories along with the input type(s) that the score applies
+ to.
+ category_scores:
+ type: object
+ additionalProperties:
+ type: number
description: >-
- (Optional) Nucleus sampling parameter used for generation
- truncation:
+        A list of the categories along with their scores as predicted by the model.
+ user_message:
type: string
- description: >-
- (Optional) Truncation strategy applied to the response
- input:
- type: array
- items:
- $ref: '#/components/schemas/OpenAIResponseInput'
- description: >-
- List of input items that led to this response
+ metadata:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
additionalProperties: false
required:
- - created_at
- - id
- - model
- - object
- - output
- - parallel_tool_calls
- - status
- - text
- - input
- title: OpenAIResponseObjectWithInput
- description: >-
- OpenAI response object extended with input context information.
- OpenAIResponseOutput:
- oneOf:
- - $ref: '#/components/schemas/OpenAIResponseMessage'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
- - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
- - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
- discriminator:
- propertyName: type
- mapping:
- message: '#/components/schemas/OpenAIResponseMessage'
- web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
- file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
- function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
- mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
- mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
- mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
- OpenAIResponseOutputMessageContent:
+ - flagged
+ - metadata
+ title: ModerationObjectResults
+  description: A moderation result object.
+ Prompt:
type: object
properties:
- text:
+ prompt:
type: string
- type:
+ description: >-
+ The system prompt text with variable placeholders. Variables are only
+ supported when using the Responses API.
+ version:
+ type: integer
+ description: >-
+ Version (integer starting at 1, incremented on save)
+ prompt_id:
type: string
- const: output_text
- default: output_text
- annotations:
+ description: >-
+ Unique identifier formatted as 'pmpt_<48-digit-hash>'
+ variables:
type: array
items:
- $ref: '#/components/schemas/OpenAIResponseAnnotations'
+ type: string
+ description: >-
+ List of prompt variable names that can be used in the prompt template
+ is_default:
+ type: boolean
+ default: false
+ description: >-
+ Boolean indicating whether this version is the default version for this
+ prompt
additionalProperties: false
required:
- - text
- - type
- - annotations
- title: >-
- OpenAIResponseOutputMessageContentOutputText
- "OpenAIResponseOutputMessageFileSearchToolCall":
+ - version
+ - prompt_id
+ - variables
+ - is_default
+ title: Prompt
+ description: >-
+ A prompt resource representing a stored OpenAI Compatible prompt template
+ in Llama Stack.
+ ListPromptsResponse:
type: object
properties:
- id:
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Prompt'
+ additionalProperties: false
+ required:
+ - data
+ title: ListPromptsResponse
+ description: Response model to list prompts.
+ CreatePromptRequest:
+ type: object
+ properties:
+ prompt:
type: string
- description: Unique identifier for this tool call
- queries:
+ description: >-
+ The prompt text content with variable placeholders.
+ variables:
type: array
items:
type: string
- description: List of search queries executed
- status:
+ description: >-
+ List of variable names that can be used in the prompt template.
+ additionalProperties: false
+ required:
+ - prompt
+ title: CreatePromptRequest
+ UpdatePromptRequest:
+ type: object
+ properties:
+ prompt:
type: string
+ description: The updated prompt text content.
+ version:
+ type: integer
description: >-
- Current status of the file search operation
- type:
+ The current version of the prompt being updated.
+ variables:
+ type: array
+ items:
+ type: string
+ description: >-
+ Updated list of variable names that can be used in the prompt template.
+ set_as_default:
+ type: boolean
+ description: >-
+ Set the new version as the default (default=True).
+ additionalProperties: false
+ required:
+ - prompt
+ - version
+ - set_as_default
+ title: UpdatePromptRequest
+ SetDefaultVersionRequest:
+ type: object
+ properties:
+ version:
+ type: integer
+ description: The version to set as default.
+ additionalProperties: false
+ required:
+ - version
+ title: SetDefaultVersionRequest
+ ProviderInfo:
+ type: object
+ properties:
+ api:
type: string
- const: file_search_call
- default: file_search_call
+ description: The API name this provider implements
+ provider_id:
+ type: string
+ description: Unique identifier for the provider
+ provider_type:
+ type: string
+ description: The type of provider implementation
+ config:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
description: >-
- Tool call type identifier, always "file_search_call"
- results:
+ Configuration parameters for the provider
+ health:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: Current health status of the provider
+ additionalProperties: false
+ required:
+ - api
+ - provider_id
+ - provider_type
+ - config
+ - health
+ title: ProviderInfo
+ description: >-
+ Information about a registered provider including its configuration and health
+ status.
+ ListProvidersResponse:
+ type: object
+ properties:
+ data:
type: array
items:
- type: object
- properties:
- attributes:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: >-
- (Optional) Key-value attributes associated with the file
- file_id:
- type: string
- description: >-
- Unique identifier of the file containing the result
- filename:
- type: string
- description: Name of the file containing the result
- score:
- type: number
- description: >-
- Relevance score for this search result (between 0 and 1)
- text:
- type: string
- description: Text content of the search result
- additionalProperties: false
- required:
- - attributes
- - file_id
- - filename
- - score
- - text
- title: >-
- OpenAIResponseOutputMessageFileSearchToolCallResults
- description: >-
- Search results returned by the file search operation.
+ $ref: '#/components/schemas/ProviderInfo'
+ description: List of provider information objects
+ additionalProperties: false
+ required:
+ - data
+ title: ListProvidersResponse
+ description: >-
+ Response containing a list of all available providers.
+ ListOpenAIResponseObject:
+ type: object
+ properties:
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseObjectWithInput'
+ description: >-
+ List of response objects with their input context
+ has_more:
+ type: boolean
+ description: >-
+ Whether there are more results available beyond this page
+ first_id:
+ type: string
+ description: >-
+ Identifier of the first item in this page
+ last_id:
+ type: string
+ description: Identifier of the last item in this page
+ object:
+ type: string
+ const: list
+ default: list
+ description: Object type identifier, always "list"
+ additionalProperties: false
+ required:
+ - data
+ - has_more
+ - first_id
+ - last_id
+ - object
+ title: ListOpenAIResponseObject
+ description: >-
+ Paginated list of OpenAI response objects with navigation metadata.
+ OpenAIResponseError:
+ type: object
+ properties:
+ code:
+ type: string
description: >-
- (Optional) Search results returned by the file search operation
+ Error code identifying the type of failure
+ message:
+ type: string
+ description: >-
+ Human-readable error message describing the failure
additionalProperties: false
required:
- - id
- - queries
- - status
- - type
- title: >-
- OpenAIResponseOutputMessageFileSearchToolCall
+ - code
+ - message
+ title: OpenAIResponseError
description: >-
- File search tool call output message for OpenAI responses.
- "OpenAIResponseOutputMessageFunctionToolCall":
+ Error details for failed OpenAI response requests.
+ OpenAIResponseInput:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
+ - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
+ - $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
+ - $ref: '#/components/schemas/OpenAIResponseMessage'
+ "OpenAIResponseInputFunctionToolCallOutput":
type: object
properties:
call_id:
type: string
- description: Unique identifier for the function call
- name:
- type: string
- description: Name of the function being called
- arguments:
+ output:
type: string
- description: >-
- JSON string containing the function arguments
type:
type: string
- const: function_call
- default: function_call
- description: >-
- Tool call type identifier, always "function_call"
+ const: function_call_output
+ default: function_call_output
id:
type: string
- description: >-
- (Optional) Additional identifier for the tool call
status:
type: string
- description: >-
- (Optional) Current status of the function call execution
additionalProperties: false
required:
- call_id
- - name
- - arguments
+ - output
- type
title: >-
- OpenAIResponseOutputMessageFunctionToolCall
+ OpenAIResponseInputFunctionToolCallOutput
description: >-
- Function tool call output message for OpenAI responses.
- OpenAIResponseOutputMessageMCPCall:
+ This represents the output of a function call that gets passed back to the
+ model.
+ OpenAIResponseMCPApprovalRequest:
type: object
properties:
- id:
- type: string
- description: Unique identifier for this MCP call
- type:
- type: string
- const: mcp_call
- default: mcp_call
- description: >-
- Tool call type identifier, always "mcp_call"
arguments:
type: string
- description: >-
- JSON string containing the MCP call arguments
+ id:
+ type: string
name:
type: string
- description: Name of the MCP method being called
server_label:
type: string
- description: >-
- Label identifying the MCP server handling the call
- error:
- type: string
- description: >-
- (Optional) Error message if the MCP call failed
- output:
+ type:
type: string
- description: >-
- (Optional) Output result from the successful MCP call
+ const: mcp_approval_request
+ default: mcp_approval_request
additionalProperties: false
required:
- - id
- - type
- arguments
+ - id
- name
- server_label
- title: OpenAIResponseOutputMessageMCPCall
+ - type
+ title: OpenAIResponseMCPApprovalRequest
description: >-
- Model Context Protocol (MCP) call output message for OpenAI responses.
- OpenAIResponseOutputMessageMCPListTools:
+ A request for human approval of a tool invocation.
+ OpenAIResponseMCPApprovalResponse:
type: object
properties:
- id:
+ approval_request_id:
type: string
- description: >-
- Unique identifier for this MCP list tools operation
+ approve:
+ type: boolean
type:
type: string
- const: mcp_list_tools
- default: mcp_list_tools
- description: >-
- Tool call type identifier, always "mcp_list_tools"
- server_label:
+ const: mcp_approval_response
+ default: mcp_approval_response
+ id:
+ type: string
+ reason:
type: string
- description: >-
- Label identifying the MCP server providing the tools
- tools:
- type: array
- items:
- type: object
- properties:
- input_schema:
- type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
- description: >-
- JSON schema defining the tool's input parameters
- name:
- type: string
- description: Name of the tool
- description:
- type: string
- description: >-
- (Optional) Description of what the tool does
- additionalProperties: false
- required:
- - input_schema
- - name
- title: MCPListToolsTool
- description: >-
- Tool definition returned by MCP list tools operation.
- description: >-
- List of available tools provided by the MCP server
additionalProperties: false
required:
- - id
+ - approval_request_id
+ - approve
- type
- - server_label
- - tools
- title: OpenAIResponseOutputMessageMCPListTools
- description: >-
- MCP list tools output message containing available tools from an MCP server.
- "OpenAIResponseOutputMessageWebSearchToolCall":
+ title: OpenAIResponseMCPApprovalResponse
+ description: A response to an MCP approval request.
+ OpenAIResponseObjectWithInput:
type: object
properties:
+ created_at:
+ type: integer
+ description: >-
+ Unix timestamp when the response was created
+ error:
+ $ref: '#/components/schemas/OpenAIResponseError'
+ description: >-
+ (Optional) Error details if the response generation failed
id:
type: string
- description: Unique identifier for this tool call
+ description: Unique identifier for this response
+ model:
+ type: string
+ description: Model identifier used for generation
+ object:
+ type: string
+ const: response
+ default: response
+ description: >-
+ Object type identifier, always "response"
+ output:
+ type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseOutput'
+ description: >-
+ List of generated output items (messages, tool calls, etc.)
+ parallel_tool_calls:
+ type: boolean
+ default: false
+ description: >-
+ Whether tool calls can be executed in parallel
+ previous_response_id:
+ type: string
+ description: >-
+ (Optional) ID of the previous response in a conversation
status:
type: string
description: >-
- Current status of the web search operation
- type:
+ Current status of the response generation
+ temperature:
+ type: number
+ description: >-
+ (Optional) Sampling temperature used for generation
+ text:
+ $ref: '#/components/schemas/OpenAIResponseText'
+ description: >-
+ Text formatting configuration for the response
+ top_p:
+ type: number
+ description: >-
+ (Optional) Nucleus sampling parameter used for generation
+ truncation:
type: string
- const: web_search_call
- default: web_search_call
description: >-
- Tool call type identifier, always "web_search_call"
+ (Optional) Truncation strategy applied to the response
+ input:
+ type: array
+ items:
+ $ref: '#/components/schemas/OpenAIResponseInput'
+ description: >-
+ List of input items that led to this response
additionalProperties: false
required:
+ - created_at
- id
+ - model
+ - object
+ - output
+ - parallel_tool_calls
- status
- - type
- title: >-
- OpenAIResponseOutputMessageWebSearchToolCall
+ - text
+ - input
+ title: OpenAIResponseObjectWithInput
description: >-
- Web search tool call output message for OpenAI responses.
+ OpenAI response object extended with input context information.
+ OpenAIResponseOutput:
+ oneOf:
+ - $ref: '#/components/schemas/OpenAIResponseMessage'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
+ - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
+ - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
+ discriminator:
+ propertyName: type
+ mapping:
+ message: '#/components/schemas/OpenAIResponseMessage'
+ web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+ file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
+ function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
+ mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
+ mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
+ mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
OpenAIResponseText:
type: object
properties:
@@ -13329,6 +13897,10 @@ tags:
x-displayName: Agents
- name: Benchmarks
description: ''
+ - name: Conversations
+ description: ''
+ x-displayName: >-
+ Protocol for conversation management operations.
- name: DatasetIO
description: ''
- name: Datasets
@@ -13390,6 +13962,7 @@ x-tagGroups:
tags:
- Agents
- Benchmarks
+ - Conversations
- DatasetIO
- Datasets
- Eval
diff --git a/llama_stack/apis/conversations/__init__.py b/llama_stack/apis/conversations/__init__.py
new file mode 100644
index 0000000000..2d214d27a3
--- /dev/null
+++ b/llama_stack/apis/conversations/__init__.py
@@ -0,0 +1,31 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .conversations import (
+ Conversation,
+ ConversationCreateRequest,
+ ConversationDeletedResource,
+ ConversationItem,
+ ConversationItemCreateRequest,
+ ConversationItemDeletedResource,
+ ConversationItemList,
+ Conversations,
+ ConversationUpdateRequest,
+ Metadata,
+)
+
+__all__ = [
+ "Conversation",
+ "ConversationCreateRequest",
+ "ConversationDeletedResource",
+ "ConversationItem",
+ "ConversationItemCreateRequest",
+ "ConversationItemDeletedResource",
+ "ConversationItemList",
+ "Conversations",
+ "ConversationUpdateRequest",
+ "Metadata",
+]
diff --git a/llama_stack/apis/conversations/conversations.py b/llama_stack/apis/conversations/conversations.py
new file mode 100644
index 0000000000..58ae9c35aa
--- /dev/null
+++ b/llama_stack/apis/conversations/conversations.py
@@ -0,0 +1,260 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Annotated, Literal, Protocol, runtime_checkable
+
+from openai import NOT_GIVEN
+from openai._types import NotGiven
+from openai.types.responses.response_includable import ResponseIncludable
+from pydantic import BaseModel, Field
+
+from llama_stack.apis.agents.openai_responses import (
+ OpenAIResponseMessage,
+ OpenAIResponseOutputMessageFileSearchToolCall,
+ OpenAIResponseOutputMessageFunctionToolCall,
+ OpenAIResponseOutputMessageMCPCall,
+ OpenAIResponseOutputMessageMCPListTools,
+ OpenAIResponseOutputMessageWebSearchToolCall,
+)
+from llama_stack.apis.version import LLAMA_STACK_API_V1
+from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
+from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
+
+Metadata = dict[str, str]
+
+
+@json_schema_type
+class Conversation(BaseModel):
+ """OpenAI-compatible conversation object."""
+
+ id: str = Field(..., description="The unique ID of the conversation.")
+ object: Literal["conversation"] = Field(
+ default="conversation", description="The object type, which is always conversation."
+ )
+ created_at: int = Field(
+ ..., description="The time at which the conversation was created, measured in seconds since the Unix epoch."
+ )
+ metadata: Metadata | None = Field(
+ default=None,
+ description="Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.",
+ )
+ items: list[dict] | None = Field(
+ default=None,
+ description="Initial items to include in the conversation context. You may add up to 20 items at a time.",
+ )
+
+
+@json_schema_type
+class ConversationMessage(BaseModel):
+ """OpenAI-compatible message item for conversations."""
+
+ id: str = Field(..., description="unique identifier for this message")
+ content: list[dict] = Field(..., description="message content")
+ role: str = Field(..., description="message role")
+ status: str = Field(..., description="message status")
+ type: Literal["message"] = "message"
+ object: Literal["message"] = "message"
+
+
+ConversationItem = Annotated[
+ OpenAIResponseMessage
+ | OpenAIResponseOutputMessageFunctionToolCall
+ | OpenAIResponseOutputMessageFileSearchToolCall
+ | OpenAIResponseOutputMessageWebSearchToolCall
+ | OpenAIResponseOutputMessageMCPCall
+ | OpenAIResponseOutputMessageMCPListTools,
+ Field(discriminator="type"),
+]
+register_schema(ConversationItem, name="ConversationItem")
+
+# Using OpenAI types directly caused issues but some notes for reference:
+# Note that ConversationItem is an Annotated Union of the types below:
+# from openai.types.responses import *
+# from openai.types.responses.response_item import *
+# from openai.types.conversations import ConversationItem
+# f = [
+# ResponseFunctionToolCallItem,
+# ResponseFunctionToolCallOutputItem,
+# ResponseFileSearchToolCall,
+# ResponseFunctionWebSearch,
+# ImageGenerationCall,
+# ResponseComputerToolCall,
+# ResponseComputerToolCallOutputItem,
+# ResponseReasoningItem,
+# ResponseCodeInterpreterToolCall,
+# LocalShellCall,
+# LocalShellCallOutput,
+# McpListTools,
+# McpApprovalRequest,
+# McpApprovalResponse,
+# McpCall,
+# ResponseCustomToolCall,
+# ResponseCustomToolCallOutput
+# ]
+
+
+@json_schema_type
+class ConversationCreateRequest(BaseModel):
+ """Request body for creating a conversation."""
+
+ items: list[ConversationItem] | None = Field(
+ default=[],
+ description="Initial items to include in the conversation context. You may add up to 20 items at a time.",
+ max_length=20,
+ )
+ metadata: Metadata | None = Field(
+ default={},
+ description="Set of 16 key-value pairs that can be attached to an object. Useful for storing additional information",
+ max_length=16,
+ )
+
+
+@json_schema_type
+class ConversationUpdateRequest(BaseModel):
+ """Request body for updating a conversation."""
+
+ metadata: Metadata = Field(
+ ...,
+ description="Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.",
+ )
+
+
+@json_schema_type
+class ConversationDeletedResource(BaseModel):
+ """Response for deleted conversation."""
+
+ id: str = Field(..., description="The deleted conversation identifier")
+ object: str = Field(default="conversation.deleted", description="Object type")
+ deleted: bool = Field(default=True, description="Whether the object was deleted")
+
+
+@json_schema_type
+class ConversationItemCreateRequest(BaseModel):
+ """Request body for creating conversation items."""
+
+ items: list[ConversationItem] = Field(
+ ...,
+ description="Items to include in the conversation context. You may add up to 20 items at a time.",
+ max_length=20,
+ )
+
+
+@json_schema_type
+class ConversationItemList(BaseModel):
+ """List of conversation items with pagination."""
+
+ object: str = Field(default="list", description="Object type")
+ data: list[ConversationItem] = Field(..., description="List of conversation items")
+ first_id: str | None = Field(default=None, description="The ID of the first item in the list")
+ last_id: str | None = Field(default=None, description="The ID of the last item in the list")
+ has_more: bool = Field(default=False, description="Whether there are more items available")
+
+
+@json_schema_type
+class ConversationItemDeletedResource(BaseModel):
+ """Response for deleted conversation item."""
+
+ id: str = Field(..., description="The deleted item identifier")
+ object: str = Field(default="conversation.item.deleted", description="Object type")
+ deleted: bool = Field(default=True, description="Whether the object was deleted")
+
+
+@runtime_checkable
+@trace_protocol
+class Conversations(Protocol):
+ """Protocol for conversation management operations."""
+
+ @webmethod(route="/conversations", method="POST", level=LLAMA_STACK_API_V1)
+ async def create_conversation(
+ self, items: list[ConversationItem] | None = None, metadata: Metadata | None = None
+ ) -> Conversation:
+ """Create a conversation.
+
+ :param items: Initial items to include in the conversation context.
+ :param metadata: Set of key-value pairs that can be attached to an object.
+ :returns: The created conversation object.
+ """
+ ...
+
+ @webmethod(route="/conversations/{conversation_id}", method="GET", level=LLAMA_STACK_API_V1)
+ async def get_conversation(self, conversation_id: str) -> Conversation:
+ """Get a conversation with the given ID.
+
+ :param conversation_id: The conversation identifier.
+ :returns: The conversation object.
+ """
+ ...
+
+ @webmethod(route="/conversations/{conversation_id}", method="POST", level=LLAMA_STACK_API_V1)
+ async def update_conversation(self, conversation_id: str, metadata: Metadata) -> Conversation:
+ """Update a conversation's metadata with the given ID.
+
+ :param conversation_id: The conversation identifier.
+ :param metadata: Set of key-value pairs that can be attached to an object.
+ :returns: The updated conversation object.
+ """
+ ...
+
+ @webmethod(route="/conversations/{conversation_id}", method="DELETE", level=LLAMA_STACK_API_V1)
+ async def openai_delete_conversation(self, conversation_id: str) -> ConversationDeletedResource:
+ """Delete a conversation with the given ID.
+
+ :param conversation_id: The conversation identifier.
+ :returns: The deleted conversation resource.
+ """
+ ...
+
+ @webmethod(route="/conversations/{conversation_id}/items", method="POST", level=LLAMA_STACK_API_V1)
+ async def add_items(self, conversation_id: str, items: list[ConversationItem]) -> ConversationItemList:
+ """Create items in the conversation.
+
+ :param conversation_id: The conversation identifier.
+ :param items: Items to include in the conversation context.
+ :returns: List of created items.
+ """
+ ...
+
+ @webmethod(route="/conversations/{conversation_id}/items/{item_id}", method="GET", level=LLAMA_STACK_API_V1)
+ async def retrieve(self, conversation_id: str, item_id: str) -> ConversationItem:
+ """Retrieve a conversation item.
+
+ :param conversation_id: The conversation identifier.
+ :param item_id: The item identifier.
+ :returns: The conversation item.
+ """
+ ...
+
+ @webmethod(route="/conversations/{conversation_id}/items", method="GET", level=LLAMA_STACK_API_V1)
+ async def list(
+ self,
+ conversation_id: str,
+ after: str | NotGiven = NOT_GIVEN,
+ include: list[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ ) -> ConversationItemList:
+ """List items in the conversation.
+
+ :param conversation_id: The conversation identifier.
+ :param after: An item ID to list items after, used in pagination.
+ :param include: Specify additional output data to include in the response.
+ :param limit: A limit on the number of objects to be returned (1-100, default 20).
+ :param order: The order to return items in (asc or desc, default desc).
+ :returns: List of conversation items.
+ """
+ ...
+
+ @webmethod(route="/conversations/{conversation_id}/items/{item_id}", method="DELETE", level=LLAMA_STACK_API_V1)
+ async def openai_delete_conversation_item(
+ self, conversation_id: str, item_id: str
+ ) -> ConversationItemDeletedResource:
+ """Delete a conversation item.
+
+ :param conversation_id: The conversation identifier.
+ :param item_id: The item identifier.
+ :returns: The deleted item resource.
+ """
+ ...
diff --git a/llama_stack/apis/datatypes.py b/llama_stack/apis/datatypes.py
index 8d0f2e26df..e522682c6d 100644
--- a/llama_stack/apis/datatypes.py
+++ b/llama_stack/apis/datatypes.py
@@ -129,6 +129,7 @@ class Api(Enum, metaclass=DynamicApiMeta):
tool_groups = "tool_groups"
files = "files"
prompts = "prompts"
+ conversations = "conversations"
# built-in API
inspect = "inspect"
diff --git a/llama_stack/core/conversations/__init__.py b/llama_stack/core/conversations/__init__.py
new file mode 100644
index 0000000000..756f351d88
--- /dev/null
+++ b/llama_stack/core/conversations/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
diff --git a/llama_stack/core/conversations/conversations.py b/llama_stack/core/conversations/conversations.py
new file mode 100644
index 0000000000..bef138e69e
--- /dev/null
+++ b/llama_stack/core/conversations/conversations.py
@@ -0,0 +1,306 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import os
+import secrets
+import time
+from typing import Any
+
+from openai import NOT_GIVEN
+from pydantic import BaseModel, TypeAdapter
+
+from llama_stack.apis.conversations.conversations import (
+ Conversation,
+ ConversationDeletedResource,
+ ConversationItem,
+ ConversationItemDeletedResource,
+ ConversationItemList,
+ Conversations,
+ Metadata,
+)
+from llama_stack.core.datatypes import AccessRule
+from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
+from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.providers.utils.sqlstore.sqlstore import (
+ SqliteSqlStoreConfig,
+ SqlStoreConfig,
+ sqlstore_impl,
+)
+
+logger = get_logger(name=__name__, category="openai::conversations")
+
+
+class ConversationServiceConfig(BaseModel):
+ """Configuration for the built-in conversation service.
+
+ :param conversations_store: SQL store configuration for conversations (defaults to SQLite)
+ :param policy: Access control rules
+ """
+
+ conversations_store: SqlStoreConfig = SqliteSqlStoreConfig(
+ db_path=(DISTRIBS_BASE_DIR / "conversations.db").as_posix()
+ )
+ policy: list[AccessRule] = []
+
+
+async def get_provider_impl(config: ConversationServiceConfig, deps: dict[Any, Any]):
+ """Get the conversation service implementation."""
+ impl = ConversationServiceImpl(config, deps)
+ await impl.initialize()
+ return impl
+
+
+class ConversationServiceImpl(Conversations):
+ """Built-in conversation service implementation using AuthorizedSqlStore."""
+
+ def __init__(self, config: ConversationServiceConfig, deps: dict[Any, Any]):
+ self.config = config
+ self.deps = deps
+ self.policy = config.policy
+
+ base_sql_store = sqlstore_impl(config.conversations_store)
+ self.sql_store = AuthorizedSqlStore(base_sql_store, self.policy)
+
+ async def initialize(self) -> None:
+ """Initialize the store and create tables."""
+ if isinstance(self.config.conversations_store, SqliteSqlStoreConfig):
+ os.makedirs(os.path.dirname(self.config.conversations_store.db_path), exist_ok=True)
+
+ await self.sql_store.create_table(
+ "openai_conversations",
+ {
+ "id": ColumnDefinition(type=ColumnType.STRING, primary_key=True),
+ "created_at": ColumnType.INTEGER,
+ "items": ColumnType.JSON,
+ "metadata": ColumnType.JSON,
+ },
+ )
+
+ await self.sql_store.create_table(
+ "conversation_items",
+ {
+ "id": ColumnDefinition(type=ColumnType.STRING, primary_key=True),
+ "conversation_id": ColumnType.STRING,
+ "created_at": ColumnType.INTEGER,
+ "item_data": ColumnType.JSON,
+ },
+ )
+
+ async def create_conversation(
+ self, items: list[ConversationItem] | None = None, metadata: Metadata | None = None
+ ) -> Conversation:
+ """Create a conversation."""
+ random_bytes = secrets.token_bytes(24)
+ conversation_id = f"conv_{random_bytes.hex()}"
+ created_at = int(time.time())
+
+ record_data = {
+ "id": conversation_id,
+ "created_at": created_at,
+ "items": [],
+ "metadata": metadata,
+ }
+
+ await self.sql_store.insert(
+ table="openai_conversations",
+ data=record_data,
+ )
+
+ if items:
+ item_records = []
+ for item in items:
+ item_dict = item.model_dump()
+ item_id = self._get_or_generate_item_id(item, item_dict)
+
+ item_record = {
+ "id": item_id,
+ "conversation_id": conversation_id,
+ "created_at": created_at,
+ "item_data": item_dict,
+ }
+
+ item_records.append(item_record)
+
+ await self.sql_store.insert(table="conversation_items", data=item_records)
+
+ conversation = Conversation(
+ id=conversation_id,
+ created_at=created_at,
+ metadata=metadata,
+ object="conversation",
+ )
+
+ logger.info(f"Created conversation {conversation_id}")
+ return conversation
+
+ async def get_conversation(self, conversation_id: str) -> Conversation:
+ """Get a conversation with the given ID."""
+ record = await self.sql_store.fetch_one(table="openai_conversations", where={"id": conversation_id})
+
+ if record is None:
+ raise ValueError(f"Conversation {conversation_id} not found")
+
+ return Conversation(
+ id=record["id"], created_at=record["created_at"], metadata=record.get("metadata"), object="conversation"
+ )
+
+ async def update_conversation(self, conversation_id: str, metadata: Metadata) -> Conversation:
+ """Update a conversation's metadata with the given ID"""
+ await self.sql_store.update(
+ table="openai_conversations", data={"metadata": metadata}, where={"id": conversation_id}
+ )
+
+ return await self.get_conversation(conversation_id)
+
+ async def openai_delete_conversation(self, conversation_id: str) -> ConversationDeletedResource:
+ """Delete a conversation with the given ID."""
+ await self.sql_store.delete(table="openai_conversations", where={"id": conversation_id})
+
+ logger.info(f"Deleted conversation {conversation_id}")
+ return ConversationDeletedResource(id=conversation_id)
+
+ def _validate_conversation_id(self, conversation_id: str) -> None:
+ """Validate conversation ID format."""
+ if not conversation_id.startswith("conv_"):
+ raise ValueError(
+ f"Invalid 'conversation_id': '{conversation_id}'. Expected an ID that begins with 'conv_'."
+ )
+
+ def _get_or_generate_item_id(self, item: ConversationItem, item_dict: dict) -> str:
+ """Get existing item ID or generate one if missing."""
+ if item.id is None:
+ random_bytes = secrets.token_bytes(24)
+ if item.type == "message":
+ item_id = f"msg_{random_bytes.hex()}"
+ else:
+ item_id = f"item_{random_bytes.hex()}"
+ item_dict["id"] = item_id
+ return item_id
+ return item.id
+
+ async def _get_validated_conversation(self, conversation_id: str) -> Conversation:
+ """Validate conversation ID and return the conversation if it exists."""
+ self._validate_conversation_id(conversation_id)
+ return await self.get_conversation(conversation_id)
+
+ async def add_items(self, conversation_id: str, items: list[ConversationItem]) -> ConversationItemList:
+ """Create (add) items to a conversation."""
+ await self._get_validated_conversation(conversation_id)
+
+ created_items = []
+ created_at = int(time.time())
+
+ for item in items:
+ item_dict = item.model_dump()
+ item_id = self._get_or_generate_item_id(item, item_dict)
+
+ item_record = {
+ "id": item_id,
+ "conversation_id": conversation_id,
+ "created_at": created_at,
+ "item_data": item_dict,
+ }
+
+ # TODO: Add support for upsert in sql_store, this will fail first if ID exists and then update
+ try:
+ await self.sql_store.insert(table="conversation_items", data=item_record)
+ except Exception:
+ # If insert fails due to ID conflict, update existing record
+ await self.sql_store.update(
+ table="conversation_items",
+ data={"created_at": created_at, "item_data": item_dict},
+ where={"id": item_id},
+ )
+
+ created_items.append(item_dict)
+
+ logger.info(f"Created {len(created_items)} items in conversation {conversation_id}")
+
+ # Convert created items (dicts) to proper ConversationItem types
+ adapter: TypeAdapter[ConversationItem] = TypeAdapter(ConversationItem)
+ response_items: list[ConversationItem] = [adapter.validate_python(item_dict) for item_dict in created_items]
+
+ return ConversationItemList(
+ data=response_items,
+ first_id=created_items[0]["id"] if created_items else None,
+ last_id=created_items[-1]["id"] if created_items else None,
+ has_more=False,
+ )
+
+ async def retrieve(self, conversation_id: str, item_id: str) -> ConversationItem:
+ """Retrieve a conversation item."""
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ if not item_id:
+ raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+
+ # Get item from conversation_items table
+ record = await self.sql_store.fetch_one(
+ table="conversation_items", where={"id": item_id, "conversation_id": conversation_id}
+ )
+
+ if record is None:
+ raise ValueError(f"Item {item_id} not found in conversation {conversation_id}")
+
+ adapter: TypeAdapter[ConversationItem] = TypeAdapter(ConversationItem)
+ return adapter.validate_python(record["item_data"])
+
+ async def list(self, conversation_id: str, after=NOT_GIVEN, include=NOT_GIVEN, limit=NOT_GIVEN, order=NOT_GIVEN):
+ """List items in the conversation."""
+ result = await self.sql_store.fetch_all(table="conversation_items", where={"conversation_id": conversation_id})
+ records = result.data
+
+ if order != NOT_GIVEN and order == "asc":
+ records.sort(key=lambda x: x["created_at"])
+ else:
+ records.sort(key=lambda x: x["created_at"], reverse=True)
+
+ actual_limit = 20
+ if limit != NOT_GIVEN and isinstance(limit, int):
+ actual_limit = limit
+
+ records = records[:actual_limit]
+ items = [record["item_data"] for record in records]
+
+ adapter: TypeAdapter[ConversationItem] = TypeAdapter(ConversationItem)
+ response_items: list[ConversationItem] = [adapter.validate_python(item) for item in items]
+
+ first_id = response_items[0].id if response_items else None
+ last_id = response_items[-1].id if response_items else None
+
+ return ConversationItemList(
+ data=response_items,
+ first_id=first_id,
+ last_id=last_id,
+ has_more=False,
+ )
+
+ async def openai_delete_conversation_item(
+ self, conversation_id: str, item_id: str
+ ) -> ConversationItemDeletedResource:
+ """Delete a conversation item."""
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ if not item_id:
+ raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+
+ _ = await self._get_validated_conversation(conversation_id)
+
+ record = await self.sql_store.fetch_one(
+ table="conversation_items", where={"id": item_id, "conversation_id": conversation_id}
+ )
+
+ if record is None:
+ raise ValueError(f"Item {item_id} not found in conversation {conversation_id}")
+
+ await self.sql_store.delete(
+ table="conversation_items", where={"id": item_id, "conversation_id": conversation_id}
+ )
+
+ logger.info(f"Deleted item {item_id} from conversation {conversation_id}")
+ return ConversationItemDeletedResource(id=item_id)
diff --git a/llama_stack/core/datatypes.py b/llama_stack/core/datatypes.py
index 6a297f0122..0e16d91e7e 100644
--- a/llama_stack/core/datatypes.py
+++ b/llama_stack/core/datatypes.py
@@ -480,6 +480,13 @@ class StackRunConfig(BaseModel):
If not specified, a default SQLite store will be used.""",
)
+ conversations_store: SqlStoreConfig | None = Field(
+ default=None,
+ description="""
+Configuration for the persistence store used by the conversations API.
+If not specified, a default SQLite store will be used.""",
+ )
+
# registry of "resources" in the distribution
models: list[ModelInput] = Field(default_factory=list)
shields: list[ShieldInput] = Field(default_factory=list)
diff --git a/llama_stack/core/distribution.py b/llama_stack/core/distribution.py
index 302ecb9600..f44967aaf5 100644
--- a/llama_stack/core/distribution.py
+++ b/llama_stack/core/distribution.py
@@ -25,7 +25,7 @@
logger = get_logger(name=__name__, category="core")
-INTERNAL_APIS = {Api.inspect, Api.providers, Api.prompts}
+INTERNAL_APIS = {Api.inspect, Api.providers, Api.prompts, Api.conversations}
def stack_apis() -> list[Api]:
diff --git a/llama_stack/core/resolver.py b/llama_stack/core/resolver.py
index f421c47ed7..0d6f54f9e9 100644
--- a/llama_stack/core/resolver.py
+++ b/llama_stack/core/resolver.py
@@ -10,6 +10,7 @@
from llama_stack.apis.agents import Agents
from llama_stack.apis.batches import Batches
from llama_stack.apis.benchmarks import Benchmarks
+from llama_stack.apis.conversations import Conversations
from llama_stack.apis.datasetio import DatasetIO
from llama_stack.apis.datasets import Datasets
from llama_stack.apis.datatypes import ExternalApiSpec
@@ -96,6 +97,7 @@ def api_protocol_map(external_apis: dict[Api, ExternalApiSpec] | None = None) ->
Api.tool_runtime: ToolRuntime,
Api.files: Files,
Api.prompts: Prompts,
+ Api.conversations: Conversations,
}
if external_apis:
diff --git a/llama_stack/core/server/server.py b/llama_stack/core/server/server.py
index 7d119c1399..27187ddd01 100644
--- a/llama_stack/core/server/server.py
+++ b/llama_stack/core/server/server.py
@@ -451,6 +451,7 @@ def create_app(
apis_to_serve.add("inspect")
apis_to_serve.add("providers")
apis_to_serve.add("prompts")
+ apis_to_serve.add("conversations")
for api_str in apis_to_serve:
api = Api(api_str)
diff --git a/llama_stack/core/stack.py b/llama_stack/core/stack.py
index 3e14328a37..d5d55319a7 100644
--- a/llama_stack/core/stack.py
+++ b/llama_stack/core/stack.py
@@ -15,6 +15,7 @@
from llama_stack.apis.agents import Agents
from llama_stack.apis.benchmarks import Benchmarks
+from llama_stack.apis.conversations import Conversations
from llama_stack.apis.datasetio import DatasetIO
from llama_stack.apis.datasets import Datasets
from llama_stack.apis.eval import Eval
@@ -34,6 +35,7 @@
from llama_stack.apis.tools import RAGToolRuntime, ToolGroups, ToolRuntime
from llama_stack.apis.vector_dbs import VectorDBs
from llama_stack.apis.vector_io import VectorIO
+from llama_stack.core.conversations.conversations import ConversationServiceConfig, ConversationServiceImpl
from llama_stack.core.datatypes import Provider, StackRunConfig
from llama_stack.core.distribution import get_provider_registry
from llama_stack.core.inspect import DistributionInspectConfig, DistributionInspectImpl
@@ -73,6 +75,7 @@ class LlamaStack(
RAGToolRuntime,
Files,
Prompts,
+ Conversations,
):
pass
@@ -312,6 +315,12 @@ def add_internal_implementations(impls: dict[Api, Any], run_config: StackRunConf
)
impls[Api.prompts] = prompts_impl
+ conversations_impl = ConversationServiceImpl(
+ ConversationServiceConfig(run_config=run_config),
+ deps=impls,
+ )
+ impls[Api.conversations] = conversations_impl
+
class Stack:
def __init__(self, run_config: StackRunConfig, provider_registry: ProviderRegistry | None = None):
@@ -342,6 +351,8 @@ async def initialize(self):
if Api.prompts in impls:
await impls[Api.prompts].initialize()
+ if Api.conversations in impls:
+ await impls[Api.conversations].initialize()
await register_resources(self.run_config, impls)
diff --git a/llama_stack/providers/utils/sqlstore/api.py b/llama_stack/providers/utils/sqlstore/api.py
index 6bb85ea0c4..a61fd1090e 100644
--- a/llama_stack/providers/utils/sqlstore/api.py
+++ b/llama_stack/providers/utils/sqlstore/api.py
@@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from collections.abc import Mapping
+from collections.abc import Mapping, Sequence
from enum import Enum
from typing import Any, Literal, Protocol
@@ -41,9 +41,9 @@ async def create_table(self, table: str, schema: Mapping[str, ColumnType | Colum
"""
pass
- async def insert(self, table: str, data: Mapping[str, Any]) -> None:
+ async def insert(self, table: str, data: Mapping[str, Any] | Sequence[Mapping[str, Any]]) -> None:
"""
- Insert a row into a table.
+ Insert a row or batch of rows into a table.
"""
pass
diff --git a/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py b/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py
index ab67f7052d..e1da4db6e0 100644
--- a/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py
+++ b/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py
@@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from collections.abc import Mapping
+from collections.abc import Mapping, Sequence
from typing import Any, Literal
from llama_stack.core.access_control.access_control import default_policy, is_action_allowed
@@ -38,6 +38,18 @@
]
+def _enhance_item_with_access_control(item: Mapping[str, Any], current_user: User | None) -> Mapping[str, Any]:
+ """Add access control attributes to a data item."""
+ enhanced = dict(item)
+ if current_user:
+ enhanced["owner_principal"] = current_user.principal
+ enhanced["access_attributes"] = current_user.attributes
+ else:
+ enhanced["owner_principal"] = None
+ enhanced["access_attributes"] = None
+ return enhanced
+
+
class SqlRecord(ProtectedResource):
def __init__(self, record_id: str, table_name: str, owner: User):
self.type = f"sql_record::{table_name}"
@@ -102,18 +114,14 @@ async def create_table(self, table: str, schema: Mapping[str, ColumnType | Colum
await self.sql_store.add_column_if_not_exists(table, "access_attributes", ColumnType.JSON)
await self.sql_store.add_column_if_not_exists(table, "owner_principal", ColumnType.STRING)
- async def insert(self, table: str, data: Mapping[str, Any]) -> None:
- """Insert a row with automatic access control attribute capture."""
- enhanced_data = dict(data)
-
+ async def insert(self, table: str, data: Mapping[str, Any] | Sequence[Mapping[str, Any]]) -> None:
+ """Insert a row or batch of rows with automatic access control attribute capture."""
current_user = get_authenticated_user()
- if current_user:
- enhanced_data["owner_principal"] = current_user.principal
- enhanced_data["access_attributes"] = current_user.attributes
+ enhanced_data: Mapping[str, Any] | Sequence[Mapping[str, Any]]
+ if isinstance(data, Mapping):
+ enhanced_data = _enhance_item_with_access_control(data, current_user)
else:
- enhanced_data["owner_principal"] = None
- enhanced_data["access_attributes"] = None
-
+ enhanced_data = [_enhance_item_with_access_control(item, current_user) for item in data]
await self.sql_store.insert(table, enhanced_data)
async def fetch_all(
diff --git a/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py b/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py
index 46ed8c1d11..23cd6444ec 100644
--- a/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py
+++ b/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py
@@ -3,7 +3,7 @@
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from collections.abc import Mapping
+from collections.abc import Mapping, Sequence
from typing import Any, Literal
from sqlalchemy import (
@@ -116,7 +116,7 @@ async def create_table(
async with engine.begin() as conn:
await conn.run_sync(self.metadata.create_all, tables=[sqlalchemy_table], checkfirst=True)
- async def insert(self, table: str, data: Mapping[str, Any]) -> None:
+ async def insert(self, table: str, data: Mapping[str, Any] | Sequence[Mapping[str, Any]]) -> None:
async with self.async_session() as session:
await session.execute(self.metadata.tables[table].insert(), data)
await session.commit()
diff --git a/llama_stack/strong_typing/schema.py b/llama_stack/strong_typing/schema.py
index 2bfb7033e2..f911fc41f9 100644
--- a/llama_stack/strong_typing/schema.py
+++ b/llama_stack/strong_typing/schema.py
@@ -484,12 +484,19 @@ def _type_to_schema(
}
return ret
elif origin_type is Literal:
- if len(typing.get_args(typ)) != 1:
- raise ValueError(f"Literal type {typ} has {len(typing.get_args(typ))} arguments")
- (literal_value,) = typing.get_args(typ) # unpack value of literal type
- schema = self.type_to_schema(type(literal_value))
- schema["const"] = literal_value
- return schema
+ literal_args = typing.get_args(typ)
+ if len(literal_args) == 1:
+ (literal_value,) = literal_args
+ schema = self.type_to_schema(type(literal_value))
+ schema["const"] = literal_value
+ return schema
+ elif len(literal_args) > 1:
+ first_value = literal_args[0]
+ schema = self.type_to_schema(type(first_value))
+ schema["enum"] = list(literal_args)
+ return schema
+ else:
+ return {"enum": []}
elif origin_type is type:
(concrete_type,) = typing.get_args(typ) # unpack single tuple element
return {"const": self.type_to_schema(concrete_type, force_expand=True)}
diff --git a/pyproject.toml b/pyproject.toml
index 8a162e90ae..52eb8f7c8f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,7 +32,7 @@ dependencies = [
"jinja2>=3.1.6",
"jsonschema",
"llama-stack-client>=0.2.23",
- "openai>=1.100.0", # for expires_after support
+ "openai>=1.107", # for expires_after support
"prompt-toolkit",
"python-dotenv",
"python-jose[cryptography]",
@@ -49,6 +49,7 @@ dependencies = [
"opentelemetry-exporter-otlp-proto-http>=1.30.0", # server
"aiosqlite>=0.21.0", # server - for metadata store
"asyncpg", # for metadata store
+ "sqlalchemy[asyncio]>=2.0.41", # server - for conversations
]
[project.optional-dependencies]
diff --git a/tests/integration/conversations/test_openai_conversations.py b/tests/integration/conversations/test_openai_conversations.py
new file mode 100644
index 0000000000..345e1c00a6
--- /dev/null
+++ b/tests/integration/conversations/test_openai_conversations.py
@@ -0,0 +1,135 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import pytest
+
+
+@pytest.mark.integration
+class TestOpenAIConversations:
+    # TODO: switch to compat_client once the client SDK is regenerated
+ def test_conversation_create(self, openai_client):
+ conversation = openai_client.conversations.create(
+ metadata={"topic": "demo"}, items=[{"type": "message", "role": "user", "content": "Hello!"}]
+ )
+
+ assert conversation.id.startswith("conv_")
+ assert conversation.object == "conversation"
+ assert conversation.metadata["topic"] == "demo"
+ assert isinstance(conversation.created_at, int)
+
+ def test_conversation_retrieve(self, openai_client):
+ conversation = openai_client.conversations.create(metadata={"topic": "demo"})
+
+ retrieved = openai_client.conversations.retrieve(conversation.id)
+
+ assert retrieved.id == conversation.id
+ assert retrieved.object == "conversation"
+ assert retrieved.metadata["topic"] == "demo"
+ assert retrieved.created_at == conversation.created_at
+
+ def test_conversation_update(self, openai_client):
+ conversation = openai_client.conversations.create(metadata={"topic": "demo"})
+
+ updated = openai_client.conversations.update(conversation.id, metadata={"topic": "project-x"})
+
+ assert updated.id == conversation.id
+ assert updated.metadata["topic"] == "project-x"
+ assert updated.created_at == conversation.created_at
+
+ def test_conversation_delete(self, openai_client):
+ conversation = openai_client.conversations.create(metadata={"topic": "demo"})
+
+ deleted = openai_client.conversations.delete(conversation.id)
+
+ assert deleted.id == conversation.id
+ assert deleted.object == "conversation.deleted"
+ assert deleted.deleted is True
+
+ def test_conversation_items_create(self, openai_client):
+ conversation = openai_client.conversations.create()
+
+ items = openai_client.conversations.items.create(
+ conversation.id,
+ items=[
+ {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "Hello!"}]},
+ {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "How are you?"}]},
+ ],
+ )
+
+ assert items.object == "list"
+ assert len(items.data) == 2
+ assert items.data[0].content[0].text == "Hello!"
+ assert items.data[1].content[0].text == "How are you?"
+ assert items.first_id == items.data[0].id
+ assert items.last_id == items.data[1].id
+ assert items.has_more is False
+
+ def test_conversation_items_list(self, openai_client):
+ conversation = openai_client.conversations.create()
+
+ openai_client.conversations.items.create(
+ conversation.id,
+ items=[{"type": "message", "role": "user", "content": [{"type": "input_text", "text": "Hello!"}]}],
+ )
+
+ items = openai_client.conversations.items.list(conversation.id, limit=10)
+
+ assert items.object == "list"
+ assert len(items.data) >= 1
+ assert items.data[0].type == "message"
+ assert items.data[0].role == "user"
+ assert hasattr(items, "first_id")
+ assert hasattr(items, "last_id")
+ assert hasattr(items, "has_more")
+
+ def test_conversation_item_retrieve(self, openai_client):
+ conversation = openai_client.conversations.create()
+
+ created_items = openai_client.conversations.items.create(
+ conversation.id,
+ items=[{"type": "message", "role": "user", "content": [{"type": "input_text", "text": "Hello!"}]}],
+ )
+
+ item_id = created_items.data[0].id
+ item = openai_client.conversations.items.retrieve(item_id, conversation_id=conversation.id)
+
+ assert item.id == item_id
+ assert item.type == "message"
+ assert item.role == "user"
+ assert item.content[0].text == "Hello!"
+
+ def test_conversation_item_delete(self, openai_client):
+ conversation = openai_client.conversations.create()
+
+ created_items = openai_client.conversations.items.create(
+ conversation.id,
+ items=[{"type": "message", "role": "user", "content": [{"type": "input_text", "text": "Hello!"}]}],
+ )
+
+ item_id = created_items.data[0].id
+ deleted = openai_client.conversations.items.delete(item_id, conversation_id=conversation.id)
+
+ assert deleted.id == item_id
+ assert deleted.object == "conversation.item.deleted"
+ assert deleted.deleted is True
+
+ def test_full_workflow(self, openai_client):
+ conversation = openai_client.conversations.create(
+ metadata={"topic": "workflow-test"}, items=[{"type": "message", "role": "user", "content": "Hello!"}]
+ )
+
+ openai_client.conversations.items.create(
+ conversation.id,
+ items=[{"type": "message", "role": "user", "content": [{"type": "input_text", "text": "Follow up"}]}],
+ )
+
+ all_items = openai_client.conversations.items.list(conversation.id)
+ assert len(all_items.data) >= 2
+
+ updated = openai_client.conversations.update(conversation.id, metadata={"topic": "workflow-complete"})
+ assert updated.metadata["topic"] == "workflow-complete"
+
+ openai_client.conversations.delete(conversation.id)
diff --git a/tests/unit/conversations/test_api_models.py b/tests/unit/conversations/test_api_models.py
new file mode 100644
index 0000000000..0e52778b86
--- /dev/null
+++ b/tests/unit/conversations/test_api_models.py
@@ -0,0 +1,60 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+from llama_stack.apis.conversations.conversations import (
+ Conversation,
+ ConversationCreateRequest,
+ ConversationItem,
+ ConversationItemList,
+)
+
+
+def test_conversation_create_request_defaults():
+ request = ConversationCreateRequest()
+ assert request.items == []
+ assert request.metadata == {}
+
+
+def test_conversation_model_defaults():
+ conversation = Conversation(
+ id="conv_123456789",
+ created_at=1234567890,
+ metadata=None,
+ object="conversation",
+ )
+ assert conversation.id == "conv_123456789"
+ assert conversation.object == "conversation"
+ assert conversation.metadata is None
+
+
+def test_openai_client_compatibility():
+ from openai.types.conversations.message import Message
+ from pydantic import TypeAdapter
+
+ openai_message = Message(
+ id="msg_123",
+ content=[{"type": "input_text", "text": "Hello"}],
+ role="user",
+ status="in_progress",
+ type="message",
+ object="message",
+ )
+
+ adapter = TypeAdapter(ConversationItem)
+ validated_item = adapter.validate_python(openai_message.model_dump())
+
+ assert validated_item.id == "msg_123"
+ assert validated_item.type == "message"
+
+
+def test_conversation_item_list():
+ item_list = ConversationItemList(data=[])
+ assert item_list.object == "list"
+ assert item_list.data == []
+ assert item_list.first_id is None
+ assert item_list.last_id is None
+ assert item_list.has_more is False
diff --git a/tests/unit/conversations/test_conversations.py b/tests/unit/conversations/test_conversations.py
new file mode 100644
index 0000000000..65c3e23338
--- /dev/null
+++ b/tests/unit/conversations/test_conversations.py
@@ -0,0 +1,132 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import tempfile
+from pathlib import Path
+
+import pytest
+from openai.types.conversations.conversation import Conversation as OpenAIConversation
+from openai.types.conversations.conversation_item import ConversationItem as OpenAIConversationItem
+from pydantic import TypeAdapter
+
+from llama_stack.apis.agents.openai_responses import (
+ OpenAIResponseInputMessageContentText,
+ OpenAIResponseMessage,
+)
+from llama_stack.core.conversations.conversations import (
+ ConversationServiceConfig,
+ ConversationServiceImpl,
+)
+from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig
+
+
+@pytest.fixture
+async def service():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = Path(tmpdir) / "test_conversations.db"
+
+ config = ConversationServiceConfig(conversations_store=SqliteSqlStoreConfig(db_path=str(db_path)), policy=[])
+ service = ConversationServiceImpl(config, {})
+ await service.initialize()
+ yield service
+
+
+async def test_conversation_lifecycle(service):
+ conversation = await service.create_conversation(metadata={"test": "data"})
+
+ assert conversation.id.startswith("conv_")
+ assert conversation.metadata == {"test": "data"}
+
+ retrieved = await service.get_conversation(conversation.id)
+ assert retrieved.id == conversation.id
+
+ deleted = await service.openai_delete_conversation(conversation.id)
+ assert deleted.id == conversation.id
+
+
+async def test_conversation_items(service):
+ conversation = await service.create_conversation()
+
+ items = [
+ OpenAIResponseMessage(
+ type="message",
+ role="user",
+ content=[OpenAIResponseInputMessageContentText(type="input_text", text="Hello")],
+ id="msg_test123",
+ status="completed",
+ )
+ ]
+ item_list = await service.add_items(conversation.id, items)
+
+ assert len(item_list.data) == 1
+ assert item_list.data[0].id == "msg_test123"
+
+ items = await service.list(conversation.id)
+ assert len(items.data) == 1
+
+
+async def test_invalid_conversation_id(service):
+ with pytest.raises(ValueError, match="Expected an ID that begins with 'conv_'"):
+ await service._get_validated_conversation("invalid_id")
+
+
+async def test_empty_parameter_validation(service):
+ with pytest.raises(ValueError, match="Expected a non-empty value"):
+ await service.retrieve("", "item_123")
+
+
+async def test_openai_type_compatibility(service):
+ conversation = await service.create_conversation(metadata={"test": "value"})
+
+ conversation_dict = conversation.model_dump()
+ openai_conversation = OpenAIConversation.model_validate(conversation_dict)
+
+ for attr in ["id", "object", "created_at", "metadata"]:
+ assert getattr(openai_conversation, attr) == getattr(conversation, attr)
+
+ items = [
+ OpenAIResponseMessage(
+ type="message",
+ role="user",
+ content=[OpenAIResponseInputMessageContentText(type="input_text", text="Hello")],
+ id="msg_test456",
+ status="completed",
+ )
+ ]
+ item_list = await service.add_items(conversation.id, items)
+
+ for attr in ["object", "data", "first_id", "last_id", "has_more"]:
+ assert hasattr(item_list, attr)
+ assert item_list.object == "list"
+
+ items = await service.list(conversation.id)
+ item = await service.retrieve(conversation.id, items.data[0].id)
+ item_dict = item.model_dump()
+
+ openai_item_adapter = TypeAdapter(OpenAIConversationItem)
+ openai_item_adapter.validate_python(item_dict)
+
+
+async def test_policy_configuration():
+ from llama_stack.core.access_control.datatypes import Action, Scope
+ from llama_stack.core.datatypes import AccessRule
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = Path(tmpdir) / "test_conversations_policy.db"
+
+ restrictive_policy = [
+ AccessRule(forbid=Scope(principal="test_user", actions=[Action.CREATE, Action.READ], resource="*"))
+ ]
+
+ config = ConversationServiceConfig(
+ conversations_store=SqliteSqlStoreConfig(db_path=str(db_path)), policy=restrictive_policy
+ )
+ service = ConversationServiceImpl(config, {})
+ await service.initialize()
+
+ assert service.policy == restrictive_policy
+ assert len(service.policy) == 1
+ assert service.policy[0].forbid is not None
diff --git a/tests/unit/utils/sqlstore/test_sqlstore.py b/tests/unit/utils/sqlstore/test_sqlstore.py
index ba59ec7eca..00669b698c 100644
--- a/tests/unit/utils/sqlstore/test_sqlstore.py
+++ b/tests/unit/utils/sqlstore/test_sqlstore.py
@@ -368,6 +368,32 @@ async def test_where_operator_gt_and_update_delete():
assert {r["id"] for r in rows_after} == {1, 3}
+async def test_batch_insert():
+ with TemporaryDirectory() as tmp_dir:
+ db_path = tmp_dir + "/test.db"
+ store = SqlAlchemySqlStoreImpl(SqliteSqlStoreConfig(db_path=db_path))
+
+ await store.create_table(
+ "batch_test",
+ {
+ "id": ColumnType.INTEGER,
+ "name": ColumnType.STRING,
+ "value": ColumnType.INTEGER,
+ },
+ )
+
+ batch_data = [
+ {"id": 1, "name": "first", "value": 10},
+ {"id": 2, "name": "second", "value": 20},
+ {"id": 3, "name": "third", "value": 30},
+ ]
+
+ await store.insert("batch_test", batch_data)
+
+ result = await store.fetch_all("batch_test", order_by=[("id", "asc")])
+ assert result.data == batch_data
+
+
async def test_where_operator_edge_cases():
with TemporaryDirectory() as tmp_dir:
db_path = tmp_dir + "/test.db"
diff --git a/uv.lock b/uv.lock
index 63639ee4a1..c1cd7e71ce 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1773,6 +1773,7 @@ dependencies = [
{ name = "python-jose", extra = ["cryptography"] },
{ name = "python-multipart" },
{ name = "rich" },
+ { name = "sqlalchemy", extra = ["asyncio"] },
{ name = "starlette" },
{ name = "termcolor" },
{ name = "tiktoken" },
@@ -1887,7 +1888,7 @@ requires-dist = [
{ name = "jsonschema" },
{ name = "llama-stack-client", specifier = ">=0.2.23" },
{ name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.23" },
- { name = "openai", specifier = ">=1.100.0" },
+ { name = "openai", specifier = ">=1.107" },
{ name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
{ name = "opentelemetry-sdk", specifier = ">=1.30.0" },
{ name = "pandas", marker = "extra == 'ui'" },
@@ -1898,6 +1899,7 @@ requires-dist = [
{ name = "python-jose", extras = ["cryptography"] },
{ name = "python-multipart", specifier = ">=0.0.20" },
{ name = "rich" },
+ { name = "sqlalchemy", extras = ["asyncio"], specifier = ">=2.0.41" },
{ name = "starlette" },
{ name = "streamlit", marker = "extra == 'ui'" },
{ name = "streamlit-option-menu", marker = "extra == 'ui'" },