
Commit 4a5cf0c

Wauplin authored and github-actions[bot] committed
Update Inference Providers documentation (automated)
1 parent 801b6bf commit 4a5cf0c

19 files changed: +78 −46 lines changed

docs/inference-providers/providers/cerebras.md
Lines changed: 1 addition & 1 deletion

@@ -44,7 +44,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"cerebras":{"modelId":"Qwen/Qwen3-32B","providerModelId":"qwen-3-32b"} } }
+providersMapping={ {"cerebras":{"modelId":"meta-llama/Llama-3.3-70B-Instruct","providerModelId":"llama-3.3-70b"} } }
 conversational />
 
 
docs/inference-providers/providers/fal-ai.md
Lines changed: 11 additions & 1 deletion

@@ -48,6 +48,16 @@ Find out more about Automatic Speech Recognition [here](../tasks/automatic_speec
 />
 
 
+### Image To Image
+
+Find out more about Image To Image [here](../tasks/image_to_image).
+
+<InferenceSnippet
+pipeline=image-to-image
+providersMapping={ {"fal-ai":{"modelId":"black-forest-labs/FLUX.1-Kontext-dev","providerModelId":"fal-ai/flux-kontext/dev"} } }
+/>
+
+
 ### Text To Image
 
 Find out more about Text To Image [here](../tasks/text_to_image).

@@ -64,6 +74,6 @@ Find out more about Text To Video [here](../tasks/text_to_video).
 
 <InferenceSnippet
 pipeline=text-to-video
-providersMapping={ {"fal-ai":{"modelId":"Lightricks/LTX-Video","providerModelId":"fal-ai/ltx-video"} } }
+providersMapping={ {"fal-ai":{"modelId":"tencent/HunyuanVideo","providerModelId":"fal-ai/hunyuan-video"} } }
 />
 
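For context, a minimal sketch of what the newly documented fal-ai Image To Image route looks like from Python, assuming `huggingface_hub`'s `InferenceClient`; the input file, output path, prompt, and `HF_TOKEN` environment variable are placeholders, while the provider and model ID come from the mapping above:

```python
import os
from huggingface_hub import InferenceClient

# Route an image-to-image request through the fal-ai provider.
# HF_TOKEN and cat.png are placeholders for illustration only.
client = InferenceClient(provider="fal-ai", api_key=os.environ["HF_TOKEN"])

image = client.image_to_image(
    "cat.png",  # local input image (placeholder)
    prompt="Turn the cat into a tiger.",
    model="black-forest-labs/FLUX.1-Kontext-dev",
)

image.save("tiger.png")  # the client returns a PIL image
```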
docs/inference-providers/providers/featherless-ai.md
Lines changed: 3 additions & 3 deletions

@@ -46,7 +46,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"featherless-ai":{"modelId":"mistralai/Magistral-Small-2506","providerModelId":"mistralai/Magistral-Small-2506"} } }
+providersMapping={ {"featherless-ai":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek-ai/DeepSeek-R1-0528"} } }
 conversational />
 
 

@@ -56,7 +56,7 @@ Find out more about Chat Completion (VLM) [here](../tasks/chat-completion).
 
 <InferenceSnippet
 pipeline=image-text-to-text
-providersMapping={ {"featherless-ai":{"modelId":"google/gemma-3-27b-it","providerModelId":"google/gemma-3-27b-it"} } }
+providersMapping={ {"featherless-ai":{"modelId":"google/medgemma-4b-it","providerModelId":"google/medgemma-4b-it"} } }
 conversational />
 
 

@@ -66,6 +66,6 @@ Find out more about Text Generation [here](../tasks/text_generation).
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"featherless-ai":{"modelId":"mistralai/Magistral-Small-2506","providerModelId":"mistralai/Magistral-Small-2506"} } }
+providersMapping={ {"featherless-ai":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek-ai/DeepSeek-R1-0528"} } }
 />
 
docs/inference-providers/providers/groq.md
Lines changed: 1 addition & 1 deletion

@@ -54,7 +54,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"groq":{"modelId":"Qwen/Qwen3-32B","providerModelId":"qwen/qwen3-32b"} } }
+providersMapping={ {"groq":{"modelId":"meta-llama/Llama-3.3-70B-Instruct","providerModelId":"llama-3.3-70b-versatile"} } }
 conversational />
 
 
docs/inference-providers/providers/hf-inference.md
Lines changed: 5 additions & 5 deletions

@@ -55,7 +55,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"hf-inference":{"modelId":"sarvamai/sarvam-m","providerModelId":"sarvamai/sarvam-m"} } }
+providersMapping={ {"hf-inference":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"meta-llama/Llama-3.1-8B-Instruct"} } }
 conversational />
 
 

@@ -65,7 +65,7 @@ Find out more about Chat Completion (VLM) [here](../tasks/chat-completion).
 
 <InferenceSnippet
 pipeline=image-text-to-text
-providersMapping={ {"hf-inference":{"modelId":"meta-llama/Llama-3.2-11B-Vision-Instruct","providerModelId":"meta-llama/Llama-3.2-11B-Vision-Instruct"} } }
+providersMapping={ {"hf-inference":{"modelId":"mistralai/Mistral-Small-3.1-24B-Instruct-2503","providerModelId":"mistralai/Mistral-Small-3.1-24B-Instruct-2503"} } }
 conversational />
 
 

@@ -135,7 +135,7 @@ Find out more about Summarization [here](../tasks/summarization).
 
 <InferenceSnippet
 pipeline=summarization
-providersMapping={ {"hf-inference":{"modelId":"facebook/bart-large-cnn","providerModelId":"facebook/bart-large-cnn"} } }
+providersMapping={ {"hf-inference":{"modelId":"Falconsai/text_summarization","providerModelId":"Falconsai/text_summarization"} } }
 />
 
 

@@ -155,7 +155,7 @@ Find out more about Text Classification [here](../tasks/text_classification).
 
 <InferenceSnippet
 pipeline=text-classification
-providersMapping={ {"hf-inference":{"modelId":"distilbert/distilbert-base-uncased-finetuned-sst-2-english","providerModelId":"distilbert/distilbert-base-uncased-finetuned-sst-2-english"} } }
+providersMapping={ {"hf-inference":{"modelId":"tabularisai/multilingual-sentiment-analysis","providerModelId":"tabularisai/multilingual-sentiment-analysis"} } }
 />
 
 

@@ -165,7 +165,7 @@ Find out more about Text Generation [here](../tasks/text_generation).
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"hf-inference":{"modelId":"sarvamai/sarvam-m","providerModelId":"sarvamai/sarvam-m"} } }
+providersMapping={ {"hf-inference":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"meta-llama/Llama-3.1-8B-Instruct"} } }
 />
 
 
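For reference, the updated Text Classification default on hf-inference can be exercised with a sketch like the one below, again assuming `huggingface_hub`'s `InferenceClient`; the input sentence and `HF_TOKEN` variable are placeholders, while the model ID comes from the mapping above:

```python
import os
from huggingface_hub import InferenceClient

# Text classification against the new hf-inference default model.
# HF_TOKEN and the example sentence are placeholders for illustration only.
client = InferenceClient(provider="hf-inference", api_key=os.environ["HF_TOKEN"])

labels = client.text_classification(
    "This product exceeded my expectations!",
    model="tabularisai/multilingual-sentiment-analysis",
)
print(labels)  # list of elements with a label and a score
```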
docs/inference-providers/providers/nebius.md
Lines changed: 1 addition & 1 deletion

@@ -64,7 +64,7 @@ Find out more about Feature Extraction [here](../tasks/feature_extraction).
 
 <InferenceSnippet
 pipeline=feature-extraction
-providersMapping={ {"nebius":{"modelId":"intfloat/e5-mistral-7b-instruct","providerModelId":"intfloat/e5-mistral-7b-instruct"} } }
+providersMapping={ {"nebius":{"modelId":"Qwen/Qwen3-Embedding-8B","providerModelId":"Qwen/Qwen3-Embedding-8B"} } }
 />
 
 
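Similarly, a minimal sketch of the updated Feature Extraction default on nebius with `InferenceClient`; the input text and `HF_TOKEN` variable are placeholders, while the model ID comes from the mapping above:

```python
import os
from huggingface_hub import InferenceClient

# Embed a sentence through the nebius provider's feature-extraction route.
# HF_TOKEN and the input text are placeholders for illustration only.
client = InferenceClient(provider="nebius", api_key=os.environ["HF_TOKEN"])

embedding = client.feature_extraction(
    "Inference Providers route requests to the selected provider.",
    model="Qwen/Qwen3-Embedding-8B",
)
print(embedding.shape)  # numpy array; exact shape depends on the model/server
```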
docs/inference-providers/providers/replicate.md
Lines changed: 21 additions & 1 deletion

@@ -38,6 +38,26 @@ Replicate is building tools so all software engineers can use AI as if it were n
 ## Supported tasks
 
 
+### Automatic Speech Recognition
+
+Find out more about Automatic Speech Recognition [here](../tasks/automatic_speech_recognition).
+
+<InferenceSnippet
+pipeline=automatic-speech-recognition
+providersMapping={ {"replicate":{"modelId":"openai/whisper-large-v3","providerModelId":"openai/whisper"} } }
+/>
+
+
+### Image To Image
+
+Find out more about Image To Image [here](../tasks/image_to_image).
+
+<InferenceSnippet
+pipeline=image-to-image
+providersMapping={ {"replicate":{"modelId":"black-forest-labs/FLUX.1-Kontext-dev","providerModelId":"black-forest-labs/flux-kontext-dev"} } }
+/>
+
+
 ### Text To Image
 
 Find out more about Text To Image [here](../tasks/text_to_image).

@@ -54,6 +74,6 @@ Find out more about Text To Video [here](../tasks/text_to_video).
 
 <InferenceSnippet
 pipeline=text-to-video
-providersMapping={ {"replicate":{"modelId":"Lightricks/LTX-Video","providerModelId":"lightricks/ltx-video:8c47da666861d081eeb4d1261853087de23923a268a69b63febdf5dc1dee08e4"} } }
+providersMapping={ {"replicate":{"modelId":"Wan-AI/Wan2.1-T2V-14B","providerModelId":"wavespeedai/wan-2.1-t2v-480p"} } }
 />
 
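The newly documented Automatic Speech Recognition route on replicate corresponds to a call roughly like the following sketch with `InferenceClient`; the audio file and `HF_TOKEN` variable are placeholders, while the model ID comes from the mapping above:

```python
import os
from huggingface_hub import InferenceClient

# Transcribe an audio file through the replicate provider.
# HF_TOKEN and sample.flac are placeholders for illustration only.
client = InferenceClient(provider="replicate", api_key=os.environ["HF_TOKEN"])

result = client.automatic_speech_recognition(
    "sample.flac",  # local audio file (placeholder)
    model="openai/whisper-large-v3",
)
print(result.text)  # transcribed text
```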
docs/inference-providers/providers/sambanova.md
Lines changed: 1 addition & 1 deletion

@@ -55,7 +55,7 @@ Find out more about Chat Completion (VLM) [here](../tasks/chat-completion).
 
 <InferenceSnippet
 pipeline=image-text-to-text
-providersMapping={ {"sambanova":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"Llama-4-Scout-17B-16E-Instruct"} } }
+providersMapping={ {"sambanova":{"modelId":"meta-llama/Llama-4-Maverick-17B-128E-Instruct","providerModelId":"Llama-4-Maverick-17B-128E-Instruct"} } }
 conversational />
 
 
docs/inference-providers/tasks/automatic-speech-recognition.md
Lines changed: 1 addition & 1 deletion

@@ -38,7 +38,7 @@ Explore all available models and find the one that suits you best [here](https:/
 
 <InferenceSnippet
 pipeline=automatic-speech-recognition
-providersMapping={ {"fal-ai":{"modelId":"openai/whisper-large-v3","providerModelId":"fal-ai/whisper"},"hf-inference":{"modelId":"openai/whisper-large-v3","providerModelId":"openai/whisper-large-v3"}} }
+providersMapping={ {"fal-ai":{"modelId":"openai/whisper-large-v3","providerModelId":"fal-ai/whisper"},"hf-inference":{"modelId":"openai/whisper-large-v3","providerModelId":"openai/whisper-large-v3"},"replicate":{"modelId":"openai/whisper-large-v3","providerModelId":"openai/whisper"}} }
 />
 
 
docs/inference-providers/tasks/chat-completion.md
Lines changed: 10 additions & 10 deletions

@@ -63,7 +63,7 @@ The API supports:
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"cerebras":{"modelId":"Qwen/Qwen3-32B","providerModelId":"qwen-3-32b"},"cohere":{"modelId":"CohereLabs/c4ai-command-r-plus","providerModelId":"command-r-plus-04-2024"},"featherless-ai":{"modelId":"mistralai/Magistral-Small-2506","providerModelId":"mistralai/Magistral-Small-2506"},"fireworks-ai":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"accounts/fireworks/models/deepseek-r1-0528"},"hf-inference":{"modelId":"sarvamai/sarvam-m","providerModelId":"sarvamai/sarvam-m"},"hyperbolic":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek-ai/DeepSeek-R1-0528"},"nebius":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek-ai/DeepSeek-R1-0528"},"novita":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek/deepseek-r1-0528"},"nscale":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"meta-llama/Llama-3.1-8B-Instruct"},"sambanova":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"DeepSeek-R1-0528"},"together":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek-ai/DeepSeek-R1"}} }
+providersMapping={ {"cerebras":{"modelId":"meta-llama/Llama-3.3-70B-Instruct","providerModelId":"llama-3.3-70b"},"cohere":{"modelId":"CohereLabs/c4ai-command-r-plus","providerModelId":"command-r-plus-04-2024"},"featherless-ai":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek-ai/DeepSeek-R1-0528"},"fireworks-ai":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"accounts/fireworks/models/deepseek-r1-0528"},"groq":{"modelId":"meta-llama/Llama-3.3-70B-Instruct","providerModelId":"llama-3.3-70b-versatile"},"hf-inference":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"meta-llama/Llama-3.1-8B-Instruct"},"hyperbolic":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek-ai/DeepSeek-R1-0528"},"nebius":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek-ai/DeepSeek-R1-0528"},"novita":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek/deepseek-r1-0528"},"nscale":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"meta-llama/Llama-3.1-8B-Instruct"},"sambanova":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"DeepSeek-R1-0528"},"together":{"modelId":"deepseek-ai/DeepSeek-R1-0528","providerModelId":"deepseek-ai/DeepSeek-R1"}} }
 conversational />
 
 

@@ -73,7 +73,7 @@ conversational />
 
 <InferenceSnippet
 pipeline=image-text-to-text
-providersMapping={ {"cerebras":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"llama-4-scout-17b-16e-instruct"},"cohere":{"modelId":"CohereLabs/aya-vision-8b","providerModelId":"c4ai-aya-vision-8b"},"featherless-ai":{"modelId":"google/gemma-3-27b-it","providerModelId":"google/gemma-3-27b-it"},"fireworks-ai":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"accounts/fireworks/models/llama4-scout-instruct-basic"},"hf-inference":{"modelId":"meta-llama/Llama-3.2-11B-Vision-Instruct","providerModelId":"meta-llama/Llama-3.2-11B-Vision-Instruct"},"hyperbolic":{"modelId":"Qwen/Qwen2.5-VL-7B-Instruct","providerModelId":"Qwen/Qwen2.5-VL-7B-Instruct"},"nebius":{"modelId":"google/gemma-3-27b-it","providerModelId":"google/gemma-3-27b-it-fast"},"novita":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"meta-llama/llama-4-scout-17b-16e-instruct"},"nscale":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct"},"sambanova":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"Llama-4-Scout-17B-16E-Instruct"},"together":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct"}} }
+providersMapping={ {"cerebras":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"llama-4-scout-17b-16e-instruct"},"cohere":{"modelId":"CohereLabs/aya-vision-8b","providerModelId":"c4ai-aya-vision-8b"},"featherless-ai":{"modelId":"google/medgemma-4b-it","providerModelId":"google/medgemma-4b-it"},"fireworks-ai":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"accounts/fireworks/models/llama4-scout-instruct-basic"},"groq":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"meta-llama/llama-4-scout-17b-16e-instruct"},"hf-inference":{"modelId":"mistralai/Mistral-Small-3.1-24B-Instruct-2503","providerModelId":"mistralai/Mistral-Small-3.1-24B-Instruct-2503"},"hyperbolic":{"modelId":"Qwen/Qwen2.5-VL-7B-Instruct","providerModelId":"Qwen/Qwen2.5-VL-7B-Instruct"},"nebius":{"modelId":"google/gemma-3-27b-it","providerModelId":"google/gemma-3-27b-it-fast"},"novita":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"meta-llama/llama-4-scout-17b-16e-instruct"},"nscale":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct"},"sambanova":{"modelId":"meta-llama/Llama-4-Maverick-17B-128E-Instruct","providerModelId":"Llama-4-Maverick-17B-128E-Instruct"},"together":{"modelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct","providerModelId":"meta-llama/Llama-4-Scout-17B-16E-Instruct"}} }
 conversational />
 
 

@@ -119,16 +119,16 @@ conversational />
 | **presence_penalty** | _number_ | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics |
 | **response_format** | _unknown_ | One of the following: |
 | **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(#1)** | _object_ | |
-| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;type*** | _enum_ | Possible values: json. |
-| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;value*** | _unknown_ | A string that represents a [JSON Schema](https://json-schema.org/). JSON Schema is a declarative language that allows to annotate JSON documents with types and descriptions. |
+| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;type*** | _enum_ | Possible values: text. |
 | **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(#2)** | _object_ | |
-| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;type*** | _enum_ | Possible values: regex. |
-| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;value*** | _string_ | |
-| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(#3)** | _object_ | |
 | **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;type*** | _enum_ | Possible values: json_schema. |
-| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;value*** | _object_ | |
-| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;name** | _string_ | Optional name identifier for the schema |
-| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;schema*** | _unknown_ | The actual JSON schema definition |
+| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;json_schema*** | _object_ | |
+| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;name*** | _string_ | The name of the response format. |
+| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;description** | _string_ | A description of what the response format is for, used by the model to determine how to respond in the format. |
+| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;schema** | _object_ | The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas [here](https://json-schema.org/). |
+| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;strict** | _boolean_ | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. |
+| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;(#3)** | _object_ | |
+| **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;type*** | _enum_ | Possible values: json_object. |
 | **seed** | _integer_ | |
 | **stop** | _string[]_ | Up to 4 sequences where the API will stop generating further tokens. |
 | **stream** | _boolean_ | |
