diff --git a/docs/inference-providers/providers/cerebras.md b/docs/inference-providers/providers/cerebras.md
index 940cb0a0b..43b736c69 100644
--- a/docs/inference-providers/providers/cerebras.md
+++ b/docs/inference-providers/providers/cerebras.md
@@ -44,7 +44,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
diff --git a/docs/inference-providers/providers/fal-ai.md b/docs/inference-providers/providers/fal-ai.md
index e3db72749..d32dbad91 100644
--- a/docs/inference-providers/providers/fal-ai.md
+++ b/docs/inference-providers/providers/fal-ai.md
@@ -48,6 +48,16 @@ Find out more about Automatic Speech Recognition [here](../tasks/automatic_speec
 />
 
+### Image To Image
+
+Find out more about Image To Image [here](../tasks/image_to_image).
+
+
+
+
 ### Text To Image
 
 Find out more about Text To Image [here](../tasks/text_to_image).
@@ -64,6 +74,6 @@ Find out more about Text To Video [here](../tasks/text_to_video).
diff --git a/docs/inference-providers/providers/featherless-ai.md b/docs/inference-providers/providers/featherless-ai.md
index f247583cc..d56ea7765 100644
--- a/docs/inference-providers/providers/featherless-ai.md
+++ b/docs/inference-providers/providers/featherless-ai.md
@@ -46,7 +46,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
@@ -56,7 +56,7 @@ Find out more about Chat Completion (VLM) [here](../tasks/chat-completion).
@@ -66,6 +66,6 @@ Find out more about Text Generation [here](../tasks/text_generation).
diff --git a/docs/inference-providers/providers/groq.md b/docs/inference-providers/providers/groq.md
index c8c660557..9e326230c 100644
--- a/docs/inference-providers/providers/groq.md
+++ b/docs/inference-providers/providers/groq.md
@@ -54,7 +54,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
diff --git a/docs/inference-providers/providers/hf-inference.md b/docs/inference-providers/providers/hf-inference.md
index 4ec33aae0..226e038f9 100644
--- a/docs/inference-providers/providers/hf-inference.md
+++ b/docs/inference-providers/providers/hf-inference.md
@@ -55,7 +55,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
@@ -65,7 +65,7 @@ Find out more about Chat Completion (VLM) [here](../tasks/chat-completion).
@@ -135,7 +135,7 @@ Find out more about Summarization [here](../tasks/summarization).
@@ -155,7 +155,7 @@ Find out more about Text Classification [here](../tasks/text_classification).
@@ -165,7 +165,7 @@ Find out more about Text Generation [here](../tasks/text_generation).
diff --git a/docs/inference-providers/providers/nebius.md b/docs/inference-providers/providers/nebius.md
index 6479e5cab..4ca0a80ec 100644
--- a/docs/inference-providers/providers/nebius.md
+++ b/docs/inference-providers/providers/nebius.md
@@ -64,7 +64,7 @@ Find out more about Feature Extraction [here](../tasks/feature_extraction).
diff --git a/docs/inference-providers/providers/replicate.md b/docs/inference-providers/providers/replicate.md
index c1cd26b93..6d0cd72cd 100644
--- a/docs/inference-providers/providers/replicate.md
+++ b/docs/inference-providers/providers/replicate.md
@@ -38,6 +38,16 @@ Replicate is building tools so all software engineers can use AI as if it were n
 
 ## Supported tasks
 
+### Image To Image
+
+Find out more about Image To Image [here](../tasks/image_to_image).
+
+
+
+
 ### Text To Image
 
 Find out more about Text To Image [here](../tasks/text_to_image).
@@ -54,6 +64,6 @@ Find out more about Text To Video [here](../tasks/text_to_video).
diff --git a/docs/inference-providers/providers/sambanova.md b/docs/inference-providers/providers/sambanova.md
index 03ba566e4..cb8110109 100644
--- a/docs/inference-providers/providers/sambanova.md
+++ b/docs/inference-providers/providers/sambanova.md
@@ -55,7 +55,7 @@ Find out more about Chat Completion (VLM) [here](../tasks/chat-completion).
diff --git a/docs/inference-providers/tasks/chat-completion.md b/docs/inference-providers/tasks/chat-completion.md
index f27a62972..46d5338e0 100644
--- a/docs/inference-providers/tasks/chat-completion.md
+++ b/docs/inference-providers/tasks/chat-completion.md
@@ -63,7 +63,7 @@ The API supports:
@@ -73,7 +73,7 @@ conversational />
@@ -119,16 +119,16 @@ conversational />
 | **presence_penalty** | _number_ | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics |
 | **response_format** | _unknown_ | One of the following: |
 | **         (#1)** | _object_ | |
-| **                type*** | _enum_ | Possible values: json. |
-| **                value*** | _unknown_ | A string that represents a [JSON Schema](https://json-schema.org/). JSON Schema is a declarative language that allows to annotate JSON documents with types and descriptions. |
+| **                type*** | _enum_ | Possible values: text. |
 | **         (#2)** | _object_ | |
-| **                type*** | _enum_ | Possible values: regex. |
-| **                value*** | _string_ | |
-| **         (#3)** | _object_ | |
 | **                type*** | _enum_ | Possible values: json_schema. |
-| **                value*** | _object_ | |
-| **                        name** | _string_ | Optional name identifier for the schema |
-| **                        schema*** | _unknown_ | The actual JSON schema definition |
+| **                json_schema*** | _object_ | |
+| **                        name*** | _string_ | The name of the response format. |
+| **                        description** | _string_ | A description of what the response format is for, used by the model to determine how to respond in the format. |
+| **                        schema** | _object_ | The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas [here](https://json-schema.org/). |
+| **                        strict** | _boolean_ | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. |
+| **         (#3)** | _object_ | |
+| **                type*** | _enum_ | Possible values: json_object. |
 | **seed** | _integer_ | |
 | **stop** | _string[]_ | Up to 4 sequences where the API will stop generating further tokens. |
 | **stream** | _boolean_ | |
diff --git a/docs/inference-providers/tasks/feature-extraction.md b/docs/inference-providers/tasks/feature-extraction.md
index 9fb6c2283..bc7f88aae 100644
--- a/docs/inference-providers/tasks/feature-extraction.md
+++ b/docs/inference-providers/tasks/feature-extraction.md
@@ -38,7 +38,7 @@ Explore all available models and find the one that suits you best [here](https:/
diff --git a/docs/inference-providers/tasks/image-text-to-text.md b/docs/inference-providers/tasks/image-text-to-text.md
index e497c9dd1..2dd7ed2ed 100644
--- a/docs/inference-providers/tasks/image-text-to-text.md
+++ b/docs/inference-providers/tasks/image-text-to-text.md
@@ -33,7 +33,7 @@ Explore all available models and find the one that suits you best [here](https:/
diff --git a/docs/inference-providers/tasks/image-to-image.md b/docs/inference-providers/tasks/image-to-image.md
index bbc82701e..859ed8361 100644
--- a/docs/inference-providers/tasks/image-to-image.md
+++ b/docs/inference-providers/tasks/image-to-image.md
@@ -35,7 +35,10 @@ Explore all available models and find the one that suits you best [here](https:/
 
 ### Using the API
 
-There are currently no snippet examples for the **image-to-image** task, as no providers support it yet.
+
diff --git a/docs/inference-providers/tasks/summarization.md b/docs/inference-providers/tasks/summarization.md
index 6d3994406..8b948f871 100644
--- a/docs/inference-providers/tasks/summarization.md
+++ b/docs/inference-providers/tasks/summarization.md
@@ -24,7 +24,6 @@ For more details about the `summarization` task, check out its [dedicated page](
 
 ### Recommended models
 
-- [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn): A strong summarization model trained on English news articles. Excels at generating factual summaries.
 - [Falconsai/medical_summarization](https://huggingface.co/Falconsai/medical_summarization): A summarization model trained on medical articles.
 
 Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=summarization&sort=trending).
@@ -34,7 +33,7 @@ Explore all available models and find the one that suits you best [here](https:/
diff --git a/docs/inference-providers/tasks/text-classification.md b/docs/inference-providers/tasks/text-classification.md
index fd2827abb..5e30ee191 100644
--- a/docs/inference-providers/tasks/text-classification.md
+++ b/docs/inference-providers/tasks/text-classification.md
@@ -34,7 +34,7 @@ Explore all available models and find the one that suits you best [here](https:/
diff --git a/docs/inference-providers/tasks/text-generation.md b/docs/inference-providers/tasks/text-generation.md
index c91d1c749..2696bea55 100644
--- a/docs/inference-providers/tasks/text-generation.md
+++ b/docs/inference-providers/tasks/text-generation.md
@@ -41,7 +41,7 @@ Explore all available models and find the one that suits you best [here](https:/
diff --git a/docs/inference-providers/tasks/text-to-video.md b/docs/inference-providers/tasks/text-to-video.md
index 5a29e371d..606302cb2 100644
--- a/docs/inference-providers/tasks/text-to-video.md
+++ b/docs/inference-providers/tasks/text-to-video.md
@@ -35,7 +35,7 @@ Explore all available models and find the one that suits you best [here](https:/
diff --git a/scripts/inference-providers/package.json b/scripts/inference-providers/package.json
index 6710dac5b..84c041f43 100644
--- a/scripts/inference-providers/package.json
+++ b/scripts/inference-providers/package.json
@@ -14,8 +14,8 @@
   "author": "",
   "license": "ISC",
   "dependencies": {
-    "@huggingface/inference": "^4.0.4",
-    "@huggingface/tasks": "^0.19.15",
+    "@huggingface/inference": "^4.2.0",
+    "@huggingface/tasks": "^0.19.19",
     "@types/node": "^22.5.0",
     "handlebars": "^4.7.8",
     "node": "^20.17.0",
diff --git a/scripts/inference-providers/pnpm-lock.yaml b/scripts/inference-providers/pnpm-lock.yaml
index 34a24e34c..e4fac1664 100644
--- a/scripts/inference-providers/pnpm-lock.yaml
+++ b/scripts/inference-providers/pnpm-lock.yaml
@@ -9,11 +9,11 @@ importers:
   .:
     dependencies:
       '@huggingface/inference':
-        specifier: ^4.0.4
-        version: 4.0.4
+        specifier: ^4.2.0
+        version: 4.2.0
       '@huggingface/tasks':
-        specifier: ^0.19.15
-        version: 0.19.15
+        specifier: ^0.19.19
+        version: 0.19.19
       '@types/node':
         specifier: ^22.5.0
         version: 22.5.0
@@ -189,16 +189,19 @@ packages:
     cpu: [x64]
     os: [win32]
 
-  '@huggingface/inference@4.0.4':
-    resolution: {integrity: sha512-Pz0l435mmlQNNOvIBqoDdG+ckdg9WdERFa8AiBnFX7ejY9DWxU31wr0sY8v9l+bsrmk6rZ3BqIXOlf5wjJf8Uw==}
+  '@huggingface/inference@4.2.0':
+    resolution: {integrity: sha512-mmt/wCojZ7OUYFsWAqz+xmJCe4RiU6Vs6q54tPxKl55jquLwBKgwd2LwGa8TxF7KeaGvBN+jk/OOLKwcce0vzg==}
     engines: {node: '>=18'}
 
   '@huggingface/jinja@0.5.0':
     resolution: {integrity: sha512-Ptc03/jGRiYRoi0bUYKZ14MkDslsBRT24oxmsvUlfYrvQMldrxCevhPnT+hfX8awKTT8/f/0ZBBWldoeAcMHdQ==}
     engines: {node: '>=18'}
 
-  '@huggingface/tasks@0.19.15':
-    resolution: {integrity: sha512-L4wB/iolKtsErke5yniXXNsGrSuaFmyREpcD4hL/wJox2UKtSEV5gE5gNrlvNaRLBOY41yN7/QmBF4y9byTM6Q==}
+  '@huggingface/tasks@0.19.19':
+    resolution: {integrity: sha512-+hXA5pk8gnaWc0BK/s5e68rwsucjPJjdoSBx9MCkRwVgVS0JwXCF5EbaSBSDt4CUDsq1uj33RbdQt/K9l2mJLg==}
+
+  '@huggingface/tasks@0.19.5':
+    resolution: {integrity: sha512-WEwM/tZsRqw0TinTkQwqPsK5pdaGffAV6Nu6xxSTSZUsBvkDJxE9kTiMNSwvjeHvt9/MYAJKjZ+CMSo6Rugs4g==}
 
   '@jridgewell/resolve-uri@3.1.2':
     resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
@@ -415,14 +418,16 @@ snapshots:
   '@esbuild/win32-x64@0.23.1':
     optional: true
 
-  '@huggingface/inference@4.0.4':
+  '@huggingface/inference@4.2.0':
     dependencies:
       '@huggingface/jinja': 0.5.0
-      '@huggingface/tasks': 0.19.15
+      '@huggingface/tasks': 0.19.19
 
   '@huggingface/jinja@0.5.0': {}
 
-  '@huggingface/tasks@0.19.15': {}
+  '@huggingface/tasks@0.19.19': {}
+
+  '@huggingface/tasks@0.19.5': {}
 
   '@jridgewell/resolve-uri@3.1.2': {}
 
diff --git a/scripts/inference-providers/scripts/generate.ts b/scripts/inference-providers/scripts/generate.ts
index 9ae721b6b..5da8d5933 100644
--- a/scripts/inference-providers/scripts/generate.ts
+++ b/scripts/inference-providers/scripts/generate.ts
@@ -120,14 +120,15 @@ await Promise.all(
       >;
 
       for (const [task, models] of Object.entries(mapping)) {
-        for (const [modelId, modelMapping] of Object.entries(models)) {
-          if (modelMapping.status == "live") {
-            if (!PER_TASK_SUPPORTED_PROVIDERS[task]) {
-              PER_TASK_SUPPORTED_PROVIDERS[task] = [];
-            }
-            PER_TASK_SUPPORTED_PROVIDERS[task].push(provider);
-            break;
+        const hasLiveModel = Object.values(models).some(
+          (model) => model.status === "live",
+        );
+
+        if (hasLiveModel) {
+          if (!PER_TASK_SUPPORTED_PROVIDERS[task]) {
+            PER_TASK_SUPPORTED_PROVIDERS[task] = [];
           }
+          PER_TASK_SUPPORTED_PROVIDERS[task].push(provider);
         }
       }
     }),
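
For reference, a minimal standalone sketch of the provider-support check introduced in the `generate.ts` hunk above. The `PER_TASK_SUPPORTED_PROVIDERS`, `provider`, `task`, and `models` names come from the diff; the type shapes, the `registerProvider` wrapper, and the example model ID are assumptions made purely for illustration.

```ts
// Hypothetical shapes; the real generate.ts derives `mapping` from the Hub's
// provider mapping data, so only the "live" status check is taken from the diff.
type ModelMapping = { status: "live" | "staging" };
type TaskMapping = Record<string, Record<string, ModelMapping>>;

const PER_TASK_SUPPORTED_PROVIDERS: Record<string, string[]> = {};

function registerProvider(provider: string, mapping: TaskMapping): void {
  for (const [task, models] of Object.entries(mapping)) {
    // A provider is listed for a task as soon as any of its model mappings
    // is "live", replacing the earlier per-model loop that pushed and broke out.
    const hasLiveModel = Object.values(models).some(
      (model) => model.status === "live",
    );

    if (hasLiveModel) {
      if (!PER_TASK_SUPPORTED_PROVIDERS[task]) {
        PER_TASK_SUPPORTED_PROVIDERS[task] = [];
      }
      PER_TASK_SUPPORTED_PROVIDERS[task].push(provider);
    }
  }
}

// Example (hypothetical model ID): the provider is recorded under the task
// because at least one of its mappings is live.
registerProvider("fal-ai", {
  "image-to-image": { "some-org/some-model": { status: "live" } },
});
```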