diff --git a/packages/inference/src/lib/getInferenceProviderMapping.ts b/packages/inference/src/lib/getInferenceProviderMapping.ts
index d5e10f32fd..a1026cb73e 100644
--- a/packages/inference/src/lib/getInferenceProviderMapping.ts
+++ b/packages/inference/src/lib/getInferenceProviderMapping.ts
@@ -2,23 +2,12 @@ import type { WidgetType } from "@huggingface/tasks";
 import { HF_HUB_URL } from "../config.js";
 import { HARDCODED_MODEL_INFERENCE_MAPPING } from "../providers/consts.js";
 import { EQUIVALENT_SENTENCE_TRANSFORMERS_TASKS } from "../providers/hf-inference.js";
-import type { InferenceProvider, InferenceProviderOrPolicy, ModelId } from "../types.js";
+import type { InferenceProvider, InferenceProviderMappingEntry, InferenceProviderOrPolicy, ModelId } from "../types.js";
 import { typedInclude } from "../utils/typedInclude.js";
 import { InferenceClientHubApiError, InferenceClientInputError } from "../errors.js";
 
 export const inferenceProviderMappingCache = new Map();
 
-export interface InferenceProviderMappingEntry {
-	adapter?: string;
-	adapterWeightsPath?: string;
-	hfModelId: ModelId;
-	provider: string;
-	providerId: string;
-	status: "live" | "staging";
-	task: WidgetType;
-	type?: "single-model" | "tag-filter";
-}
-
 /**
  * Normalize inferenceProviderMapping to always return an array format.
  * This provides backward and forward compatibility for the API changes.
diff --git a/packages/inference/src/lib/makeRequestOptions.ts b/packages/inference/src/lib/makeRequestOptions.ts
index d079a3c333..c5d9df199d 100644
--- a/packages/inference/src/lib/makeRequestOptions.ts
+++ b/packages/inference/src/lib/makeRequestOptions.ts
@@ -1,7 +1,6 @@
 import { HF_HEADER_X_BILL_TO, HF_HUB_URL } from "../config.js";
 import { PACKAGE_NAME, PACKAGE_VERSION } from "../package.js";
-import type { InferenceTask, Options, RequestArgs } from "../types.js";
-import type { InferenceProviderMappingEntry } from "./getInferenceProviderMapping.js";
+import type { InferenceTask, InferenceProviderMappingEntry, Options, RequestArgs } from "../types.js";
 import { getInferenceProviderMapping } from "./getInferenceProviderMapping.js";
 import type { getProviderHelper } from "./getProviderHelper.js";
 import { isUrl } from "./isUrl.js";
diff --git a/packages/inference/src/providers/consts.ts b/packages/inference/src/providers/consts.ts
index b55594b685..995161970a 100644
--- a/packages/inference/src/providers/consts.ts
+++ b/packages/inference/src/providers/consts.ts
@@ -1,5 +1,4 @@
-import type { InferenceProviderMappingEntry } from "../lib/getInferenceProviderMapping.js";
-import type { InferenceProvider } from "../types.js";
+import type { InferenceProvider, InferenceProviderMappingEntry } from "../types.js";
 import { type ModelId } from "../types.js";
 
 /**
diff --git a/packages/inference/src/snippets/getInferenceSnippets.ts b/packages/inference/src/snippets/getInferenceSnippets.ts
index 077bc73fb1..b63a91cc58 100644
--- a/packages/inference/src/snippets/getInferenceSnippets.ts
+++ b/packages/inference/src/snippets/getInferenceSnippets.ts
@@ -8,10 +8,9 @@
 } from "@huggingface/tasks";
 import type { PipelineType, WidgetType } from "@huggingface/tasks";
 import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks";
-import type { InferenceProviderMappingEntry } from "../lib/getInferenceProviderMapping.js";
 import { getProviderHelper } from "../lib/getProviderHelper.js";
 import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions.js";
-import type { InferenceProviderOrPolicy, InferenceTask, RequestArgs } from "../types.js";
+import type { InferenceProviderMappingEntry, InferenceProviderOrPolicy, InferenceTask, RequestArgs } from "../types.js";
 import { templates } from "./templates.exported.js";
 
 export type InferenceSnippetOptions = {
diff --git a/packages/inference/src/types.ts b/packages/inference/src/types.ts
index 8bdd18eb0a..f48e9a011c 100644
--- a/packages/inference/src/types.ts
+++ b/packages/inference/src/types.ts
@@ -1,5 +1,4 @@
-import type { ChatCompletionInput, PipelineType } from "@huggingface/tasks";
-import type { InferenceProviderMappingEntry } from "./lib/getInferenceProviderMapping.js";
+import type { ChatCompletionInput, PipelineType, WidgetType } from "@huggingface/tasks";
 
 /**
  * HF model id, like "meta-llama/Llama-3.3-70B-Instruct"
@@ -63,6 +62,17 @@ export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
 
 export type InferenceProviderOrPolicy = (typeof PROVIDERS_OR_POLICIES)[number];
 
+export interface InferenceProviderMappingEntry {
+	adapter?: string;
+	adapterWeightsPath?: string;
+	hfModelId: ModelId;
+	provider: string;
+	providerId: string;
+	status: "live" | "staging";
+	task: WidgetType;
+	type?: "single-model" | "tag-filter";
+}
+
 export interface BaseArgs {
 	/**
 	 * The access token to use. Without it, you'll get rate-limited quickly.
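
Net effect of the diff: InferenceProviderMappingEntry now lives in packages/inference/src/types.ts, and the other modules import it from "../types.js" instead of "./lib/getInferenceProviderMapping.js". A minimal sketch of what an intra-package consumer looks like after the move; the field values below are hypothetical examples shaped to the interface, not taken from a real provider mapping:

// Sketch: importing the relocated type from types.ts (path as seen from src/lib/).
import type { InferenceProviderMappingEntry } from "../types.js";

// Hypothetical mapping entry; "conversational" is one valid WidgetType value.
const entry: InferenceProviderMappingEntry = {
	hfModelId: "meta-llama/Llama-3.3-70B-Instruct", // HF model id, per the doc comment in types.ts
	provider: "hf-inference",                       // example provider name
	providerId: "meta-llama/Llama-3.3-70B-Instruct", // provider-side model id (hypothetical)
	status: "live",
	task: "conversational",
};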