-
-
Notifications
You must be signed in to change notification settings - Fork 240
feat: add LLM provider preset system with MiniMax support #594
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,6 +1,8 @@ | ||
| <script setup lang="ts"> | ||
| import type { LLMProviderKey } from '@tg-search/core' | ||
|
|
||
| import { useAccountStore, useBridge } from '@tg-search/client' | ||
| import { CoreEventType } from '@tg-search/core' | ||
| import { CoreEventType, LLM_PROVIDERS } from '@tg-search/core' | ||
| import { storeToRefs } from 'pinia' | ||
| import { computed, watch } from 'vue' | ||
| import { useI18n } from 'vue-i18n' | ||
|
|
@@ -24,6 +26,28 @@ const messageResolvers = [ | |
|
|
||
| const embeddingDimensions = Object.values([1536, 1024, 768]) | ||
|
|
||
// Keys of all built-in provider presets, used to render the provider <select>.
// NOTE(review): this duplicates LLM_PROVIDER_KEYS already exported from
// '@tg-search/core' — prefer importing that constant instead of re-deriving it.
const providerKeys = Object.keys(LLM_PROVIDERS) as LLMProviderKey[]
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Duplicated provider key list and detection logic — Low Severity
Additional Locations (2) |
||
|
|
||
| const selectedProvider = computed({ | ||
| get() { | ||
| const apiBase = accountSettings.value?.llm?.apiBase ?? '' | ||
| for (const key of providerKeys) { | ||
| if (apiBase === LLM_PROVIDERS[key].apiBase) | ||
| return key | ||
| } | ||
| return '' // custom | ||
|
Comment on lines
+33
to
+38
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This logic for detecting a provider from the API base URL is already implemented in the |
||
| }, | ||
| set(key: string) { | ||
| if (!key || !accountSettings.value) | ||
| return | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Selecting "Custom" provider snaps back to detected providerMedium Severity The Additional Locations (1) |
||
| const preset = LLM_PROVIDERS[key as LLMProviderKey] | ||
| if (!preset) | ||
| return | ||
| accountSettings.value.llm.apiBase = preset.apiBase | ||
| accountSettings.value.llm.model = preset.defaultModel | ||
| }, | ||
| }) | ||
|
|
||
| function buildDefaultMessageProcessing() { | ||
| return { | ||
| receiveMessages: { receiveAll: true, downloadMedia: true }, | ||
|
|
@@ -224,13 +248,28 @@ function updateConfig() { | |
| </div> | ||
|
|
||
| <div class="grid gap-6"> | ||
| <div class="space-y-2"> | ||
| <label class="text-sm font-medium">{{ t('settings.llmProvider') }}</label> | ||
| <select | ||
| v-model="selectedProvider" | ||
| class="h-10 w-full flex border border-input rounded-md bg-background px-3 py-2 text-sm ring-offset-background disabled:cursor-not-allowed disabled:opacity-50 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:ring-ring" | ||
| > | ||
| <option v-for="key in providerKeys" :key="key" :value="key"> | ||
| {{ LLM_PROVIDERS[key].label }} | ||
| </option> | ||
| <option value=""> | ||
| {{ t('settings.customProvider') }} | ||
| </option> | ||
| </select> | ||
| </div> | ||
|
|
||
| <div class="grid gap-4 sm:grid-cols-2"> | ||
| <div class="space-y-2"> | ||
| <label class="text-sm font-medium">{{ t('settings.llmModel') }}</label> | ||
| <input | ||
| v-model="accountSettings.llm.model" | ||
| type="text" | ||
| placeholder="gpt-4o-mini" | ||
| :placeholder="LLM_PROVIDERS[selectedProvider as LLMProviderKey]?.defaultModel ?? 'gpt-4o-mini'" | ||
| class="h-10 w-full flex border border-input rounded-md bg-background px-3 py-2 text-sm ring-offset-background disabled:cursor-not-allowed file:border-0 file:bg-transparent file:text-sm placeholder:text-muted-foreground file:font-medium disabled:opacity-50 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:ring-ring" | ||
| > | ||
|
Comment on lines
269
to
274
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The expression for the dynamic placeholder is repeated for both the model and API base inputs, and it uses an unsafe type cast ( Consider creating a computed property for the currently selected preset to simplify the template and improve type safety. For example: // In <script setup>
const selectedPreset = computed(() => {
if (selectedProvider.value) {
return LLM_PROVIDERS[selectedProvider.value as LLMProviderKey];
}
return null;
});Then you can use it in the template like this, which is much cleaner: <input
v-model="accountSettings.llm.model"
type="text"
:placeholder="selectedPreset?.defaultModel ?? 'gpt-4o-mini'"
...
>This approach would also apply to the API Base URL input. |
||
| </div> | ||
|
|
@@ -239,7 +278,7 @@ function updateConfig() { | |
| <input | ||
| v-model="accountSettings.llm.apiBase" | ||
| type="text" | ||
| placeholder="https://api.openai.com/v1" | ||
| :placeholder="LLM_PROVIDERS[selectedProvider as LLMProviderKey]?.apiBase ?? 'https://api.openai.com/v1'" | ||
| class="h-10 w-full flex border border-input rounded-md bg-background px-3 py-2 text-sm ring-offset-background disabled:cursor-not-allowed file:border-0 file:bg-transparent file:text-sm placeholder:text-muted-foreground file:font-medium disabled:opacity-50 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:ring-ring" | ||
| > | ||
| </div> | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,84 @@ | ||
| import { describe, expect, it } from 'vitest' | ||
|
|
||
| import { detectProviderFromApiBase, LLM_PROVIDER_KEYS, LLM_PROVIDERS } from '../llm-providers' | ||
|
|
||
| describe('llm-providers', () => { | ||
| describe('lLM_PROVIDERS', () => { | ||
| it('should contain openai and minimax providers', () => { | ||
| expect(LLM_PROVIDERS).toHaveProperty('openai') | ||
| expect(LLM_PROVIDERS).toHaveProperty('minimax') | ||
| }) | ||
|
|
||
| it('should have valid openai preset', () => { | ||
| const openai = LLM_PROVIDERS.openai | ||
| expect(openai.label).toBe('OpenAI') | ||
| expect(openai.apiBase).toBe('https://api.openai.com/v1') | ||
| expect(openai.defaultModel).toBe('gpt-4o-mini') | ||
| expect(openai.models.length).toBeGreaterThan(0) | ||
| }) | ||
|
|
||
| it('should have valid minimax preset', () => { | ||
| const minimax = LLM_PROVIDERS.minimax | ||
| expect(minimax.label).toBe('MiniMax') | ||
| expect(minimax.apiBase).toBe('https://api.minimax.io/v1') | ||
| expect(minimax.defaultModel).toBe('MiniMax-M2.7') | ||
| expect(minimax.models).toContain('MiniMax-M2.7') | ||
| expect(minimax.models).toContain('MiniMax-M2.7-highspeed') | ||
| expect(minimax.models).toContain('MiniMax-M2.5') | ||
| expect(minimax.models).toContain('MiniMax-M2.5-highspeed') | ||
| }) | ||
|
|
||
| it('should have unique apiBase for each provider', () => { | ||
| const bases = Object.values(LLM_PROVIDERS).map(p => p.apiBase) | ||
| expect(new Set(bases).size).toBe(bases.length) | ||
| }) | ||
|
|
||
| it('should have non-empty label and defaultModel for each provider', () => { | ||
| for (const [key, preset] of Object.entries(LLM_PROVIDERS)) { | ||
| expect(preset.label, `${key} label`).toBeTruthy() | ||
| expect(preset.defaultModel, `${key} defaultModel`).toBeTruthy() | ||
| expect(preset.models.length, `${key} models`).toBeGreaterThan(0) | ||
| } | ||
| }) | ||
|
|
||
| it('should include defaultModel in models list', () => { | ||
| for (const [key, preset] of Object.entries(LLM_PROVIDERS)) { | ||
| expect(preset.models, `${key} models should include defaultModel`).toContain(preset.defaultModel) | ||
| } | ||
| }) | ||
| }) | ||
|
|
||
| describe('lLM_PROVIDER_KEYS', () => { | ||
| it('should contain all provider keys', () => { | ||
| expect(LLM_PROVIDER_KEYS).toContain('openai') | ||
| expect(LLM_PROVIDER_KEYS).toContain('minimax') | ||
| }) | ||
|
|
||
| it('should match Object.keys of LLM_PROVIDERS', () => { | ||
| expect(LLM_PROVIDER_KEYS).toEqual(Object.keys(LLM_PROVIDERS)) | ||
| }) | ||
| }) | ||
|
|
||
| describe('detectProviderFromApiBase', () => { | ||
| it('should detect openai provider', () => { | ||
| expect(detectProviderFromApiBase('https://api.openai.com/v1')).toBe('openai') | ||
| }) | ||
|
|
||
| it('should detect minimax provider', () => { | ||
| expect(detectProviderFromApiBase('https://api.minimax.io/v1')).toBe('minimax') | ||
| }) | ||
|
|
||
| it('should return undefined for unknown URL', () => { | ||
| expect(detectProviderFromApiBase('https://api.example.com/v1')).toBeUndefined() | ||
| }) | ||
|
|
||
| it('should return undefined for empty string', () => { | ||
| expect(detectProviderFromApiBase('')).toBeUndefined() | ||
| }) | ||
|
|
||
| it('should not match partial URLs', () => { | ||
| expect(detectProviderFromApiBase('https://api.openai.com')).toBeUndefined() | ||
| expect(detectProviderFromApiBase('https://api.minimax.io')).toBeUndefined() | ||
| }) | ||
| }) | ||
| }) |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,38 @@ | ||
| export interface LLMProviderPreset { | ||
| label: string | ||
| apiBase: string | ||
| defaultModel: string | ||
| models: string[] | ||
| } | ||
|
|
||
| export const LLM_PROVIDERS = { | ||
| openai: { | ||
| label: 'OpenAI', | ||
| apiBase: 'https://api.openai.com/v1', | ||
| defaultModel: 'gpt-4o-mini', | ||
| models: ['gpt-4o', 'gpt-4o-mini', 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4.1-nano', 'o3-mini'], | ||
| }, | ||
| minimax: { | ||
| label: 'MiniMax', | ||
| apiBase: 'https://api.minimax.io/v1', | ||
| defaultModel: 'MiniMax-M2.7', | ||
| models: ['MiniMax-M2.7', 'MiniMax-M2.7-highspeed', 'MiniMax-M2.5', 'MiniMax-M2.5-highspeed'], | ||
| }, | ||
| } as const satisfies Record<string, LLMProviderPreset> | ||
|
|
||
| export type LLMProviderKey = keyof typeof LLM_PROVIDERS | ||
|
|
||
| export const LLM_PROVIDER_KEYS = Object.keys(LLM_PROVIDERS) as LLMProviderKey[] | ||
|
|
||
| /** | ||
| * Detect provider key from an API base URL. | ||
| * Returns `undefined` for unrecognised or custom endpoints. | ||
| */ | ||
| export function detectProviderFromApiBase(apiBase: string): LLMProviderKey | undefined { | ||
| for (const [key, preset] of Object.entries(LLM_PROVIDERS)) { | ||
| if (apiBase === preset.apiBase) { | ||
| return key as LLMProviderKey | ||
| } | ||
| } | ||
| return undefined | ||
| } |


There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
To avoid code duplication, it's better to use the
LLM_PROVIDER_KEYSconstant already exported from the@tg-search/corepackage instead of recreating the list of keys here. You'll need to addLLM_PROVIDER_KEYSto your imports from@tg-search/core.