
Commit 0360095

refactor(ai): better error handling
1 parent ead031e commit 0360095

File tree: 2 files changed (+65 lines, -33 lines)

README.md

Lines changed: 9 additions & 3 deletions
@@ -67,9 +67,15 @@ npm run setup
 <img width="100%" alt="booking-screen" src="./public/assets/02.gif">
 
 **Currently Supporting:**
-- OpenAI API: Get OpenAI API Key [here](https://platform.openai.com/api-keys)
-- Gemini API: Get Gemini API Key [here](https://aistudio.google.com/apikey)
-- Local LLM Ollama & Whisper
+
+- **OpenAI API**: Get your API Key [here](https://platform.openai.com/api-keys).
+- **Gemini API**: Get your API Key [here](https://aistudio.google.com/apikey).
+- **Local LLM (Ollama & Whisper).**
+- **Anthropic API**: Get your API Key [here](https://console.anthropic.com/dashboard).
+- **OpenRouter API (new)**: Supports models like **Grok 4, Claude 4 Sonnet, Gemini 2.5 Flash, GPT-4.1, LLaMA 4 Maverick** using your **OpenRouter API key**.
+  Get your API key [here](https://openrouter.ai/settings/keys).
+
+> Note: OpenRouter does not currently support native STT; use OpenAI or Gemini for speech-to-text.
 
 
 ### Liquid Glass Design (coming soon)

src/features/common/ai/providers/openrouter.js

Lines changed: 56 additions & 30 deletions
@@ -4,8 +4,8 @@ const OPENROUTER_BASE_URL = 'https://openrouter.ai/api/v1';
 
 class OpenRouterProvider {
     static async validateApiKey(key) {
-        if (!key || typeof key !== 'string' || !key.startsWith('sk-')) {
-            return { success: false, error: 'Invalid OpenAI API key format.' };
+        if (!key || typeof key !== 'string' || !key.startsWith('sk-or-')) {
+            return { success: false, error: 'Invalid OpenRouter API key format.' };
         }
 
         try {
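OpenRouter keys use the `sk-or-` prefix, so the old `sk-` check accepted OpenAI-style keys and even named the wrong provider in its error message. A quick sketch of the stricter check's behavior, with made-up key values:

```js
// Illustrative keys only; real keys are longer.
await OpenRouterProvider.validateApiKey('sk-or-v1-abc123');
// -> passes the prefix check and continues to the network validation in the try block

await OpenRouterProvider.validateApiKey('sk-proj-abc123');
// -> { success: false, error: 'Invalid OpenRouter API key format.' }
// An OpenAI-style key now fails fast, and the message names the right provider.
```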
@@ -63,16 +63,26 @@ function createLLM({ apiKey, model = 'x-ai/grok-4', temperature = 0.7, maxTokens
     const client = new OpenAI({ apiKey, baseURL: OPENROUTER_BASE_URL });
 
     const callApi = async (messages) => {
-        const response = await client.chat.completions.create({
-            model: model,
-            messages: messages,
-            temperature: temperature,
-            max_tokens: maxTokens
-        });
-        return {
-            content: response.choices[0].message.content.trim(),
-            raw: response
-        };
+        try {
+            const response = await client.chat.completions.create({
+                model: model,
+                messages: messages,
+                temperature: temperature,
+                max_tokens: maxTokens
+            });
+
+            if (!response.choices || response.choices.length === 0) {
+                throw new Error('No response choices returned from OpenRouter API');
+            }
+
+            return {
+                content: response.choices[0].message.content?.trim() || '',
+                raw: response
+            };
+        } catch (error) {
+            console.error('[OpenRouter] API call failed:', error);
+            throw new Error(`OpenRouter API error: ${error.message}`);
+        }
     };
 
     return {
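The optional chaining on `message.content` is a fix in its own right: a model can return a message whose `content` is null (a tool-call-only response, for instance), and the old `content.trim()` turned that into an unhandled `TypeError`. A before/after sketch with an illustrative response shape:

```js
// Illustrative message object, not taken from a real API call.
const message = { role: 'assistant', content: null };

// Old code: message.content.trim()
// -> TypeError: Cannot read properties of null (reading 'trim')

// New code:
const content = message.content?.trim() || '';
// -> '' (the caller gets an empty string instead of a crash, while real API
//    failures are still surfaced as `OpenRouter API error: ...`)
```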
@@ -83,7 +93,13 @@ function createLLM({ apiKey, model = 'x-ai/grok-4', temperature = 0.7, maxTokens
 
         for (const part of parts) {
             if (typeof part === 'string') {
-                if (systemPrompt === '' && part.includes('You are')) {
+                if (
+                    systemPrompt === '' &&
+                    (
+                        part.toLowerCase().startsWith('you are') ||
+                        part.toLowerCase().includes('system:')
+                    )
+                ) {
                     systemPrompt = part;
                 } else {
                     userContent.push({ type: 'text', text: part });
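The rewritten condition makes system-prompt detection case-insensitive and anchors the 'you are' match to the start of the string, so user text that merely mentions the phrase is no longer swallowed; the `systemPrompt === ''` guard still means only the first match counts. Restating the new predicate on its own, with illustrative inputs:

```js
// Standalone restatement of the new check, for illustration only.
const looksLikeSystemPrompt = (part) =>
    part.toLowerCase().startsWith('you are') ||
    part.toLowerCase().includes('system:');

looksLikeSystemPrompt('You are a helpful assistant.');     // true  (starts with 'you are')
looksLikeSystemPrompt('SYSTEM: reply in JSON only.');      // true  (contains 'system:')
looksLikeSystemPrompt('Explain what you are looking at.'); // false (phrase present, but not at the start)
```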
@@ -120,7 +136,7 @@ function createLLM({ apiKey, model = 'x-ai/grok-4', temperature = 0.7, maxTokens
 * Creates an OpenRouter streaming LLM instance
 * @param {object} opts - Configuration options
 * @param {string} opts.apiKey - OpenRouter API key
-* @param {string} [opts.model='x-ai'] - Model name
+* @param {string} [opts.model='x-ai/grok-4'] - Model name
 * @param {number} [opts.temperature=0.7] - Temperature
 * @param {number} [opts.maxTokens=2048] - Max tokens
 * @returns {object} Streaming LLM instance
@@ -129,30 +145,40 @@ function createStreamingLLM({ apiKey, model = 'x-ai/grok-4', temperature = 0.7,
     return {
         streamChat: async (messages) => {
             console.log("[OpenRouter Provider] Starting Streaming request")
+
+            if (!messages || !Array.isArray(messages) || messages.length === 0) {
+                throw new Error('Messages array is required and cannot be empty')
+            }
+
             const fetchUrl = `${OPENROUTER_BASE_URL}/chat/completions`;
-
             const headers = {
                 Authorization: `Bearer ${apiKey}`,
                 'Content-Type': 'application/json',
             };
 
-            const response = await fetch(fetchUrl, {
-                method: 'POST',
-                headers,
-                body: JSON.stringify({
-                    model: model,
-                    messages,
-                    temperature,
-                    max_tokens: maxTokens,
-                    stream: true,
-                }),
-            });
+            try {
+                const response = await fetch(fetchUrl, {
+                    method: 'POST',
+                    headers,
+                    body: JSON.stringify({
+                        model,
+                        messages,
+                        temperature,
+                        max_tokens: maxTokens,
+                        stream: true,
+                    }),
+                });
 
-            if (!response.ok) {
-                throw new Error(`OpenRouter API error: ${response.status} ${response.statusText}`);
-            }
+                if (!response.ok) {
+                    const errorText = await response.text().catch(() => 'Unknown error');
+                    throw new Error(`OpenRouter API error: ${response.status} ${response.statusText}. ${errorText}`);
+                }
 
-            return response;
+                return response;
+            } catch (error) {
+                console.error('[OpenRouter] Streaming request failed:', error);
+                throw error;
+            }
         }
     };
 }
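Because `streamChat` returns the raw `fetch` Response (requested with `stream: true`), reading the tokens is the caller's job. A minimal consumption sketch, assuming OpenRouter's OpenAI-compatible SSE framing of `data: <json>` lines terminated by `data: [DONE]`; the consumer code here is illustrative, not part of this commit:

```js
// Hypothetical consumer of the Response returned by streamChat.
const llm = createStreamingLLM({ apiKey: process.env.OPENROUTER_API_KEY });
const response = await llm.streamChat([{ role: 'user', content: 'Hello' }]);

const reader = response.body.getReader();
const decoder = new TextDecoder();

while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    // Assumes each chunk holds whole SSE lines; production code should
    // buffer partial lines across chunks before parsing.
    for (const line of decoder.decode(value).split('\n')) {
        if (!line.startsWith('data: ') || line === 'data: [DONE]') continue;
        const delta = JSON.parse(line.slice(6)).choices?.[0]?.delta?.content;
        if (delta) process.stdout.write(delta);
    }
}
```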
