
Commit 051874f

Merge pull request #5995 from continuedev/dallin/gemini-args-parsing: Handle JSON Parsing errors

2 parents: 10d7423 + f4233ae

21 files changed: +308 -88 lines

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+import { vi } from "vitest";
+
+export const fetchwithRequestOptions = vi.fn(
+  async (url, options, requestOptions) => {
+    console.log("Mocked fetch called with:", url, options, requestOptions);
+    return {
+      ok: true,
+      status: 200,
+      statusText: "OK",
+    };
+  },
+);
+
+export const streamSse = vi.fn(function* () {
+  yield "";
+});
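
This new file is a vitest manual mock of the "@continuedev/fetch" package (the mock's own path is not shown in this excerpt). Assuming it sits in a __mocks__ directory where vitest can resolve it, a test opts in with vi.mock; a minimal illustrative sketch, not part of the commit:

import { fetchwithRequestOptions } from "@continuedev/fetch";
import { expect, test, vi } from "vitest";

// Redirect imports of "@continuedev/fetch" to the manual mock above.
vi.mock("@continuedev/fetch");

test("mocked fetch resolves without touching the network", async () => {
  const res = await fetchwithRequestOptions(
    new URL("https://example.com"),
    {},
    {},
  );
  expect(res.ok).toBe(true);
  expect(fetchwithRequestOptions).toHaveBeenCalledTimes(1);
});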

core/context/providers/GoogleContextProvider.ts

Lines changed: 24 additions & 16 deletions
@@ -38,27 +38,35 @@ class GoogleContextProvider extends BaseContextProvider {
       body: payload,
     });

+    if (!response.ok) {
+      throw new Error(
+        `Failed to fetch Google search results: ${response.statusText}`,
+      );
+    }
     const results = await response.text();
+    try {
+      const parsed = JSON.parse(results);
+      let content = `Google Search: ${query}\n\n`;
+      const answerBox = parsed.answerBox;

-    const jsonResults = JSON.parse(results);
-    let content = `Google Search: ${query}\n\n`;
-    const answerBox = jsonResults.answerBox;
+      if (answerBox) {
+        content += `Answer Box (${answerBox.title}): ${answerBox.answer}\n\n`;
+      }

-    if (answerBox) {
-      content += `Answer Box (${answerBox.title}): ${answerBox.answer}\n\n`;
-    }
+      for (const result of parsed.organic) {
+        content += `${result.title}\n${result.link}\n${result.snippet}\n\n`;
+      }

-    for (const result of jsonResults.organic) {
-      content += `${result.title}\n${result.link}\n${result.snippet}\n\n`;
+      return [
+        {
+          content,
+          name: "Google Search",
+          description: "Google Search",
+        },
+      ];
+    } catch (e) {
+      throw new Error(`Failed to parse Google search results: ${results}`);
     }
-
-    return [
-      {
-        content,
-        name: "Google Search",
-        description: "Google Search",
-      },
-    ];
   }
 }
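
For context on what the new try/catch guards against: JSON.parse throws a SyntaxError whenever the search API responds with a non-JSON body (an HTML error page, truncated output, etc.), and that exception previously escaped without the offending payload. A two-line illustration of the failure mode, not code from the commit:

try {
  JSON.parse("<html>Bad Gateway</html>"); // non-JSON body
} catch (e) {
  console.log(e instanceof SyntaxError); // true
}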

core/context/providers/GreptileContextProvider.ts

Lines changed: 11 additions & 7 deletions
@@ -81,13 +81,17 @@ class GreptileContextProvider extends BaseContextProvider {
       }

       // Parse the response as JSON
-      const json = JSON.parse(rawText);
-
-      return json.sources.map((source: any) => ({
-        description: source.filepath,
-        content: `File: ${source.filepath}\nLines: ${source.linestart}-${source.lineend}\n\n${source.summary}`,
-        name: (source.filepath.split("/").pop() ?? "").split("\\").pop() ?? "",
-      }));
+      try {
+        const json = JSON.parse(rawText);
+        return json.sources.map((source: any) => ({
+          description: source.filepath,
+          content: `File: ${source.filepath}\nLines: ${source.linestart}-${source.lineend}\n\n${source.summary}`,
+          name:
+            (source.filepath.split("/").pop() ?? "").split("\\").pop() ?? "",
+        }));
+      } catch (jsonError) {
+        throw new Error(`Failed to parse Greptile response:\n${rawText}`);
+      }
     } catch (error) {
       console.error("Error getting context items from Greptile:", error);
       throw new Error("Error getting context items from Greptile");

core/core.ts

Lines changed: 1 addition & 1 deletion
@@ -701,7 +701,7 @@ export class Core {
       this.messenger.send("toolCallPartialOutput", params);
     };

-    return await callTool(tool, toolCall.function.arguments, {
+    return await callTool(tool, toolCall, {
       config,
       ide: this.ide,
       llm: config.selectedModelByRole.chat,
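
Passing the whole toolCall instead of the raw toolCall.function.arguments string moves argument parsing out of this call site; presumably callTool (whose diff is not shown in this excerpt) now parses internally via the same safeParseToolCallArgs helper that the LLM classes below adopt, so malformed JSON is handled in one place.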

core/indexing/LanceDbIndex.ts

Lines changed: 21 additions & 11 deletions
@@ -294,17 +294,27 @@ export class LanceDbIndex implements CodebaseIndex {
     );
     const cachedItems = await stmt.all();

-    const lanceRows: LanceDbRow[] = cachedItems.map(
-      ({ uuid, vector, startLine, endLine, contents }) => ({
-        path,
-        uuid,
-        startLine,
-        endLine,
-        contents,
-        cachekey: cacheKey,
-        vector: JSON.parse(vector),
-      }),
-    );
+    const lanceRows: LanceDbRow[] = [];
+    for (const item of cachedItems) {
+      try {
+        const vector = JSON.parse(item.vector);
+        const { uuid, startLine, endLine, contents } = item;
+
+        // Rows with unparseable vectors are skipped instead of aborting the batch.
+        lanceRows.push({
+          path,
+          uuid,
+          startLine,
+          endLine,
+          contents,
+          cachekey: cacheKey,
+          vector,
+        });
+      } catch (err) {
+        console.warn(
+          `LanceDBIndex, skipping ${item.path} due to invalid vector JSON:\n${item.vector}\n\nError: ${err}`,
+        );
+      }
+    }

     if (lanceRows.length > 0) {
       if (needToCreateLanceTable) {

core/llm/llm-pre-fetch.vitest.ts

Lines changed: 113 additions & 0 deletions
@@ -0,0 +1,113 @@
+import { fetchwithRequestOptions } from "@continuedev/fetch";
+import * as openAiAdapters from "@continuedev/openai-adapters";
+import * as dotenv from "dotenv";
+import { beforeEach, describe, expect, test, vi } from "vitest";
+import { ChatMessage, ILLM } from "..";
+import Anthropic from "./llms/Anthropic";
+import Gemini from "./llms/Gemini";
+import OpenAI from "./llms/OpenAI";
+
+dotenv.config();
+
+vi.mock("@continuedev/fetch");
+vi.mock("@continuedev/openai-adapters");
+
+async function dudLLMCall(llm: ILLM, messages: ChatMessage[]) {
+  try {
+    const abortController = new AbortController();
+    const gen = llm.streamChat(messages, abortController.signal, {});
+    await gen.next();
+    await gen.return({
+      completion: "",
+      completionOptions: {
+        model: "",
+      },
+      modelTitle: "",
+      prompt: "",
+    });
+    abortController.abort();
+  } catch (e) {
+    console.error("Expected error", e);
+  }
+}
+
+const invalidToolCallArg = '{"name": "Ali';
+const messagesWithInvalidToolCallArgs: ChatMessage[] = [
+  {
+    role: "user",
+    content: "Call the say_hello tool",
+  },
+  {
+    role: "assistant",
+    content: "",
+    toolCalls: [
+      {
+        id: "tool_call_1",
+        type: "function",
+        function: {
+          name: "say_name",
+          arguments: invalidToolCallArg,
+        },
+      },
+    ],
+  },
+  {
+    role: "user",
+    content: "This is my response",
+  },
+];
+
+describe("LLM Pre-fetch", () => {
+  beforeEach(() => {
+    vi.resetAllMocks();
+    // Log to verify the mock is properly set up
+    console.log("Mock setup:", openAiAdapters);
+  });
+
+  test("Invalid tool call args are ignored", async () => {
+    const anthropic = new Anthropic({
+      model: "not-important",
+      apiKey: "invalid",
+    });
+    await dudLLMCall(anthropic, messagesWithInvalidToolCallArgs);
+    expect(fetchwithRequestOptions).toHaveBeenCalledWith(
+      expect.any(URL),
+      {
+        method: "POST",
+        headers: expect.any(Object),
+        signal: expect.any(AbortSignal),
+        body: expect.stringContaining('"name":"say_name","input":{}'),
+      },
+      expect.any(Object),
+    );
+
+    vi.clearAllMocks();
+    const gemini = new Gemini({ model: "gemini-something", apiKey: "invalid" });
+    await dudLLMCall(gemini, messagesWithInvalidToolCallArgs);
+    expect(fetchwithRequestOptions).toHaveBeenCalledWith(
+      expect.any(URL),
+      {
+        method: "POST",
+        // headers: expect.any(Object),
+        signal: expect.any(AbortSignal),
+        body: expect.stringContaining('"name":"say_name","args":{}'),
+      },
+      expect.any(Object),
+    );
+
+    // OpenAI does not need to clear invalid tool call args because it stores them in strings
+    vi.clearAllMocks();
+    const openai = new OpenAI({ model: "gpt-something", apiKey: "invalid" });
+    await dudLLMCall(openai, messagesWithInvalidToolCallArgs);
+    expect(fetchwithRequestOptions).toHaveBeenCalledWith(
+      expect.any(URL),
+      {
+        method: "POST",
+        headers: expect.any(Object),
+        signal: expect.any(AbortSignal),
+        body: expect.stringContaining(JSON.stringify(invalidToolCallArg)),
+      },
+      expect.any(Object),
+    );
+  });
+});

core/llm/llms/Anthropic.ts

Lines changed: 2 additions & 1 deletion
@@ -1,5 +1,6 @@
 import { streamSse } from "@continuedev/fetch";
 import { ChatMessage, CompletionOptions, LLMOptions } from "../../index.js";
+import { safeParseToolCallArgs } from "../../tools/parseArgs.js";
 import { renderChatMessage, stripImages } from "../../util/messageContent.js";
 import { BaseLLM } from "../index.js";

@@ -66,7 +67,7 @@ class Anthropic extends BaseLLM {
         type: "tool_use",
         id: toolCall.id,
         name: toolCall.function?.name,
-        input: JSON.parse(toolCall.function?.arguments || "{}"),
+        input: safeParseToolCallArgs(toolCall),
       })),
     };
   } else if (message.role === "thinking" && !message.redactedThinking) {
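
The new core/tools/parseArgs.ts is among the 21 changed files but its diff is not shown in this excerpt. From the call sites here and the pre-fetch test's expectation that unparseable arguments serialize as an empty object ("input":{} for Anthropic, "args":{} for Gemini), a plausible sketch follows; the parameter type, import path, and warning text are assumptions:

import { ToolCallDelta } from "../index.js";

export function safeParseToolCallArgs(
  toolCall: ToolCallDelta,
): Record<string, unknown> {
  try {
    return JSON.parse(toolCall.function?.arguments || "{}");
  } catch (e) {
    // Fall back to empty args instead of crashing the request; this is
    // what makes the '"name":"say_name","input":{}' assertion above pass.
    console.warn(
      `Failed to parse tool call arguments:\n${toolCall.function?.arguments}`,
    );
    return {};
  }
}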

core/llm/llms/Bedrock.ts

Lines changed: 23 additions & 11 deletions
@@ -15,6 +15,7 @@ import {
   CompletionOptions,
   LLMOptions,
 } from "../../index.js";
+import { safeParseToolCallArgs } from "../../tools/parseArgs.js";
 import { renderChatMessage, stripImages } from "../../util/messageContent.js";
 import { BaseLLM } from "../index.js";
 import { PROVIDER_TOOL_SUPPORT } from "../toolSupport.js";
@@ -408,7 +409,7 @@ class Bedrock extends BaseLLM {
         toolUse: {
           toolUseId: toolCall.id,
           name: toolCall.function?.name,
-          input: JSON.parse(toolCall.function?.arguments || "{}"),
+          input: safeParseToolCallArgs(toolCall),
         },
       })),
     };
@@ -564,10 +565,14 @@
         const command = new InvokeModelCommand(input);
         const response = await client.send(command);
         if (response.body) {
-          const responseBody = JSON.parse(
-            new TextDecoder().decode(response.body),
-          );
-          return this._extractEmbeddings(responseBody);
+          const decoder = new TextDecoder();
+          const decoded = decoder.decode(response.body);
+          try {
+            const responseBody = JSON.parse(decoded);
+            return this._extractEmbeddings(responseBody);
+          } catch (e) {
+            console.error(`Error parsing response body from:\n${decoded}`, e);
+          }
         }
         return [];
       }),
@@ -662,12 +667,19 @@
         throw new Error("Empty response received from Bedrock");
       }

-      const responseBody = JSON.parse(new TextDecoder().decode(response.body));
-
-      // Sort results by index to maintain original order
-      return responseBody.results
-        .sort((a: any, b: any) => a.index - b.index)
-        .map((result: any) => result.relevance_score);
+      const decoder = new TextDecoder();
+      const decoded = decoder.decode(response.body);
+      try {
+        const responseBody = JSON.parse(decoded);
+        // Sort results by index to maintain original order
+        return responseBody.results
+          .sort((a: any, b: any) => a.index - b.index)
+          .map((result: any) => result.relevance_score);
+      } catch (e) {
+        throw new Error(
+          `Error parsing JSON from Bedrock response body:\n${decoded}, ${JSON.stringify(e)}`,
+        );
+      }
     } catch (error: unknown) {
       if (error instanceof Error) {
         if ("code" in error) {

core/llm/llms/BedrockImport.ts

Lines changed: 9 additions & 3 deletions
@@ -51,9 +51,15 @@ class BedrockImport extends BaseLLM {

     if (response.body) {
       for await (const item of response.body) {
-        const chunk = JSON.parse(new TextDecoder().decode(item.chunk?.bytes));
-        if (chunk.outputs[0].text) {
-          yield chunk.outputs[0].text;
+        const decoder = new TextDecoder();
+        const decoded = decoder.decode(item.chunk?.bytes);
+        try {
+          const chunk = JSON.parse(decoded);
+          if (chunk.outputs[0].text) {
+            yield chunk.outputs[0].text;
+          }
+        } catch (e) {
+          throw new Error(`Malformed JSON received from Bedrock: ${decoded}`);
         }
       }
     }

core/llm/llms/Gemini.ts

Lines changed: 3 additions & 2 deletions
@@ -9,6 +9,7 @@ import {
   TextMessagePart,
   ToolCallDelta,
 } from "../../index.js";
+import { safeParseToolCallArgs } from "../../tools/parseArgs.js";
 import { renderChatMessage, stripImages } from "../../util/messageContent.js";
 import { BaseLLM } from "../index.js";
 import {
@@ -250,11 +251,11 @@ class Gemini extends BaseLLM {
       };
       if (msg.toolCalls) {
         msg.toolCalls.forEach((toolCall) => {
-          if (toolCall.function?.name && toolCall.function?.arguments) {
+          if (toolCall.function?.name) {
             assistantMsg.parts.push({
               functionCall: {
                 name: toolCall.function.name,
-                args: JSON.parse(toolCall.function.arguments),
+                args: safeParseToolCallArgs(toolCall),
               },
             });
           }
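
Note the companion change here: the && toolCall.function?.arguments guard is removed, so a tool call whose arguments are missing or unparseable is still forwarded to Gemini with args set to {} rather than dropped from the conversation, which is exactly what the pre-fetch test's '"name":"say_name","args":{}' expectation verifies.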

core/llm/llms/HuggingFaceTEI.ts

Lines changed: 9 additions & 4 deletions
@@ -52,11 +52,16 @@ class HuggingFaceTEIEmbeddingsProvider extends BaseLLM {
     });
     if (!resp.ok) {
       const text = await resp.text();
-      const embedError = JSON.parse(text) as TEIEmbedErrorResponse;
-      if (!embedError.error_type || !embedError.error) {
-        throw new Error(text);
+      let teiError: TEIEmbedErrorResponse | null = null;
+      try {
+        teiError = JSON.parse(text);
+      } catch (e) {
+        console.log(`Failed to parse TEI embed error response:\n${text}`, e);
       }
-      throw new TEIEmbedError(embedError);
+      if (teiError && (teiError.error_type || teiError.error)) {
+        throw new TEIEmbedError(teiError);
+      }
+      throw new Error(text);
     }
     return (await resp.json()) as number[][];
   }
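
TEIEmbedError and TEIEmbedErrorResponse are declared elsewhere in this file and are untouched by the diff; from the two fields the handler checks, the response shape is roughly the following (a reconstruction for reference, not the commit's declaration):

interface TEIEmbedErrorResponse {
  error: string; // human-readable message from text-embeddings-inference
  error_type: string; // error category reported by the server
}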
