From 247e672105f382cc0e1f764d13ea908df4e637ba Mon Sep 17 00:00:00 2001
From: Psanyi89
Date: Fri, 24 Oct 2025 12:44:46 +0200
Subject: [PATCH 1/3] ollama n8n json response parsing

---
 core/llm/llms/Ollama.ts | 58 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 57 insertions(+), 1 deletion(-)

diff --git a/core/llm/llms/Ollama.ts b/core/llm/llms/Ollama.ts
index 0a239fce909..7c5e8000c82 100644
--- a/core/llm/llms/Ollama.ts
+++ b/core/llm/llms/Ollama.ts
@@ -114,6 +114,18 @@ type OllamaErrorResponse = {
   error: string;
 };
 
+type N8nChatReponse = {
+  type: string;
+  content?: string;
+  metadata: {
+    nodeId: string;
+    nodeName: string;
+    itemIndex: number;
+    runIndex: number;
+    timestamps: number;
+  };
+};
+
 type OllamaRawResponse =
   | OllamaErrorResponse
   | (OllamaBaseResponse & {
@@ -124,7 +136,8 @@ type OllamaChatResponse =
   | OllamaErrorResponse
   | (OllamaBaseResponse & {
       message: OllamaChatMessage;
-    });
+    })
+  | N8nChatReponse;
 
 interface OllamaTool {
   type: "function";
@@ -146,6 +159,7 @@ class Ollama extends BaseLLM implements ModelInstaller {
   private static modelsBeingInstalled: Set<string> = new Set();
   private static modelsBeingInstalledMutex = new Mutex();
+  private static _isThinking: boolean = false;
   private fimSupported: boolean = false;
 
   constructor(options: LLMOptions) {
     super(options);
@@ -388,6 +402,15 @@ class Ollama extends BaseLLM implements ModelInstaller {
     }
   }
 
+  static GetIsThinking(): boolean {
+    return this._isThinking;
+  }
+  static SetIsThinking(newValue: boolean): void {
+    if (this._isThinking !== newValue) {
+      this._isThinking = newValue;
+    }
+  }
+
   protected async *_streamChat(
     messages: ChatMessage[],
     signal: AbortSignal,
@@ -433,6 +456,39 @@ class Ollama extends BaseLLM implements ModelInstaller {
         throw new Error(res.error);
       }
 
+      if ("type" in res) {
+        const { content } = res;
+
+        if (content === "") {
+          Ollama.SetIsThinking(true);
+        }
+
+        if (Ollama.GetIsThinking() && content) {
+          const thinkingMessage: ThinkingChatMessage = {
+            role: "thinking",
+            content: content,
+          };
+
+          if (thinkingMessage) {
+            if (content === "") {
+              Ollama.SetIsThinking(false);
+            }
+            // When Streaming you can't have both thinking and content
+            console.log("THINKING TOKEN:", thinkingMessage.content);
+            return [thinkingMessage];
+          }
+        }
+
+        if (content) {
+          const chatMessage: ChatMessage = {
+            role: "assistant",
+            content: content,
+          };
+          return [chatMessage];
+        }
+        return [];
+      }
+
       const { role, content, thinking, tool_calls: toolCalls } = res.message;
 
       if (role === "tool") {

From 936dc9d8e160aa5d7d37a546d99820fba8295686 Mon Sep 17 00:00:00 2001
From: Psanyi89
Date: Fri, 24 Oct 2025 12:46:19 +0200
Subject: [PATCH 2/3] remove debugging console.log

---
 core/llm/llms/Ollama.ts | 1 -
 1 file changed, 1 deletion(-)

diff --git a/core/llm/llms/Ollama.ts b/core/llm/llms/Ollama.ts
index 7c5e8000c82..f451388b1ea 100644
--- a/core/llm/llms/Ollama.ts
+++ b/core/llm/llms/Ollama.ts
@@ -474,7 +474,6 @@ class Ollama extends BaseLLM implements ModelInstaller {
               Ollama.SetIsThinking(false);
             }
             // When Streaming you can't have both thinking and content
-            console.log("THINKING TOKEN:", thinkingMessage.content);
             return [thinkingMessage];
           }
         }

From 11f5ab477d673a411beb0e142160bfe742e4ee23 Mon Sep 17 00:00:00 2001
From: Psanyi89
Date: Fri, 24 Oct 2025 20:42:12 +0200
Subject: [PATCH 3/3] change scope of _isThinking variable and its encapsulation

---
 core/llm/llms/Ollama.ts | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/core/llm/llms/Ollama.ts b/core/llm/llms/Ollama.ts
index f451388b1ea..dafb085f651 100644
--- a/core/llm/llms/Ollama.ts
+++ b/core/llm/llms/Ollama.ts
@@ -159,7 +159,6 @@ class Ollama extends BaseLLM implements ModelInstaller {
   private static modelsBeingInstalled: Set<string> = new Set();
   private static modelsBeingInstalledMutex = new Mutex();
-  private static _isThinking: boolean = false;
   private fimSupported: boolean = false;
 
   constructor(options: LLMOptions) {
     super(options);
@@ -402,15 +401,6 @@ class Ollama extends BaseLLM implements ModelInstaller {
     }
   }
 
-  static GetIsThinking(): boolean {
-    return this._isThinking;
-  }
-  static SetIsThinking(newValue: boolean): void {
-    if (this._isThinking !== newValue) {
-      this._isThinking = newValue;
-    }
-  }
-
   protected async *_streamChat(
     messages: ChatMessage[],
     signal: AbortSignal,
@@ -450,7 +440,15 @@ class Ollama extends BaseLLM implements ModelInstaller {
       body: JSON.stringify(chatOptions),
      signal,
     });
-
+    let _isThinking: boolean = false;
+    function GetIsThinking(): boolean {
+      return _isThinking;
+    }
+    function SetIsThinking(newValue: boolean): void {
+      if (_isThinking !== newValue) {
+        _isThinking = newValue;
+      }
+    }
     function convertChatMessage(res: OllamaChatResponse): ChatMessage[] {
       if ("error" in res) {
         throw new Error(res.error);
@@ -460,10 +458,10 @@ class Ollama extends BaseLLM implements ModelInstaller {
         const { content } = res;
 
         if (content === "") {
-          Ollama.SetIsThinking(true);
+          SetIsThinking(true);
         }
 
-        if (Ollama.GetIsThinking() && content) {
+        if (GetIsThinking() && content) {
           const thinkingMessage: ThinkingChatMessage = {
             role: "thinking",
             content: content,
@@ -470,8 +468,8 @@ class Ollama extends BaseLLM implements ModelInstaller {
           };
 
           if (thinkingMessage) {
             if (content === "") {
-              Ollama.SetIsThinking(false);
+              SetIsThinking(false);
             }
             // When Streaming you can't have both thinking and content
             return [thinkingMessage];
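
Reviewer note (not part of the patches above): the series parses n8n-style streamed chunks of shape { type, content?, metadata } and routes non-empty content either into a "thinking" message or an ordinary assistant message, keyed off a flag that an empty content chunk flips. Below is a minimal standalone TypeScript sketch of that rule. Message, N8nStreamChunk and createN8nChunkConverter are illustrative stand-ins, not the ChatMessage / ThinkingChatMessage / N8nChatReponse types from core/llm/llms/Ollama.ts, and the toggle is one reading of the intent: the patch as written only sets the flag on an empty chunk and clears it inside a branch guarded by non-empty content.

// Illustrative stand-ins for the message shapes used in the series.
type Message =
  | { role: "thinking"; content: string }
  | { role: "assistant"; content: string };

// One streamed n8n chunk, loosely mirroring the N8nChatReponse type from PATCH 1/3.
interface N8nStreamChunk {
  type: string;
  content?: string;
}

// Per-stream state kept in a closure, mirroring PATCH 3/3 where _isThinking
// moved from a static class field into _streamChat's scope.
function createN8nChunkConverter() {
  let isThinking = false;

  return (chunk: N8nStreamChunk): Message[] => {
    const { content } = chunk;
    if (content === undefined) {
      return [];
    }
    if (content === "") {
      // Assumed rule: an empty content chunk marks a thinking-phase boundary.
      isThinking = !isThinking;
      return [];
    }
    // While the flag is set, tokens are surfaced as thinking output;
    // otherwise they are ordinary assistant content.
    return isThinking
      ? [{ role: "thinking", content }]
      : [{ role: "assistant", content }];
  };
}

// Usage: a thinking span delimited by empty chunks, then the visible answer.
const convert = createN8nChunkConverter();
const chunks: N8nStreamChunk[] = [
  { type: "item", content: "" },
  { type: "item", content: "planning the reply" },
  { type: "item", content: "" },
  { type: "item", content: "Here is the answer." },
];
console.log(chunks.flatMap(convert));

One practical upside of the PATCH 3/3 change is that the flag lives per call rather than in a static field, so concurrent streams cannot toggle each other's thinking state.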