diff --git a/.gitignore b/.gitignore
index d1e196e1..6f7ad97d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,4 +40,6 @@ next-env.d.ts
 !.yarn/releases
 !.yarn/sdks
 !.yarn/versions
-.env
\ No newline at end of file
+.env
+
+.vscode
\ No newline at end of file
diff --git a/app/api/chat/agents/route.ts b/app/api/chat/agents/route.ts
index f4ce8316..1a2bd043 100644
--- a/app/api/chat/agents/route.ts
+++ b/app/api/chat/agents/route.ts
@@ -1,5 +1,5 @@
 import { NextRequest, NextResponse } from "next/server";
-import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
+import { Message as VercelChatMessage, LangChainAdapter } from "ai";
 
 import { createReactAgent } from "@langchain/langgraph/prebuilt";
 import { ChatOpenAI } from "@langchain/openai";
@@ -26,23 +26,28 @@ const convertVercelMessageToLangChainMessage = (message: VercelChatMessage) => {
 };
 
 const convertLangChainMessageToVercelMessage = (message: BaseMessage) => {
-  if (message._getType() === "human") {
+  if (message.getType() === "human") {
     return { content: message.content, role: "user" };
-  } else if (message._getType() === "ai") {
+  } else if (message.getType() === "ai") {
     return {
       content: message.content,
       role: "assistant",
-      tool_calls: (message as AIMessage).tool_calls,
+      parts: (message as AIMessage).tool_calls,
+    };
+  } else if (message.getType() === "tool") {
+    return {
+      content: message.content,
+      role: "system",
     };
   } else {
-    return { content: message.content, role: message._getType() };
+    return { content: message.content, role: message.getType() };
   }
 };
 
 const AGENT_SYSTEM_TEMPLATE = `You are a talking parrot named Polly. All final responses must be how a talking parrot would respond. Squawk often!`;
 
 /**
- * This handler initializes and calls an tool caling ReAct agent.
+ * This handler initializes and calls a tool calling ReAct agent.
  * See the docs for more information:
  *
  * https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/
@@ -89,36 +94,14 @@ export async function POST(req: NextRequest) {
       /**
        * Stream back all generated tokens and steps from their runs.
        *
-       * We do some filtering of the generated events and only stream back
-       * the final response as a string.
-       *
        * For this specific type of tool calling ReAct agents with OpenAI, we can tell when
        * the agent is ready to stream back final output when it no longer calls
        * a tool and instead streams back content.
       *
       * See: https://langchain-ai.github.io/langgraphjs/how-tos/stream-tokens/
       */
-      const eventStream = await agent.streamEvents(
-        { messages },
-        { version: "v2" },
-      );
-
-      const textEncoder = new TextEncoder();
-      const transformStream = new ReadableStream({
-        async start(controller) {
-          for await (const { event, data } of eventStream) {
-            if (event === "on_chat_model_stream") {
-              // Intermediate chat model generations will contain tool calls and no content
-              if (!!data.chunk.content) {
-                controller.enqueue(textEncoder.encode(data.chunk.content));
-              }
-            }
-          }
-          controller.close();
-        },
-      });
-
-      return new StreamingTextResponse(transformStream);
+      const eventStream = agent.streamEvents({ messages }, { version: "v2" });
+      return LangChainAdapter.toDataStreamResponse(eventStream);
     } else {
       /**
        * We could also pick intermediate steps out from `streamEvents` chunks, but
diff --git a/app/api/chat/retrieval/route.ts b/app/api/chat/retrieval/route.ts
index 53a8078e..2773f393 100644
--- a/app/api/chat/retrieval/route.ts
+++ b/app/api/chat/retrieval/route.ts
@@ -1,5 +1,5 @@
 import { NextRequest, NextResponse } from "next/server";
-import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
+import { LangChainAdapter, Message as VercelChatMessage } from "ai";
 
 import { createClient } from "@supabase/supabase-js";
 
@@ -140,7 +140,6 @@ export async function POST(req: NextRequest) {
         chat_history: (input) => input.chat_history,
       },
       answerChain,
-      new BytesOutputParser(),
     ]);
 
     const stream = await conversationalRetrievalQAChain.stream({
@@ -160,10 +159,12 @@ export async function POST(req: NextRequest) {
       ),
     ).toString("base64");
 
-    return new StreamingTextResponse(stream, {
-      headers: {
-        "x-message-index": (previousMessages.length + 1).toString(),
-        "x-sources": serializedSources,
+    return LangChainAdapter.toDataStreamResponse(stream, {
+      init: {
+        headers: {
+          "x-message-index": (previousMessages.length + 1).toString(),
+          "x-sources": serializedSources,
+        },
       },
     });
   } catch (e: any) {
diff --git a/app/api/chat/retrieval_agents/route.ts b/app/api/chat/retrieval_agents/route.ts
index 7b5e3427..f1de0c6d 100644
--- a/app/api/chat/retrieval_agents/route.ts
+++ b/app/api/chat/retrieval_agents/route.ts
@@ -1,5 +1,5 @@
 import { NextRequest, NextResponse } from "next/server";
-import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
+import { LangChainAdapter, Message as VercelChatMessage } from "ai";
 
 import { createClient } from "@supabase/supabase-js";
 
@@ -46,7 +46,7 @@ const AGENT_SYSTEM_TEMPLATE = `You are a stereotypical robot named Robbie and mu
 If you don't know how to answer a question, use the available tools to look up relevant information.
 You should particularly do this for questions about LangChain.`;
 /**
- * This handler initializes and calls an tool caling ReAct agent.
+ * This handler initializes and calls a tool calling ReAct agent.
  * See the docs for more information:
  *
  * https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/
@@ -96,7 +96,7 @@ export async function POST(req: NextRequest) {
     /**
      * Use a prebuilt LangGraph agent.
      */
-    const agent = await createReactAgent({
+    const agent = createReactAgent({
       llm: chatModel,
       tools: [tool],
       /**
@@ -112,38 +112,20 @@ export async function POST(req: NextRequest) {
       /**
        * Stream back all generated tokens and steps from their runs.
        *
-       * We do some filtering of the generated events and only stream back
-       * the final response as a string.
-       *
        * For this specific type of tool calling ReAct agents with OpenAI, we can tell when
        * the agent is ready to stream back final output when it no longer calls
        * a tool and instead streams back content.
        *
        * See: https://langchain-ai.github.io/langgraphjs/how-tos/stream-tokens/
        */
-      const eventStream = await agent.streamEvents(
+      const eventStream = agent.streamEvents(
         {
           messages,
         },
         { version: "v2" },
       );
 
-      const textEncoder = new TextEncoder();
-      const transformStream = new ReadableStream({
-        async start(controller) {
-          for await (const { event, data } of eventStream) {
-            if (event === "on_chat_model_stream") {
-              // Intermediate chat model generations will contain tool calls and no content
-              if (!!data.chunk.content) {
-                controller.enqueue(textEncoder.encode(data.chunk.content));
-              }
-            }
-          }
-          controller.close();
-        },
-      });
-
-      return new StreamingTextResponse(transformStream);
+      return LangChainAdapter.toDataStreamResponse(eventStream);
     } else {
       /**
        * We could also pick intermediate steps out from `streamEvents` chunks, but
diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts
index 093a17ce..ad9f02e0 100644
--- a/app/api/chat/route.ts
+++ b/app/api/chat/route.ts
@@ -1,9 +1,8 @@
 import { NextRequest, NextResponse } from "next/server";
-import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
+import { Message as VercelChatMessage, LangChainAdapter } from "ai";
 
 import { ChatOpenAI } from "@langchain/openai";
 import { PromptTemplate } from "@langchain/core/prompts";
-import { HttpResponseOutputParser } from "langchain/output_parsers";
 
 export const runtime = "edge";
 
@@ -47,26 +46,20 @@ export async function POST(req: NextRequest) {
       model: "gpt-4o-mini",
     });
 
-    /**
-     * Chat models stream message chunks rather than bytes, so this
-     * output parser handles serialization and byte-encoding.
-     */
-    const outputParser = new HttpResponseOutputParser();
-
     /**
      * Can also initialize as:
     *
      * import { RunnableSequence } from "@langchain/core/runnables";
      * const chain = RunnableSequence.from([prompt, model, outputParser]);
      */
-    const chain = prompt.pipe(model).pipe(outputParser);
+    const chain = prompt.pipe(model);
 
     const stream = await chain.stream({
      chat_history: formattedPreviousMessages.join("\n"),
       input: currentMessageContent,
     });
 
-    return new StreamingTextResponse(stream);
+    return LangChainAdapter.toDataStreamResponse(stream);
   } catch (e: any) {
     return NextResponse.json({ error: e.message }, { status: e.status ?? 500 });
   }
diff --git a/app/structured_output/page.tsx b/app/structured_output/page.tsx
index 1edd2caa..212f37f1 100644
--- a/app/structured_output/page.tsx
+++ b/app/structured_output/page.tsx
@@ -80,6 +80,7 @@ export default function AgentsPage() {
       emptyStateComponent={InfoCard}
       placeholder={`No matter what you type here, I'll always return the same JSON object with the same structure!`}
       emoji="🧱"
+      streamProtocol="text"
     />
   );
 }
diff --git a/components/ChatMessageBubble.tsx b/components/ChatMessageBubble.tsx
index 9f6973c0..3d839df4 100644
--- a/components/ChatMessageBubble.tsx
+++ b/components/ChatMessageBubble.tsx
@@ -1,5 +1,5 @@
 import { cn } from "@/utils/cn";
-import type { Message } from "ai/react";
+import type { Message } from "@ai-sdk/react";
 
 export function ChatMessageBubble(props: {
   message: Message;
diff --git a/components/ChatWindow.tsx b/components/ChatWindow.tsx
index f6fd7f7e..8fd08490 100644
--- a/components/ChatWindow.tsx
+++ b/components/ChatWindow.tsx
@@ -1,16 +1,16 @@
 "use client";
 
-import { type Message } from "ai";
-import { useChat } from "ai/react";
+import { UIMessage, type Message } from "ai";
+import { useChat } from "@ai-sdk/react";
 import { useState } from "react";
 import type { FormEvent, ReactNode } from "react";
 import { toast } from "sonner";
 import { StickToBottom, useStickToBottomContext } from "use-stick-to-bottom";
+import { ArrowDown, LoaderCircle, Paperclip } from "lucide-react";
 
 import { ChatMessageBubble } from "@/components/ChatMessageBubble";
 import { IntermediateStep } from "./IntermediateStep";
 import { Button } from "./ui/button";
-import { ArrowDown, LoaderCircle, Paperclip } from "lucide-react";
 import { Checkbox } from "./ui/checkbox";
 import { UploadDocumentsForm } from "./UploadDocumentsForm";
 import {
@@ -22,6 +22,7 @@ import {
   DialogTrigger,
 } from "./ui/dialog";
 import { cn } from "@/utils/cn";
+import { logError } from "@/utils/log";
 
 function ChatMessages(props: {
   messages: Message[];
@@ -172,6 +173,7 @@ export function ChatWindow(props: {
   emoji?: string;
   showIngestForm?: boolean;
   showIntermediateStepsToggle?: boolean;
+  streamProtocol?: "text" | "data";
 }) {
   const [showIntermediateSteps, setShowIntermediateSteps] = useState(
     !!props.showIntermediateStepsToggle,
@@ -199,16 +201,22 @@ export function ChatWindow(props: {
         });
       }
     },
-    streamMode: "text",
-    onError: (e) =>
+    streamProtocol: props.streamProtocol ?? undefined,
+    onError: (e) => {
+      logError("Error:", e);
       toast.error(`Error while processing your request`, {
         description: e.message,
-      }),
+      });
+    },
   });
 
+  function isChatLoading(): boolean {
+    return chat.status === "streaming";
+  }
+
   async function sendMessage(e: FormEvent) {
     e.preventDefault();
-    if (chat.isLoading || intermediateStepsLoading) return;
+    if (isChatLoading() || intermediateStepsLoading) return;
 
     if (!showIntermediateSteps) {
       chat.handleSubmit(e);
@@ -223,6 +231,7 @@ export function ChatWindow(props: {
       id: chat.messages.length.toString(),
       content: chat.input,
       role: "user",
+      parts: [{ type: "text", text: chat.input }],
     });
 
     chat.setMessages(messagesWithUserReply);
@@ -237,6 +246,7 @@ export function ChatWindow(props: {
     setIntermediateStepsLoading(false);
 
     if (!response.ok) {
+      logError("Error:", json.error);
       toast.error(`Error while processing your request`, {
         description: json.error,
       });
@@ -251,23 +261,25 @@ export function ChatWindow(props: {
       (responseMessage: Message) => {
         return (
           (responseMessage.role === "assistant" &&
-            !!responseMessage.tool_calls?.length) ||
-          responseMessage.role === "tool"
+            !!responseMessage.parts?.length) ||
+          responseMessage.role === "system"
         );
       },
     );
 
-    const intermediateStepMessages = [];
+    const intermediateStepMessages: UIMessage[] = [];
     for (let i = 0; i < toolCallMessages.length; i += 2) {
       const aiMessage = toolCallMessages[i];
       const toolMessage = toolCallMessages[i + 1];
+      const content = JSON.stringify({
+        action: aiMessage.parts?.[0],
+        observation: toolMessage.content,
+      });
       intermediateStepMessages.push({
         id: (messagesWithUserReply.length + i / 2).toString(),
         role: "system" as const,
-        content: JSON.stringify({
-          action: aiMessage.tool_calls?.[0],
-          observation: toolMessage.content,
-        }),
+        content,
+        parts: [{ type: "text", text: content }],
       });
     }
     const newMessages = messagesWithUserReply;
@@ -341,7 +353,7 @@ export function ChatWindow(props: {
                 id="show_intermediate_steps"
                 name="show_intermediate_steps"
                 checked={showIntermediateSteps}
-                disabled={chat.isLoading || intermediateStepsLoading}
+                disabled={isChatLoading() || intermediateStepsLoading}
                 onCheckedChange={(e) => setShowIntermediateSteps(!!e)}
               />