diff --git a/app.py b/app.py
index afb0b96..a4b1c50 100644
--- a/app.py
+++ b/app.py
@@ -1,15 +1,33 @@
 from langchain import PromptTemplate, LLMChain
+from langchain.chains import RetrievalQA
 from langchain.chat_models import ChatOpenAI
 import chainlit as cl
+from utils import get_docsearch
+
 
 template = """Input: {question}
 Output: Let's think step by step."""
 
 
 @cl.langchain_factory(use_async=True)
-def factory():
-    prompt = PromptTemplate(template=template, input_variables=["question"])
-    llm = ChatOpenAI(model_name="gpt-3.5-turbo")
-    llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
+async def factory():
+    files = None
+    while files is None:
+        files = await cl.AskFileMessage(
+            content="Select File",
+            accept=["text/plain", "application/pdf"],
+            max_size_mb=20,
+            timeout=180,
+        ).send()
+    file = files[0]
+    msg = cl.Message(content=f"Processing `{file.name}`...")
+    await msg.send()
+    docsearch = await cl.make_async(get_docsearch)(file)
+    llm_chain = RetrievalQA.from_chain_type(
+        ChatOpenAI(temperature=0, streaming=True),
+        chain_type="stuff",
+        retriever=docsearch.as_retriever(max_tokens_limit=4097),
+    )
+    await msg.update(content=f"`{file.name}` processed. You can now ask questions!")
 
     return llm_chain
diff --git a/chainlit.md b/chainlit.md
new file mode 100644
index 0000000..e69de29
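
The patch imports `get_docsearch` from a `utils` module that is not part of this diff. Below is a minimal sketch of what such a helper might look like, assuming the uploaded file is plain text and that a Chroma vector store with OpenAI embeddings backs the retriever; the actual `utils.py` may differ.

```python
# Hypothetical utils.py -- not part of the patch above; an assumed sketch only.
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma


def get_docsearch(file):
    # Decode the uploaded bytes; a real helper would branch on file.type
    # to also handle "application/pdf" (e.g. via a PDF parser) since the
    # factory accepts both plain text and PDF uploads.
    text = file.content.decode("utf-8")

    # Split the document into overlapping chunks so each fits comfortably
    # within the model's context window when the "stuff" chain assembles them.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    texts = splitter.split_text(text)

    # Embed the chunks and index them for similarity search; the returned
    # vector store is what factory() wraps with .as_retriever().
    return Chroma.from_texts(texts, OpenAIEmbeddings())
```

Because `get_docsearch` is synchronous and potentially slow (embedding calls, indexing), the factory wraps it in `cl.make_async` so it runs in a worker thread without blocking the event loop.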