In this example, we’re going to build a chatbot QA app. We’ll learn how to:

  • Upload a document
  • Create vector embeddings from a file
  • Create a chatbot app with the ability to display sources used to generate an answer

This example is inspired by the LangChain documentation.


This example has extra dependencies. You can install them with:

pip install langchain chromadb tiktoken

Then, you need to create an OpenAI API key here.

The state of the union file is available here

Conversational Document QA with LangChain
import os
from typing import List

from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

import chainlit as cl


text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)

async def on_chat_start():
    files = None

    # Wait for the user to upload a file
    while files == None:
        files = await cl.AskFileMessage(
            content="Please upload a text file to begin!",

    file = files[0]

    msg = cl.Message(content=f"Processing `{}`...", disable_feedback=True)
    await msg.send()

    with open(file.path, "r", encoding="utf-8") as f:
        text =

    # Split the text into chunks
    texts = text_splitter.split_text(text)

    # Create a metadata for each chunk
    metadatas = [{"source": f"{i}-pl"} for i in range(len(texts))]

    # Create a Chroma vector store
    embeddings = OpenAIEmbeddings()
    docsearch = await cl.make_async(Chroma.from_texts)(
        texts, embeddings, metadatas=metadatas

    message_history = ChatMessageHistory()

    memory = ConversationBufferMemory(

    # Create a chain that uses the Chroma vector store
    chain = ConversationalRetrievalChain.from_llm(
        ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True),

    # Let the user know that the system is ready
    msg.content = f"Processing `{}` done. You can now ask questions!"
    await msg.update()

    cl.user_session.set("chain", chain)

async def main(message: cl.Message):
    chain = cl.user_session.get("chain")  # type: ConversationalRetrievalChain
    cb = cl.AsyncLangchainCallbackHandler()

    res = await chain.acall(message.content, callbacks=[cb])
    answer = res["answer"]
    source_documents = res["source_documents"]  # type: List[Document]

    text_elements = []  # type: List[cl.Text]

    if source_documents:
        for source_idx, source_doc in enumerate(source_documents):
            source_name = f"source_{source_idx}"
            # Create the text element referenced in the message
                cl.Text(content=source_doc.page_content, name=source_name)
        source_names = [ for text_el in text_elements]

        if source_names:
            answer += f"\nSources: {', '.join(source_names)}"
            answer += "\nNo sources found"

    await cl.Message(content=answer, elements=text_elements).send()

Try it out

chainlit run app.py -w

You can then upload any .txt file to the UI and ask questions about it. If you are using state_of_the_union.txt you can ask questions like What did the president say about Ketanji Brown Jackson?.