# app.py
import os
import warnings

import gradio as gr
from dotenv import load_dotenv
from langchain_groq import ChatGroq

from nomic_embeddings import EmbeddingsModel
from qdrant_search import QdrantSearch

# Load environment variables (Qdrant and Groq credentials) from a local .env file
load_dotenv()

warnings.filterwarnings("ignore", category=FutureWarning)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Initialize global configuration: collections to search, retrieval limit, LLM, and embeddings
collection_names = ["docs_v1_2", "docs_v2_2", "docs_v3_2"]
limit = 5  # retrieval limit passed to query_multiple_collections

llm = ChatGroq(model="mixtral-8x7b-32768")
embeddings = EmbeddingsModel()
search = QdrantSearch(
    qdrant_url=os.environ["QDRANT_CLOUD_URL"],
    api_key=os.environ["QDRANT_API_KEY"],
    embeddings=embeddings,
)
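# Note: the result shape is assumed from how the results are consumed in
# chat_with_langassist below: query_multiple_collections is expected to return a
# list of dicts with "text" and "source" keys (the qdrant_search module itself
# is not shown here).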
# Define the query processing function
def chat_with_langassist(query: str):
    if not query.strip():
        return "Query cannot be empty.", []

    # Retrieve relevant documents from the Qdrant collections
    retrieved_docs = search.query_multiple_collections(query, collection_names, limit)

    # Build the context string from the retrieved documents
    context = "\n".join(doc["text"] for doc in retrieved_docs)
    # Construct the prompt from the context and the question
    prompt = (
# "You are LangAssist, a knowledgeable assistant for the LangChain Python Library. "
# "Given the following context from the documentation, provide a helpful answer to the user's question.\n\n"
# "Context:\n{context}\n\n"
# "Question: {question}\n\n"
# "Answer:"
"You are LangChat, a knowledgeable assistant for the LangChain Python Library. "
"Given the following context from the documentation, provide a helpful answer to the user's question. \n\n"
"Context:\n{context}\n\n"
"You can ignore the context if the question is a simple chat like Hi, hello, and just respond in a normal manner as LangChat, otherwise use the context to answer the query."
"If you can't find the answer from the sources, mention that clearly instead of making up an answer.\n\n"
"Question: {question}\n\n"
"Answer:"
).format(context=context, question=query)
    # Generate an answer with the language model
    try:
        answer = llm.invoke(prompt).content.strip()
    except Exception as e:
        return f"Error: {e}", []
    # Collect the sources that back the answer
    sources = [
        {"source": doc["source"], "text": doc["text"]}
        for doc in retrieved_docs
    ]
    return answer, sources
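# A minimal sketch of calling the function directly, assuming the Qdrant and Groq
# credentials above are configured (the query string is hypothetical):
#
#     answer, sources = chat_with_langassist("How do I create a custom LangChain tool?")
#     print(answer)
#     for src in sources:
#         print(src["source"])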
# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("<h1>LangAssist Chat</h1>")
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")
    sources_display = gr.Markdown(label="Sources")
    def respond(message, chat_history):
        answer, sources = chat_with_langassist(message)
        chat_history.append((message, answer))
        if sources:
            formatted_sources = "\n".join(
                f"- **Source:** {source['source']}\n  **Text:** {source['text']}"
                for source in sources
            )
        else:
            formatted_sources = "No sources available."
        # Return the updated history, clear the textbox, and show the sources
        return chat_history, gr.update(value=""), formatted_sources

    msg.submit(respond, [msg, chatbot], [chatbot, msg, sources_display])
    # The clear handler must return one value per output component
    clear.click(lambda: (None, ""), None, [chatbot, sources_display])
# Run the Gradio app
if __name__ == "__main__":
    demo.launch()
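# If the app needs to be reachable from other machines (for example when running
# in a container), demo.launch(server_name="0.0.0.0") is a common alternative;
# the default launch() above binds to localhost only.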