# AISandbox/qa/utils.py
import re
from io import BytesIO
from typing import Any, Dict, List, Text, Tuple, Union

import docx2txt
import streamlit as st
from pypdf import PdfReader
from openai.error import AuthenticationError

from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS, VectorStore

from .prompts import STUFF_PROMPT


class HashDocument(Document):
    """A Document whose hash is derived from its page content and metadata."""

    def __hash__(self):
        # Metadata values may not be strings (e.g. page numbers), so cast before joining
        content = self.page_content + "".join(str(self.metadata[k]) for k in self.metadata)
        return hash(content)
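
# Making Document hashable lets chunk instances be stored in sets or used as dict
# keys (e.g. to deduplicate identical chunks); the hash covers content and metadata.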


@st.cache_data
def parse_docx(file: BytesIO) -> str:
    """Extracts the plain text of a .docx file."""
    text = docx2txt.process(file)
    # Remove multiple newlines
    text = re.sub(r"\n\s*\n", "\n\n", text)
    return text


@st.cache_data
def parse_pdf(file: BytesIO) -> List[str]:
    """Extracts the text of a PDF file, one string per page."""
    pdf = PdfReader(file)
    output = []
    for page in pdf.pages:
        text = page.extract_text()
        # Merge hyphenated words split across lines
        text = re.sub(r"(\w+)-\n(\w+)", r"\1\2", text)
        # Fix newlines in the middle of sentences
        text = re.sub(r"(?<!\n\s)\n(?!\s\n)", " ", text.strip())
        # Remove multiple newlines
        text = re.sub(r"\n\s*\n", "\n\n", text)
        output.append(text)
    return output


@st.cache_data
def parse_txt(file: BytesIO) -> str:
    """Decodes a plain-text file as UTF-8."""
    text = file.read().decode("utf-8")
    # Remove multiple newlines
    text = re.sub(r"\n\s*\n", "\n\n", text)
    return text
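
# Note: parse_docx and parse_txt return a single string, while parse_pdf returns one
# string per page; text_to_docs below accepts a single string or a tuple of page
# strings, e.g. tuple(parse_pdf(file)).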


@st.cache_data
def text_to_docs(text: Union[Text, Tuple[Text, ...]]) -> List[Document]:
    """
    Converts a string or a tuple of strings (one per page) to a list of
    Documents with page and chunk metadata.
    """
    if isinstance(text, str):
        # Take a single string as one page
        text = (text,)
    if isinstance(text, tuple):
        # Map each page into a Document instance
        page_docs = [HashDocument(page_content=page) for page in text]
        # Add page numbers as metadata
        for i, doc in enumerate(page_docs):
            doc.metadata["page"] = i + 1
        # Split pages into chunks
        doc_chunks = []
        # Text splitter used to break each page into chunks
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=800,
            separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
            chunk_overlap=20,  # minimal overlap to capture semantic overlap across chunks
        )
        for page_doc in page_docs:
            chunks = text_splitter.split_text(page_doc.page_content)
            for i, chunk in enumerate(chunks):
                # Create a new document for each individual chunk
                chunk_doc = HashDocument(
                    page_content=chunk,
                    metadata={"page": page_doc.metadata["page"], "chunk": i},
                )
                # Add the source as metadata
                chunk_doc.metadata["source"] = f"{chunk_doc.metadata['page']}-{chunk_doc.metadata['chunk']}"
                doc_chunks.append(chunk_doc)
        return doc_chunks
    raise ValueError(f"Text must be either a string or a tuple of strings. Got: {type(text)}")
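
# Illustrative usage (names are examples only):
#     pages = parse_pdf(uploaded_file)        # one string per PDF page
#     chunks = text_to_docs(tuple(pages))     # ~800-character chunks
#     chunks[0].metadata  ->  {"page": 1, "chunk": 0, "source": "1-0"}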


@st.cache_data
def embed_docs(_docs: Tuple[Document, ...]) -> VectorStore:
    """Embeds a tuple of Documents and returns a FAISS index."""
    # The leading underscore tells st.cache_data not to try to hash this argument
    docs = _docs
    if not st.session_state.get("OPENAI_API_KEY"):
        raise AuthenticationError(
            "Enter your OpenAI API key in the sidebar. You can get a key at "
            "https://platform.openai.com/account/api-keys."
        )
    # Embed the chunks
    embeddings = OpenAIEmbeddings(openai_api_key=st.session_state.get("OPENAI_API_KEY"))
    index = FAISS.from_documents(list(docs), embeddings)
    return index
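
# Illustrative usage (assumes `chunks` from text_to_docs and an API key in session state):
#     index = embed_docs(tuple(chunks))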


@st.cache_data
def search_docs(_index: VectorStore, query: str) -> List[Document]:
    """Searches a FAISS index for the chunks most similar to the query
    and returns them as a list of Documents."""
    # Search for similar chunks
    docs = _index.similarity_search(query, k=5)
    return docs
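
# k=5 keeps only the five most similar chunks, which also helps the downstream
# "stuff" chain fit every retrieved chunk into a single prompt.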


@st.cache_data
def get_answer(_docs: List[Document], query: str) -> Dict[str, Any]:
    """Gets an answer to a question from a list of Documents."""
    # Build a "stuff" chain: all retrieved chunks are placed into a single prompt
    chain = load_qa_with_sources_chain(
        OpenAI(
            temperature=0,
            openai_api_key=st.session_state.get("OPENAI_API_KEY"),
        ),
        chain_type="stuff",
        prompt=STUFF_PROMPT,
    )
    # Also returning the text of the sources used to form the answer
    answer = chain({"input_documents": _docs, "question": query})
    return answer
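
# The chain's output dict includes "output_text", which is expected to end with a
# "SOURCES: <page>-<chunk>, ..." trailer that get_sources parses below.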


@st.cache_data
def get_sources(answer: Dict[str, Any], docs: List[Document]) -> List[Document]:
    """Gets the source documents cited in an answer."""
    # Collect the source keys listed after the "SOURCES:" marker in the answer
    source_keys = answer["output_text"].split("SOURCES: ")[-1].split(", ")
    source_docs = []
    for doc in docs:
        if doc.metadata["source"] in source_keys:
            source_docs.append(doc)
    return source_docs


def wrap_text_in_html(text: Union[str, List[str]]) -> str:
    """Wraps each text block separated by newlines in <p> tags."""
    if isinstance(text, list):
        # Add horizontal rules between pages
        text = "\n<hr/>\n".join(text)
    return "".join([f"<p>{line}</p>" for line in text.split("\n")])