import getpass
import os

import gradio as gr
import numpy as np
import pandas as pd
import pixeltable as pxt
from pixeltable.functions import openai
from pixeltable.functions.huggingface import sentence_transformer
from pixeltable.iterators import DocumentSplitter
"""## Store OpenAI API Key"""

# Prompt interactively only when the key isn't already set in the environment.
# FIX: `getpass` was used here without ever being imported (NameError at
# runtime) — it is now imported at the top of the file.
if 'OPENAI_API_KEY' not in os.environ:
    os.environ['OPENAI_API_KEY'] = getpass.getpass('Enter your OpenAI API key:')

"""Pixeltable Set up"""

# Ensure a clean slate for the demo: drop any previous 'rag_demo' directory
# (force=True ignores "does not exist") before recreating it.
pxt.drop_dir('rag_demo', force=True)
pxt.create_dir('rag_demo')
# Set up embedding function
@pxt.expr_udf
def e5_embed(text: str) -> np.ndarray:
    """Embed *text* with the intfloat/e5-large-v2 sentence-transformer model."""
    model_name = 'intfloat/e5-large-v2'
    return sentence_transformer(text, model_id=model_name)
# Create prompt function
@pxt.udf
def create_prompt(top_k_list: list[dict], question: str) -> str:
    """Build the LLM prompt: retrieved passages followed by the question.

    Passages are concatenated in reversed retrieval order, so the
    highest-similarity chunk ends up closest to the question text.
    """
    passages = [entry['text'] for entry in reversed(top_k_list)]
    concat_top_k = '\n\n'.join(passages)
    return f'''
PASSAGES:
{concat_top_k}
QUESTION:
{question}'''
# Gradio Application
def process_files(ground_truth_file, pdf_files):
    """Build the RAG pipeline from the uploads and answer every ground-truth question.

    Args:
        ground_truth_file: uploaded CSV/XLSX file object; assumed to contain a
            'Question' and a 'correct_answer' column (both are selected below
            — TODO confirm against the expected upload format).
        pdf_files: list of uploaded file objects; only names ending in
            '.pdf' are inserted.

    Returns:
        pandas.DataFrame with the question, the correct answer, and the
        model-generated answer — or a one-cell error DataFrame on failure.
    """
    # FIX: the original try/except wrapped only the final `return`, so every
    # real failure in the pipeline above it propagated uncaught. The whole
    # body now runs inside the try.
    try:
        # Ensure a clean slate for the demo by removing and recreating the
        # 'rag_demo' directory.
        pxt.drop_dir('rag_demo', force=True)
        pxt.create_dir('rag_demo')

        # Import the ground-truth queries as CSV or Excel, by file extension.
        if ground_truth_file.name.endswith('.csv'):
            queries_t = pxt.io.import_csv('rag_demo.queries', ground_truth_file.name)
        else:
            queries_t = pxt.io.import_excel('rag_demo.queries', ground_truth_file.name)

        # Create a table to store the uploaded PDF documents.
        documents_t = pxt.create_table(
            'rag_demo.documents',
            {'document': pxt.DocumentType()}
        )
        documents_t.insert(
            {'document': file.name} for file in pdf_files if file.name.endswith('.pdf')
        )

        # View that splits each document into chunks of at most 300 tokens.
        chunks_t = pxt.create_view(
            'rag_demo.chunks',
            documents_t,
            iterator=DocumentSplitter.create(
                document=documents_t.document,
                separators='token_limit',
                limit=300
            )
        )

        # Embedding index over chunk text for similarity search.
        chunks_t.add_embedding_index('text', string_embed=e5_embed)

        # Query returning the 5 chunks most similar to a given question.
        @chunks_t.query
        def top_k(query_text: str):
            sim = chunks_t.text.similarity(query_text)
            return (
                chunks_t.order_by(sim, asc=False)
                .select(chunks_t.text, sim=sim)
                .limit(5)
            )

        # Computed columns: retrieved context, then the assembled prompt.
        queries_t['question_context'] = chunks_t.top_k(queries_t.Question)
        queries_t['prompt'] = create_prompt(
            queries_t.question_context, queries_t.Question
        )

        # Messages for the OpenAI chat API: fixed system instruction plus the
        # per-row prompt expression.
        messages = [
            {
                'role': 'system',
                'content': 'Please read the following passages and answer the question based on their contents.'
            },
            {
                'role': 'user',
                'content': queries_t.prompt
            }
        ]
        queries_t['response'] = openai.chat_completions(
            model='gpt-4o-mini-2024-07-18', messages=messages
        )

        # Extract the answer text from the API response.
        queries_t['answer'] = queries_t.response.choices[0].message.content.astype(pxt.StringType())

        # Questions, correct answers, and model answers for display.
        return queries_t.select(
            queries_t.Question, queries_t.correct_answer, queries_t.answer
        ).collect().to_pandas()
    except Exception as e:
        # FIX: the original returned a (str, None) tuple here, which does not
        # match the single gr.DataFrame output component; return a displayable
        # one-cell DataFrame instead.
        return pd.DataFrame({'error': [f"An error occurred: {e}"]})
# Gradio interface: upload widgets, a trigger button, and the results table.
with gr.Blocks() as demo:
    gr.Markdown("# RAG Demo App")

    # File upload components for ground truth and PDF documents.
    with gr.Row():
        ground_truth_file = gr.File(label="Upload Ground Truth (CSV or XLSX)", file_count="single")
        pdf_files = gr.File(label="Upload PDF Documents", file_count="multiple")

    # Button that kicks off the whole pipeline.
    process_button = gr.Button("Process Files and Generate Outputs")

    # Output component to display the results.
    df_output = gr.DataFrame(label="Pixeltable Table")
    process_button.click(process_files, inputs=[ground_truth_file, pdf_files], outputs=df_output)

    # Planned interactive single-question flow (not wired up yet):
    #question_input = gr.Textbox(label="Enter your question")
    #query_button = gr.Button("Query LLM")
    #query_button.click(query_llm, inputs=question_input, outputs=output_dataframe)

if __name__ == "__main__":
    demo.launch()