Commit
Β·
0e9898e
1
Parent(s):
cfeb389
Logging to UI
Browse files
- app.py +13 -1
- requirements.txt +0 -5
app.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
import io
|
|
|
2 |
import os
|
3 |
import uuid
|
4 |
|
@@ -13,6 +14,11 @@ from transformers.image_utils import load_image
|
|
13 |
|
14 |
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
15 |
|
|
|
|
|
|
|
|
|
|
|
16 |
if "session_id" not in st.session_state:
|
17 |
st.session_state["session_id"] = str(uuid.uuid4()) # Generate unique session ID
|
18 |
|
@@ -58,6 +64,8 @@ with st.sidebar:
|
|
58 |
"[Source Code](https://huggingface.co/spaces/deepakkarkala/multimodal-rag/tree/main)"
|
59 |
|
60 |
st.title("🖼️ Image Q&A with VLM")
|
|
|
|
|
61 |
uploaded_pdf = st.file_uploader("Upload PDF file", type=("pdf"))
|
62 |
query = st.text_input(
|
63 |
"Ask something about the image",
|
@@ -77,13 +85,17 @@ if uploaded_pdf and "is_index_complete" not in st.session_state:
|
|
77 |
model_embedding.index(
|
78 |
input_path=images_folder, index_name=index_name, store_collection_with_index=False, overwrite=True
|
79 |
)
|
|
|
80 |
st.session_state["is_index_complete"] = True
|
81 |
|
82 |
|
83 |
|
84 |
if uploaded_pdf and query:
|
85 |
docs_retrieved = model_embedding.search(query, k=1)
|
86 |
-
|
|
|
|
|
|
|
87 |
|
88 |
# Create input messages
|
89 |
system_prompt = "You are an AI assistant. Your task is to reply to user questions based on the provided image context."
|
|
|
1 |
import io
|
2 |
+
import logging
|
3 |
import os
|
4 |
import uuid
|
5 |
|
|
|
14 |
|
15 |
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
16 |
|
17 |
+
# Capture logs
|
18 |
+
log_stream = io.StringIO()
|
19 |
+
logging.basicConfig(stream=log_stream, level=logging.INFO)
|
20 |
+
|
21 |
+
|
22 |
if "session_id" not in st.session_state:
|
23 |
st.session_state["session_id"] = str(uuid.uuid4()) # Generate unique session ID
|
24 |
|
|
|
64 |
"[Source Code](https://huggingface.co/spaces/deepakkarkala/multimodal-rag/tree/main)"
|
65 |
|
66 |
st.title("🖼️ Image Q&A with VLM")
|
67 |
+
st.text_area("Logs:", log_stream.getvalue(), height=200)
|
68 |
+
|
69 |
uploaded_pdf = st.file_uploader("Upload PDF file", type=("pdf"))
|
70 |
query = st.text_input(
|
71 |
"Ask something about the image",
|
|
|
85 |
model_embedding.index(
|
86 |
input_path=images_folder, index_name=index_name, store_collection_with_index=False, overwrite=True
|
87 |
)
|
88 |
+
logging.info(f"{len(images)} number of images extracted from PDF and indexed")
|
89 |
st.session_state["is_index_complete"] = True
|
90 |
|
91 |
|
92 |
|
93 |
if uploaded_pdf and query:
|
94 |
docs_retrieved = model_embedding.search(query, k=1)
|
95 |
+
logging.info(f"{len(docs_retrieved)} number of images retrieved as relevant to query")
|
96 |
+
image_id = docs_retrieved[0]["doc_id"]
|
97 |
+
logging.info(f"Image id:{image_id} retrieved")
|
98 |
+
image_similar_to_query = images[image_id]
|
99 |
|
100 |
# Create input messages
|
101 |
system_prompt = "You are an AI assistant. Your task is to reply to user questions based on the provided image context."
|
requirements.txt
CHANGED
@@ -1,11 +1,6 @@
|
|
1 |
streamlit>=1.28
|
2 |
-
langchain>=0.0.217
|
3 |
-
openai>=1.2
|
4 |
-
duckduckgo-search
|
5 |
-
anthropic>=0.3.0
|
6 |
trubrics>=1.4.3
|
7 |
streamlit-feedback
|
8 |
-
langchain-community
|
9 |
torch
|
10 |
transformers
|
11 |
accelerate>=0.26.0
|
|
|
1 |
streamlit>=1.28
|
|
|
|
|
|
|
|
|
2 |
trubrics>=1.4.3
|
3 |
streamlit-feedback
|
|
|
4 |
torch
|
5 |
transformers
|
6 |
accelerate>=0.26.0
|