File size: 2,949 Bytes
ee8f068
 
 
 
 
 
7ae2e83
7571822
 
 
ee8f068
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9946d9a
 
5d9583a
 
 
 
 
 
 
 
 
 
 
 
 
ee8f068
 
 
 
 
 
 
9946d9a
 
 
 
ee8f068
 
 
5d9583a
ee8f068
5d9583a
 
ee8f068
 
5d9583a
ee8f068
f0adbfe
5d9583a
 
ee8f068
 
 
9946d9a
5d9583a
 
 
ee8f068
5d9583a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import streamlit as st
from gradio_client import Client
from llama_index.llms import Replicate
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, SimpleDirectoryReader
import os

# Directory where the transcript is written and indexed from.
PATH = '/Data'

# Fail fast if the Replicate API token is missing; the Replicate LLM
# client below reads it directly from the environment.
# (The original had an `else` branch re-assigning the variable to
# itself — a no-op — which has been removed.)
if "REPLICATE_API_TOKEN" not in os.environ:
    raise ValueError("Please set the REPLICATE_API_TOKEN environment variable.")

# Vicuna-13B hosted on Replicate serves as the chat LLM.
llm = Replicate(
    model="replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b"
)

# Sentence-transformer embeddings, wrapped for llama_index.
embeddings = LangchainEmbedding(
    HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
)

# Register a global service context so VectorStoreIndex.from_documents
# below picks up this LLM and embedding model implicitly.
service_context = ServiceContext.from_defaults(
    chunk_size=1024,
    llm=llm,
    embed_model=embeddings
)
set_global_service_context(service_context)

# Transcribe function
def transcribe_video(youtube_url):
    """Transcribe a YouTube video and build a query engine over the text.

    Uses the public Whisper-JAX Hugging Face space to transcribe the
    video, writes the transcript to ``{PATH}/docs.txt``, then indexes
    that directory with llama_index.

    Parameters
    ----------
    youtube_url : str
        URL of the YouTube video to transcribe.

    Returns
    -------
    A llama_index query engine over the transcript.
    """
    with st.status("Starting client"):
        client = Client("https://sanchit-gandhi-whisper-jax.hf.space/")
        st.write("Requesting client")
    with st.status("Requesting Whisper"):
        # fn_index=7 is the space's "YouTube URL" transcription endpoint;
        # result[1] holds the transcript text.
        result = client.predict(youtube_url, "transcribe", True, fn_index=7)
        st.write("Requesting API...")
        # Ensure the transcript directory exists before writing — the
        # original raised FileNotFoundError when PATH was absent.
        os.makedirs(PATH, exist_ok=True)
        with open(f'{PATH}/docs.txt', 'w', encoding='utf-8') as f:
            f.write(result[1])
        st.write('Writing File...')
    with st.status("Requesting Embeddings"):
        documents = SimpleDirectoryReader(PATH).load_data()
        index = VectorStoreIndex.from_documents(documents)
        return index.as_query_engine()

# Streamlit UI
st.title("YouTube Video Chatbot")

# Input for YouTube URL
youtube_url = st.text_input("Enter YouTube Video URL:")

# Transcribe once per session: cache the query engine in session_state
# so Streamlit reruns do not re-transcribe the same video.
if youtube_url and "query_engine" not in st.session_state:
    st.write("Transcribing video... Please wait.")
    st.session_state.query_engine = transcribe_video(youtube_url)

if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=("🧑‍💻" if message["role"] == 'human' else '🦙')):
        st.markdown(message["content"])

# User input
prompt = st.chat_input("Ask something about the video:")

# BUG FIX: the original `if prompt := prompt and "query_engine" in ...`
# rebound `prompt` to the boolean result of the `and` expression, so the
# user's text was discarded and `True` was rendered and stored instead.
if prompt and "query_engine" in st.session_state:
    # Display user message in chat message container
    st.chat_message("human", avatar="🧑‍💻").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

    response = st.session_state.query_engine.query(prompt)
    response_text = response.response
    with st.chat_message("assistant", avatar='🦙'):
        st.markdown(response_text)
    # Add assistant response to chat history. BUG FIX: store the text,
    # not the Response object — the original appended `response`, which
    # broke markdown rendering of history on rerun.
    st.session_state.messages.append({"role": "assistant", "content": response_text})