# Fact-checking chatbot Streamlit app.
# (Hosting-page scrape artifacts "Spaces:" / "Runtime error" removed.)
import json
import os
from getpass import getpass

import requests
import streamlit as st
from langchain.agents import AgentExecutor, AgentType, initialize_agent
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.prompts import PromptTemplate
from langchain.utilities.tavily_search import TavilySearchAPIWrapper
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAI
API_GOOGLE_SEARCH_KEY = "AIzaSyA4oDDFtPxAfmPC8EcfQrkByb9xKm2QfMc" | |
def query_fact_check_api(claim):
    """Query the Google Fact Check Tools API for a given claim.

    Args:
        claim (str): The claim to search for fact checks.

    Returns:
        dict: The API response parsed as a JSON object.

    Raises:
        requests.HTTPError: If the API responds with an error status.
        requests.Timeout: If the API does not answer within the timeout.
    """
    url = "https://factchecktools.googleapis.com/v1alpha1/claims:search"
    params = {
        "key": API_GOOGLE_SEARCH_KEY,
        "query": claim,
    }
    # requests has no default timeout; without one an unreachable API would
    # hang the Streamlit worker indefinitely.
    response = requests.get(url, params=params, timeout=10)
    response.raise_for_status()  # Raise an exception for error HTTP statuses
    return response.json()
def response_break_out(response):
    """Format a Fact Check Tools API response as human-readable text.

    Only the first two claims are included, keeping the downstream prompt
    short (this preserves the original loop's break-after-two behavior).

    Args:
        response (dict): Parsed JSON from the claims:search endpoint.

    Returns:
        str: A summary of claim texts with publisher ratings, or a
        "no fact checks" notice when the response has no claims.
    """
    claims = response.get("claims")
    if not claims:
        return "No fact checks found for this claim."

    lines = ["Below is the searched result: "]
    for claim in claims[:2]:  # cap at two claims
        lines.append("claim: " + claim.get("text", ""))
        # "claimReview" and nested keys can be absent on sparse results;
        # .get() avoids the KeyError the original raised in that case.
        for review in claim.get("claimReview", []):
            publisher = review.get("publisher", {}).get("name", "unknown")
            lines.append("publisher: " + publisher)
            lines.append("rating: " + review.get("textualRating", "unknown"))
    return "\n".join(lines) + "\n"
def create_tools():
    """Build the agent's tool list (currently just Tavily web search).

    Returns:
        list: A single-element list containing the Tavily search tool.
    """
    # SECURITY NOTE(review): prefer the TAVILY_API_KEY environment variable;
    # the hard-coded fallback key should be rotated and removed.
    api_key = os.environ.get("TAVILY_API_KEY", "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")
    search = TavilySearchAPIWrapper(tavily_api_key=api_key)
    # NOTE: the original string began with a stray quote character
    # (""""A search...), which embedded a literal '"' in the description.
    description = (
        "A search engine optimized for comprehensive, accurate, "
        "and trusted results. Useful for when you need to answer questions "
        "about current events or about recent information. "
        "Input should be a search query. "
        "If the user is asking about something that you don't know about, "
        "you should probably use this tool to see if that can provide any information."
    )
    tavily_tool = TavilySearchResults(api_wrapper=search, description=description)
    return [tavily_tool]
def create_llm_with_tools(llm, tools):
    """Return *llm* with *tools* bound to it as callable functions."""
    bound_llm = llm.bind(functions=tools)
    return bound_llm
def create_agent_chain(tools, llm):
    """Wire *llm* and *tools* into a structured-chat ReAct agent executor."""
    agent_options = {
        "agent": AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        "verbose": True,
    }
    return initialize_agent(tools, llm, **agent_options)
def get_user_input():
    """Read and return the user's question from a Streamlit text box."""
    question = st.text_input("Enter your question")
    return question
def display_response(response):
    """Render *response* in the Streamlit page body."""
    st.write(response)
def main():
    """Run the Streamlit fact-checking chatbot.

    Flow: read a claim from the user, show the LLM's unassisted answer,
    fetch published fact checks for the claim, then ask a tool-using agent
    for a True/False verdict grounded in those fact checks.
    """
    st.title('Fact-Checking Chatbot')
    # SECURITY NOTE(review): prefer the GOOGLE_API_KEY environment variable;
    # the hard-coded fallback key should be rotated and removed.
    llm = GoogleGenerativeAI(
        model="gemini-pro",
        google_api_key=os.environ.get(
            "GOOGLE_API_KEY", "AIzaSyBNfTHLMjR9vGiomZsW9NFsUTwc2U2NuFA"
        ),
    )
    tools = create_tools()
    # (The original also built an unused `llm_with_tools` binding; dropped.)
    agent_chain = create_agent_chain(tools, llm)
    user_input = get_user_input()
    if user_input:
        # Show the model's unassisted answer before the fact-checked verdict.
        response = llm.invoke(user_input)
        display_response(response)
        prompt = """
    You are a fact-checker. You are asked to verify the following statement based on the information you get from your tool, the search result we provided,
    and your knowledge. You should provide a response that is based on the information you have and that is as accurate as possible.
    Your response should be True or False!!! If you are not sure, you should say that you are not sure.
    """
        # The default prompt doubles as the text-area label; a non-empty entry
        # overrides it below.
        new_prompt = st.text_area(prompt)
        # A Fact Check API outage should degrade gracefully, not crash the app.
        try:
            result = query_fact_check_api(user_input)
            facts = response_break_out(result)
        except requests.RequestException:
            facts = "No fact checks found for this claim."
        if new_prompt:
            prompt = new_prompt
        answer = agent_chain.invoke(
            prompt + "\n " + facts + "\n" + user_input,
        )
        display_response(answer)
# Entry point when executed directly (e.g. `streamlit run app.py`).
if __name__ == "__main__":
    main()