lostecho committed on
Commit
f679d08
·
verified ·
1 Parent(s): 265cb40

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -3
app.py CHANGED
@@ -1,5 +1,7 @@
1
  import streamlit as st
2
  from getpass import getpass
 
 
3
  from langchain_google_genai import GoogleGenerativeAI
4
  from langchain.prompts import PromptTemplate
5
  from langchain.agents import AgentExecutor, initialize_agent, AgentType
@@ -12,6 +14,44 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
12
  from langchain_core.pydantic_v1 import BaseModel, Field
13
  from langchain_google_genai import ChatGoogleGenerativeAI
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  def create_tools():
16
  search = TavilySearchAPIWrapper(tavily_api_key='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')
17
  description = """"A search engine optimized for comprehensive, accurate, \
@@ -51,15 +91,19 @@ def main():
51
  response = llm.invoke(user_input)
52
  display_response(response)
53
  prompt = """
54
- You are a fact-checker. You are asked to verify the following statement based on the information you get from your tool
55
  and your knowledge. You should provide a response that is based on the information you have and that is as accurate as possible.
56
- Your response should be True or False. If you are not sure, you should say that you are not sure.
57
  """
58
  new_prompt = st.text_area(prompt)
 
 
 
 
59
  if new_prompt:
60
  prompt = new_prompt
61
  answer = agent_chain.invoke(
62
- prompt + "\n " + user_input,
63
  )
64
  display_response(answer)
65
 
 
1
  import streamlit as st
2
  from getpass import getpass
3
+ import requests
4
+ import json
5
  from langchain_google_genai import GoogleGenerativeAI
6
  from langchain.prompts import PromptTemplate
7
  from langchain.agents import AgentExecutor, initialize_agent, AgentType
 
14
  from langchain_core.pydantic_v1 import BaseModel, Field
15
  from langchain_google_genai import ChatGoogleGenerativeAI
16
 
17
+ API_GOOGLE_SEARCH_KEY = "AIzaSyA4oDDFtPxAfmPC8EcfQrkByb9xKm2QfMc"
18
+
19
def query_fact_check_api(claim):
    """Query the Google Fact Check Tools API for fact checks of a claim.

    Args:
        claim (str): The claim text to search for fact checks.

    Returns:
        dict: The API response parsed as a JSON object.

    Raises:
        requests.HTTPError: If the API responds with a 4xx/5xx status.
        requests.Timeout: If the API does not respond within the timeout.
    """
    url = "https://factchecktools.googleapis.com/v1alpha1/claims:search"
    params = {
        # SECURITY NOTE(review): API_GOOGLE_SEARCH_KEY is hard-coded at module
        # level in this file; it should be loaded from an environment variable
        # or a secrets store instead of being committed to the repository.
        "key": API_GOOGLE_SEARCH_KEY,
        "query": claim,
    }

    # requests has no default timeout; without one, an unreachable API would
    # hang the Streamlit app indefinitely.
    response = requests.get(url, params=params, timeout=10)
    response.raise_for_status()  # Raise an exception for error HTTP statuses

    return response.json()
37
+
38
def response_break_out(response):
    """Format a Fact Check Tools API response into a short text summary.

    At most the first two claims are summarized (claim text, then each
    review's publisher name and textual rating) so the result stays compact
    enough to prepend to an LLM prompt.

    Args:
        response (dict): Parsed JSON from query_fact_check_api().

    Returns:
        str: A multi-line summary of the fact checks, or a fallback message
        when the response contains no claims.
    """
    claims = response.get("claims")
    if not claims:
        return "No fact checks found for this claim."

    parts = ["Below is the searched result: \n"]
    # Cap at two claims — same limit the original iteration counter enforced.
    for claim in claims[:2]:
        # Real API responses may omit any of these keys; use defaults
        # rather than raising KeyError mid-formatting.
        parts.append("claim: " + claim.get("text", "") + "\n")
        for review in claim.get("claimReview", []):
            publisher_name = review.get("publisher", {}).get("name", "")
            parts.append("publisher: " + publisher_name + "\n")
            parts.append("rating: " + review.get("textualRating", "") + "\n")

    # join avoids quadratic string concatenation.
    return "".join(parts)
54
+
55
  def create_tools():
56
  search = TavilySearchAPIWrapper(tavily_api_key='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')
57
  description = """"A search engine optimized for comprehensive, accurate, \
 
91
  response = llm.invoke(user_input)
92
  display_response(response)
93
  prompt = """
94
+ You are a fact-checker. You are asked to verify the following statement based on the information you get from your tool, the search result we provided,
95
  and your knowledge. You should provide a response that is based on the information you have and that is as accurate as possible.
96
+ Your response should be True or False!!! If you are not sure, you should say that you are not sure.
97
  """
98
  new_prompt = st.text_area(prompt)
99
+
100
+ result = query_fact_check_api(user_input)
101
+ facts = response_break_out(result)
102
+
103
  if new_prompt:
104
  prompt = new_prompt
105
  answer = agent_chain.invoke(
106
+ prompt + "\n " + facts + "\n" + user_input,
107
  )
108
  display_response(answer)
109