Shreyas094 commited on
Commit
33f1e50
·
verified ·
1 Parent(s): e8400ef

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +486 -43
app.py CHANGED
@@ -1,63 +1,506 @@
 
1
  import gradio as gr
 
 
 
 
 
 
 
2
  from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
 
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
  ):
18
- messages = [{"role": "system", "content": system_message}]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
 
26
- messages.append({"role": "user", "content": message})
 
 
 
 
 
 
 
 
27
 
28
- response = ""
 
 
 
29
 
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
 
39
- response += token
40
- yield response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  """
43
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  """
45
- demo = gr.ChatInterface(
46
- respond,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
50
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
- gr.Slider(
52
- minimum=0.1,
53
- maximum=1.0,
54
- value=0.95,
55
- step=0.05,
56
- label="Top-p (nucleus sampling)",
 
 
57
  ),
 
 
 
58
  ],
 
 
 
 
 
 
 
 
 
 
59
  )
60
 
61
-
62
  if __name__ == "__main__":
63
- demo.launch()
 
 
1
+ import requests
2
  import gradio as gr
3
+ from bs4 import BeautifulSoup
4
+ import logging
5
+ from urllib.parse import urlparse
6
+ from requests.adapters import HTTPAdapter
7
+ from requests.packages.urllib3.util.retry import Retry
8
+ from trafilatura import fetch_url, extract
9
+ import json
10
  from huggingface_hub import InferenceClient
11
+ import random
12
+ import time
13
+ from sentence_transformers import SentenceTransformer, util
14
+ import torch
15
+ from datetime import datetime
16
+ import os
17
+ from dotenv import load_dotenv
18
 
19
# Load environment variables from a .env file
load_dotenv()

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# SearXNG instance details
SEARXNG_URL = 'https://shreyas094-searxng-local.hf.space/search'
# NOTE(review): secret committed to source — rotate this key and load it from
# an environment variable alongside HF_TOKEN. (It is currently unused below.)
SEARXNG_KEY = 'f9f07f93b37b8483aadb5ba717f556f3a4ac507b281b4ca01e6c6288aa3e3ae5'

# Hugging Face inference client, authenticated via the HF_TOKEN env var.
HF_TOKEN = os.getenv('HF_TOKEN')
client = InferenceClient(
    "mistralai/Mistral-Nemo-Instruct-2407",
    token=HF_TOKEN,
)  # fixed: a stray duplicate ")" here was a SyntaxError

# Initialize the sentence-embedding model used for reranking and deduplication.
similarity_model = SentenceTransformer('all-MiniLM-L6-v2')
40
 
41
 
42
# Build a requests session that transparently retries transient failures.
def requests_retry_session(
    retries=1,
    backoff_factor=0.1,
    status_forcelist=(500, 502, 504),
    session=None,
):
    """Return *session* (or a fresh one) configured with an HTTP retry policy.

    Retries connect/read failures and the HTTP status codes listed in
    *status_forcelist*, backing off by *backoff_factor* between attempts.
    """
    http = session if session is not None else requests.Session()
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    # Mount the retrying adapter for both plain and TLS endpoints.
    for scheme in ('http://', 'https://'):
        http.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return http
61
+
62
def is_valid_url(url):
    """Return True when *url* parses with both a scheme and a network location."""
    try:
        parsed = urlparse(url)
    except ValueError:
        return False
    return bool(parsed.scheme) and bool(parsed.netloc)
68
+
69
def scrape_with_bs4(url, session):
    """Fetch *url* via *session* and return its visible text, '' on failure.

    Prefers a <main>/<article>/<div class="content"> region when one exists,
    otherwise extracts text from the whole document.
    """
    try:
        response = session.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')

        # Fall back to the full page when no dedicated content region is found.
        target = (
            soup.find('main')
            or soup.find('article')
            or soup.find('div', class_='content')
            or soup
        )
        return target.get_text(strip=True)
    except Exception as e:
        logger.error(f"Error scraping {url} with BeautifulSoup: {e}")
        return ""
86
+
87
def scrape_with_trafilatura(url):
    """Download *url* and return its main text via trafilatura, '' on failure."""
    try:
        page = fetch_url(url)
        extracted = extract(page)
    except Exception as e:
        logger.error(f"Error scraping {url} with Trafilatura: {e}")
        return ""
    return extracted or ""
95
+
96
def rephrase_query(chat_history, query, temperature=0.2):
    """Ask the LLM to rewrite *query* into a search-ready question.

    *chat_history* is a plain-text transcript; the model decides whether the
    new query continues that conversation (and folds context in) or starts a
    fresh topic. Falls back to the original *query* on any LLM failure.
    """
    system_prompt = """You are a highly intelligent conversational chatbot. Your task is to analyze the given context and new query, then decide whether to rephrase the query with or without incorporating the context. Follow these steps:
1. Determine if the new query is a continuation of the previous conversation or an entirely new topic.
2. If it's a continuation, rephrase the query by incorporating relevant information from the context to make it more specific and contextual.
3. If it's a new topic, rephrase the query to make it more appropriate for a web search, focusing on clarity and accuracy without using the previous context.
4. Provide ONLY the rephrased query without any additional explanation or reasoning."""

    user_prompt = f"""
Context:
{chat_history}

New query: {query}

Rephrased query:
"""

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    try:
        logger.info(f"Sending rephrasing request to LLM with temperature {temperature}")
        response = client.chat_completion(
            messages=messages,
            max_tokens=150,
            temperature=temperature
        )
        logger.info("Received rephrased query from LLM")
        rephrased_question = response.choices[0].message.content.strip()

        # Remove surrounding quotes if present — the model sometimes wraps its
        # answer in single or double quotes.
        if (rephrased_question.startswith('"') and rephrased_question.endswith('"')) or \
           (rephrased_question.startswith("'") and rephrased_question.endswith("'")):
            rephrased_question = rephrased_question[1:-1].strip()

        logger.info(f"Rephrased Query (cleaned): {rephrased_question}")
        return rephrased_question
    except Exception as e:
        logger.error(f"Error rephrasing query with LLM: {e}")
        return query  # Fallback to original query if rephrasing fails
 
 
 
137
 
138
def rerank_documents(query, documents):
    """Return the top 5 *documents* ranked by cosine similarity to *query*.

    Each document must carry a 'summary' key, which is what gets embedded.
    Returns the input unchanged when there is nothing to rank, and falls back
    to the first five documents if embedding or scoring fails.
    """
    try:
        # Encode the query and the document summaries.
        query_embedding = similarity_model.encode(query, convert_to_tensor=True)
        doc_summaries = [doc['summary'] for doc in documents]

        if not doc_summaries:
            logger.warning("No document summaries to rerank.")
            return documents  # Return original documents if there's nothing to rerank

        doc_embeddings = similarity_model.encode(doc_summaries, convert_to_tensor=True)

        # Cosine similarity between the query and every summary (1-D tensor).
        cosine_scores = util.cos_sim(query_embedding, doc_embeddings)[0]

        # Fix: the original also computed dot-product scores and zipped them
        # in, but they were never used — only cosine similarity orders the
        # results. The dead computation has been removed.
        scored_documents = sorted(
            zip(documents, cosine_scores),
            key=lambda pair: pair[1],
            reverse=True,
        )

        # Keep only the five best-matching documents.
        reranked_docs = [doc for doc, _score in scored_documents[:5]]
        logger.info(f"Reranked to top {len(reranked_docs)} documents.")
        return reranked_docs
    except Exception as e:
        logger.error(f"Error during reranking documents: {e}")
        return documents[:5]  # Fallback to first 5 documents if reranking fails
173
+
174
def compute_similarity(text1, text2):
    """Return the cosine similarity between two texts as a Python float."""
    emb_a = similarity_model.encode(text1, convert_to_tensor=True)
    emb_b = similarity_model.encode(text2, convert_to_tensor=True)
    return util.pytorch_cos_sim(emb_a, emb_b).item()
183
+
184
def is_content_unique(new_content, existing_contents, similarity_threshold=0.8):
    """Return False when *new_content* is too similar to any existing entry.

    "Too similar" means cosine similarity above *similarity_threshold*.
    """
    return not any(
        compute_similarity(new_content, existing) > similarity_threshold
        for existing in existing_contents
    )
190
+
191
def assess_relevance_and_summarize(llm_client, query, document, temperature=0.2):
    """Ask the LLM whether *document* is financially relevant to *query*.

    *document* must carry a 'content' key with the scraped page text.
    Returns the raw model reply, expected in the form
    "Relevant: [Yes/No]\\nSummary: ...", or an error string on failure.
    """
    system_prompt = """You are a financial analyst AI assistant. Your task is to assess whether the given text is relevant to the user's query from a financial perspective and provide a brief summary if it is relevant."""

    user_prompt = f"""
Query: {query}

Document Content:
{document['content']}

Instructions:
1. Assess if the document is relevant to the query from a financial analyst's perspective.
2. If relevant, summarize the main points in 1-2 sentences.
3. If not relevant, simply state "Not relevant".

Your response should be in the following format:
Relevant: [Yes/No]
Summary: [Your 1-2 sentence summary if relevant, or "Not relevant" if not]

Remember to focus on financial aspects and implications in your assessment and summary.
"""

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    try:
        response = llm_client.chat_completion(
            messages=messages,
            max_tokens=150,
            temperature=temperature
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        logger.error(f"Error assessing relevance and summarizing with LLM: {e}")
        return "Error: Unable to assess relevance and summarize"
227
+
228
def scrape_full_content(url, scraper="trafilatura"):
    """Return the full page text of *url*, '' on any failure.

    *scraper* selects the backend: "bs4" (BeautifulSoup over a retrying
    requests session) or "trafilatura" (the default).
    """
    try:
        logger.info(f"Scraping full content from: {url}")

        if scraper == "bs4":
            session = requests_retry_session()
            response = session.get(url, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.content, 'html.parser')

            # Prefer a dedicated content region, fall back to the whole page.
            region = (
                soup.find('main')
                or soup.find('article')
                or soup.find('div', class_='content')
                or soup
            )
            content = region.get_text(strip=True, separator='\n')
        else:  # trafilatura
            downloaded = fetch_url(url)
            content = extract(downloaded, include_comments=False, include_tables=True, no_fallback=False)

        return content or ""
    except Exception as e:
        logger.error(f"Error scraping full content from {url}: {e}")
        return ""
253
+
254
def llm_summarize(query, documents, llm_client, temperature=0.2):
    """Generate a cited, research-style answer to *query* from *documents*.

    Each document must provide 'title', 'url' and 'full_content' keys; the
    full content is wrapped in (SCRAPED CONTENT) markers so the model can
    distinguish retrieved text from the user conversation. Returns the
    model's markdown answer, or an error string if the LLM call fails.
    """
    system_prompt = """You are Sentinel, a world class Financial analysis AI model who is expert at searching the web and answering user's queries. You are also an expert at summarizing web pages or documents and searching for content in them."""

    # Prepare the context from the documents
    context = "\n\n".join([f"Document {i+1}:\nTitle: {doc['title']}\nURL: {doc['url']}\n(SCRAPED CONTENT)\n{doc['full_content']}\n(/SCRAPED CONTENT)" for i, doc in enumerate(documents)])

    user_prompt = f"""
Query: {query}

Context: {context}
Instructions: Write a detailed, long and complete research document that is informative and relevant to the user, who is a financial analyst, query based on provided context (the context consists of search results containing a brief description of the content of that page). You must use this context to answer the user's query in the best way possible.
Use an unbiased and writer tone in your response. Do not repeat the text. You must provide the answer in the response itself. If the user asks for links you can provide them.
If the user asks to summarize content from some links, you will be provided the entire content of the page inside the (SCRAPED CONTENT) block.
You can then use this content to summarize the text.Your responses should be detailed in length be informative, accurate and relevant to the user's query.
You can use markdowns to format your response. You should use bullet points to list the information.
Make sure the answer is long and is informative in a research document style. You have to cite the answer using [number] notation along with the appropriate source URL embedded in the notation.
You must cite the sentences with their relevant context number.
You must cite each and every part of the answer so the user can know where the information is coming from. Place these citations at the end of that particular sentence.
You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times.
The number refers to the number of the search result (passed in the context) used to generate that part of the answer. Anything inside the following (SCRAPED CONTENT) block provided below is for your knowledge returned by the search engine and is not shared by the user.
You have to answer question on the basis of it and cite the relevant information from it but you do not have to talk about the context in your response.
If you think there's nothing relevant in the search results, you can say that 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'.
You do not need to do this for summarization tasks. Anything between the (SCRAPED CONTENT) is retrieved from a search engine and is not a part of the conversation with the user.

Please provide a comprehensive summary based on the above instructions:
"""

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    try:
        response = llm_client.chat_completion(
            messages=messages,
            max_tokens=5000,
            temperature=temperature
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        logger.error(f"Error in LLM summarization: {e}")
        return "Error: Unable to generate a summary. Please try again."
297
+
298
def search_and_scrape(query, chat_history, num_results=5, scraper="trafilatura", max_chars=2000, time_range="", language="all", category="",
                      engines=None, safesearch=2, method="GET", llm_temperature=0.2):
    """Run the full pipeline: rephrase -> SearXNG search -> scrape -> assess
    relevance -> rerank -> full-content scrape -> LLM summary.

    Args:
        query: The user's raw question.
        chat_history: Plain-text transcript used to contextualize rephrasing.
        num_results: Maximum number of search results to scrape.
        scraper: "bs4" or "trafilatura" (default).
        max_chars: Per-document cap on the scraped preview content.
        time_range / language / category / engines / safesearch: SearXNG
            search parameters; empty values are dropped from the request.
        method: HTTP method for the SearXNG request ("GET" or "POST").
        llm_temperature: Sampling temperature for all LLM calls.

    Returns:
        A markdown summary string, or a human-readable status/error message.
    """
    # Fix: the original declared `engines=[]`, a shared mutable default.
    if engines is None:
        engines = []
    try:
        # Step 1: Rephrase the query using the conversation context.
        rephrased_query = rephrase_query(chat_history, query, temperature=llm_temperature)
        logger.info(f"Rephrased Query: {rephrased_query}")

        if not rephrased_query or rephrased_query.lower() == "not_needed":
            logger.info("No need to perform search based on the rephrased query.")
            return "No search needed for the provided input."

        # Search query parameters
        params = {
            'q': rephrased_query,
            'format': 'json',
            'num_results': num_results,
            'time_range': time_range,
            'language': language,
            'category': category,
            'engines': ','.join(engines),
            'safesearch': safesearch
        }

        # Remove empty parameters so SearXNG applies its own defaults.
        params = {k: v for k, v in params.items() if v != ""}

        # If no engines are specified, set default engines
        if 'engines' not in params:
            params['engines'] = 'google'  # Default to 'google' or any preferred engine
            logger.info("No engines specified. Defaulting to 'google'.")

        # Headers for SearXNG request
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'en-US,en;q=0.5',
            'Origin': 'https://shreyas094-searxng-local.hf.space',
            'Referer': 'https://shreyas094-searxng-local.hf.space/',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
        }

        # Step 2: Send request to SearXNG.
        logger.info(f"Sending request to SearXNG for query: {rephrased_query}")
        session = requests_retry_session()

        # Fix: the original passed `verify=certifi.where()` but never imported
        # certifi, raising NameError on the first request. requests already
        # verifies TLS against certifi's CA bundle by default, so the explicit
        # argument is unnecessary.
        if method.upper() == "GET":
            response = session.get(SEARXNG_URL, params=params, headers=headers, timeout=10)
        else:  # POST
            response = session.post(SEARXNG_URL, data=params, headers=headers, timeout=10)

        response.raise_for_status()

        search_results = response.json()
        logger.debug(f"SearXNG Response: {search_results}")

        num_received = len(search_results.get('results', []))
        logger.info(f"Received {num_received} results from SearXNG")

        if num_received == 0:
            logger.warning("No results returned from SearXNG.")
            return "No results found for the given query."

        scraped_content = []

        for result in search_results.get('results', [])[:num_results]:
            url = result.get('url', '')
            title = result.get('title', 'No title')

            if not is_valid_url(url):
                logger.warning(f"Invalid URL: {url}")
                continue

            try:
                logger.info(f"Scraping content from: {url}")

                if scraper == "bs4":
                    content = scrape_with_bs4(url, session)
                else:  # trafilatura
                    content = scrape_with_trafilatura(url)

                # Limit preview content to max_chars
                scraped_content.append({
                    "title": title,
                    "url": url,
                    "content": content[:max_chars],
                    "scraper": scraper
                })
            except requests.exceptions.RequestException as e:
                logger.error(f"Error scraping {url}: {e}")
            except Exception as e:
                logger.error(f"Unexpected error while scraping {url}: {e}")

        if not scraped_content:
            logger.warning("No content scraped from search results.")
            return "No content could be scraped from the search results."

        # Step 3: Assess relevance, summarize, and check for uniqueness
        relevant_documents = []
        unique_summaries = []
        for doc in scraped_content:
            assessment = assess_relevance_and_summarize(client, rephrased_query, doc, temperature=llm_temperature)
            # Fix: `relevance, summary = assessment.split('\n', 1)` raised
            # ValueError (aborting the whole search) whenever the LLM replied
            # on a single line; partition() degrades gracefully instead.
            relevance, _, summary = assessment.partition('\n')

            if relevance.strip().lower() == "relevant: yes":
                summary_text = summary.replace("Summary: ", "").strip()

                if is_content_unique(summary_text, unique_summaries):
                    relevant_documents.append({
                        "title": doc['title'],
                        "url": doc['url'],
                        "summary": summary_text,
                        "scraper": doc['scraper']
                    })
                    unique_summaries.append(summary_text)
                else:
                    logger.info(f"Skipping similar content: {doc['title']}")

        if not relevant_documents:
            logger.warning("No relevant and unique documents found.")
            return "No relevant and unique financial news found for the given query."

        # Step 4: Rerank documents based on similarity to query
        reranked_docs = rerank_documents(rephrased_query, relevant_documents)

        if not reranked_docs:
            logger.warning("No documents remained after reranking.")
            return "No relevant financial news found after filtering and ranking."

        logger.info(f"Reranked and filtered to top {len(reranked_docs)} unique, finance-related documents.")

        # Step 5: Scrape full content for top 5 documents
        for doc in reranked_docs[:5]:
            doc['full_content'] = scrape_full_content(doc['url'], scraper)

        # Step 6: LLM Summarization
        llm_summary = llm_summarize(query, reranked_docs[:5], client, temperature=llm_temperature)

        return llm_summary

    except requests.exceptions.RequestException as e:
        logger.error(f"Request exception: {e}")
        return f"An error occurred during the search: {e}"
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        return f"An unexpected error occurred: {e}"
449
+
450
+
451
def chat_function(message, history, num_results, scraper, max_chars, time_range, language, category, engines, safesearch, method, llm_temperature):
    """Gradio ChatInterface callback: run the search pipeline and yield the answer.

    The extra arguments mirror the UI's additional_inputs; *history* is
    Gradio's list of [user_message, assistant_message] pairs.
    """
    # Fix: gr.ChatInterface passes history as [user_msg, assistant_msg] pairs,
    # not (role, message) tuples. The original rendered each turn as
    # "<user text>: <assistant text>", corrupting the transcript that
    # rephrase_query relies on. Label each side explicitly instead.
    turns = []
    for user_msg, assistant_msg in history:
        if user_msg:
            turns.append(f"Human: {user_msg}")
        if assistant_msg:
            turns.append(f"Assistant: {assistant_msg}")
    chat_history = "\n".join(turns)

    response = search_and_scrape(
        query=message,
        chat_history=chat_history,
        num_results=num_results,
        scraper=scraper,
        max_chars=max_chars,
        time_range=time_range,
        language=language,
        category=category,
        engines=engines,
        safesearch=safesearch,
        method=method,
        llm_temperature=llm_temperature
    )

    yield response
470
+
471
# Chat UI: the chat box plus an "Advanced Parameters" accordion whose widgets
# map 1:1 onto chat_function's extra arguments (order matters).
# NOTE(review): retry_btn/undo_btn/clear_btn and `likeable` were removed in
# Gradio 5 — this assumes a Gradio 4.x pin; confirm against requirements.
iface = gr.ChatInterface(
    chat_function,
    title="SearXNG Scraper for Financial News",
    description="Enter your query, and I'll search the web for the most recent and relevant financial news, scrape content, and provide summarized results.",
    additional_inputs=[
        gr.Slider(5, 20, value=10, step=1, label="Number of initial results"),
        gr.Dropdown(["bs4", "trafilatura"], value="trafilatura", label="Scraping Method"),
        gr.Slider(500, 10000, value=3000, step=100, label="Max characters to retrieve"),
        gr.Dropdown(["", "day", "week", "month", "year"], value="year", label="Time Range"),
        gr.Dropdown(["all", "en", "fr", "de", "es", "it", "nl", "pt", "pl", "ru", "zh"], value="en", label="Language"),
        gr.Dropdown(["", "general", "news", "images", "videos", "music", "files", "it", "science", "social media"], value="", label="Category"),
        gr.Dropdown(
            ["google", "bing", "duckduckgo", "baidu", "yahoo", "qwant", "startpage"],
            multiselect=True,
            value=["google", "duckduckgo"],
            label="Engines"
        ),
        gr.Slider(0, 2, value=2, step=1, label="Safe Search Level"),
        gr.Radio(["GET", "POST"], value="POST", label="HTTP Method"),
        gr.Slider(0, 1, value=0.2, step=0.1, label="LLM Temperature"),
    ],
    additional_inputs_accordion=gr.Accordion("⚙️ Advanced Parameters", open=True),
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    chatbot=gr.Chatbot(
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        height=400,
    )
)
503
 
 
504
if __name__ == "__main__":
    logger.info("Starting the SearXNG Scraper for Financial News using ChatInterface with Advanced Parameters")
    # share=True also exposes a temporary public Gradio link.
    iface.launch(share=True)