Update app.py
app.py CHANGED
@@ -64,68 +64,6 @@ def get_current_time_and_date():
 current_time_and_date = get_current_time_and_date()
 
 
-# def fetch_local_events():
-#     api_key = os.environ['SERP_API']
-#     url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Birmingham&hl=en&gl=us&api_key={api_key}'
-#     response = requests.get(url)
-#     if response.status_code == 200:
-#         events_results = response.json().get("events_results", [])
-#         events_html = """
-#         <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Events</h2>
-#         <style>
-#             table {
-#                 font-family: 'Verdana', sans-serif;
-#                 color: #333;
-#                 border-collapse: collapse;
-#                 width: 100%;
-#             }
-#             th, td {
-#                 border: 1px solid #ddd;
-#                 padding: 8px;
-#             }
-#             th {
-#                 background-color: #f2f2f2;
-#                 color: #333;
-#                 text-align: left;
-#             }
-#             tr:hover {
-#                 background-color: #f5f5f5;
-#             }
-#             .event-link {
-#                 color: #1E90FF;
-#                 text-decoration: none;
-#             }
-#             .event-link:hover {
-#                 text-decoration: underline;
-#             }
-#         </style>
-#         <table>
-#             <tr>
-#                 <th>Title</th>
-#                 <th>Date and Time</th>
-#                 <th>Location</th>
-#             </tr>
-#         """
-#         for event in events_results:
-#             title = event.get("title", "No title")
-#             date_info = event.get("date", {})
-#             date = f"{date_info.get('start_date', '')} {date_info.get('when', '')}".replace("{", "").replace("}", "")
-#             location = event.get("address", "No location")
-#             if isinstance(location, list):
-#                 location = " ".join(location)
-#             location = location.replace("[", "").replace("]", "")
-#             link = event.get("link", "#")
-#             events_html += f"""
-#             <tr>
-#                 <td><a class='event-link' href='{link}' target='_blank'>{title}</a></td>
-#                 <td>{date}</td>
-#                 <td>{location}</td>
-#             </tr>
-#             """
-#         events_html += "</table>"
-#         return events_html
-#     else:
-#         return "<p>Failed to fetch local events</p>"
 
 def fetch_local_events():
 
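For reference, the commented-out block removed above boiled down to: read a key from the SERP_API environment variable, query SerpAPI's google_events engine, and render the results as an HTML table. A condensed sketch of that logic is below. The name fetch_local_events_serpapi and the dropped inline CSS are choices made here, not part of the app; it illustrates what was deleted and is not a replacement for the kept fetch_local_events().

import os
import requests

def fetch_local_events_serpapi(query="Events in Birmingham"):
    # Assumes a SERP_API environment variable, as the removed code did.
    api_key = os.environ["SERP_API"]
    url = (
        "https://serpapi.com/search.json?engine=google_events"
        f"&q={query.replace(' ', '+')}&hl=en&gl=us&api_key={api_key}"
    )
    response = requests.get(url)
    if response.status_code != 200:
        return "<p>Failed to fetch local events</p>"

    rows = []
    for event in response.json().get("events_results", []):
        title = event.get("title", "No title")
        date_info = event.get("date", {})
        date = f"{date_info.get('start_date', '')} {date_info.get('when', '')}"
        location = event.get("address", "No location")
        if isinstance(location, list):
            location = " ".join(location)
        link = event.get("link", "#")
        rows.append(
            f"<tr><td><a href='{link}' target='_blank'>{title}</a></td>"
            f"<td>{date}</td><td>{location}</td></tr>"
        )
    header = "<tr><th>Title</th><th>Date and Time</th><th>Location</th></tr>"
    return "<table>" + header + "".join(rows) + "</table>"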
@@ -351,52 +289,22 @@ def generate_answer(message, choice):
     addresses = extract_addresses(response['output'])
     return response['output'], addresses
 
-# def bot(history, choice, tts_choice):
-#     if not history:
-#         return history
-#     response, addresses = generate_answer(history[-1][0], choice)
-#     history[-1][1] = ""
-
-#     with concurrent.futures.ThreadPoolExecutor() as executor:
-#         if tts_choice == "Alpha":
-#             audio_future = executor.submit(generate_audio_elevenlabs, response)
-#         elif tts_choice == "Beta":
-#             audio_future = executor.submit(generate_audio_parler_tts, response)
-#         elif tts_choice == "Gamma":
-#             audio_future = executor.submit(generate_audio_mars5, response)
-#         elif tts_choice == "Delta":
-#             audio_future = executor.submit(generate_audio_fishaudio, response)
-
-#     for character in response:
-#         history[-1][1] += character
-#         time.sleep(0.05)
-#         yield history, None
-
-#     audio_path = audio_future.result()
-#     yield history, audio_path
-
 def bot(history, choice, tts_choice):
     if not history:
         return history
+    response, addresses = generate_answer(history[-1][0], choice)
+    history[-1][1] = ""
 
-    user_message = history[-1][0]
-    response_text, addresses = generate_answer(user_message, choice)
-
-    # Ensure history has the correct format
-    if len(history[-1]) == 1:
-        history[-1].append(response_text)
-    else:
-        history[-1][1] = response_text
-
     with concurrent.futures.ThreadPoolExecutor() as executor:
         if tts_choice == "Alpha":
-            audio_future = executor.submit(generate_audio_elevenlabs, response_text)
+            audio_future = executor.submit(generate_audio_elevenlabs, response)
         elif tts_choice == "Beta":
-            audio_future = executor.submit(generate_audio_parler_tts, response_text)
+            audio_future = executor.submit(generate_audio_parler_tts, response)
         elif tts_choice == "Gamma":
-            audio_future = executor.submit(generate_audio_mars5, response_text)
+            audio_future = executor.submit(generate_audio_mars5, response)
 
-    for character in response_text:
+
+    for character in response:
         history[-1][1] += character
         time.sleep(0.05)
         yield history, None
@@ -404,15 +312,14 @@ def bot(history, choice, tts_choice):
     audio_path = audio_future.result()
     yield history, audio_path
 
-
-# history.append((message, None))
-# return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
+
 
 def add_message(history, message):
-    history
-    history.append([message, ""])  # Ensure it is a list with two elements: message and empty response
+    history.append((message, None))
     return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
 
+
+
 # def generate_voice_response(history, tts_choice):
 #     if not history:
 #         return None
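Taken together, the rewritten bot() and add_message() (which now appends a (message, None) pair) go back to the flow of the previously commented-out version: submit the reply text to a TTS backend on a ThreadPoolExecutor, stream the text into the chat history one character at a time via yield, then yield the finished audio path. A minimal, stand-alone sketch of that pattern follows; it uses stub helpers in place of the app's generate_answer() and generate_audio_* functions and a plain list-of-lists history so it runs by itself.

import concurrent.futures
import time

# Stubs standing in for the app's generate_answer() and TTS helpers
# (generate_audio_elevenlabs and friends); names and behavior are illustrative.
def generate_answer_stub(message):
    return f"You said: {message}"

def generate_audio_stub(text):
    time.sleep(0.2)          # pretend to synthesize speech
    return "/tmp/reply.mp3"  # pretend path to the rendered audio file

def bot_stream(history):
    # history is a list of [user_message, assistant_reply] pairs,
    # mirroring how the updated bot() indexes history[-1][0] / history[-1][1].
    if not history:
        return
    response = generate_answer_stub(history[-1][0])
    history[-1][1] = ""
    with concurrent.futures.ThreadPoolExecutor() as executor:
        audio_future = executor.submit(generate_audio_stub, response)
        # Stream the reply one character at a time; each yield lets the
        # chat UI repaint with the partially typed answer.
        for character in response:
            history[-1][1] += character
            time.sleep(0.05)
            yield history, None
        # Final yield carries the audio path once the TTS future is done.
        yield history, audio_future.result()

if __name__ == "__main__":
    for chat, audio in bot_stream([["hello", ""]]):
        pass
    print(chat[-1][1], audio)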
@@ -584,26 +491,6 @@ pipe_asr = pipeline("automatic-speech-recognition", model=model, tokenizer=proce
 
 base_audio_drive = "/data/audio"
 
-# def transcribe_function(stream, new_chunk):
-#     try:
-#         sr, y = new_chunk[0], new_chunk[1]
-#     except TypeError:
-#         print(f"Error chunk structure: {type(new_chunk)}, content: {new_chunk}")
-#         return stream, "", None
-
-#     y = y.astype(np.float32) / np.max(np.abs(y))
-
-#     if stream is not None:
-#         stream = np.concatenate([stream, y])
-#     else:
-#         stream = y
-
-#     result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
-
-#     full_text = result.get("text","")
-
-#     return stream, full_text, result
-
 def transcribe_function(stream, new_chunk):
     try:
         sr, y = new_chunk[0], new_chunk[1]
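Both the deleted commented-out draft and, judging by the surrounding context, the kept transcribe_function() follow the usual streaming-ASR accumulation pattern: normalize each incoming (sample_rate, array) chunk, concatenate it onto the running stream, and re-run recognition over the whole buffer. A stand-alone sketch with a dummy recognizer in place of the app's pipe_asr pipeline; dummy_asr and transcribe_chunk are illustrative names, and the guard against all-zero chunks is an addition here.

import numpy as np

def dummy_asr(inputs):
    # Stand-in for the Whisper pipeline; reports how much audio has accumulated.
    seconds = len(inputs["array"]) / inputs["sampling_rate"]
    return {"text": f"[{seconds:.2f}s of audio transcribed]"}

def transcribe_chunk(stream, new_chunk):
    try:
        sr, y = new_chunk  # each chunk arrives as (sample_rate, ndarray)
    except TypeError:
        print(f"Unexpected chunk structure: {type(new_chunk)}")
        return stream, "", None

    y = y.astype(np.float32)
    peak = np.max(np.abs(y))
    if peak > 0:  # avoid dividing by zero on silent chunks
        y = y / peak

    stream = y if stream is None else np.concatenate([stream, y])
    result = dummy_asr({"array": stream, "sampling_rate": sr})
    return stream, result.get("text", ""), result

stream = None
for _ in range(3):
    chunk = (16000, np.random.randn(16000).astype(np.int16))
    stream, text, _ = transcribe_chunk(stream, chunk)
print(text)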
@@ -624,6 +511,7 @@ def transcribe_function(stream, new_chunk):
 
     return stream, full_text, result
 
+
 def update_map_with_response(history):
     if not history:
         return ""