Pijush2023 committed
Update app.py
app.py
CHANGED
@@ -1,3 +1,644 @@
+# import gradio as gr
+# import requests
+# import os
+# import time
+# import re
+# import logging
+# import tempfile
+# import folium
+# import concurrent.futures
+# import torch
+# from PIL import Image
+# from datetime import datetime
+# from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
+# from googlemaps import Client as GoogleMapsClient
+# from gtts import gTTS
+# from diffusers import StableDiffusionPipeline
+# from langchain_openai import OpenAIEmbeddings, ChatOpenAI
+# from langchain_pinecone import PineconeVectorStore
+# from langchain.prompts import PromptTemplate
+# from langchain.chains import RetrievalQA
+# from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+# from langchain.agents import Tool, initialize_agent
+# from huggingface_hub import login
+# from transformers.models.speecht5.number_normalizer import EnglishNumberNormalizer
+# from parler_tts import ParlerTTSForConditionalGeneration
+# from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
+# from scipy.io.wavfile import write as write_wav
+# from pydub import AudioSegment
+# from string import punctuation
+# import librosa
+# from pathlib import Path
+# import torchaudio
+
+# # Check if the token is already set in the environment variables
+# hf_token = os.getenv("HF_TOKEN")
+# if hf_token is None:
+#     print("Please set your Hugging Face token in the environment variables.")
+# else:
+#     login(token=hf_token)
+
+# logging.basicConfig(level=logging.DEBUG)
+
+# embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
+
+# from pinecone import Pinecone
+# pc = Pinecone(api_key=os.environ['PINECONE_API_KEY'])
+
+# index_name = "birmingham-dataset"
+# vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
+# retriever = vectorstore.as_retriever(search_kwargs={'k': 5})
+
+# chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')
+
+# conversational_memory = ConversationBufferWindowMemory(
+#     memory_key='chat_history',
+#     k=10,
+#     return_messages=True
+# )
+
+# def get_current_time_and_date():
+#     now = datetime.now()
+#     return now.strftime("%Y-%m-%d %H:%M:%S")
+
+# current_time_and_date = get_current_time_and_date()
+
+# def fetch_local_events():
+#     api_key = os.environ['SERP_API']
+#     url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Birmingham&hl=en&gl=us&api_key={api_key}'
+#     response = requests.get(url)
+#     if response.status_code == 200:
+#         events_results = response.json().get("events_results", [])
+#         events_html = """
+#         <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Events</h2>
+#         <style>
+#             .event-item {
+#                 font-family: 'Verdana', sans-serif;
+#                 color: #333;
+#                 margin-bottom: 15px;
+#                 padding: 10px;
+#                 font-weight: bold;
+#             }
+#             .event-item a {
+#                 color: #1E90FF;
+#                 text-decoration: none;
+#             }
+#             .event-item a:hover {
+#                 text-decoration: underline;
+#             }
+#         </style>
+#         """
+#         for index, event in enumerate(events_results):
+#             title = event.get("title", "No title")
+#             date = event.get("date", "No date")
+#             location = event.get("address", "No location")
+#             link = event.get("link", "#")
+#             events_html += f"""
+#             <div class="event-item">
+#                 <a href='{link}' target='_blank'>{index + 1}. {title}</a>
+#                 <p>Date: {date}<br>Location: {location}</p>
+#             </div>
+#             """
+#         return events_html
+#     else:
+#         return "<p>Failed to fetch local events</p>"
+
+# def fetch_local_weather():
+#     try:
+#         api_key = os.environ['WEATHER_API']
+#         url = f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/birmingham?unitGroup=metric&include=events%2Calerts%2Chours%2Cdays%2Ccurrent&key={api_key}'
+#         response = requests.get(url)
+#         response.raise_for_status()
+#         jsonData = response.json()
+
+#         current_conditions = jsonData.get("currentConditions", {})
+#         temp_celsius = current_conditions.get("temp", "N/A")
+
+#         if temp_celsius != "N/A":
+#             temp_fahrenheit = int((temp_celsius * 9/5) + 32)
+#         else:
+#             temp_fahrenheit = "N/A"
+
+#         condition = current_conditions.get("conditions", "N/A")
+#         humidity = current_conditions.get("humidity", "N/A")
+
+#         weather_html = f"""
+#         <div class="weather-theme">
+#             <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Local Weather</h2>
+#             <div class="weather-content">
+#                 <div class="weather-icon">
+#                     <img src="https://www.weatherbit.io/static/img/icons/{get_weather_icon(condition)}.png" alt="{condition}" style="width: 100px; height: 100px;">
+#                 </div>
+#                 <div class="weather-details">
+#                     <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Temperature: {temp_fahrenheit}°F</p>
+#                     <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Condition: {condition}</p>
+#                     <p style="font-family: 'Verdana', sans-serif; color: #333; font-size: 1.2em;">Humidity: {humidity}%</p>
+#                 </div>
+#             </div>
+#         </div>
+#         <style>
+#             .weather-theme {{
+#                 animation: backgroundAnimation 10s infinite alternate;
+#                 border-radius: 10px;
+#                 padding: 10px;
+#                 margin-bottom: 15px;
+#                 background: linear-gradient(45deg, #ffcc33, #ff6666, #ffcc33, #ff6666);
+#                 background-size: 400% 400%;
+#                 box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+#                 transition: box-shadow 0.3s ease, background-color 0.3s ease;
+#             }}
+#             .weather-theme:hover {{
+#                 box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
+#                 background-position: 100% 100%;
+#             }}
+#             @keyframes backgroundAnimation {{
+#                 0% {{ background-position: 0% 50%; }}
+#                 100% {{ background-position: 100% 50%; }}
+#             }}
+#             .weather-content {{
+#                 display: flex;
+#                 align-items: center;
+#             }}
+#             .weather-icon {{
+#                 flex: 1;
+#             }}
+#             .weather-details {{
+#                 flex: 3;
+#             }}
+#         </style>
+#         """
+#         return weather_html
+#     except requests.exceptions.RequestException as e:
+#         return f"<p>Failed to fetch local weather: {e}</p>"
+
+# def get_weather_icon(condition):
+#     condition_map = {
+#         "Clear": "c01d",
+#         "Partly Cloudy": "c02d",
+#         "Cloudy": "c03d",
+#         "Overcast": "c04d",
+#         "Mist": "a01d",
+#         "Patchy rain possible": "r01d",
+#         "Light rain": "r02d",
+#         "Moderate rain": "r03d",
+#         "Heavy rain": "r04d",
+#         "Snow": "s01d",
+#         "Thunderstorm": "t01d",
+#         "Fog": "a05d",
+#     }
+#     return condition_map.get(condition, "c04d")
+
+# template1 = """You are an expert concierge who is helpful and a renowned guide for Birmingham,Alabama. Based on weather being a sunny bright day and the today's date is 1st july 2024, use the following pieces of context,
+# memory, and message history, along with your knowledge of perennial events in Birmingham,Alabama, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
+# Use fifteen sentences maximum. Keep the answer as detailed as possible. Always include the address, time, date, and
+# event type and description. Always say "It was my pleasure!" at the end of the answer.
+# {context}
+# Question: {question}
+# Helpful Answer:"""
+
+# template2 = """You are an expert concierge who is helpful and a renowned guide for Birmingham,Alabama. Based on today's weather being a sunny bright day and today's date is 1st july 2024, take the location or address but don't show the location or address on the output prompts. Use the following pieces of context,
+# memory, and message history, along with your knowledge of perennial events in Birmingham,Alabama, to answer the question at the end. If you don't know the answer, just say "Homie, I need to get more data for this," and don't try to make up an answer.
+# Keep the answer short and sweet and crisp. Always say "It was my pleasure!" at the end of the answer.
+# {context}
+# Question: {question}
+# Helpful Answer:"""
+
+# QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
+# QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)
+
+# def build_qa_chain(prompt_template):
+#     qa_chain = RetrievalQA.from_chain_type(
+#         llm=chat_model,
+#         chain_type="stuff",
+#         retriever=retriever,
+#         chain_type_kwargs={"prompt": prompt_template}
+#     )
+#     tools = [
+#         Tool(
+#             name='Knowledge Base',
+#             func=qa_chain,
+#             description='Use this tool when answering general knowledge queries to get more information about the topic'
+#         )
+#     ]
+#     return qa_chain, tools
+
+# def initialize_agent_with_prompt(prompt_template):
+#     qa_chain, tools = build_qa_chain(prompt_template)
+#     agent = initialize_agent(
+#         agent='chat-conversational-react-description',
+#         tools=tools,
+#         llm=chat_model,
+#         verbose=False,
+#         max_iteration=5,
+#         early_stopping_method='generate',
+#         memory=conversational_memory
+#     )
+#     return agent
+
+# def generate_answer(message, choice):
+#     logging.debug(f"generate_answer called with prompt_choice: {choice}")
+
+#     if choice == "Details":
+#         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
+#     elif choice == "Conversational":
+#         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
+#     else:
+#         logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
+#         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
+#     response = agent(message)
+
+#     addresses = extract_addresses(response['output'])
+#     return response['output'], addresses
+
+# def bot(history, choice, tts_choice):
+#     if not history:
+#         return history
+#     response, addresses = generate_answer(history[-1][0], choice)
+#     history[-1][1] = ""
+
+#     with concurrent.futures.ThreadPoolExecutor() as executor:
+#         if tts_choice == "Eleven Labs":
+#             audio_future = executor.submit(generate_audio_elevenlabs, response)
+#         elif tts_choice == "Parler-TTS":
+#             audio_future = executor.submit(generate_audio_parler_tts, response)
+#         elif tts_choice == "MARS5":
+#             audio_future = executor.submit(generate_audio_mars5, response)
+
+
+#         for character in response:
+#             history[-1][1] += character
+#             time.sleep(0.05)
+#             yield history, None
+
+#         audio_path = audio_future.result()
+#         yield history, audio_path
+
+# def add_message(history, message):
+#     history.append((message, None))
+#     return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
+
+# def print_like_dislike(x: gr.LikeData):
+#     print(x.index, x.value, x.liked)
+
+# def extract_addresses(response):
+#     if not isinstance(response, str):
+#         response = str(response)
+#     address_patterns = [
+#         r'([A-Z].*,\sBirmingham,\sAL\s\d{5})',
+#         r'(\d{4}\s.*,\sBirmingham,\sAL\s\d{5})',
+#         r'([A-Z].*,\sAL\s\d{5})',
+#         r'([A-Z].*,.*\sSt,\sBirmingham,\sAL\s\d{5})',
+#         r'([A-Z].*,.*\sStreets,\sBirmingham,\sAL\s\d{5})',
+#         r'(\d{2}.*\sStreets)',
+#         r'([A-Z].*\s\d{2},\sBirmingham,\sAL\s\d{5})',
+#         r'([a-zA-Z]\s Birmingham)'
+#     ]
+#     addresses = []
+#     for pattern in address_patterns:
+#         addresses.extend(re.findall(pattern, response))
+#     return addresses
+
+# all_addresses = []
+
+# def generate_map(location_names):
+#     global all_addresses
+#     all_addresses.extend(location_names)
+
+#     api_key = os.environ['GOOGLEMAPS_API_KEY']
+#     gmaps = GoogleMapsClient(key=api_key)
+
+#     m = folium.Map(location=[33.5175,-86.809444], zoom_start=16)
+
+#     for location_name in all_addresses:
+#         geocode_result = gmaps.geocode(location_name)
+#         if geocode_result:
+#             location = geocode_result[0]['geometry']['location']
+#             folium.Marker(
+#                 [location['lat'], 'lng'],
+#                 tooltip=f"{geocode_result[0]['formatted_address']}"
+#             ).add_to(m)
+
+#     map_html = m._repr_html_()
+#     return map_html
+
+# def fetch_local_news():
+#     api_key = os.environ['SERP_API']
+#     url = f'https://serpapi.com/search.json?engine=google_news&q=birmingham headline&api_key={api_key}'
+#     response = requests.get(url)
+#     if response.status_code == 200:
+#         results = response.json().get("news_results", [])
+#         news_html = """
+#         <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Birmingham Today</h2>
+#         <style>
+#             .news-item {
+#                 font-family: 'Verdana', sans-serif;
+#                 color: #333;
+#                 background-color: #f0f8ff;
+#                 margin-bottom: 15px;
+#                 padding: 10px;
+#                 border-radius: 5px;
+#                 transition: box-shadow 0.3s ease, background-color 0.3s ease;
+#                 font-weight: bold;
+#             }
+#             .news-item:hover {
+#                 box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+#                 background-color: #e6f7ff;
+#             }
+#             .news-item a {
+#                 color: #1E90FF;
+#                 text-decoration: none;
+#                 font-weight: bold;
+#             }
+#             .news-item a:hover {
+#                 text-decoration: underline;
+#             }
+#             .news-preview {
+#                 position: absolute;
+#                 display: none;
+#                 border: 1px solid #ccc;
+#                 border-radius: 5px;
+#                 box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
+#                 background-color: white;
+#                 z-index: 1000;
+#                 max-width: 300px;
+#                 padding: 10px;
+#                 font-family: 'Verdana', sans-serif;
+#                 color: #333;
+#             }
+#         </style>
+#         <script>
+#             function showPreview(event, previewContent) {
+#                 var previewBox = document.getElementById('news-preview');
+#                 previewBox.innerHTML = previewContent;
+#                 previewBox.style.left = event.pageX + 'px';
+#                 previewBox.style.top = event.pageY + 'px';
+#                 previewBox.style.display = 'block';
+#             }
+#             function hidePreview() {
+#                 var previewBox = document.getElementById('news-preview');
+#                 previewBox.style.display = 'none';
+#             }
+#         </script>
+#         <div id="news-preview" class="news-preview"></div>
+#         """
+#         for index, result in enumerate(results[:7]):
+#             title = result.get("title", "No title")
+#             link = result.get("link", "#")
+#             snippet = result.get("snippet", "")
+#             news_html += f"""
+#             <div class="news-item" onmouseover="showPreview(event, '{snippet}')" onmouseout="hidePreview()">
+#                 <a href='{link}' target='_blank'>{index + 1}. {title}</a>
+#                 <p>{snippet}</p>
+#             </div>
+#             """
+#         return news_html
+#     else:
+#         return "<p>Failed to fetch local news</p>"
+
+# import numpy as np
+# import torch
+# from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
+
+# model_id = 'openai/whisper-large-v3'
+# device = "cuda:0" if torch.cuda.is_available() else "cpu"
+# torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+# model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype).to(device)
+# processor = AutoProcessor.from_pretrained(model_id)
+
+# pipe_asr = pipeline("automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, chunk_length_s=15, batch_size=16, torch_dtype=torch_dtype, device=device, return_timestamps=True)
+
+# base_audio_drive = "/data/audio"
+
+# def transcribe_function(stream, new_chunk):
+#     try:
+#         sr, y = new_chunk[0], new_chunk[1]
+#     except TypeError:
+#         print(f"Error chunk structure: {type(new_chunk)}, content: {new_chunk}")
+#         return stream, "", None
+
+#     y = y.astype(np.float32) / np.max(np.abs(y))
+
+#     if stream is not None:
+#         stream = np.concatenate([stream, y])
+#     else:
+#         stream = y
+
+#     result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
+
+#     full_text = result.get("text", "")
+
+#     return stream, full_text, result
+
+# def update_map_with_response(history):
+#     if not history:
+#         return ""
+#     response = history[-1][1]
+#     addresses = extract_addresses(response)
+#     return generate_map(addresses)
+
+# def clear_textbox():
+#     return ""
+
+# def show_map_if_details(history,choice):
+#     if choice in ["Details", "Conversational"]:
+#         return gr.update(visible=True), update_map_with_response(history)
+#     else:
+#         return gr.update(visible=False), ""
+
+# def generate_audio_elevenlabs(text):
+#     XI_API_KEY = os.environ['ELEVENLABS_API']
+#     VOICE_ID = 'd9MIrwLnvDeH7aZb61E9'
+#     tts_url = f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}/stream"
+#     headers = {
+#         "Accept": "application/json",
+#         "xi-api-key": XI_API_KEY
+#     }
+#     data = {
+#         "text": str(text),
+#         "model_id": "eleven_multilingual_v2",
+#         "voice_settings": {
+#             "stability": 1.0,
+#             "similarity_boost": 0.0,
+#             "style": 0.60,
+#             "use_speaker_boost": False
+#         }
+#     }
+#     response = requests.post(tts_url, headers=headers, json=data, stream=True)
+#     if response.ok:
+#         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
+#             for chunk in response.iter_content(chunk_size=1024):
+#                 f.write(chunk)
+#             temp_audio_path = f.name
+#         logging.debug(f"Audio saved to {temp_audio_path}")
+#         return temp_audio_path
+#     else:
+#         logging.error(f"Error generating audio: {response.text}")
+#         return None
+
+# repo_id = "parler-tts/parler-tts-mini-expresso"
+
+# parler_model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
+# parler_tokenizer = AutoTokenizer.from_pretrained(repo_id)
+# parler_feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
+
+# SAMPLE_RATE = parler_feature_extractor.sampling_rate
+# SEED = 42
+
+# def preprocess(text):
+#     number_normalizer = EnglishNumberNormalizer()
+#     text = number_normalizer(text).strip()
+#     if text[-1] not in punctuation:
+#         text = f"{text}."
+
+#     abbreviations_pattern = r'\b[A-Z][A-Z\.]+\b'
+
+#     def separate_abb(chunk):
+#         chunk = chunk.replace(".", "")
+#         return " ".join(chunk)
+
+#     abbreviations = re.findall(abbreviations_pattern, text)
+#     for abv in abbreviations:
+#         if abv in text:
+#             text = text.replace(abv, separate_abb(abv))
+#     return text
+
+# def chunk_text(text, max_length=250):
+#     words = text.split()
+#     chunks = []
+#     current_chunk = []
+#     current_length = 0
+
+#     for word in words:
+#         if current_length + len(word) + 1 <= max_length:
+#             current_chunk.append(word)
+#             current_length += len(word) + 1
+#         else:
+#             chunks.append(' '.join(current_chunk))
+#             current_chunk = [word]
+#             current_length = len(word) + 1
+
+#     if current_chunk:
+#         chunks.append(' '.join(current_chunk))
+
+#     return chunks
+
+# def generate_audio_parler_tts(text):
+#     description = "Thomas speaks with emphasis and excitement at a moderate pace with high quality."
+#     chunks = chunk_text(preprocess(text))
+#     audio_segments = []
+
+#     for chunk in chunks:
+#         inputs = parler_tokenizer(description, return_tensors="pt").to(device)
+#         prompt = parler_tokenizer(chunk, return_tensors="pt").to(device)
+
+#         set_seed(SEED)
+#         generation = parler_model.generate(input_ids=inputs.input_ids, prompt_input_ids=prompt.input_ids)
+#         audio_arr = generation.cpu().numpy().squeeze()
+
+#         temp_audio_path = os.path.join(tempfile.gettempdir(), f"parler_tts_audio_{len(audio_segments)}.wav")
+#         write_wav(temp_audio_path, SAMPLE_RATE, audio_arr)
+#         audio_segments.append(AudioSegment.from_wav(temp_audio_path))
+
+#     combined_audio = sum(audio_segments)
+#     combined_audio_path = os.path.join(tempfile.gettempdir(), "parler_tts_combined_audio.wav")
+#     combined_audio.export(combined_audio_path, format="wav")
+
+#     logging.debug(f"Audio saved to {combined_audio_path}")
+#     return combined_audio_path
+
+# # Load the MARS5 model
+# mars5, config_class = torch.hub.load('Camb-ai/mars5-tts', 'mars5_english', trust_repo=True)
+
+# def generate_audio_mars5(text):
+#     description = "Thomas speaks with emphasis and excitement at a moderate pace with high quality."
+#     kwargs_dict = {
+#         'temperature': 0.8,
+#         'top_k': -1,
+#         'top_p': 0.2,
+#         'typical_p': 1.0,
+#         'freq_penalty': 2.6,
+#         'presence_penalty': 0.4,
+#         'rep_penalty_window': 100,
+#         'max_prompt_phones': 360,
+#         'deep_clone': True,
+#         'nar_guidance_w': 3
+#     }
+
+#     chunks = chunk_text(preprocess(text))
+#     audio_segments = []
+
+#     for chunk in chunks:
+#         wav = torch.zeros(1, mars5.sr)  # Use a placeholder silent audio for the reference
+#         cfg = config_class(**{k: kwargs_dict[k] for k in kwargs_dict if k in config_class.__dataclass_fields__})
+#         ar_codes, wav_out = mars5.tts(chunk, wav, "", cfg=cfg)
+
+
+#         temp_audio_path = os.path.join(tempfile.gettempdir(), f"mars5_audio_{len(audio_segments)}.wav")
+#         torchaudio.save(temp_audio_path, wav_out.unsqueeze(0), mars5.sr)
+#         audio_segments.append(AudioSegment.from_wav(temp_audio_path))
+
+#     combined_audio = sum(audio_segments)
+#     combined_audio_path = os.path.join(tempfile.gettempdir(), "mars5_combined_audio.wav")
+#     combined_audio.export(combined_audio_path, format="wav")
+
+#     logging.debug(f"Audio saved to {combined_audio_path}")
+#     return combined_audio_path
+
+# pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16)
+# pipe.to(device)
+
+# def generate_image(prompt):
+#     with torch.cuda.amp.autocast():
+#         image = pipe(
+#             prompt,
+#             num_inference_steps=28,
+#             guidance_scale=3.0,
+#         ).images[0]
+#     return image
+
+# hardcoded_prompt_1 = "Give a high quality photograph of a great looking red 2026 Bentley coupe against a skyline setting in the night, michael mann style in omaha enticing the consumer to buy this product"
+# hardcoded_prompt_2 = "A vibrant and dynamic football game scene in the style of Peter Paul Rubens, showcasing the intense match between Alabama and Nebraska. The players are depicted with the dramatic, muscular physiques and expressive faces typical of Rubens' style. The Alabama team is wearing their iconic crimson and white uniforms, while the Nebraska team is in their classic red and white attire. The scene is filled with action, with players in mid-motion, tackling, running, and catching the ball. The background features a grand stadium filled with cheering fans, banners, and the natural landscape in the distance. The colors are rich and vibrant, with a strong use of light and shadow to create depth and drama. The overall atmosphere captures the intensity and excitement of the game, infused with the grandeur and dynamism characteristic of Rubens' work."
+# hardcoded_prompt_3 = "Create a high-energy scene of a DJ performing on a large stage with vibrant lights, colorful lasers, a lively dancing crowd, and various electronic equipment in the background."
+
+# def update_images():
+#     image_1 = generate_image(hardcoded_prompt_1)
+#     image_2 = generate_image(hardcoded_prompt_2)
+#     image_3 = generate_image(hardcoded_prompt_3)
+#     return image_1, image_2, image_3
+
+# with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
+#     with gr.Row():
+#         with gr.Column():
+#             state = gr.State()
+
+#             chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
+#             choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
+
+#             gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
+#             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
+#             chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
+#             tts_choice = gr.Radio(label="Select TTS System", choices=["Eleven Labs", "Parler-TTS", "MARS5"], value="Eleven Labs")
+#             bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)])
+#             bot_msg.then(lambda: gr.Textbox(value="", interactive=True, placeholder="Ask Radar!!!...", show_label=False), None, [chat_input])
+#             chatbot.like(print_like_dislike, None, None)
+#             clear_button = gr.Button("Clear")
+#             clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)
+
+#             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
+#             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="SAMLOne_real_time")
+
+#         with gr.Column():
+#             image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
+#             image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
+#             image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)
+
+#             refresh_button = gr.Button("Refresh Images")
+#             refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
+
+# demo.queue()
+# demo.launch(share=True)
+
+
 import gradio as gr
 import requests
 import os
@@ -263,7 +904,8 @@ def bot(history, choice, tts_choice):
             audio_future = executor.submit(generate_audio_parler_tts, response)
         elif tts_choice == "MARS5":
            audio_future = executor.submit(generate_audio_mars5, response)
-
+        elif tts_choice == "Coqui":
+            audio_future = executor.submit(generate_audio_coqui, response)
 
         for character in response:
            history[-1][1] += character
@@ -314,7 +956,7 @@ def generate_map(location_names):
         if geocode_result:
             location = geocode_result[0]['geometry']['location']
             folium.Marker(
-                [location['lat'], 'lng'],
+                [location['lat'], location['lng']],
                 tooltip=f"{geocode_result[0]['formatted_address']}"
             ).add_to(m)
 
@@ -331,8 +973,7 @@ def fetch_local_news():
         <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Birmingham Today</h2>
         <style>
             .news-item {
-                font-family: 'Verdana', sans-serif;
-                color: #333;
+                font-family: 'Verdana', sans-serif; color: #333;
                 background-color: #f0f8ff;
                 margin-bottom: 15px;
                 padding: 10px;
@@ -584,6 +1225,75 @@ def generate_audio_mars5(text):
     logging.debug(f"Audio saved to {combined_audio_path}")
     return combined_audio_path
 
+# Initialize Coqui XTTS
+os.system('python -m unidic download')
+os.environ["COQUI_TOS_AGREED"] = "1"
+
+from TTS.api import TTS
+from TTS.tts.configs.xtts_config import XttsConfig
+from TTS.tts.models.xtts import Xtts
+from TTS.utils.generic_utils import get_user_data_dir
+from huggingface_hub import HfApi
+
+api = HfApi(token=hf_token)
+repo_id = "coqui/xtts"
+
+model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
+ModelManager().download_model(model_name)
+model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
+config = XttsConfig()
+config.load_json(os.path.join(model_path, "config.json"))
+model = Xtts.init_from_config(config)
+model.load_checkpoint(
+    config,
+    checkpoint_path=os.path.join(model_path, "model.pth"),
+    vocab_path=os.path.join(model_path, "vocab.json"),
+    eval=True,
+    use_deepspeed=True,
+)
+model.cuda()
+
+def generate_audio_coqui(text):
+    language = "en"
+    use_mic = False
+    voice_cleanup = False
+    no_lang_auto_detect = False
+    agree = True
+    mic_file_path = None
+    audio_file_pth = None
+
+    if agree:
+        if language not in config.languages:
+            raise Exception("Language not supported")
+
+        if use_mic:
+            if mic_file_path is not None:
+                speaker_wav = mic_file_path
+            else:
+                raise Exception("Microphone input required")
+        else:
+            speaker_wav = audio_file_pth
+
+        lowpass_highpass = "lowpass=8000,highpass=75," if voice_cleanup else ""
+        trim_silence = "areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02," if voice_cleanup else ""
+
+        out_filename = speaker_wav + str(uuid.uuid4()) + ".wav" if voice_cleanup else speaker_wav
+        if voice_cleanup:
+            shell_command = f"./ffmpeg -y -i {speaker_wav} -af {lowpass_highpass}{trim_silence} {out_filename}".split(" ")
+            subprocess.run([item for item in shell_command], capture_output=False, text=True, check=True)
+            speaker_wav = out_filename
+
+        t_latent = time.time()
+        gpt_cond_latent, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav, gpt_cond_len=30, gpt_cond_chunk_len=4, max_ref_length=60)
+        latent_calculation_time = time.time() - t_latent
+
+        prompt = re.sub("([^\x00-\x7F]|\w)(\.|\。|\?)", r"\1 \2\2", text)
+        out = model.inference(prompt, language, gpt_cond_latent, speaker_embedding, repetition_penalty=5.0, temperature=0.75)
+        torchaudio.save("output.wav", torch.tensor(out["wav"]).unsqueeze(0), 24000)
+        return "output.wav"
+    else:
+        raise Exception("Terms & Conditions not accepted")
+
 pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16)
 pipe.to(device)
 
@@ -617,7 +1327,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
             gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
             chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
-            tts_choice = gr.Radio(label="Select TTS System", choices=["Eleven Labs", "Parler-TTS", "MARS5"], value="Eleven Labs")
+            tts_choice = gr.Radio(label="Select TTS System", choices=["Eleven Labs", "Parler-TTS", "MARS5", "Coqui"], value="Eleven Labs")
             bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)])
             bot_msg.then(lambda: gr.Textbox(value="", interactive=True, placeholder="Ask Radar!!!...", show_label=False), None, [chat_input])
            chatbot.like(print_like_dislike, None, None)
@@ -659,6 +1369,7 @@ demo.launch(share=True)
 
 
 
+
 
 
 # import gradio as gr
|