import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import requests
from bs4 import BeautifulSoup


# Sentiment analysis: the default "sentiment-analysis" pipeline model
sentiment_pipeline = pipeline("sentiment-analysis")

def get_sentiment(text):
    result = sentiment_pipeline(text)[0]
    sentiment = result['label']
    confidence = result['score']
    return sentiment, confidence
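
# Example of the returned pair (assumed output; the pipeline's default English sentiment
# model labels text as POSITIVE or NEGATIVE):
#   get_sentiment("I love this!")  ->  ("POSITIVE", 0.99...)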


# Summarization: fetch a page, pull out its <article> element, and summarize the text
summary_pipeline = pipeline("summarization", model="Falconsai/text_summarization")

def summarize_url(url):
    try:
        data = requests.get(url, timeout=30)  # timeout so an unresponsive host cannot hang the request
        soup = BeautifulSoup(data.content, "html.parser")
        article = soup.find("article")
        if article:
            text = article.text.strip()
            summary = summary_pipeline(text, max_length=512, truncation=True)[0]['summary_text']
            return summary
        else:
            return "Could not find an article at the provided URL."
    except Exception as e:
        return f"Error: {str(e)}"


# Speech recognition: OpenAI Whisper (small checkpoint) via the ASR pipeline
transcription_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-small")

def transcribe_audio(audio_file):
    try:
        transcription = transcription_pipeline(audio_file)["text"]
        return transcription
    except Exception as e:
        return f"Error during transcription: {str(e)}"


# Chatbot: DialoGPT-medium with sampled generation
chatbot_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
chatbot_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

def generate_response(message, history):
    # "history" is accepted for the caller's convenience but not used here;
    # replies are generated from the latest message only.
    input_ids = chatbot_tokenizer.encode(message + chatbot_tokenizer.eos_token, return_tensors="pt")

    response_ids = chatbot_model.generate(
        input_ids,
        max_length=1000,
        pad_token_id=chatbot_tokenizer.eos_token_id,
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=100,
        top_p=0.7,
        temperature=0.8
    )

    # Decode only the newly generated tokens; decoding the whole sequence would echo the prompt
    response = chatbot_tokenizer.decode(response_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)
    return response
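
# Optional sketch, not used by the UI: DialoGPT can condition on earlier turns by concatenating
# the encoded history in front of the new message. The function name and the "completed turns
# only" filtering are assumptions layered on top of the single-turn generate_response above.
import torch

def generate_response_with_history(message, history):
    # Encode each completed (user, bot) turn, every segment terminated by the EOS token
    segments = []
    for user_turn, bot_turn in history:
        if bot_turn is None:
            continue  # skip the pending turn that has no reply yet
        segments.append(chatbot_tokenizer.encode(user_turn + chatbot_tokenizer.eos_token, return_tensors="pt"))
        segments.append(chatbot_tokenizer.encode(bot_turn + chatbot_tokenizer.eos_token, return_tensors="pt"))
    segments.append(chatbot_tokenizer.encode(message + chatbot_tokenizer.eos_token, return_tensors="pt"))
    input_ids = torch.cat(segments, dim=-1)

    response_ids = chatbot_model.generate(
        input_ids,
        max_length=1000,
        pad_token_id=chatbot_tokenizer.eos_token_id,
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=100,
        top_p=0.7,
        temperature=0.8
    )
    # Return only the tokens generated after the concatenated prompt
    return chatbot_tokenizer.decode(response_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)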


with gr.Blocks() as interface:
    gr.Markdown("# NLP Jyad's KIT: Sentiment Analysis, Summarization, Speech Recognition & Chatbot")

    with gr.Tabs():
        with gr.Tab("Sentiment Analysis"):
            gr.Markdown("Enter a sentence to analyze its sentiment and confidence score.")
            text_input = gr.Textbox(label="Enter text")
            sentiment_output = gr.Textbox(label="Sentiment")
            confidence_output = gr.Textbox(label="Confidence Score")
            analyze_button = gr.Button("Analyze")
            analyze_button.click(get_sentiment, inputs=text_input, outputs=[sentiment_output, confidence_output])

        with gr.Tab("Summarization"):
            gr.Markdown("Enter a news article URL to get a summary.")
            url_input = gr.Textbox(label="Article URL")
            summary_output = gr.Textbox(label="Summary", lines=5)
            summarize_button = gr.Button("Summarize")
            summarize_button.click(summarize_url, inputs=url_input, outputs=summary_output)

        with gr.Tab("Speech Recognition"):
            gr.Markdown("Upload an audio file for transcription.")
            audio_input = gr.Audio(label="Upload Audio", type="filepath")
            transcription_output = gr.Textbox(label="Transcription", lines=3)
            transcribe_button = gr.Button("Transcribe")
            transcribe_button.click(transcribe_audio, inputs=audio_input, outputs=transcription_output)

with gr.Tab("Chatbot"): |
|
gr.Markdown("Have a conversation with the AI chatbot.") |
|
chatbot = gr.Chatbot( |
|
label="Chat History", |
|
height=400 |
|
) |
|
msg = gr.Textbox( |
|
label="Type your message", |
|
placeholder="Type your message here...", |
|
show_label=False |
|
) |
|
clear = gr.Button("Clear") |
|
|
|
def user(user_message, history): |
|
return "", history + [[user_message, None]] |
|
|
|
def bot(history): |
|
user_message = history[-1][0] |
|
bot_message = generate_response(user_message, history) |
|
history[-1][1] = bot_message |
|
return history |
|
|
|
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( |
|
bot, chatbot, chatbot |
|
) |
|
clear.click(lambda: None, None, chatbot, queue=False) |
|
|
|
interface.launch() |