import gradio as gr
from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
from sentence_transformers import SentenceTransformer
from gtts import gTTS
import os
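
# Virtual Psychologist Assistant
# A Gradio app that classifies the sentiment and emotion of the user's message,
# pairs the result with a quote from Viktor Frankl's "Man’s Search for Meaning",
# and returns the reply as both text and gTTS-generated audio.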
# Load models
def load_model():
    try:
        # Sentiment Analysis Pipeline
        sentiment_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
        sentiment_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
        sentiment_analyzer = pipeline("sentiment-analysis", model=sentiment_model, tokenizer=sentiment_tokenizer)

        # Emotion Detection ("text-classification" is the generic task; the model returns emotion labels)
        emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
        emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
        emotion_analyzer = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)

        # Sentence Transformers for Book Insights (loaded for future use; not yet used in response generation)
        book_model = SentenceTransformer('all-MiniLM-L6-v2')

        return sentiment_analyzer, emotion_analyzer, book_model
    except Exception as e:
        print(f"Error loading models: {str(e)}")
        return None, None, None
# Text-to-Speech Function
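# Note: gTTS synthesizes speech through Google's online TTS service, so this call requires network access.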
def text_to_speech(text):
    try:
        tts = gTTS(text, lang='en')
        audio_file = "response.mp3"
        tts.save(audio_file)
        return audio_file
    except Exception as e:
        print(f"Error in text-to-speech conversion: {str(e)}")
        return None
# Response Generation Logic with empathetic responses
def virtual_psychologist_assistant(user_input, sentiment_analyzer, emotion_analyzer, book_model):
    try:
        # Sentiment and emotion analysis
        sentiment = sentiment_analyzer(user_input)[0]['label']
        emotion = emotion_analyzer(user_input)[0]['label']

        # Predefined responses for sentiment labels.
        # Note: the SST-2 sentiment model only returns "POSITIVE" or "NEGATIVE";
        # any other label falls through to the default reply below.
        response_templates = {
            "POSITIVE": "I’m glad to hear you're feeling well. Could you share more about what's bringing you joy?",
            "NEGATIVE": "I'm here for you. It sounds like things might be tough right now. Want to tell me more?",
            "NEUTRAL": "I'm here to listen. Feel free to share whatever's on your mind."
        }

        # Choose response based on sentiment
        response = response_templates.get(sentiment, "Thank you for sharing. Can you tell me more?")

        # Book insights for added context
        book_quotes = {
            "Man’s Search for Meaning": [
                "The meaning of life is to give life meaning.",
                "Those who have a why to live, can bear almost any how."
            ]
        }
        quote = book_quotes["Man’s Search for Meaning"][0] if sentiment == "POSITIVE" else book_quotes["Man’s Search for Meaning"][1]

        # Combine final response
        final_response = (
            f"Emotion Detected: {emotion}\nSentiment: {sentiment}\n\n"
            f"Psychologist’s Insight: {quote} - Viktor E. Frankl\n\n"
            f"Response: {response}"
        )

        # Convert to audio
        audio_file = text_to_speech(final_response)
        return final_response, audio_file
    except Exception as e:
        print(f"Error during response generation: {str(e)}")
        return f"An error occurred: {str(e)}", None
# Gradio Interface
# Load the models once at startup so they are not re-initialized on every request
sentiment_analyzer, emotion_analyzer, book_model = load_model()

def gradio_interface(user_input):
    if sentiment_analyzer is None or emotion_analyzer is None or book_model is None:
        return "Error loading models. Please try again later.", None
    return virtual_psychologist_assistant(user_input, sentiment_analyzer, emotion_analyzer, book_model)
# Create Gradio interface
iface = gr.Interface(
    fn=gradio_interface,
    inputs="text",
    outputs=["text", "audio"],
    title="Virtual Psychologist Assistant",
    description="A virtual psychologist that analyzes emotions and provides meaningful insights.",
    live=False  # live=False so the app runs only when the user clicks Submit
)
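# Launch the Gradio UI. Running `python app.py` locally starts a local server;
# a hosted Gradio Space typically executes this file automatically on startup.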
iface.launch()