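# Streamlit app: upload an image, ask a question about it, and hear the answer.
# Pipeline: ViLT visual question answering -> SpeechT5 text-to-speech.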
import os

# Install dependencies listed in requirements.txt at startup.
os.system('pip install -r requirements.txt')
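# A plausible requirements.txt for the imports below (an assumption; the actual
# file is not shown here):
#   streamlit
#   transformers
#   datasets
#   torch
#   soundfile
#   Pillow
#   sentencepiece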
import io

import streamlit as st
import torch
import soundfile as sf
from datasets import load_dataset
from PIL import Image
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, pipeline
st.title('Image to text and then text to speech app')
image = st.file_uploader("Upload an image", type=["jpg", "png"])
question = st.text_input(
    label="Enter your question",
    value="How many people are there, and what is the color of this image?",
)
def generate_speech(text):
    """Synthesize `text` to speech.wav using SpeechT5 with a HiFi-GAN vocoder."""
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
    inputs = processor(text=text, return_tensors="pt")
    # Use a fixed speaker embedding (x-vector) from the CMU ARCTIC dataset.
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    sf.write("speech.wav", speech.numpy(), samplerate=16000)
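# Note: the three SpeechT5 checkpoints above are re-loaded on every click.
# A cached loader would avoid that; a minimal sketch, assuming a Streamlit
# version that provides st.cache_resource:
#
# @st.cache_resource
# def load_tts_models():
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
#     vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
#     return processor, model, vocoder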
if st.button("Generate"):
    if image is None:
        st.warning("Please upload an image first.")
    else:
        pil_image = Image.open(io.BytesIO(image.getvalue()))
        # Answer the question about the image with a ViLT VQA model.
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        vqa_result = vqa_pipeline({"image": pil_image, "question": question})
        answer = vqa_result[0]['answer']
        st.write(f"Question: {question} Answer: {answer}")  # display the answer
        # Speak the answer and play it back in the app.
        generate_speech(f"Question: {question}, Answer: {answer}")
        with open("speech.wav", 'rb') as audio_file:
            audio_bytes = audio_file.read()
        st.audio(audio_bytes, format="audio/wav")
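# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py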