import os

# Install the app's dependencies at startup (a workaround used here in place of a proper build step)
os.system('pip install -r requirements.txt')
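# Assumed contents of requirements.txt (the file itself is not shown here):
# streamlit, transformers, torch, datasets, soundfile, sentencepiece, Pillow.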

import io

import streamlit as st
import torch
import soundfile as sf
from datasets import load_dataset
from PIL import Image
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, pipeline

st.title('Image question answering and text-to-speech app')


image = st.file_uploader("Upload an image", type=["jpg", "png"])

question = st.text_input(
    label="Enter your question",
    value="How many people and what is the color of this image?",
)

def generate_speech(text):
    """Synthesize `text` with SpeechT5 and write the audio to speech.wav."""
    # Load the SpeechT5 text-to-speech model and the HiFi-GAN vocoder
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
    inputs = processor(text=text, return_tensors="pt")

    # Use a fixed speaker embedding (x-vector) from the CMU ARCTIC dataset
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)

    # Save the generated waveform as a 16 kHz WAV file
    sf.write("speech.wav", speech.numpy(), samplerate=16000)
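# Note: the models above are re-loaded on every call; caching them (for example
# with Streamlit's st.cache_resource) would speed up repeated generations.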

if st.button("Generate"):
    if image is None:
        st.error("Please upload an image first.")
    else:
        # Answer the question about the uploaded image with a ViLT VQA model
        pil_image = Image.open(io.BytesIO(image.getvalue()))
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        vqa_result = vqa_pipeline({"image": pil_image, "question": question})
        answer = vqa_result[0]["answer"]
        st.write(f"Question: {question} Answer: {answer}")  # display the answer
        # Speak the answer and play it back in the app
        generate_speech(f"Question: {question}, Answer: {answer}")
        with open("speech.wav", "rb") as audio_file:
            audio_bytes = audio_file.read()
        st.audio(audio_bytes, format="audio/wav")