# Hugging Face Space: Darija (Moroccan Arabic) speech-to-text demo.
# Loads a fine-tuned Wav2Vec2 model and serves it through a Gradio UI.
import gradio as gr
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
# Load the pre-trained Wav2Vec2 model fine-tuned for Moroccan Darija.
# Both the processor (feature extraction + tokenizer) and the CTC model are
# downloaded from the Hugging Face Hub on first run, then cached locally.
MODEL_ID = "boumehdi/wav2vec2-large-xlsr-moroccan-darija"
processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
# Function to process the audio file and return its transcription.
def transcribe_audio(audio_file):
    """Transcribe a Darija audio file to text with greedy CTC decoding.

    Args:
        audio_file: Path to an audio file (Gradio passes a filepath string).

    Returns:
        The decoded transcription string.
    """
    # Load the waveform as a float tensor in [-1, 1]; shape is (channels, frames).
    waveform, sampling_rate = torchaudio.load(audio_file, normalize=True)

    # Downmix multi-channel audio to mono. A bare squeeze() would leave a
    # stereo file as a 2-D tensor that the processor cannot handle.
    if waveform.dim() > 1 and waveform.size(0) > 1:
        waveform = waveform.mean(dim=0)
    else:
        waveform = waveform.squeeze()

    # Wav2Vec2 XLSR models are trained on 16 kHz audio; resample anything else,
    # otherwise transcription quality collapses (or the processor rejects it).
    target_rate = 16_000
    if sampling_rate != target_rate:
        waveform = torchaudio.functional.resample(waveform, sampling_rate, target_rate)
        sampling_rate = target_rate

    # Convert the waveform into the model's expected input features.
    input_values = processor(waveform, sampling_rate=sampling_rate, return_tensors="pt").input_values

    # Inference only: no gradients needed.
    with torch.no_grad():
        logits = model(input_values).logits

    # Greedy CTC decoding: argmax per frame, then collapse repeats/blanks.
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)
    return transcription[0]
# Create the Gradio interface wiring the transcription function to the UI.
interface = gr.Interface(
    fn=transcribe_audio,                   # Function called on each submission
    inputs=gr.Audio(type="filepath"),      # Upload/record audio; handed over as a filepath
    outputs="text",                        # Plain-text transcription output
    title="Darija ASR Transcription",
    description="Upload an audio file in Darija, and the ASR model will transcribe it into text."
)

# Launch the app only when run as a script (not when imported as a module).
if __name__ == "__main__":
    interface.launch()