import streamlit as st
from st_audiorec import st_audiorec
from Modules.Speech2Text.transcribe import transcribe
import base64

st.set_page_config(layout="wide", initial_sidebar_state="collapsed")

# Create two columns
col1, col2 = st.columns(2)

video_uploaded = None

# First column containers
with col1:
    st.subheader("Audio Recorder")
    recorded = False
    temp_path = 'data/temp_audio/audio_file.wav'
    wav_audio_data = st_audiorec()
    if wav_audio_data is not None:
        # Write the recorded audio data to a temporary file
        with open(temp_path, 'wb') as f:
            f.write(wav_audio_data)
        # Transcribe the recording into a text instruction
        instruction = transcribe(temp_path)
        print(instruction)
        recorded = True

    st.subheader("LLM answering")
    if recorded:
        # Keep the chat history in the session state
        if "messages" not in st.session_state:
            st.session_state.messages = []
        # Replay the conversation so far
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])
        # Append and display the transcribed instruction as the user message
        st.session_state.messages.append({"role": "user", "content": instruction})
        with st.chat_message("user"):
            st.markdown(instruction)
        with st.chat_message("assistant"):
            # Build answer from LLM
            response = " to be DEFINED "  # TO DO
            st.markdown(response)
        st.session_state.messages.append({"role": "assistant", "content": response})

    st.subheader("Movement Analysis")
    # TO DO

# Second column containers
with col2:
    st.subheader("Sports Agenda")
    # TO DO

    st.subheader("Video Analysis")
    ask_video = st.empty()
    if video_uploaded is None:
        video_uploaded = ask_video.file_uploader("Choose a video file", type=["mp4", "ogg", "webm"])
    if video_uploaded:
        ask_video.empty()
        with st.spinner("Processing video"):
            pass  # TO DO
        _left, mid, _right = st.columns(3)
        with mid:
            st.video(video_uploaded)

    st.subheader("Graph Displayer")
    # TO DO