import streamlit as st
import requests
from PIL import Image
from io import BytesIO
# from IPython.display import display
import base64
import time

# Set the page layout to 'wide'
st.set_page_config(layout="wide")


# helper: decode a base64-encoded image string into a PIL Image
def decode_base64_image(image_string):
    base64_image = base64.b64decode(image_string)
    buffer = BytesIO(base64_image)
    return Image.open(buffer)


# resize a PIL image for display
def display_image(image=None, width=500, height=500):
    img = image.resize((width, height))
    return img


# API Gateway endpoint URL
api_url = 'https://a02q342s5b.execute-api.us-east-2.amazonaws.com/reinvent-demo-inf2-sm-20231114'

# Define the CSS to change the text input background color
input_field_style = """
"""

# Inject custom styles into the Streamlit app
st.markdown(input_field_style, unsafe_allow_html=True)

# Creating Tabs
tab1, tab2 = st.tabs(["Image Generation", "Architecture"])

with tab1:
    # Create two columns for layout
    left_column, right_column = st.columns(2)

    # ===========
    with left_column:
        # Define Streamlit UI elements
        st.title('Stable Diffusion XL Image Generation with AWS Inferentia')

        prompt_one = st.text_area(
            "Enter your prompt:",
            "Raccoon astronaut in space, sci-fi, future, cold color palette, muted colors, detailed, 8k",
        )

        # Number of inference steps
        num_inference_steps_one = st.slider(
            "Number of Inference Steps",
            min_value=1,
            max_value=100,
            value=30,
            help="More steps can improve quality, with diminishing returns; 30-50 is usually a good range, but your mileage may vary.",
        )

        # Create an expandable section for optional parameters
        with st.expander("Optional Parameters"):
            # Random seed input
            seed_one = st.number_input(
                "Random seed",
                value=555,
                help="Keep the same value to reproduce an image for identical inputs; change it to generate a different image for the same inputs.",
            )

            # Negative prompt input
            negative_prompt_one = st.text_area(
                "Enter your negative prompt:",
                "cartoon, graphic, text, painting, crayon, graphite, abstract glitch, blurry",
            )

        if st.button('Generate Image'):
            with st.spinner(f'Generating Image with {num_inference_steps_one} iterations'):
                with right_column:
                    start_time = time.time()

                    # ===============
                    # Example input data
                    prompt_input_one = {
                        "prompt": prompt_one,
                        "parameters": {
                            "num_inference_steps": num_inference_steps_one,
                            "seed": seed_one,
                            "negative_prompt": negative_prompt_one,
                        },
                    }

                    # Make API request
                    response_one = requests.post(api_url, json=prompt_input_one)

                    # Process and display the response
                    if response_one.status_code == 200:
                        result_one = response_one.json()
                        # st.success(f"Prediction result: {result}")
                        image_one = display_image(decode_base64_image(result_one["generated_images"][0]))
                        st.image(image_one, caption=prompt_one)

                        end_time = time.time()
                        total_time = round(end_time - start_time, 2)

                        st.text(f"Prompt: {prompt_one}")
                        st.text(f"Number of Iterations: {num_inference_steps_one}")
                        st.text(f"Random Seed: {seed_one}")
                        st.text(f'Total time taken: {total_time} seconds')

                        # Calculate and display the average time per iteration in seconds
                        time_per_iteration = total_time / num_inference_steps_one
                        st.text(f'Time per iteration: {time_per_iteration:.2f} seconds')
                    else:
                        st.error(f"Error: {response_one.text}")

with tab2:
    # ===========
    # Define Streamlit UI elements
    st.title('Architecture')

    st.image('./architecture.png', caption="Application Architecture")
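

# ---------------------------------------------------------------------------
# Optional: a minimal sketch for smoke-testing the same endpoint outside of
# Streamlit. It reuses the request/response contract the app above already
# assumes ("prompt"/"parameters" in the request body, "generated_images"
# holding base64-encoded strings in the response). The function name, prompt
# text, and output filename are illustrative additions, not part of the
# original demo, and the function is defined but never called, so it does not
# affect the Streamlit page.
def smoke_test_endpoint(url=api_url, out_path="smoke_test.png"):
    payload = {
        "prompt": "Raccoon astronaut in space, detailed, 8k",
        "parameters": {
            "num_inference_steps": 30,
            "seed": 555,
            "negative_prompt": "blurry",
        },
    }
    resp = requests.post(url, json=payload, timeout=120)
    resp.raise_for_status()
    # Decode the first returned image and write it to disk for inspection
    decode_base64_image(resp.json()["generated_images"][0]).save(out_path)
    return out_path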