import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
from PIL import Image
import gradio as gr

# Load the saved generator model
generator = load_model('DCGEN_50_epochs.h5')
latent_dim = 300  # Assuming the model expects a latent dimension of 300

# Generate a batch of images from random latent noise
def generate_images(generator, num_images, latent_dim):
    noise = tf.random.normal([num_images, latent_dim])
    generated_images = generator.predict(noise)
    generated_images = (generated_images * 127.5) + 127.5  # Denormalize from [-1, 1] to [0, 255]
    return generated_images

# Convert a batch of generated images into a list of PIL Images
def generate_pil_images():
    generated_images = generate_images(generator, 16, latent_dim)
    pil_images = [Image.fromarray(np.uint8(image)) for image in generated_images]
    return pil_images

# Path to the training visualization video
training_video_path = "anime-gan-training.mp4"  # Replace with your video file path

# Create a Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown("# Generated Images")
            gr.Interface(
                fn=generate_pil_images,
                inputs=[],
                outputs=gr.Gallery(label="Generated Images", columns=4, height="fill"),
                live=True  # Live interface so it updates every time you refresh
            )
        with gr.Column():
            gr.Markdown("# Training Visualization Video")
            gr.Video(
                value=training_video_path,
                label="Training Visualization",
                format="mp4",
                autoplay=True
            )

# Launch the Gradio app
demo.launch()