#!/usr/bin/env python
# QwQ-Edge / app.py
import os
import random
import uuid
import gradio as gr
import numpy as np
from PIL import Image
import spaces
import torch
from diffusers import AuraFlowPipeline, EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline
DESCRIPTIONx = """
"""
css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
visibility: hidden
}
'''
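# UI model names mapped to their Hugging Face Hub repo ids.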
MODEL_OPTIONS = {
"Lightning": "SG161222/RealVisXL_V4.0_Lightning",
"Realvision": "SG161222/RealVisXL_V4.0",
"AuraFlow": "fal/AuraFlow",
}
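# Runtime configuration via environment variables.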
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_and_prepare_model(model_id):
    """Load a pipeline for the given Hub id and apply optional optimizations."""
    if model_id == "fal/AuraFlow":
        # AuraFlow ships with its own flow-matching scheduler, so it is kept as-is.
        pipe = AuraFlowPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        ).to(device)
    else:
        pipe = StableDiffusionXLPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            use_safetensors=True,
            add_watermarker=False,
        ).to(device)
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    if USE_TORCH_COMPILE:
        # Diffusers pipelines expose no compile() method; compile the denoiser module directly.
        if hasattr(pipe, "unet"):
            pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
        elif hasattr(pipe, "transformer"):
            pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=True)
    if ENABLE_CPU_OFFLOAD:
        pipe.enable_model_cpu_offload()
    return pipe
# Preload all models at startup; compilation/offloading follows the env flags above.
models = {key: load_and_prepare_model(value) for key, value in MODEL_OPTIONS.items()}
MAX_SEED = np.iinfo(np.int32).max
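# Save a PIL image under a unique filename and return the path for the gallery.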
def save_image(img):
unique_name = str(uuid.uuid4()) + ".png"
img.save(unique_name)
return unique_name
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
if randomize_seed:
seed = random.randint(0, MAX_SEED)
return seed
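# Main generation entrypoint; @spaces.GPU requests a ZeroGPU slot for up to 60 s per call.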
@spaces.GPU(duration=60, enable_queue=True)
def generate(
model_choice: str,
prompt: str,
negative_prompt: str = "",
use_negative_prompt: bool = False,
seed: int = 1,
width: int = 1024,
height: int = 1024,
guidance_scale: float = 3,
num_inference_steps: int = 25,
randomize_seed: bool = False,
use_resolution_binning: bool = True,
num_images: int = 1,
progress=gr.Progress(track_tqdm=True),
):
global models
pipe = models[model_choice]
seed = int(randomize_seed_fn(seed, randomize_seed))
generator = torch.Generator(device=device).manual_seed(seed)
options = {
"prompt": [prompt] * num_images,
"negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
"width": width,
"height": height,
"guidance_scale": guidance_scale,
"num_inference_steps": num_inference_steps,
"generator": generator,
"output_type": "pil",
}
    if use_resolution_binning:
        # Forwarded to the pipeline call; not every pipeline supports this kwarg.
        options["use_resolution_binning"] = True
    images = []
    # Run inference in chunks of BATCH_SIZE to keep peak GPU memory bounded.
    for i in range(0, num_images, BATCH_SIZE):
        batch_options = options.copy()
        batch_options["prompt"] = options["prompt"][i:i + BATCH_SIZE]
        # Slice only when a negative prompt was actually supplied (the key may hold None).
        if batch_options.get("negative_prompt") is not None:
            batch_options["negative_prompt"] = options["negative_prompt"][i:i + BATCH_SIZE]
        images.extend(pipe(**batch_options).images)
image_paths = [save_image(img) for img in images]
return image_paths, seed
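# Example images bundled with the Space (not referenced elsewhere in this file).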
def load_predefined_images():
predefined_images = [
"assets/1.png",
"assets/2.png",
"assets/3.png",
"assets/4.png",
"assets/5.png",
"assets/6.png",
"assets/7.png",
"assets/8.png",
"assets/9.png",
"assets/10.png",
"assets/11.png",
"assets/12.png",
]
return predefined_images
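# Build the Gradio interface.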
with gr.Blocks(css=css) as demo:
gr.Markdown(DESCRIPTIONx)
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
value="A cartoon of a Ironman fighting with Hulk, wall painting",
container=False,
)
run_button = gr.Button("Run⚡", scale=0)
result = gr.Gallery(label="Result", columns=1, show_label=False)
with gr.Row():
model_choice = gr.Dropdown(
label="Model Selection",
choices=list(MODEL_OPTIONS.keys()),
value="Lightning"
)
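    # The advanced options below are hidden (visible=False), but their default values still feed generate().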
with gr.Accordion("Advanced options", open=True, visible=False):
num_images = gr.Slider(
label="Number of Images",
minimum=1,
maximum=1,
step=1,
value=1,
)
with gr.Row():
with gr.Column(scale=1):
use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=5,
lines=4,
placeholder="Enter a negative prompt",
value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
visible=True,
)
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width = gr.Slider(
label="Width",
minimum=512,
maximum=MAX_IMAGE_SIZE,
step=64,
value=1024,
)
height = gr.Slider(
label="Height",
minimum=512,
maximum=MAX_IMAGE_SIZE,
step=64,
value=1024,
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance Scale",
minimum=0.1,
maximum=6,
step=0.1,
value=3.0,
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=35,
step=1,
value=20,
)
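    # Toggle the negative-prompt textbox visibility with the checkbox.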
use_negative_prompt.change(
fn=lambda x: gr.update(visible=x),
inputs=use_negative_prompt,
outputs=negative_prompt,
api_name=False,
)
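    # Run generate() when either textbox is submitted or the Run button is clicked.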
gr.on(
triggers=[
prompt.submit,
negative_prompt.submit,
run_button.click,
],
fn=generate,
inputs=[
model_choice,
prompt,
negative_prompt,
use_negative_prompt,
seed,
width,
height,
guidance_scale,
num_inference_steps,
randomize_seed,
num_images
],
outputs=[result, seed],
api_name="run",
)
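    # Sketch (assumption): load_predefined_images() is unused above; it could back
    # a static example gallery here, e.g.:
    # gr.Gallery(value=load_predefined_images(), label="Image Gallery", columns=4)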
if __name__ == "__main__":
demo.queue(max_size=40).launch(show_api=False)