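# Multi-model text-to-image demo: sends the same prompt to several Hugging Face
# models in parallel and collects the generated images in a gallery.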
import gradio as gr
from all_models import good_models, new_models, outtaken_models
from _prompt import thePrompt, howManyModelsToUse
from externalmod import gr_Interface_load, save_image, randomize_seed
import asyncio
import os
from threading import RLock
from datetime import datetime

preSetPrompt = thePrompt
negPreSetPrompt = "deformed face, disfigured, deformed, bad anatomy, watermark, signature, cut off, cropped, head cropped off, low contrast, poorly drawn hands, poorly rendered hands, username, error, missing limbs, malformed limbs, extra fingers, extra arms, extra legs, fused fingers, too many fingers"

models = good_models
#models = new_models

lock = RLock()
HF_TOKEN = os.environ.get("HF_TOKEN") or None  # If private or gated models aren't used, setting HF_TOKEN is unnecessary.

def get_current_time():
    now = datetime.now()
    current_time = now.strftime("%y-%m-%d %H:%M:%S")
    return current_time
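
# Load every model as a Gradio Interface; if a model fails to load, register a
# dummy Interface so the rest of the UI keeps working.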
def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                print(error)
                # Fallback stub so the app still builds when this model is unavailable.
                m = gr.Interface(lambda txt: None, ['text'], ['image'])
            models_load[model] = m

load_fn(models)

num_models = howManyModelsToUse
max_images = howManyModelsToUse
inference_timeout = 900
default_models = models[:num_models]
MAX_SEED = 2**32 - 1
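
# Helpers that pad the current model selection to exactly num_models entries,
# using 'NA' placeholders for the unused image slots.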
def extend_choices(choices):
    return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']

def update_imgbox(choices):
    choices_plus = extend_choices(choices[:num_models])
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]

def random_choices():
    import random
    random.seed()
    return random.choices(models, k=num_models)
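
# Run one model asynchronously: only non-zero generation parameters are forwarded,
# the blocking model call is pushed to a worker thread, and the resulting image is
# saved as a PNG whose filename includes the model name and seed.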
async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
    kwargs = {}
    if height > 0: kwargs["height"] = height
    if width > 0: kwargs["width"] = width
    if steps > 0: kwargs["num_inference_steps"] = steps
    if cfg > 0: kwargs["guidance_scale"] = cfg
    print(f"infer: model '{model_str}', prompt: '{prompt}'...")

    theSeed = randomize_seed() if seed == -1 else seed
    kwargs["seed"] = theSeed
print("Create task...") | |
task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN)) | |
await asyncio.sleep(0) | |
try: | |
result = await asyncio.wait_for(task, timeout=timeout) | |
print(f"Task '{model_str}' ended, result: {result}") | |
except asyncio.TimeoutError as e: | |
print(e) | |
print(f"infer: Task timed out: {model_str}") | |
if not task.done(): task.cancel() | |
result = None | |
raise Exception(f"Task timed out: {model_str}") from e | |
except Exception as e: | |
print(e) | |
print(f"infer: exception: {model_str}") | |
if not task.done(): task.cancel() | |
result = None | |
raise Exception() from e | |
    if task.done() and result is not None and not isinstance(result, tuple):
        with lock:
            png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
            image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, theSeed)
        return image
    else:
        print(f"infer: no valid result for '{model_str}': {result}")
        return None
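
# Synchronous wrapper used as the Gradio callback: runs infer() on a fresh event
# loop and surfaces failures to the UI as gr.Error.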
def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                                height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"gen_fn: Task aborted: {model_str}")
        raise gr.Error(f"Task aborted: {model_str}, Error: {e}") from e
    finally:
        loop.close()
    return result
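
# Prepend a newly generated image to the shared gallery.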
def add_gallery(image, model_str, gallery):
    if gallery is None: gallery = []
    with lock:
        if image is not None: gallery.insert(0, (image, model_str))
    return gallery
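
# Injected into the page <head>: a MutationObserver watches the output images for
# src changes (the handler currently only logs to the console).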
JS=""" | |
<script> | |
/* | |
function simulateButtonPress_() { | |
const button = document.getElementById('simulate-button'); | |
if (button) { | |
button.click(); // Simulate the button press | |
console.log('Button Pressed!'); | |
} | |
} | |
*/ | |
function simulateButtonPress() { | |
console.log('Button Pressed!'); | |
} | |
// Function to observe image changes | |
function observeImageChanges() { | |
// Select all images with the 'image-monitor' class | |
const images = document.querySelectorAll('.svelte-1pijsyv'); | |
// Create a MutationObserver to watch for changes in the image src | |
const observer = new MutationObserver((mutationsList, observer) => { | |
mutationsList.forEach(mutation => { | |
if (mutation.type === 'attributes' && mutation.attributeName === 'src') { | |
// If the image src changes, simulate button press | |
console.log('Image changed!'); | |
simulateButtonPress(); | |
} | |
}); | |
}); | |
// Observer options: observe changes to attributes (like src) | |
const config = { attributes: true }; | |
// Start observing each image | |
images.forEach(image => { | |
observer.observe(image, config); | |
}); | |
} | |
// Start observing | |
window.addEventListener('load', () => { | |
observeImageChanges(); | |
console.log("Yo"); | |
}); | |
</script> | |
""" | |
CSS=""" | |
<style> | |
.image-monitor { | |
border:1px solid red; | |
} | |
/* | |
.svelte-1pijsyv{ | |
border:1px solid green; | |
} | |
*/ | |
.gallery-container{ | |
max-height: 512px; | |
} | |
.butt{ | |
background-color:#2b4764 !important | |
} | |
.butt:hover{ | |
background-color:#3a6c9f !important; | |
} | |
</style> | |
""" | |
# with gr.Blocks(fill_width=True, head=js) as demo:
with gr.Blocks(head=CSS + JS) as demo:
    with gr.Tab(str(num_models) + ' Models'):
        with gr.Column(scale=2):
            with gr.Group():
                txt_input = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
                neg_input = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
                with gr.Accordion("Advanced", open=False, visible=True):
                    with gr.Row():
                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                        seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
                        seed_rand.click(randomize_seed, None, [seed], queue=False)
            with gr.Row():
                gen_button = gr.Button(f'Generate up to {int(num_models)} images', variant='primary', scale=3, elem_classes=["butt"])
                random_button = gr.Button('Randomize Models', variant='secondary', scale=1)
        with gr.Column(scale=1):
            with gr.Group():
                with gr.Row():
                    output = [gr.Image(label=m, show_download_button=True, elem_classes=["image-monitor"],
                                       interactive=False, width=112, height=112, show_share_button=False, format="png",
                                       visible=True) for m in default_models]
                    current_models = [gr.Textbox(m, visible=False) for m in default_models]
            with gr.Column(scale=2):
                gallery = gr.Gallery(label="Output", show_download_button=True,
                                     interactive=False, show_share_button=False, container=True, format="png",
                                     preview=True, object_fit="cover", columns=2, rows=2)

        for m, o in zip(current_models, output):
            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
                              inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
                              concurrency_limit=None, queue=False)
            o.change(add_gallery, [o, m, gallery], [gallery])
        with gr.Column(scale=4):
            with gr.Accordion('Model selection'):
                model_choice = gr.CheckboxGroup(models, label=f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
                model_choice.change(update_imgbox, model_choice, output)
                model_choice.change(extend_choices, model_choice, current_models)
                random_button.click(random_choices, None, model_choice)
    with gr.Tab('Single model'):
        with gr.Column(scale=2):
            model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
            with gr.Group():
                txt_input2 = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
                neg_input2 = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
                with gr.Accordion("Advanced", open=False, visible=True):
                    with gr.Row():
                        width2 = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height2 = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                        seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                        seed_rand2 = gr.Button("Randomize Seed", size="sm", variant="secondary")
                        seed_rand2.click(randomize_seed, None, [seed2], queue=False)
                num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
            with gr.Row():
                gen_button2 = gr.Button('Let the machine hallucinate', variant='primary', scale=2, elem_classes=["butt"])
        with gr.Column(scale=1):
            with gr.Group():
                with gr.Row():
                    output2 = [gr.Image(label='', show_download_button=True,
                                        interactive=False, width=112, height=112, visible=True, format="png",
                                        show_share_button=False, show_label=False) for _ in range(max_images)]
            with gr.Column(scale=2):
                gallery2 = gr.Gallery(label="Output", show_download_button=True,
                                      interactive=False, show_share_button=True, container=True, format="png",
                                      preview=True, object_fit="cover", columns=2, rows=2)

        for i, o in enumerate(output2):
            img_i = gr.Number(i, visible=False)
            num_images.change(lambda i, n: gr.update(visible=(i < n)), [img_i, num_images], o, queue=False)
            gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
                               fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
                               inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
                                       height2, width2, steps2, cfg2, seed2], outputs=[o],
                               concurrency_limit=None, queue=False)
            o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])

demo.launch(show_api=False, max_threads=400)