import requests
import torch
import gradio as gr
import random
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
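# Load the InstructPix2Pix editing pipeline in fp16, without the safety checker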
model_id = "timbrooks/instruct-pix2pix"
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16", safety_checker=None)
pipe.to("cuda")
# Switch to the Euler Ancestral scheduler recommended for InstructPix2Pix
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.enable_attention_slicing()  # reduce VRAM usage at a small speed cost
#seed = random.randint(0, 1000000)
#print(f"SEED IS : {seed}")
counter = 0  # number of edit turns handled so far in this session
def chat(image_in, message, history, progress=gr.Progress(track_tqdm=True)):
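    """Run one edit turn: apply the instruction in `message` to the current
    image with InstructPix2Pix and append the result to the chat history."""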
    progress(0, desc="Starting...")
    global counter
    #global seed
    #img_nm = f"./edited_image_{seed}.png"
    counter += 1
    #print(f"seed is : {seed}")
    #print(f"image_in name is : {img_nm}")
    #if message == "revert": -- to add revert functionality later
    if counter > 1:
        # From the second turn on, keep editing the previously saved image
        image_in = Image.open("edited_image.png")  #(img_nm)
    prompt = message  # e.g. "turn him into a cyborg"
    # image_guidance_scale balances faithfulness to the input image against the edit instruction
    edited_image = pipe(prompt, image=image_in, num_inference_steps=20, image_guidance_scale=1).images[0]
    edited_image.save("edited_image.png")  #(img_nm)
    history = history or []
    add_text_list = ["There you go ", "Enjoy your image! ", "Nice work! Wonder what you're gonna do next! ", "Way to go! ", "Does this work for you? ", "Something like this? "]
    #Resizing the image for better display
    # Embed the saved image in the chat reply via Gradio's local file route
    response = random.choice(add_text_list) + '<img src="/file=edited_image.png">'
    history.append((message, response))
    return history, history
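# Build the Gradio demo UI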
with gr.Blocks() as demo:
    gr.HTML("""
        For faster inference without waiting in the queue, you may duplicate this Space and upgrade to a GPU in the settings.