|
|
|
import PIL |
|
import requests |
|
import torch |
|
import gradio as gr |
|
import random |
|
from PIL import Image |
|
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler |
|
|
|
# Hugging Face model id for the InstructPix2Pix image-editing pipeline.
model_id = "timbrooks/instruct-pix2pix"

# fp16 weights halve VRAM usage; safety_checker=None disables the NSFW filter.
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16", safety_checker=None)

# Requires a CUDA-capable GPU (fp16 weights are not usable on CPU here).
pipe.to("cuda")

# Trades a little speed for a lower peak-memory attention computation.
pipe.enable_attention_slicing()



# NOTE: despite the name, this value is only used to build a unique output
# filename for this process — it is never passed to the pipeline as an RNG seed.
seed = random.randint(0, 1000000)

# Counts chat() invocations; after the first call, chat() re-edits the saved
# output image instead of the original upload (see chat below).
counter = 0
|
|
|
def chat(image_in, message, history):
    """Apply one InstructPix2Pix edit and append the result to the chat.

    Parameters
    ----------
    image_in : PIL.Image.Image
        The user-uploaded image. Only consulted on the *first* call; every
        later call re-opens the previously saved output so edits accumulate.
    message : str
        The natural-language edit instruction, passed to the pipeline as the
        prompt.
    history : list[tuple[str, str]] | None
        Chat history as (user message, bot response) pairs; None on first call.

    Returns
    -------
    tuple
        (history, history) — one copy feeds the Chatbot display, the other
        the gr.State carried into the next call.
    """

    global counter

    global seed

    # Per-process output path; `seed` only makes the filename unique.
    img_nm = f"./edited_image_{seed}.png"

    counter += 1

    # After the first edit, chain edits by re-loading the last saved output
    # instead of the original upload.
    if counter > 1:

        image_in = Image.open(img_nm)

    # The pipeline expects a 3-channel RGB image; Gradio PIL uploads are often
    # RGBA (PNG with alpha) or paletted, so normalize before inference.
    image_in = image_in.convert("RGB")

    prompt = message

    edited_image = pipe(prompt, image=image_in, num_inference_steps=20, image_guidance_scale=1).images[0]

    edited_image.save(img_nm)

    history = history or []

    add_text_list = ["There you go ", "Enjoy your image! ", "Nice work! Wonder what you gonna do next! ", "Way to go! ", "Does this work for you? ", "Something like this? "]

    # Embed the saved image inline in the bot reply; img_nm[2:] strips the
    # leading "./" so the path works with Gradio's /file= route.
    response = random.choice(add_text_list) + '<img src="/file=' + img_nm[2:] + '" style="width: 200px; height: 200px;">'

    history.append((message, response))

    return history, history
|
|
|
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:

    with gr.Row():

        with gr.Column():

            # Original upload; only consulted on the first edit (see chat()).
            image_in = gr.Image(type='pil', label="Original Image")

            # Free-text edit instruction.
            text_in = gr.Textbox()

            # Carries the chat history between button clicks.
            state_in = gr.State()

            b1 = gr.Button('Edit the image!')

            # Bot replies contain an inline <img> tag pointing at the saved file.
            chatbot = gr.Chatbot()

            # chat(image, message, history) -> (chatbot messages, new state)
            b1.click(chat,[image_in, text_in, state_in], [chatbot, state_in])





demo.launch(debug=True, width="80%", height=1500)