Runtime error

Update app.py

app.py CHANGED
@@ -2,6 +2,7 @@ import torch
 import requests
 from PIL import Image
 from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler
+import rembg
 
 # Load the pipeline
 pipeline = DiffusionPipeline.from_pretrained(
@@ -18,17 +19,59 @@ pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
 )
 pipeline.to('cuda:0')
 
-# Download an example image.
-cond = Image.open(requests.get("https://d.skis.ltd/nrp/sample-data/lysol.png", stream=True).raw)
-
-
-
-
-#
-#
-
-
+def inference(input_img, num_inference_steps, guidance_scale, seed):
+    # Load the user-supplied input image.
+    cond = Image.open(input_img)
+
+    # Run the pipeline!
+    #result = pipeline(cond, num_inference_steps=75).images[0]
+    # gr.Number yields a float, so cast the seed before manual_seed().
+    result = pipeline(cond, num_inference_steps=num_inference_steps,
+                      guidance_scale=guidance_scale,
+                      generator=torch.Generator(pipeline.device).manual_seed(int(seed))).images[0]
+
+    # For general real and synthetic images of common objects, around 28
+    # inference steps are usually enough; for images with delicate details
+    # like faces (real or anime), you may need 75-100 steps for the details
+    # to emerge.
+
+    #result.show()
+    #result.save("output.png")
+    return result
+
+def remove_background(img):
+    # When wired to input_img (type='filepath'), img arrives as a path;
+    # rembg.remove() needs image data, so open it first.
+    if isinstance(img, str):
+        img = Image.open(img)
+    return rembg.remove(img)
+
+
+import gradio as gr
+
+with gr.Blocks() as demo:
+    gr.Markdown("<h1><center>Zero123++ Demo</center></h1>")
+    with gr.Column():
+        input_img = gr.Image(label='Input Image', type='filepath')
+    with gr.Column():
+        output_img = gr.Image(label='Zero123++ Output')
+    with gr.Accordion("Advanced options:", open=False):
+        rm_in_bkg = gr.Checkbox(label='Remove Input Background')
+        rm_out_bkg = gr.Checkbox(label='Remove Output Background')
+        num_inference_steps = gr.Slider(label="Number of Inference Steps", minimum=15, maximum=100, step=1, value=75, interactive=True)
+        guidance_scale = gr.Slider(label="Classifier Free Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=4.0, interactive=True)
+        seed = gr.Number(0, label='Seed')
+        btn = gr.Button('Submit')
+
+    btn.click(inference, [input_img, num_inference_steps, guidance_scale, seed], output_img)
+    rm_in_bkg.input(remove_background, input_img, output_img)
+    rm_out_bkg.input(remove_background, output_img, output_img)
+
+    gr.Examples(
+        examples=[["one.jpg"], ["two.jpg"], ["three.jpg"]],
+        inputs=input_img,
+        outputs=output_img,
+        cache_examples=False,  # fn=dummy was undefined; see the note below the diff
+    )
+
+
+demo.launch()
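
Two notes on the Gradio wiring, since the Space currently shows a runtime error. First, the committed gr.Examples block set fn=dummy with cache_examples=True, but no dummy function is defined anywhere in the file; the diff above disables caching instead. If cached examples are wanted, a minimal sketch is a wrapper whose signature matches the single inputs component (cached_inference is a hypothetical name, and the pinned values simply reuse the defaults from the Advanced options panel):

# Hypothetical wrapper, not part of the commit: gr.Examples passes only
# input_img, so the remaining inference arguments are pinned to the UI
# defaults shown above.
def cached_inference(input_img):
    return inference(input_img, num_inference_steps=75, guidance_scale=4.0, seed=0)

# then, inside the gr.Examples(...) call:
#     fn=cached_inference,
#     cache_examples=True,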
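Second, rm_in_bkg.input(...) and rm_out_bkg.input(...) run remove_background on every toggle, whether the box was checked or unchecked. A state-aware variant, sketched here with a hypothetical maybe_remove_background helper, would pass the checkbox value in alongside the image:

# Hypothetical variant, not in the commit: strip the background only when
# the checkbox is actually checked; otherwise pass the image through.
def maybe_remove_background(checked, img):
    return rembg.remove(img) if checked else img

# rm_out_bkg.input(maybe_remove_background, [rm_out_bkg, output_img], output_img)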
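For context on the lines elided between the two hunks: the diff shows only the opening pipeline = DiffusionPipeline.from_pretrained( and the scheduler swap. The Space's actual arguments are not visible here; the loading call published in the upstream Zero123++ README looks like the following, offered as a reference point rather than a reconstruction of the hidden lines:

# Canonical Zero123++ setup from the upstream README; the Space's exact
# arguments are elided in the diff above and may differ.
pipeline = DiffusionPipeline.from_pretrained(
    "sudo-ai/zero123plus-v1.1",
    custom_pipeline="sudo-ai/zero123plus-pipeline",
    torch_dtype=torch.float16,
)
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
    pipeline.scheduler.config, timestep_spacing='trailing'
)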