Update app.py
--- a/app.py
+++ b/app.py
@@ -20,7 +20,6 @@ import paramiko
 import datetime
 #from diffusers import DPMSolverSDEScheduler
 from diffusers.models.attention_processor import AttnProcessor2_0
-import gc
 
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
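Note: the `import gc` removal pairs with the deletion of the `flush()` helper further down (old lines 167-171), apparently its only user; the per-call teardown of the text encoders that required explicit garbage collection is gone in this commit.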
@@ -86,18 +85,22 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 def load_and_prepare_model():
     #vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None, use_safetensors=True, token=True)
-    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False)
+    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
     #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
     #sched = DPMSolverSDEScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
     #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", token=True) #, beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True, token=True)
     #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
+
     pipe = StableDiffusionXLPipeline.from_pretrained(
         'ford442/RealVisXL_V5.0_BF16',
         #torch_dtype=torch.bfloat16,
         token=True,
-        # low_cpu_mem_usage = False,
         add_watermarker=False,
+        text_encoder=None,
+        text_encoder_2=None,
+        vae=None,
     )
+    #pipe.vae = vaeXL #.to(torch.bfloat16)
     #pipe.scheduler = sched
     #pipe.vae.do_resize=False
     #pipe.vae.vae_scale_factor=8
@@ -110,8 +113,9 @@ def load_and_prepare_model():
     #pipe.unet.to(memory_format=torch.channels_last)
     #pipe.enable_vae_tiling()
     pipe.to(device=device, dtype=torch.bfloat16)
-
-
+    pipe.unet.set_attn_processor(AttnProcessor2_0())
+
+    pipe.vae = vaeXL.to('cpu') #.to(torch.bfloat16)
     return pipe
 
 pipe = load_and_prepare_model()
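Taken together, the two hunks above change how the pipeline is assembled. Here is a consolidated sketch of what `load_and_prepare_model()` looks like after this commit, reconstructed from the hunks plus their unchanged context (the commented-out alternatives are omitted):

```python
import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline
from diffusers.models.attention_processor import AttnProcessor2_0

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def load_and_prepare_model():
    # fp32 SDXL VAE, loaded separately; it ends up parked on the CPU
    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", use_safetensors=False)
    # Passing None for a component makes from_pretrained skip loading it,
    # so the two CLIP text encoders and the bundled VAE are never loaded here.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        'ford442/RealVisXL_V5.0_BF16',
        token=True,
        add_watermarker=False,
        text_encoder=None,
        text_encoder_2=None,
        vae=None,
    )
    pipe.to(device=device, dtype=torch.bfloat16)   # only the loaded submodules move
    pipe.unet.set_attn_processor(AttnProcessor2_0())  # force SDPA attention
    pipe.vae = vaeXL.to('cpu')                        # VAE stays in fp32 on CPU
    return pipe
```

The effect is that start-up GPU memory holds little more than the bf16 UNet, while decoding goes through the CPU-resident fp32 VAE.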
@@ -164,11 +168,6 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
         f.write(f"To cuda and bfloat \n")
     upload_to_ftp(filename)
 
-def flush():
-    gc.collect()
-    torch.cuda.empty_cache()
-    torch.cuda.reset_peak_memory_stats()
-
 @spaces.GPU(duration=30)
 def generate_30(
     prompt: str,
@@ -184,24 +183,12 @@ def generate_30(
 ):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
-
-
-    (
-        prompt_embeds,
-        negative_prompt_embeds,
-        pooled_prompt_embeds,
-        negative_pooled_prompt_embeds
-    ) = pipe.encode_prompt(prompt,negative_prompt)
-    del pipe.text_encoder, pipe.text_encoder_2
-    flush()
+    pipe.text_encoder=CLIPTextModel.from_config(pipe.config)
+    pipe.text_encoder_2=CLIPTextModelWithProjection.from_config(pipe.config)
     options = {
-
-
-
-        "prompt_embeds": prompt_embeds,
-        "negative_prompt_embeds": negative_prompt_embeds,
-        "pooled_prompt_embeds": pooled_prompt_embeds,
-        "negative_pooled_prompt_embeds": negative_pooled_prompt_embeds,
+        "prompt": [prompt],
+        "negative_prompt": [negative_prompt],
+        "negative_prompt_2": [neg_prompt_2],
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
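This hunk (and its twins in `generate_60` and `generate_90` below) swaps precomputed embeddings for raw prompt strings: instead of calling `pipe.encode_prompt(...)`, deleting both text encoders, and flushing, each call now rebuilds the encoders and lets the pipeline encode the prompt itself. One caution, as an editor's note rather than part of the commit: `CLIPTextModel.from_config(pipe.config)` looks doubtful on two counts. `from_config` lives on the transformers Auto classes rather than on `CLIPTextModel` itself, and even where it works it produces randomly initialized weights; also, `pipe.config` is the pipeline's component registry, not a `CLIPTextConfig`. If the intent is to restore the trained encoders on every call, a sketch of the conventional approach, assuming 'ford442/RealVisXL_V5.0_BF16' uses the standard SDXL subfolder layout:

```python
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection

# Reload the trained encoders from the checkpoint's text_encoder subfolders.
pipe.text_encoder = CLIPTextModel.from_pretrained(
    'ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',
    torch_dtype=torch.bfloat16,
).to(device)
pipe.text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(
    'ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',
    torch_dtype=torch.bfloat16,
).to(device)
```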
@@ -238,24 +225,12 @@ def generate_60(
 ):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
-
-
-    (
-        prompt_embeds,
-        negative_prompt_embeds,
-        pooled_prompt_embeds,
-        negative_pooled_prompt_embeds
-    ) = pipe.encode_prompt(prompt)
-    del pipe.text_encoder, pipe.text_encoder_2
-    flush()
+    pipe.text_encoder=CLIPTextModel.from_config(pipe.config)
+    pipe.text_encoder_2=CLIPTextModelWithProjection.from_config(pipe.config)
     options = {
-
-
-
-        "prompt_embeds": prompt_embeds,
-        "negative_prompt_embeds": negative_prompt_embeds,
-        "pooled_prompt_embeds": pooled_prompt_embeds,
-        "negative_pooled_prompt_embeds": negative_pooled_prompt_embeds,
+        "prompt": [prompt],
+        "negative_prompt": [negative_prompt],
+        "negative_prompt_2": [neg_prompt_2],
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
@@ -292,24 +267,12 @@ def generate_90(
 ):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
-
-
-    (
-        prompt_embeds,
-        negative_prompt_embeds,
-        pooled_prompt_embeds,
-        negative_pooled_prompt_embeds
-    ) = pipe.encode_prompt(prompt)
-    del pipe.text_encoder, pipe.text_encoder_2
-    flush()
+    pipe.text_encoder=CLIPTextModel.from_config(pipe.config)
+    pipe.text_encoder_2=CLIPTextModelWithProjection.from_config(pipe.config)
     options = {
-
-
-
-        "prompt_embeds": prompt_embeds,
-        "negative_prompt_embeds": negative_prompt_embeds,
-        "pooled_prompt_embeds": pooled_prompt_embeds,
-        "negative_pooled_prompt_embeds": negative_pooled_prompt_embeds,
+        "prompt": [prompt],
+        "negative_prompt": [negative_prompt],
+        "negative_prompt_2": [neg_prompt_2],
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
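The `generate_60` and `generate_90` hunks apply the same rewrite as `generate_30`; the only prior difference was that their old `encode_prompt` calls passed `prompt` alone, with no negative prompt. Note also that the new `options` dicts reference `neg_prompt_2`, which does not appear in the shown context, so it is presumably a parameter of the surrounding function signatures.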