ford442 committed
Commit 9d0f448 · verified · 1 Parent(s): 618278e

Update app.py

Files changed (1): app.py (+21 -4)
app.py CHANGED
--- a/app.py
+++ b/app.py
@@ -206,7 +206,7 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
+def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise):
     filename= f'tst_A_{timestamp}.txt'
     with open(filename, "w") as f:
         f.write(f"Realvis 5.0 (Tester B) \n")
@@ -214,6 +214,7 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
         f.write(f"Prompt: {prompt} \n")
         f.write(f"Steps: {num_inference_steps} \n")
         f.write(f"Guidance Scale: {guidance_scale} \n")
+        f.write(f"Denoise Strength: {denoise} \n")
         f.write(f"SPACE SETUP: \n")
         f.write(f"Use Model Dtype: no \n")
         f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
@@ -240,6 +241,7 @@ def generate_30(
     use_resolution_binning: bool = True,
     num_images: int = 1,
     juggernaut: bool = True,
+    denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.backends.cudnn.benchmark = False
@@ -256,6 +258,7 @@ def generate_30(
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
+        "strength": denoise,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
@@ -269,7 +272,7 @@ def generate_30(
     images = []
     pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
-    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
+    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
@@ -300,6 +303,7 @@ def generate_60(
     use_resolution_binning: bool = True,
     num_images: int = 1,
     juggernaut: bool = True,
+    denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.backends.cudnn.benchmark = True
@@ -316,6 +320,7 @@ def generate_60(
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
+        "strength": denoise,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
@@ -329,7 +334,7 @@ def generate_60(
     images = []
     pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
-    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
+    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
@@ -360,6 +365,7 @@ def generate_90(
     use_resolution_binning: bool = True,
     num_images: int = 1,
     juggernaut: bool = True,
+    denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.backends.cudnn.benchmark = True
@@ -376,6 +382,7 @@ def generate_90(
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
+        "strength": denoise,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
@@ -389,7 +396,7 @@ def generate_90(
     images = []
     pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
-    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
+    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
@@ -487,6 +494,13 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
                 step=1,
                 value=0,
             )
+            denoise = gr.Slider(
+                label="Denoising Strength",
+                minimum=0.0,
+                maximum=1.0,
+                step=0.01,
+                value=0.3,
+            )
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
             juggernaut = gr.Checkbox(label="Use Juggernaut VAE", value=True)
         with gr.Row():
@@ -553,6 +567,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             randomize_seed,
             num_images,
             juggernaut,
+            denoise
         ],
        outputs=[result, seed],
    )
@@ -577,6 +592,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             randomize_seed,
             num_images,
             juggernaut,
+            denoise
         ],
        outputs=[result, seed],
    )
@@ -601,6 +617,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             randomize_seed,
             num_images,
             juggernaut,
+            denoise
         ],
        outputs=[result, seed],
    )
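
For context, the new "strength" entry in the options dict corresponds to the denoising-strength argument used by diffusers img2img-style pipelines. Below is a minimal sketch of how such a value is commonly consumed; it assumes an SDXL img2img pipeline where `strength` is a documented call argument, and the model ID and input path are illustrative placeholders. Whether the space's own `pipe` accepts a "strength" key depends on how it is constructed elsewhere in app.py, which is outside this diff.

# Sketch only: illustrative use of a denoise/strength value with a diffusers
# img2img-style SDXL pipeline (model ID and input image are placeholders).
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",  # illustrative model ID
    torch_dtype=torch.float16,
).to("cuda")

init_image = load_image("input.png")  # illustrative input path

# strength near 0.3 keeps most of the init image; strength near 1.0 re-noises
# it almost completely. Roughly int(num_inference_steps * strength) denoising
# steps are actually run.
image = pipe(
    prompt="a photo of an astronaut riding a horse",
    image=init_image,
    strength=0.3,
    num_inference_steps=30,
    guidance_scale=5.0,
).images[0]
image.save("out.png")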