TheAwakenOne committed
Commit 0b25f63 · 1 Parent(s): 2261f3f
Files changed (5)
  1. README.md +52 -8
  2. app.py +313 -133
  3. live_preview_helpers.py +166 -0
  4. loras.json +68 -0
  5. requirements.txt +4 -4
README.md CHANGED
@@ -1,14 +1,58 @@
  ---
- title: AwakenOnesLoraPlayground
- emoji: 🖼
- colorFrom: purple
- colorTo: red
  sdk: gradio
- sdk_version: 5.0.1
  app_file: app.py
- pinned: false
- license: other
- short_description: 'Preview images generated using different LoRA models '
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

  ---
+ title: Awaken Ones' Lora Previews
+ emoji: 🎨
+ colorFrom: blue
+ colorTo: purple
  sdk: gradio
+ sdk_version: 5.0.2
  app_file: app.py
+ pinned: true
+ license: mit
+ models:
+ - TheAwakenOne/camberganng
+ - TheAwakenOne/Marilyn-Monroe
+ - TheAwakenOne/The-Maxx-Style
+ - TheAwakenOne/The-Mask-Lora
+ - TheAwakenOne/graffiti-style
+ - TheAwakenOne/rockbrow
+ - TheAwakenOne/ldlaughingmemeface
+ - TheAwakenOne/mtdp-balloon-character
+ - TheAwakenOne/watercolor
+ - TheAwakenOne/max-headroom
+ - TheAwakenOne/caricature
  ---

+ # Awaken Ones' Lora Previews
+
+ Welcome to the Awaken Ones' Lora Previews! This Space showcases a collection of my custom LoRA models created with FluxGym.
+
+ ## Features
+
+ - Preview images generated using different LoRA models
+ - Customize prompts and generation parameters
+ - Experiment with various styles and characters
+
+ ## How to Use
+
+ 1. Select a LoRA model from the gallery
+ 2. Enter a prompt in the text box
+ 3. Adjust the generation parameters as desired
+ 4. Click "Generate" to create your image
+
+ ## Models Included
+
+ - Camberganng
+ - Marilyn Monroe
+ - The Maxx Style
+ - The Mask Lora
+ - Graffiti Style
+ - Rockbrow
+ - LD Laughing Meme Face
+ - MTDP Balloon Character
+ - Watercolor
+ - Max Headroom
+ - Caricature
+
+ Enjoy exploring these unique LoRA models and create amazing images!
+
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
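The "How to Use" steps above describe the Space UI. For reference only (not part of this commit), the same flow can be reproduced locally with diffusers, using the FLUX.1-dev base model and one of the LoRA repos listed above; this is a minimal sketch that assumes a CUDA GPU with enough memory for FLUX.1-dev.

```python
# Not part of this commit: a sketch of the README's "How to Use" flow outside the Space.
# Assumes a CUDA GPU with enough memory for FLUX.1-dev (the base model app.py uses).
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Step 1: pick a LoRA (any repo from the models list / loras.json below).
pipe.load_lora_weights("TheAwakenOne/max-headroom")

# Steps 2-4: write a prompt (including the LoRA's trigger word, "M2X" per loras.json),
# set parameters, and generate.
image = pipe(
    "M2X, max headroom character on a glitching CRT screen",
    num_inference_steps=28,   # app default
    guidance_scale=3.5,       # app default (CFG Scale)
    width=1024,
    height=1024,
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]
image.save("preview.png")
```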
app.py CHANGED
@@ -1,154 +1,334 @@
 
  import gradio as gr
- import numpy as np
  import random

- # import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
- import torch

  device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- # @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     prompt,
-     negative_prompt,
-     seed,
-     randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
          width=width,
          height=height,
          generator=generator,
      ).images[0]

-     return image, seed
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]
-
- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 640px;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )

-             run_button = gr.Button("Run", scale=0, variant="primary")

-         result = gr.Image(label="Result", show_label=False)

-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )

-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )

-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )

              with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0,  # Replace with defaults that work for your model
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2,  # Replace with defaults that work for your model
-                 )
-
-         gr.Examples(examples=examples, inputs=[prompt])
      gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-         ],
-         outputs=[result, seed],
      )

- if __name__ == "__main__":
-     demo.launch()
 
+ import os
  import gradio as gr
+ import json
+ import logging
+ import torch
+ from PIL import Image
+ import spaces
+ from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
+ from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
+ from diffusers.utils import load_image
+ from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
+ import copy
  import random
+ import time

+ # Load LoRAs from JSON file
+ with open('loras.json', 'r') as f:
+     loras = json.load(f)

+ # Initialize the base model
+ dtype = torch.bfloat16
  device = "cuda" if torch.cuda.is_available() else "cpu"
+ base_model = "black-forest-labs/FLUX.1-dev"
+
+ taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+ good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
+ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
+ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype)
+
+ MAX_SEED = 2**32-1
+
+ pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
+
+ class calculateDuration:
+     def __init__(self, activity_name=""):
+         self.activity_name = activity_name
+
+     def __enter__(self):
+         self.start_time = time.time()
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.end_time = time.time()
+         self.elapsed_time = self.end_time - self.start_time
+         if self.activity_name:
+             print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
+         else:
+             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
+
+ def update_selection(evt: gr.SelectData, width, height):
+     selected_lora = loras[evt.index]
+     new_placeholder = f"Type a prompt for {selected_lora['title']}"
+     lora_repo = selected_lora["repo"]
+     updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
+     if "aspect" in selected_lora:
+         if selected_lora["aspect"] == "portrait":
+             width = 768
+             height = 1024
+         elif selected_lora["aspect"] == "landscape":
+             width = 1024
+             height = 768
+         else:
+             width = 1024
+             height = 1024
+     return (
+         gr.update(placeholder=new_placeholder),
+         updated_text,
+         evt.index,
+         width,
+         height,
+     )
+
+ @spaces.GPU(duration=70)
+ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
+     pipe.to("cuda")
+     generator = torch.Generator(device="cuda").manual_seed(seed)
+     with calculateDuration("Generating image"):
+         # Generate image
+         for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
+             prompt=prompt_mash,
+             num_inference_steps=steps,
+             guidance_scale=cfg_scale,
+             width=width,
+             height=height,
+             generator=generator,
+             joint_attention_kwargs={"scale": lora_scale},
+             output_type="pil",
+             good_vae=good_vae,
+         ):
+             yield img
+
+ @spaces.GPU(duration=70)
+ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
+     generator = torch.Generator(device="cuda").manual_seed(seed)
+     pipe_i2i.to("cuda")
+     image_input = load_image(image_input_path)
+     final_image = pipe_i2i(
+         prompt=prompt_mash,
+         image=image_input,
+         strength=image_strength,
+         num_inference_steps=steps,
+         guidance_scale=cfg_scale,
          width=width,
          height=height,
          generator=generator,
+         joint_attention_kwargs={"scale": lora_scale},
+         output_type="pil",
      ).images[0]
+     return final_image

+ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+     if selected_index is None:
+         raise gr.Error("You must select a LoRA before proceeding.")
+     selected_lora = loras[selected_index]
+     lora_path = selected_lora["repo"]
+     trigger_word = selected_lora["trigger_word"]
+     if(trigger_word):
+         if "trigger_position" in selected_lora:
+             if selected_lora["trigger_position"] == "prepend":
+                 prompt_mash = f"{trigger_word} {prompt}"
+             else:
+                 prompt_mash = f"{prompt} {trigger_word}"
+         else:
+             prompt_mash = f"{trigger_word} {prompt}"
+     else:
+         prompt_mash = prompt

+     with calculateDuration("Unloading LoRA"):
+         pipe.unload_lora_weights()
+         pipe_i2i.unload_lora_weights()
+
+     # Load LoRA weights
+     with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
+         if(image_input is not None):
+             if "weights" in selected_lora:
+                 pipe_i2i.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
+             else:
+                 pipe_i2i.load_lora_weights(lora_path)
+         else:
+             if "weights" in selected_lora:
+                 pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
+             else:
+                 pipe.load_lora_weights(lora_path)
+
+     # Set random seed for reproducibility
+     with calculateDuration("Randomizing seed"):
+         if randomize_seed:
+             seed = random.randint(0, MAX_SEED)
+
+     if(image_input is not None):
+
+         final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
+         yield final_image, seed, gr.update(visible=False)
+     else:
+         image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
+
+         # Consume the generator to get the final image
+         final_image = None
+         step_counter = 0
+         for image in image_generator:
+             step_counter+=1
+             final_image = image
+             progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
+             yield image, seed, gr.update(value=progress_bar, visible=True)
+
+         yield final_image, seed, gr.update(value=progress_bar, visible=False)
+
+ def get_huggingface_safetensors(link):
+     split_link = link.split("/")
+     if(len(split_link) == 2):
+         model_card = ModelCard.load(link)
+         base_model = model_card.data.get("base_model")
+         print(base_model)
+         if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
+             raise Exception("Not a FLUX LoRA!")
+         image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
+         trigger_word = model_card.data.get("instance_prompt", "")
+         image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
+         fs = HfFileSystem()
+         try:
+             list_of_files = fs.ls(link, detail=False)
+             for file in list_of_files:
+                 if(file.endswith(".safetensors")):
+                     safetensors_name = file.split("/")[-1]
+                 if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
+                     image_elements = file.split("/")
+                     image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
+         except Exception as e:
+             print(e)
+             gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
+             raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
+         return split_link[1], link, safetensors_name, trigger_word, image_url

+ def check_custom_model(link):
+     if(link.startswith("https://")):
+         if(link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co")):
+             link_split = link.split("huggingface.co/")
+             return get_huggingface_safetensors(link_split[1])
+     else:
+         return get_huggingface_safetensors(link)

+ def add_custom_lora(custom_lora):
+     global loras
+     if(custom_lora):
+         try:
+             title, repo, path, trigger_word, image = check_custom_model(custom_lora)
+             print(f"Loaded custom LoRA: {repo}")
+             card = f'''
+             <div class="custom_lora_card">
+               <span>Loaded custom LoRA:</span>
+               <div class="card_internal">
+                 <img src="{image}" />
+                 <div>
+                   <h3>{title}</h3>
+                   <small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
+                 </div>
+               </div>
+             </div>
+             '''
+             existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
+             if(not existing_item_index):
+                 new_item = {
+                     "image": image,
+                     "title": title,
+                     "repo": repo,
+                     "weights": path,
+                     "trigger_word": trigger_word
+                 }
+                 print(new_item)
+                 existing_item_index = len(loras)
+                 loras.append(new_item)
+
+             return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
+         except Exception as e:
+             gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
+             return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-FLUX LoRA"), gr.update(visible=True), gr.update(), "", None, ""
+     else:
+         return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""

+ def remove_custom_lora():
+     return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""

+ run_lora.zerogpu = True

+ css = '''
+ #gen_btn{height: 100%}
+ #gen_column{align-self: stretch}
+ #title{text-align: center}
+ #title h1{font-size: 3em; display:inline-flex; align-items:center}
+ #title img{width: 100px; margin-right: 0.5em}
+ #gallery .grid-wrap{height: 10vh}
+ #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
+ .card_internal{display: flex;height: 100px;margin-top: .5em}
+ .card_internal img{margin-right: 1em}
+ .styler{--form-gap-width: 0px !important}
+ #progress{height:30px}
+ #progress .generating{display:none}
+ .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
+ .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
+ '''
+ font=[gr.themes.GoogleFont("Source Sans Pro"), "Arial", "sans-serif"]
+ with gr.Blocks(theme=gr.themes.Soft(font=font), css=css, delete_cache=(60, 3600)) as app:
+     title = gr.HTML(
+         """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> FLUX LoRA the Explorer</h1>""",
+         elem_id="title",
+     )
+     selected_index = gr.State(None)
+     with gr.Row():
+         with gr.Column(scale=3):
+             prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
+         with gr.Column(scale=1, elem_id="gen_column"):
+             generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
+     with gr.Row():
+         with gr.Column():
+             selected_info = gr.Markdown("")
+             gallery = gr.Gallery(
+                 [(item["image"], item["title"]) for item in loras],
+                 label="LoRA Gallery",
+                 allow_preview=False,
+                 columns=3,
+                 elem_id="gallery",
+                 show_share_button=False
+             )
+             with gr.Group():
+                 custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path", placeholder="multimodalart/vintage-ads-flux")
+                 gr.Markdown("[Check the list of FLUX LoRas](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
+             custom_lora_info = gr.HTML(visible=False)
+             custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
+         with gr.Column():
+             progress_bar = gr.Markdown(elem_id="progress",visible=False)
+             result = gr.Image(label="Generated Image")

+     with gr.Row():
+         with gr.Accordion("Advanced Settings", open=False):
              with gr.Row():
+                 input_image = gr.Image(label="Input image", type="filepath")
+                 image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
+             with gr.Column():
+                 with gr.Row():
+                     cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
+                     steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
+
+                 with gr.Row():
+                     width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
+                     height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
+
+                 with gr.Row():
+                     randomize_seed = gr.Checkbox(True, label="Randomize seed")
+                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+                     lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=3, step=0.01, value=0.95)
+
+     gallery.select(
+         update_selection,
+         inputs=[width, height],
+         outputs=[prompt, selected_info, selected_index, width, height]
+     )
+     custom_lora.input(
+         add_custom_lora,
+         inputs=[custom_lora],
+         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
+     )
+     custom_lora_button.click(
+         remove_custom_lora,
+         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
+     )
      gr.on(
+         triggers=[generate_button.click, prompt.submit],
+         fn=run_lora,
+         inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
+         outputs=[result, seed, progress_bar]
      )

+ app.queue()
+ app.launch()
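One detail in app.py worth calling out: `flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)` binds the plain helper function from live_preview_helpers.py onto the already-constructed pipeline, so it can later be called as `pipe.flux_pipe_call_that_returns_an_iterable_of_images(...)` with `self` filled in. Below is a minimal standalone sketch of that descriptor-protocol trick; the class and function names are illustrative only, not from the commit.

```python
# Standalone illustration of the __get__ binding pattern used in app.py (hypothetical names).
class Pipeline:
    def __init__(self, name):
        self.name = name

def describe(self):
    # Behaves like a method once bound: `self` is the Pipeline instance.
    return f"pipeline: {self.name}"

pipe = Pipeline("flux")
# Functions are descriptors; function.__get__(instance) returns a bound method.
pipe.describe = describe.__get__(pipe)
print(pipe.describe())  # -> "pipeline: flux"
```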
live_preview_helpers.py ADDED
@@ -0,0 +1,166 @@
+ import torch
+ import numpy as np
+ from diffusers import FluxPipeline, AutoencoderTiny, FlowMatchEulerDiscreteScheduler
+ from typing import Any, Dict, List, Optional, Union
+
+ # Helper functions
+ def calculate_shift(
+     image_seq_len,
+     base_seq_len: int = 256,
+     max_seq_len: int = 4096,
+     base_shift: float = 0.5,
+     max_shift: float = 1.16,
+ ):
+     m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+     b = base_shift - m * base_seq_len
+     mu = image_seq_len * m + b
+     return mu
+
+ def retrieve_timesteps(
+     scheduler,
+     num_inference_steps: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     timesteps: Optional[List[int]] = None,
+     sigmas: Optional[List[float]] = None,
+     **kwargs,
+ ):
+     if timesteps is not None and sigmas is not None:
+         raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+     if timesteps is not None:
+         scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     elif sigmas is not None:
+         scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     else:
+         scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+     return timesteps, num_inference_steps
+
+ # FLUX pipeline function
+ @torch.inference_mode()
+ def flux_pipe_call_that_returns_an_iterable_of_images(
+     self,
+     prompt: Union[str, List[str]] = None,
+     prompt_2: Optional[Union[str, List[str]]] = None,
+     height: Optional[int] = None,
+     width: Optional[int] = None,
+     num_inference_steps: int = 28,
+     timesteps: List[int] = None,
+     guidance_scale: float = 3.5,
+     num_images_per_prompt: Optional[int] = 1,
+     generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+     latents: Optional[torch.FloatTensor] = None,
+     prompt_embeds: Optional[torch.FloatTensor] = None,
+     pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+     output_type: Optional[str] = "pil",
+     return_dict: bool = True,
+     joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+     max_sequence_length: int = 512,
+     good_vae: Optional[Any] = None,
+ ):
+     height = height or self.default_sample_size * self.vae_scale_factor
+     width = width or self.default_sample_size * self.vae_scale_factor
+
+     # 1. Check inputs
+     self.check_inputs(
+         prompt,
+         prompt_2,
+         height,
+         width,
+         prompt_embeds=prompt_embeds,
+         pooled_prompt_embeds=pooled_prompt_embeds,
+         max_sequence_length=max_sequence_length,
+     )
+
+     self._guidance_scale = guidance_scale
+     self._joint_attention_kwargs = joint_attention_kwargs
+     self._interrupt = False
+
+     # 2. Define call parameters
+     batch_size = 1 if isinstance(prompt, str) else len(prompt)
+     device = self._execution_device
+
+     # 3. Encode prompt
+     lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
+     prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
+         prompt=prompt,
+         prompt_2=prompt_2,
+         prompt_embeds=prompt_embeds,
+         pooled_prompt_embeds=pooled_prompt_embeds,
+         device=device,
+         num_images_per_prompt=num_images_per_prompt,
+         max_sequence_length=max_sequence_length,
+         lora_scale=lora_scale,
+     )
+     # 4. Prepare latent variables
+     num_channels_latents = self.transformer.config.in_channels // 4
+     latents, latent_image_ids = self.prepare_latents(
+         batch_size * num_images_per_prompt,
+         num_channels_latents,
+         height,
+         width,
+         prompt_embeds.dtype,
+         device,
+         generator,
+         latents,
+     )
+     # 5. Prepare timesteps
+     sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
+     image_seq_len = latents.shape[1]
+     mu = calculate_shift(
+         image_seq_len,
+         self.scheduler.config.base_image_seq_len,
+         self.scheduler.config.max_image_seq_len,
+         self.scheduler.config.base_shift,
+         self.scheduler.config.max_shift,
+     )
+     timesteps, num_inference_steps = retrieve_timesteps(
+         self.scheduler,
+         num_inference_steps,
+         device,
+         timesteps,
+         sigmas,
+         mu=mu,
+     )
+     self._num_timesteps = len(timesteps)
+
+     # Handle guidance
+     guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
+
+     # 6. Denoising loop
+     for i, t in enumerate(timesteps):
+         if self.interrupt:
+             continue
+
+         timestep = t.expand(latents.shape[0]).to(latents.dtype)
+
+         noise_pred = self.transformer(
+             hidden_states=latents,
+             timestep=timestep / 1000,
+             guidance=guidance,
+             pooled_projections=pooled_prompt_embeds,
+             encoder_hidden_states=prompt_embeds,
+             txt_ids=text_ids,
+             img_ids=latent_image_ids,
+             joint_attention_kwargs=self.joint_attention_kwargs,
+             return_dict=False,
+         )[0]
+         # Yield intermediate result
+         latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+         latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+         image = self.vae.decode(latents_for_image, return_dict=False)[0]
+         yield self.image_processor.postprocess(image, output_type=output_type)[0]
+
+         latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+         torch.cuda.empty_cache()
+
+     # Final image using good_vae
+     latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+     latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
+     image = good_vae.decode(latents, return_dict=False)[0]
+     self.maybe_free_model_hooks()
+     torch.cuda.empty_cache()
+     yield self.image_processor.postprocess(image, output_type=output_type)[0]
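For intuition, `calculate_shift` linearly interpolates the scheduler shift `mu` between `base_shift` and `max_shift` as the packed latent sequence grows, so larger images get a larger timestep shift. A quick check with the defaults (not part of the commit; the 4096-token figure is an assumption roughly corresponding to a 1024×1024 FLUX image):

```python
# Worked check of the default shift schedule (not part of the commit).
from live_preview_helpers import calculate_shift

print(calculate_shift(256))   # 0.5  -> base_shift at the base sequence length
print(calculate_shift(4096))  # 1.16 -> max_shift; assumed ~4096 packed latent tokens (about a 1024x1024 image)
```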
loras.json ADDED
@@ -0,0 +1,68 @@
+ [
+   {
+     "image": "https://huggingface.co/TheAwakenOne/camberganng/resolve/main/images/example_8jhtkanli.png",
+     "title": "camberganng",
+     "repo": "TheAwakenOne/camberganng",
+     "trigger_word": "CMBER"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/Marilyn-Monroe/resolve/main/sample/Marilyn%20Monroe_000600_01_20240917012312.png",
+     "title": "Marilyn-Monroe",
+     "repo": "TheAwakenOne/Marilyn-Monroe",
+     "trigger_word": "Marilyn Monroe"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/The-Maxx-Style/resolve/main/images/example_5j9t9fzzc.png",
+     "title": "The-Maxx-Style",
+     "repo": "TheAwakenOne/The-Maxx-Style",
+     "trigger_word": "the maxx style"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/The-Mask-Lora/resolve/main/images/ComfyUI_00847_.png",
+     "title": "The-Mask-Lora",
+     "repo": "TheAwakenOne/The-Mask-Lora",
+     "trigger_word": "the mask style"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/graffiti-style/resolve/main/images/example_lbxcragsi.png",
+     "title": "graffiti-style",
+     "repo": "TheAwakenOne/graffiti-style",
+     "trigger_word": "GRFTI"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/rockbrow/resolve/main/sample/rockbrow_000600_02_20240923012526.png",
+     "title": "rockbrow",
+     "repo": "TheAwakenOne/rockbrow",
+     "trigger_word": "PeopleBrow"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/ldlaughingmemeface/resolve/main/sample/ldlaughingmemeface_000450_01_20241001223612.png",
+     "title": "ldlaughingmemeface",
+     "repo": "TheAwakenOne/ldlaughingmemeface",
+     "trigger_word": "LDME"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/mtdp-balloon-character/resolve/main/sample/mtdp-balloon-character_000800_00_20241015003502.png",
+     "title": "mtdp-balloon-character",
+     "repo": "TheAwakenOne/mtdp-balloon-character",
+     "trigger_word": "FLOAT"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/watercolor/resolve/main/sample/watercolor_000900_02_20241010021119.png",
+     "title": "watercolor",
+     "repo": "TheAwakenOne/watercolor",
+     "trigger_word": "WAT3R"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/max-headroom/resolve/main/images/example_gm7jfx8a1.png",
+     "title": "max-headroom",
+     "repo": "TheAwakenOne/max-headroom",
+     "trigger_word": "M2X"
+   },
+   {
+     "image": "https://huggingface.co/TheAwakenOne/caricature/resolve/main/images/example_gzbm8wswr.png",
+     "title": "caricature",
+     "repo": "TheAwakenOne/caricature",
+     "trigger_word": "CCTUR3"
+   }
+ ]
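Each entry here uses only the required keys (`image`, `title`, `repo`, `trigger_word`), but app.py also checks for optional keys: `weights` (a specific .safetensors file), `aspect` ("portrait" or "landscape" default dimensions), and `trigger_position` ("prepend" puts the trigger word before the prompt). A hypothetical entry showing all of them; the repo, file, and trigger word below are placeholders, not real models:

```json
{
  "image": "https://huggingface.co/<user>/<repo>/resolve/main/preview.png",
  "title": "example-lora",
  "repo": "<user>/<repo>",
  "weights": "example-lora.safetensors",
  "trigger_word": "EXMPL",
  "trigger_position": "prepend",
  "aspect": "portrait"
}
```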
requirements.txt CHANGED
@@ -1,6 +1,6 @@
- accelerate
- diffusers
- invisible_watermark
  torch
  transformers
- xformers

  torch
+ diffusers
+ spaces
  transformers
+ peft
+ sentencepiece