Upload custom_hires_fix.py
custom_hires_fix.py
ADDED
@@ -0,0 +1,416 @@
import math
from os.path import exists

from tqdm import trange
from modules import scripts, shared, processing, sd_samplers, script_callbacks, rng
from modules import devices, prompt_parser, sd_models, extra_networks
import modules.images as images
import k_diffusion

import gradio as gr
import numpy as np
from PIL import Image, ImageEnhance
import torch
import importlib


def safe_import(import_name, pkg_name=None):
    try:
        __import__(import_name)
    except Exception:
        pkg_name = pkg_name or import_name
        import pip
        if hasattr(pip, 'main'):
            pip.main(['install', pkg_name])
        else:
            pip._internal.main(['install', pkg_name])
        __import__(import_name)


safe_import('kornia')
safe_import('omegaconf')
safe_import('pathlib')
from omegaconf import DictConfig, OmegaConf
from pathlib import Path
import kornia
from skimage import exposure

config_path = Path(__file__).parent.resolve() / '../config.yaml'

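# Settings are persisted between runs in config.yaml one directory above this script
# (the extension root, assuming the usual scripts/ layout). The keys mirror the UI
# controls; an illustrative, non-exhaustive sketch of the saved file using the UI
# defaults from ui() below:
#
#   width: 1024
#   height: 0
#   steps: 15
#   first_upscaler: R-ESRGAN 4x+
#   second_upscaler: R-ESRGAN 4x+
#   first_latent: 0.3
#   second_latent: 0.1
#   filter: Noise sync (sharp)
#   strength: 2.0
#   denoise_offset: 0.05
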

class CustomHiresFix(scripts.Script):
    def __init__(self):
        super().__init__()
        if not exists(config_path):
            open(config_path, 'w').close()
        self.config: DictConfig = OmegaConf.load(config_path)
        self.callback_set = False
        self.orig_clip_skip = None
        self.orig_cfg = None
        self.p: processing.StableDiffusionProcessing = None
        self.pp = None
        self.sampler = None
        self.cond = None
        self.uncond = None
        self.step = None
        self.tv = None
        self.width = None
        self.height = None
        self.use_cn = False
        self.external_code = None
        self.cn_image = None
        self.cn_units = []

    def title(self):
        return "Custom Hires Fix"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        with gr.Accordion(label='Custom hires fix', open=False):
            enable = gr.Checkbox(label='Enable extension', value=self.config.get('enable', False))
            with gr.Row():
                width = gr.Slider(minimum=512, maximum=2048, step=8,
                                  label="Upscale width to",
                                  value=self.config.get('width', 1024), allow_flagging='never', show_progress=False)
                height = gr.Slider(minimum=512, maximum=2048, step=8,
                                   label="Upscale height to",
                                   value=self.config.get('height', 0), allow_flagging='never', show_progress=False)
                steps = gr.Slider(minimum=8, maximum=25, step=1,
                                  label="Steps",
                                  value=self.config.get('steps', 15))
            with gr.Row():
                prompt = gr.Textbox(label='Prompt for upscale (added to generation prompt)',
                                    placeholder='Leave empty for using generation prompt',
                                    value=self.config.get('prompt', ''))
            with gr.Row():
                negative_prompt = gr.Textbox(label='Negative prompt for upscale (replaces generation prompt)',
                                             placeholder='Leave empty for using generation negative prompt',
                                             value=self.config.get('negative_prompt', ''))
            with gr.Row():
                first_upscaler = gr.Dropdown([*[x.name for x in shared.sd_upscalers
                                                if x.name not in ['None', 'Nearest', 'LDSR']]],
                                             label='First upscaler',
                                             value=self.config.get('first_upscaler', 'R-ESRGAN 4x+'))
                second_upscaler = gr.Dropdown([*[x.name for x in shared.sd_upscalers
                                                 if x.name not in ['None', 'Nearest', 'LDSR']]],
                                              label='Second upscaler',
                                              value=self.config.get('second_upscaler', 'R-ESRGAN 4x+'))
            with gr.Row():
                first_latent = gr.Slider(minimum=0.0, maximum=1.0, step=0.01,
                                         label="Latent upscale ratio (1)",
                                         value=self.config.get('first_latent', 0.3))
                second_latent = gr.Slider(minimum=0.0, maximum=1.0, step=0.01,
                                          label="Latent upscale ratio (2)",
                                          value=self.config.get('second_latent', 0.1))
            with gr.Row():
                filter = gr.Dropdown(['Noise sync (sharp)', 'Morphological (smooth)', 'Combined (balanced)'],
                                     label='Filter mode',
                                     value=self.config.get('filter', 'Noise sync (sharp)'))
                strength = gr.Slider(minimum=1.0, maximum=3.5, step=0.1, label="Generation strength",
                                     value=self.config.get('strength', 2.0))
                denoise_offset = gr.Slider(minimum=-0.05, maximum=0.15, step=0.01,
                                           label="Denoise offset",
                                           value=self.config.get('denoise_offset', 0.05))
            with gr.Accordion(label='Extra', open=False):
                with gr.Row():
                    filter_offset = gr.Slider(minimum=-1.0, maximum=1.0, step=0.1,
                                              label="Filter offset (higher - smoother)",
                                              value=self.config.get('filter_offset', 0.0))
                    clip_skip = gr.Slider(minimum=0, maximum=5, step=1,
                                          label="Clip skip for upscale (0 - not change)",
                                          value=self.config.get('clip_skip', 0))
                with gr.Row():
                    start_control_at = gr.Slider(minimum=0.0, maximum=0.7, step=0.01,
                                                 label="CN start for enabled units",
                                                 value=self.config.get('start_control_at', 0.0))
                    cn_ref = gr.Checkbox(label='Use last image for reference', value=self.config.get('cn_ref', False))
                with gr.Row():
                    sampler = gr.Dropdown(['Restart', 'DPM++ 2M', 'DPM++ 2M Karras', 'DPM++ 2M SDE', 'DPM++ 2M SDE Karras',
                                           'DPM++ 2M SDE Heun', 'DPM++ 2M SDE Heun Karras', 'DPM++ 3M SDE',
                                           'DPM++ 3M SDE Karras', 'Restart + DPM++ 3M SDE'],
                                          label='Sampler',
                                          value=self.config.get('sampler', 'DPM++ 2M Karras'))

        if is_img2img:
            width.change(fn=lambda x: gr.update(value=0), inputs=width, outputs=height)
            height.change(fn=lambda x: gr.update(value=0), inputs=height, outputs=width)
        else:
            width.change(fn=lambda x: gr.update(value=0), inputs=width, outputs=height)
            height.change(fn=lambda x: gr.update(value=0), inputs=height, outputs=width)

        ui = [enable, width, height, steps, first_upscaler, second_upscaler, first_latent, second_latent, prompt,
              negative_prompt, strength, filter, filter_offset, denoise_offset, clip_skip, sampler, cn_ref, start_control_at]
        for elem in ui:
            setattr(elem, "do_not_save_to_config", True)
        return ui

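    # Note: the values returned from ui() are passed positionally into postprocess_image()
    # below, so the order of the two argument lists must stay in sync. do_not_save_to_config
    # keeps these controls out of the webui's ui-config.json, since the extension persists
    # its own state in config.yaml instead.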
    def process(self, p, *args, **kwargs):
        self.p = p
        self.cn_units = []
        try:
            self.external_code = importlib.import_module('extensions.sd-webui-controlnet.scripts.external_code', 'external_code')
            cn_units = self.external_code.get_all_units_in_processing(p)
            for unit in cn_units:
                self.cn_units += [unit]
            self.use_cn = len(self.cn_units) > 0
        except ImportError:
            self.use_cn = False

    def postprocess_image(self, p, pp: scripts.PostprocessImageArgs,
                          enable, width, height, steps, first_upscaler, second_upscaler, first_latent, second_latent, prompt,
                          negative_prompt, strength, filter, filter_offset, denoise_offset, clip_skip, sampler, cn_ref, start_control_at
                          ):
        if not enable:
            return
        self.step = 0
        self.pp = pp
        self.config.width = width
        self.config.height = height
        self.config.prompt = prompt.strip()
        self.config.negative_prompt = negative_prompt.strip()
        self.config.steps = steps
        self.config.first_upscaler = first_upscaler
        self.config.second_upscaler = second_upscaler
        self.config.first_latent = first_latent
        self.config.second_latent = second_latent
        self.config.strength = strength
        self.config.filter = filter
        self.config.filter_offset = filter_offset
        self.config.denoise_offset = denoise_offset
        self.config.clip_skip = clip_skip
        self.config.sampler = sampler
        self.config.cn_ref = cn_ref
        self.config.start_control_at = start_control_at
        self.orig_clip_skip = shared.opts.CLIP_stop_at_last_layers
        self.orig_cfg = p.cfg_scale

        if clip_skip > 0:
            shared.opts.CLIP_stop_at_last_layers = clip_skip
        if 'Restart' in self.config.sampler:
            self.sampler = sd_samplers.create_sampler('Restart', p.sd_model)
        else:
            self.sampler = sd_samplers.create_sampler(sampler, p.sd_model)

        def denoise_callback(params: script_callbacks.CFGDenoiserParams):
            if params.sampling_step > 0:
                p.cfg_scale = self.orig_cfg
            if self.step == 1 and self.config.strength != 1.0:
                params.sigma[-1] = params.sigma[0] * (1 - (1 - self.config.strength) / 100)
            elif self.step == 2 and self.config.filter == 'Noise sync (sharp)':
                params.sigma[-1] = params.sigma[0] * (1 - (self.tv - 1 + self.config.filter_offset - (self.config.denoise_offset * 5)) / 50)
            elif self.step == 2 and self.config.filter == 'Combined (balanced)':
                params.sigma[-1] = params.sigma[0] * (1 - (self.tv - 1 + self.config.filter_offset - (self.config.denoise_offset * 5)) / 100)

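        # The callback above adjusts the sampler's final sigma: during the first pass
        # (self.step == 1) it is raised according to "Generation strength", and during the
        # second pass (self.step == 2) the 'Noise sync' / 'Combined' filter modes raise it
        # based on the total variation measured after the first pass (self.tv) and the
        # filter/denoise offsets. Stopping at a higher sigma leaves some residual noise in
        # the result, which is presumably what produces the sharper look of those modes.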
        if self.callback_set is False:
            script_callbacks.on_cfg_denoiser(denoise_callback)
            self.callback_set = True

        _, loras_act = extra_networks.parse_prompt(prompt)
        extra_networks.activate(p, loras_act)
        _, loras_deact = extra_networks.parse_prompt(negative_prompt)
        extra_networks.deactivate(p, loras_deact)

        self.cn_image = pp.image

        with devices.autocast():
            shared.state.nextjob()
            x = self.gen(pp.image)
            shared.state.nextjob()
            x = self.filter(x)
        shared.opts.CLIP_stop_at_last_layers = self.orig_clip_skip
        sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())
        pp.image = x
        extra_networks.deactivate(p, loras_act)
        OmegaConf.save(self.config, config_path)

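    # Overall flow of postprocess_image(): gen() performs the first upscale (pixel- and/or
    # latent-space, blended by "Latent upscale ratio (1)") and re-samples the image, then
    # filter() performs the second upscale and a lighter re-sampling pass whose noise is
    # shaped by the selected filter mode. Both passes reuse the helpers below for ControlNet
    # and for building the upscale prompt conditioning.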
    def enable_cn(self, image: np.ndarray):
        for unit in self.cn_units:
            if unit.model != 'None':
                unit.guidance_start = self.config.start_control_at if unit.enabled else unit.guidance_start
                unit.processor_res = min(image.shape[0], image.shape[1])  # fit the preprocessor to the smaller image side
                unit.enabled = True
                if unit.image is None:
                    unit.image = image
        self.p.width = image.shape[1]
        self.p.height = image.shape[0]
        self.external_code.update_cn_script_in_processing(self.p, self.cn_units)
        for script in self.p.scripts.alwayson_scripts:
            if script.title().lower() == 'controlnet':
                script.controlnet_hack(self.p)

    def process_prompt(self):
        prompt = self.p.prompt.strip().split('AND', 1)[0]
        if self.config.prompt != '':
            prompt = f'{prompt} {self.config.prompt}'

        if self.config.negative_prompt != '':
            negative_prompt = self.config.negative_prompt
        else:
            negative_prompt = self.p.negative_prompt.strip()

        with devices.autocast():
            if self.width is not None and self.height is not None and hasattr(prompt_parser, 'SdConditioning'):
                c = prompt_parser.SdConditioning([prompt], False, self.width, self.height)
                uc = prompt_parser.SdConditioning([negative_prompt], False, self.width, self.height)
            else:
                c = [prompt]
                uc = [negative_prompt]
            self.cond = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, c, self.config.steps)
            self.uncond = prompt_parser.get_learned_conditioning(shared.sd_model, uc, self.config.steps)

    def gen(self, x):
        self.step = 1
        ratio = x.width / x.height
        self.width = self.config.width if self.config.width > 0 else int(self.config.height * ratio)
        self.height = self.config.height if self.config.height > 0 else int(self.config.width / ratio)
        self.width = int((self.width - x.width) // 2 + x.width)
        self.height = int((self.height - x.height) // 2 + x.height)
        sd_models.apply_token_merging(self.p.sd_model, self.p.get_token_merging_ratio(for_hr=True) / 2)

        if self.use_cn:
            self.enable_cn(np.array(self.cn_image.resize((self.width, self.height))))

        with devices.autocast(), torch.inference_mode():
            self.process_prompt()

            x_big = None
            if self.config.first_latent > 0:
                image = np.array(x).astype(np.float32) / 255.0
                image = np.moveaxis(image, 2, 0)
                decoded_sample = torch.from_numpy(image)
                decoded_sample = decoded_sample.to(shared.device).to(devices.dtype_vae)
                decoded_sample = 2.0 * decoded_sample - 1.0
                encoded_sample = shared.sd_model.encode_first_stage(decoded_sample.unsqueeze(0).to(devices.dtype_vae))
                sample = shared.sd_model.get_first_stage_encoding(encoded_sample)
                x_big = torch.nn.functional.interpolate(sample, (self.height // 8, self.width // 8), mode='nearest')

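            # 'Latent upscale ratio (1)' selects between two upscale paths: x_big above is a
            # nearest-neighbour upscale done directly in latent space, while the branch below
            # upscales in pixel space with the chosen upscaler and re-encodes through the VAE.
            # A ratio strictly between 0 and 1 blends the two latents further down.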
            if self.config.first_latent < 1:
                x = images.resize_image(0, x, self.width, self.height,
                                        upscaler_name=self.config.first_upscaler)
                image = np.array(x).astype(np.float32) / 255.0
                image = np.moveaxis(image, 2, 0)
                decoded_sample = torch.from_numpy(image)
                decoded_sample = decoded_sample.to(shared.device).to(devices.dtype_vae)
                decoded_sample = 2.0 * decoded_sample - 1.0
                encoded_sample = shared.sd_model.encode_first_stage(decoded_sample.unsqueeze(0).to(devices.dtype_vae))
                sample = shared.sd_model.get_first_stage_encoding(encoded_sample)
            else:
                sample = x_big
            if x_big is not None and self.config.first_latent != 1:
                sample = (sample * (1 - self.config.first_latent)) + (x_big * self.config.first_latent)
            image_conditioning = self.p.img2img_image_conditioning(decoded_sample, sample)

            noise = torch.zeros_like(sample)
            noise = kornia.augmentation.RandomGaussianNoise(mean=0.0, std=1.0, p=1.0)(noise)
            steps = int(max(((self.p.steps - self.config.steps) / 2) + self.config.steps, self.config.steps))
            self.p.denoising_strength = 0.45 + self.config.denoise_offset * 0.2
            self.p.cfg_scale = self.orig_cfg + 3

            def denoiser_override(n):
                sigmas = k_diffusion.sampling.get_sigmas_polyexponential(n, 0.01, 15, 0.5, devices.device)
                return sigmas

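            # The default sigma schedule is replaced with a polyexponential ramp
            # (k_diffusion.sampling.get_sigmas_polyexponential with sigma_min=0.01,
            # sigma_max=15, rho=0.5). Together with the raised CFG above, this is
            # presumably meant to let the first pass regenerate detail rather than
            # merely denoise the upscaled latent.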
            self.p.rng = rng.ImageRNG(sample.shape[1:], self.p.seeds, subseeds=self.p.subseeds,
                                      subseed_strength=self.p.subseed_strength,
                                      seed_resize_from_h=self.p.seed_resize_from_h, seed_resize_from_w=self.p.seed_resize_from_w)

            self.p.sampler_noise_scheduler_override = denoiser_override
            self.p.batch_size = 1
            sample = self.sampler.sample_img2img(self.p, sample.to(devices.dtype), noise, self.cond, self.uncond,
                                                 steps=steps, image_conditioning=image_conditioning).to(devices.dtype_vae)
            b, c, w, h = sample.size()
            self.tv = kornia.losses.TotalVariation()(sample).mean() / (w * h)
            devices.torch_gc()
            decoded_sample = processing.decode_first_stage(shared.sd_model, sample)
            if math.isnan(decoded_sample.min()):
                devices.torch_gc()
                sample = torch.clamp(sample, -3, 3)
                decoded_sample = processing.decode_first_stage(shared.sd_model, sample)
            decoded_sample = torch.clamp((decoded_sample + 1.0) / 2.0, min=0.0, max=1.0).squeeze()
            x_sample = 255. * np.moveaxis(decoded_sample.cpu().numpy(), 0, 2)
            x_sample = x_sample.astype(np.uint8)
            image = Image.fromarray(x_sample)
            return image

    def filter(self, x):
        if self.config.sampler == 'Restart':
            self.sampler = sd_samplers.create_sampler('Restart', shared.sd_model)
        elif self.config.sampler == 'Restart + DPM++ 3M SDE':
            self.sampler = sd_samplers.create_sampler('DPM++ 3M SDE', shared.sd_model)
        self.step = 2
        ratio = x.width / x.height
        self.width = self.config.width if self.config.width > 0 else int(self.config.height * ratio)
        self.height = self.config.height if self.config.height > 0 else int(self.config.width / ratio)
        sd_models.apply_token_merging(self.p.sd_model, self.p.get_token_merging_ratio(for_hr=True))

        if self.use_cn:
            self.cn_image = x if self.config.cn_ref else self.cn_image
            self.enable_cn(np.array(self.cn_image.resize((self.width, self.height))))

        with devices.autocast(), torch.inference_mode():
            self.process_prompt()

            x_big = None
            if self.config.second_latent > 0:
                image = np.array(x).astype(np.float32) / 255.0
                image = np.moveaxis(image, 2, 0)
                decoded_sample = torch.from_numpy(image)
                decoded_sample = decoded_sample.to(shared.device).to(devices.dtype_vae)
                decoded_sample = 2.0 * decoded_sample - 1.0
                encoded_sample = shared.sd_model.encode_first_stage(decoded_sample.unsqueeze(0).to(devices.dtype_vae))
                sample = shared.sd_model.get_first_stage_encoding(encoded_sample)
                x_big = torch.nn.functional.interpolate(sample, (self.height // 8, self.width // 8), mode='nearest')

            if self.config.second_latent < 1:
                x = images.resize_image(0, x, self.width, self.height, upscaler_name=self.config.second_upscaler)
                image = np.array(x).astype(np.float32) / 255.0
                image = np.moveaxis(image, 2, 0)
                decoded_sample = torch.from_numpy(image)
                decoded_sample = decoded_sample.to(shared.device).to(devices.dtype_vae)
                decoded_sample = 2.0 * decoded_sample - 1.0
                encoded_sample = shared.sd_model.encode_first_stage(decoded_sample.unsqueeze(0).to(devices.dtype_vae))
                sample = shared.sd_model.get_first_stage_encoding(encoded_sample)
            else:
                sample = x_big
            if x_big is not None and self.config.second_latent != 1:
                sample = (sample * (1 - self.config.second_latent)) + (x_big * self.config.second_latent)
            image_conditioning = self.p.img2img_image_conditioning(decoded_sample, sample)

            noise = torch.zeros_like(sample)
            noise = kornia.augmentation.RandomGaussianNoise(mean=0.0, std=1.0, p=1.0)(noise)
            self.p.denoising_strength = 0.45 + self.config.denoise_offset
            self.p.cfg_scale = self.orig_cfg + 3

            if self.config.filter == 'Morphological (smooth)':
                noise_mask = kornia.morphology.gradient(sample, torch.ones(5, 5).to(devices.device))
                noise_mask = kornia.filters.median_blur(noise_mask, (3, 3))
                noise_mask = (0.1 + noise_mask / noise_mask.max()) * (max(
                    (1.75 - (self.tv - 1) * 4), 1.75) - self.config.filter_offset)
                noise = noise * noise_mask
            elif self.config.filter == 'Combined (balanced)':
                noise_mask = kornia.morphology.gradient(sample, torch.ones(5, 5).to(devices.device))
                noise_mask = kornia.filters.median_blur(noise_mask, (3, 3))
                noise_mask = (0.1 + noise_mask / noise_mask.max()) * (max(
                    (1.75 - (self.tv - 1) / 2), 1.75) - self.config.filter_offset)
                noise = noise * noise_mask

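            # For the 'Morphological' and 'Combined' modes the starting noise is weighted by a
            # morphological-gradient mask of the latent (edges receive more noise than flat
            # areas), scaled by the total variation from the first pass and the filter offset.
            # The second pass below also uses a gentler sigma ramp (sigma_max=7 instead of 15).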
            def denoiser_override(n):
                return k_diffusion.sampling.get_sigmas_polyexponential(n, 0.01, 7, 0.5, devices.device)

            self.p.sampler_noise_scheduler_override = denoiser_override
            self.p.batch_size = 1
            samples = self.sampler.sample_img2img(self.p, sample.to(devices.dtype), noise, self.cond, self.uncond,
                                                  steps=self.config.steps, image_conditioning=image_conditioning
                                                  ).to(devices.dtype_vae)
            devices.torch_gc()
            self.p.iteration += 1
            decoded_sample = processing.decode_first_stage(shared.sd_model, samples)
            if math.isnan(decoded_sample.min()):
                devices.torch_gc()
                samples = torch.clamp(samples, -3, 3)
                decoded_sample = processing.decode_first_stage(shared.sd_model, samples)
            decoded_sample = torch.clamp((decoded_sample + 1.0) / 2.0, min=0.0, max=1.0).squeeze()
            x_sample = 255. * np.moveaxis(decoded_sample.cpu().numpy(), 0, 2)
            x_sample = x_sample.astype(np.uint8)
            image = Image.fromarray(x_sample)
            return image