pierrefdz committed on
Commit 8e5c4a8 · verified · 1 Parent(s): d088ce7

Create super-res.py

Files changed (1)
  1. super-res.py +176 -0
super-res.py ADDED
@@ -0,0 +1,176 @@
+ import argparse, glob, os
+ import torch
+ import numpy as np
+ from tqdm import tqdm
+ from PIL import Image
+ from omegaconf import OmegaConf
+ from einops import repeat, rearrange
+ from pytorch_lightning import seed_everything
+
+ from ldm.models.diffusion.ddim import DDIMSampler
+ from ldm.models.diffusion.ddpm import LatentUpscaleDiffusion, LatentUpscaleFinetuneDiffusion
+ from ldm.util import exists, instantiate_from_config
+
+
+ # inference only: disable autograd globally
+ torch.set_grad_enabled(False)
+
+
+ def load_model_from_config(config, ckpt, verbose=False):
+     print(f"Loading model from {ckpt}")
+     pl_sd = torch.load(ckpt, map_location="cpu")
+     if "global_step" in pl_sd:
+         print(f"Global Step: {pl_sd['global_step']}")
+     sd = pl_sd["state_dict"]
+     model = instantiate_from_config(config.model)
+     m, u = model.load_state_dict(sd, strict=False)
+     if len(m) > 0 and verbose:
+         print("missing keys:")
+         print(m)
+     if len(u) > 0 and verbose:
+         print("unexpected keys:")
+         print(u)
+
+     # only move to GPU when one is available (an unconditional .cuda()
+     # would crash on CPU-only machines)
+     if torch.cuda.is_available():
+         model.cuda()
+     model.eval()
+     return model
+
+
+ def make_batch_sd(image, txt, device, num_samples=1, size=(512, 512)):
+     image = Image.open(image).convert("RGB")
+     image = image.resize(size)  # size is (width, height), as PIL expects
+     image = np.array(image)
+     # map uint8 [0, 255] to float [-1, 1]
+     image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
+     batch = {
+         "lr": rearrange(image, 'h w c -> 1 c h w'),
+         "txt": num_samples * [txt],
+     }
+     batch["lr"] = repeat(batch["lr"].to(device=device), "1 ... -> n ...", n=num_samples)
+     return batch
+
+
+ def make_noise_augmentation(model, batch, noise_level=None):
+     # the upscaler conditions on a noise-augmented low-res image; the applied
+     # noise level is returned so it can also be passed to the model as "c_adm"
+     x_low = batch[model.low_scale_key]
+     x_low = x_low.to(memory_format=torch.contiguous_format).float()
+     x_aug, noise_level = model.low_scale_model(x_low, noise_level)
+     return x_aug, noise_level
+
+
+ def paint(sampler, image, prompt, seed, scale, h, w, steps, num_samples=1, callback=None, eta=0., noise_level=None):
+     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+     model = sampler.model
+     seed_everything(seed)
+     prng = np.random.RandomState(seed)
+     # for the x4 upscaler the latent has the same spatial size as the low-res input
+     start_code = prng.randn(num_samples, model.channels, h, w)
+     start_code = torch.from_numpy(start_code).to(device=device, dtype=torch.float32)
+
+     with torch.no_grad(), torch.autocast("cuda"):
+         # PIL's resize expects (width, height), hence (w, h) and not (h, w)
+         batch = make_batch_sd(image, txt=prompt, device=device, num_samples=num_samples, size=(w, h))
+         c = model.cond_stage_model.encode(batch["txt"])
+         c_cat = list()
+         if isinstance(model, LatentUpscaleFinetuneDiffusion):
+             for ck in model.concat_keys:
+                 cc = batch[ck]
+                 if exists(model.reshuffle_patch_size):
+                     assert isinstance(model.reshuffle_patch_size, int)
+                     cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w', p1=model.reshuffle_patch_size, p2=model.reshuffle_patch_size)
+                 c_cat.append(cc)
+             c_cat = torch.cat(c_cat, dim=1)
+             # conditioning
+             cond = {"c_concat": [c_cat], "c_crossattn": [c]}
+             # unconditional conditioning
+             uc_cross = model.get_unconditional_conditioning(num_samples, "")
+             uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]}
+         elif isinstance(model, LatentUpscaleDiffusion):
+             x_augment, noise_level = make_noise_augmentation(model, batch, noise_level)
+             cond = {"c_concat": [x_augment], "c_crossattn": [c], "c_adm": noise_level}
+             # unconditional conditioning
+             uc_cross = model.get_unconditional_conditioning(num_samples, "")
+             uc_full = {"c_concat": [x_augment], "c_crossattn": [uc_cross], "c_adm": noise_level}
+         else:
+             raise NotImplementedError()
+
+         shape = [model.channels, h, w]
+         samples, intermediates = sampler.sample(
+             steps,
+             num_samples,
+             shape,
+             cond,
+             verbose=False,
+             eta=eta,
+             unconditional_guidance_scale=scale,
+             unconditional_conditioning=uc_full,
+             x_T=start_code,
+             callback=callback
+         )
+     with torch.no_grad():
+         x_samples_ddim = model.decode_first_stage(samples)
+         result = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
+         result = result.cpu().numpy().transpose(0, 2, 3, 1) * 255
+     return Image.fromarray(result.astype(np.uint8)[0])
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--indir", type=str, required=True, help="dir containing the input images")
+     parser.add_argument("--num_imgs", type=int, default=None, help="max number of images to process")
+     parser.add_argument("--steps", type=int, default=50, help="number of ddim sampling steps")
+     parser.add_argument("--config", type=str, default="/checkpoint/pfz/autoencoders/sd/stable-diffusion-x4-upscaler/x4-upscaling.yaml", help="path to config which constructs model")
+     parser.add_argument("--ckpt", type=str, default="/checkpoint/pfz/autoencoders/sd/stable-diffusion-x4-upscaler/x4-upscaler-ema.ckpt", help="path to checkpoint of model")
+     parser.add_argument("--ldm_decoder_ckpt", default=None, type=str, help="path to checkpoint of LDM decoder")
+     parser.add_argument("--num_samples", default=1, type=int, help="number of samples to generate")
+     parser.add_argument("--scale", default=10.0, type=float, help="unconditional guidance scale")
+     parser.add_argument("--eta", default=0.0, type=float, help="ddim eta")
+     parser.add_argument("--noise_level", default=20, type=int, help="noise augmentation level for the low-res input")
+     parser.add_argument("--output_dir", type=str, default="outputs", help="dir to write results to")
+     parser.add_argument("--height", type=int, default=512, help="height the low-res input is resized to")
+     parser.add_argument("--width", type=int, default=512, help="width the low-res input is resized to")
+     parser.add_argument("--seed", type=int, default=0, help="random seed")
+     opt = parser.parse_args()
+
+     print(f'>>> Building LDM model with config {opt.config} and weights from {opt.ckpt}...')
+     config = OmegaConf.load(f"{opt.config}")
+     model = load_model_from_config(config, f"{opt.ckpt}")
+
+     # the literal string "none" disables the decoder reload (convenient for parameter sweeps)
+     if opt.ldm_decoder_ckpt is not None and opt.ldm_decoder_ckpt.lower() == "none":
+         opt.ldm_decoder_ckpt = None
+     if opt.ldm_decoder_ckpt is not None:
+         print(f'reloading decoder weights from {opt.ldm_decoder_ckpt}...')
+         state_dict = torch.load(opt.ldm_decoder_ckpt, map_location="cpu")['ldm_decoder']
+         msg = model.first_stage_model.load_state_dict(state_dict, strict=False)
+         print(msg)
+
+     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+     model = model.to(device)
+     model.eval()
+     sampler = DDIMSampler(model)
+
+     os.makedirs(opt.output_dir, exist_ok=True)
+
+     images = []
+     for ext in ("*.png", "*.jpg", "*.jpeg", "*.PNG", "*.JPG", "*.JPEG"):
+         images += sorted(glob.glob(os.path.join(opt.indir, ext)))
+     print(f"Found {len(images)} inputs.")
+
+     counter = 0
+     for image in tqdm(images):
+         if opt.num_imgs is not None and counter >= opt.num_imgs:
+             break
+         noise_level = torch.Tensor(opt.num_samples * [opt.noise_level]).to(sampler.model.device).long()
+         sampler.make_schedule(opt.steps, ddim_eta=opt.eta, verbose=True)
+         result = paint(
+             sampler=sampler,
+             image=image,
+             prompt="",
+             seed=opt.seed,
+             scale=opt.scale,
+             h=opt.height, w=opt.width, steps=opt.steps,
+             num_samples=opt.num_samples,
+             callback=None,
+             noise_level=noise_level
+         )
+         # always write PNG outputs, regardless of the input extension
+         outpath = os.path.join(opt.output_dir, os.path.splitext(os.path.basename(image))[0] + '.png')
+         result.save(outpath)
+         counter += 1
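
A minimal example invocation (a sketch: the input/output directories and the local config/checkpoint paths are placeholders, since the defaults above point to internal cluster paths):

    python super-res.py --indir ./inputs --output_dir ./outputs \
        --config ./x4-upscaling.yaml --ckpt ./x4-upscaler-ema.ckpt \
        --steps 50 --noise_level 20 --seed 0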