Update app.py
app.py CHANGED
@@ -43,12 +43,10 @@ else:
 
 
 # 1. Prepare all the face models
-
+face_helper_1, face_helper_2, face_clip_model, face_main_model, eva_transform_mean, eva_transform_std = prepare_face_models(model_path, device, dtype)
 
 
 # 2. Load Pipeline.
-transformer = ConsisIDTransformer3DModel.from_pretrained_cus(model_path, subfolder=subfolder)
-transformer.to(device, dtype=dtype)
 pipe = ConsisIDPipeline.from_pretrained(model_path, transformer=transformer, torch_dtype=dtype)
 
 # If you're using with lora, add this code
@@ -58,10 +56,6 @@ if lora_path:
 
 
 # 3. Move to device.
-face_helper_1.face_det.to(device)
-face_helper_1.face_parse.to(device)
-face_clip_model.to(device, dtype=dtype)
-transformer.to(device, dtype=dtype)
 pipe.to(device)
 # Save Memory. Turn on if you don't have multiple GPUs or enough GPU memory(such as H100) and it will cost more time in inference, it may also reduce the quality
 pipe.enable_model_cpu_offload()
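For context, a minimal sketch of the code path in app.py after this change: the face models come from a single prepare_face_models call, the pipeline is loaded with ConsisIDPipeline.from_pretrained, and memory is saved via CPU offload instead of manually moving each face model to the GPU. The import paths, the checkpoint id, and the omission of the explicit transformer argument are assumptions for illustration; the Space resolves these names from its own modules elsewhere in app.py.

# Sketch only: import paths and checkpoint id are assumptions, not taken from the diff.
import torch

from diffusers import ConsisIDPipeline               # assumed import location
from util.consisid_utils import prepare_face_models  # assumed local helper module

model_path = "BestWishYsh/ConsisID-preview"           # hypothetical checkpoint id
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16

# 1. Prepare all the face models with one helper call (the line added in this commit).
(face_helper_1, face_helper_2, face_clip_model, face_main_model,
 eva_transform_mean, eva_transform_std) = prepare_face_models(model_path, device, dtype)

# 2. Load the pipeline. The transformer is no longer built here; this sketch
#    lets from_pretrained load it from the checkpoint instead.
pipe = ConsisIDPipeline.from_pretrained(model_path, torch_dtype=dtype)

# 3. Move the pipeline to the device; the Space also enables CPU offload to
#    save memory (slower inference, as the diff's comment warns).
pipe.to(device)
pipe.enable_model_cpu_offload()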