File size: 2,246 Bytes
0a7d59a
 
 
 
 
 
 
 
 
 
 
 
 
6bd743d
 
 
0bd0fb4
8891d44
 
 
 
0bd0fb4
e053d3b
77d29ae
8891d44
d18eb29
 
 
1a80c1d
8cc4ce5
09a531c
8cc4ce5
 
252b569
dac412d
 
 
 
8cc4ce5
33c8594
 
 
 
0bd0fb4
d1e7833
 
 
 
 
 
d826c60
33c8594
 
 
 
 
 
d1e7833
 
 
 
 
8cc4ce5
d1e7833
 
 
 
0a7d59a
8cc4ce5
6b5cc4c
748f620
8cc4ce5
 
 
6b5cc4c
8cc4ce5
 
 
6aaa86f
 
0a7d59a
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import torch
import spaces
from diffusers import (
    DiffusionPipeline,
    AutoencoderTiny,
)
from huggingface_hub import hf_hub_download


def feifeimodload():
    """Build and return the DarkIdol-flux v1.1 pipeline with the feifei LoRA fused in.

    Steps:
      1. Download/load the base ``aifeifei798/DarkIdol-flux-v1.1`` pipeline in
         bfloat16 and move it to CUDA when available (CPU otherwise).
      2. Download the ``feifei-v1.1.safetensors`` LoRA from the Hub and attach
         it under the adapter name ``"feifei"``.
      3. Activate the adapter at weight 0.65 and fuse it into the base weights.
      4. Enable VAE slicing and tiling to lower peak VRAM during decode.
      5. Drop the now-redundant adapter copies and release cached CUDA memory.

    Returns:
        DiffusionPipeline: a ready-to-use pipeline on the selected device.
    """
    dtype = torch.bfloat16
    device = "cuda" if torch.cuda.is_available() else "cpu"

    pipe = DiffusionPipeline.from_pretrained(
        "aifeifei798/DarkIdol-flux-v1.1", torch_dtype=dtype
    ).to(device)

    pipe.load_lora_weights(
        hf_hub_download("aifeifei798/feifei-flux-lora-v1.1", "feifei-v1.1.safetensors"),
        adapter_name="feifei",
    )

    pipe.set_adapters(
        ["feifei"],
        adapter_weights=[0.65],
    )

    # FIX: the diffusers keyword is `adapter_names` (plural). The original
    # `adapter_name=` typo is either swallowed by **kwargs (fusing every
    # loaded adapter instead of the named one) or raises TypeError on
    # stricter diffusers versions. Same net effect here since "feifei" is
    # the only adapter, but the call is now correct and version-safe.
    pipe.fuse_lora(
        adapter_names=["feifei"],
        lora_scale=1.0,
    )

    # Decode the latents in slices/tiles to cap peak VRAM usage.
    pipe.vae.enable_slicing()
    pipe.vae.enable_tiling()

    # The LoRA is fused into the base weights now; unload the separate
    # adapter tensors and return cached GPU memory to the allocator
    # (empty_cache is a no-op when CUDA was never initialized).
    pipe.unload_lora_weights()
    torch.cuda.empty_cache()
    return pipe