import torch
import spaces  # Hugging Face Spaces GPU helper; unused here but kept for the Space runtime
from diffusers import (
    DiffusionPipeline,
    AutoencoderTiny,
)
from huggingface_hub import hf_hub_download


def feifeimodload():
    dtype = torch.bfloat16
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Alternative: DarkIdol-flux-v1 with the tiny TAESD VAE (disabled).
    # taef1 = AutoencoderTiny.from_pretrained("aifeifei798/taef1", torch_dtype=dtype).to(
    #     device
    # )
    # pipe = DiffusionPipeline.from_pretrained(
    #     "aifeifei798/DarkIdol-flux-v1", torch_dtype=dtype, vae=taef1
    # ).to(device)

    # Load the DarkIdol-flux-v1.1 base pipeline in bfloat16.
    pipe = DiffusionPipeline.from_pretrained(
        "aifeifei798/DarkIdol-flux-v1.1", torch_dtype=dtype
    ).to(device)

    # Alternative base model (disabled):
    # pipe = DiffusionPipeline.from_pretrained(
    #     "shuttleai/shuttle-3.1-aesthetic", torch_dtype=dtype
    # ).to(device)

    # Download and attach the feifei LoRA.
    pipe.load_lora_weights(
        hf_hub_download("aifeifei798/feifei-flux-lora-v1.1", "feifei-v1.1.safetensors"),
        adapter_name="feifei",
    )

    # Optional extra LoRAs (disabled):
    # pipe.load_lora_weights(
    #     hf_hub_download("aifeifei798/flux-nsfw-lora", "Sakimi_chan_-_FLUX.safetensors"),
    #     adapter_name="Sakimi_chan",
    # )
    # pipe.load_lora_weights(
    #     hf_hub_download("adirik/flux-cinestill", "lora.safetensors"),
    #     adapter_name="fluxcinestill",
    # )
    # pipe.load_lora_weights(
    #     hf_hub_download(
    #         "aifeifei798/feifei-flux-lora-v1", "FLUX-dev-lora-add_details.safetensors"
    #     ),
    #     adapter_name="FLUX-dev-lora-add_details",
    # )
    # pipe.load_lora_weights(
    #     hf_hub_download(
    #         "aifeifei798/feifei-flux-lora-v1", "Shadow-Projection.safetensors"
    #     ),
    #     adapter_name="Shadow-Projection",
    # )
    # pipe.set_adapters(
    #     ["feifei", "FLUX-dev-lora-add_details", "Shadow-Projection"],
    #     adapter_weights=[0.65, 0.35, 0.35],
    # )
    # pipe.fuse_lora(
    #     adapter_names=["feifei", "FLUX-dev-lora-add_details", "Shadow-Projection"],
    #     lora_scale=1.0,
    # )

    # Activate the feifei adapter at 0.65 strength, then fuse it into the model
    # weights so the adapter can be unloaded afterwards.
    pipe.set_adapters(
        ["feifei"],
        adapter_weights=[0.65],
    )
    pipe.fuse_lora(
        adapter_names=["feifei"],  # diffusers expects the plural keyword `adapter_names`
        lora_scale=1.0,
    )

    # Reduce VAE memory use when decoding large images.
    pipe.vae.enable_slicing()
    pipe.vae.enable_tiling()

    # The LoRA is already fused into the weights, so the adapter itself can be dropped.
    pipe.unload_lora_weights()
    torch.cuda.empty_cache()
    return pipe
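

# Usage sketch (not part of the original module): load the pipeline once and reuse it.
# The prompt, resolution, step count, and guidance value below are illustrative
# assumptions, not settings taken from the repo.
if __name__ == "__main__":
    pipe = feifeimodload()
    image = pipe(
        prompt="photo of feifei, soft studio lighting",
        width=896,
        height=1152,
        num_inference_steps=4,
        guidance_scale=3.5,
        generator=torch.Generator("cpu").manual_seed(0),
    ).images[0]
    image.save("feifei_test.png")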