aifeifei798 committed on
Commit
33c8594
·
verified ·
1 Parent(s): 8891d44

Update feifeilib/feifeimodload.py

Browse files
Files changed (1) hide show
  1. feifeilib/feifeimodload.py +14 -14
feifeilib/feifeimodload.py CHANGED
@@ -37,10 +37,10 @@ def feifeimodload():
37
  # adapter_name="Sakimi_chan",
38
  #)
39
 
40
- pipe.load_lora_weights(
41
- hf_hub_download("adirik/flux-cinestill", "lora.safetensors"),
42
- adapter_name="fluxcinestill",
43
- )
44
 
45
  #pipe.load_lora_weights(
46
  # hf_hub_download(
@@ -49,12 +49,12 @@ def feifeimodload():
49
  # adapter_name="FLUX-dev-lora-add_details",
50
  #)
51
 
52
- pipe.load_lora_weights(
53
- hf_hub_download(
54
- "aifeifei798/feifei-flux-lora-v1", "Shadow-Projection.safetensors"
55
- ),
56
- adapter_name="Shadow-Projection",
57
- )
58
 
59
  #pipe.set_adapters(
60
  # ["feifei", "FLUX-dev-lora-add_details", "Shadow-Projection"],
@@ -67,16 +67,16 @@ def feifeimodload():
67
  #)
68
 
69
  pipe.set_adapters(
70
- ["feifei","fluxcinestill","Shadow-Projection"],
71
- adapter_weights=[0.65,0.65,0.65],
72
  )
73
 
74
  pipe.fuse_lora(
75
- adapter_name=["feifei","fluxcinestill","Shadow-Projection"],
76
  lora_scale=1.0,
77
  )
78
 
79
- pipe.vae.enable_tiling()
80
  pipe.unload_lora_weights()
81
  torch.cuda.empty_cache()
82
  return pipe
 
37
  # adapter_name="Sakimi_chan",
38
  #)
39
 
40
+ #pipe.load_lora_weights(
41
+ # hf_hub_download("adirik/flux-cinestill", "lora.safetensors"),
42
+ # adapter_name="fluxcinestill",
43
+ #)
44
 
45
  #pipe.load_lora_weights(
46
  # hf_hub_download(
 
49
  # adapter_name="FLUX-dev-lora-add_details",
50
  #)
51
 
52
+ #pipe.load_lora_weights(
53
+ # hf_hub_download(
54
+ # "aifeifei798/feifei-flux-lora-v1", "Shadow-Projection.safetensors"
55
+ # ),
56
+ # adapter_name="Shadow-Projection",
57
+ #)
58
 
59
  #pipe.set_adapters(
60
  # ["feifei", "FLUX-dev-lora-add_details", "Shadow-Projection"],
 
67
  #)
68
 
69
  pipe.set_adapters(
70
+ ["feifei"],
71
+ adapter_weights=[0.70],
72
  )
73
 
74
  pipe.fuse_lora(
75
+ adapter_name=["feifei"],
76
  lora_scale=1.0,
77
  )
78
 
79
+ #pipe.vae.enable_tiling()
80
  pipe.unload_lora_weights()
81
  torch.cuda.empty_cache()
82
  return pipe