
vilarin committed
Commit 5d1149a · verified · 1 Parent(s): ac63aaa

Update app.py

Files changed (1):
  app.py  +11 -7
app.py CHANGED

@@ -9,7 +9,7 @@ from diffusers.utils import load_image
 from PIL import Image
 import requests
 import transformers
-from transformers import AutoTokenizer, T5EncoderModel
+from transformers import AutoConfig, AutoTokenizer, T5EncoderModel
 from translatepy import Translator
 
 os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
@@ -55,9 +55,15 @@ text_encoder_3 = T5EncoderModel.from_pretrained(
     torch_dtype=torch.float16,
 )
 
+model_config = AutoConfig.from_pretrained(
+    repo,
+    subfolder="tokenizer_3",
+)
+
 tokenizer_3 = AutoTokenizer.from_pretrained(
     repo,
-    subfolder="tokenizer_3",
+    subfolder="tokenizer_3",
+    config=model_config,
     torch_dtype=torch.float16,
 )
 
@@ -100,9 +106,8 @@ def generate_image(
     if init_image:
         image = pipe2(
             prompt=text,
-            prompt_3=text,
             image=init_image,
-            negative_prompt_3=negative,
+            negative_prompt=negative,
             guidance_scale=scale,
             num_inference_steps=steps,
             strength=strength,
@@ -111,8 +116,7 @@ def generate_image(
     else:
         image = pipe(
             prompt=text,
-            prompt_3=text,
-            negative_prompt_3=negative,
+            negative_prompt=negative,
             width=width,
             height=height,
             guidance_scale=scale,
@@ -141,7 +145,7 @@ examples = [
 
 with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
     gr.HTML("<h1><center>SD3M🦄</center></h1>")
-    gr.HTML("<p><center><a href='https://huggingface.co/stabilityai/stable-diffusion-3-medium'>sd3m</a> text/image-to-image generation<br>Update: img2img, T5 long token</center></p>")
+    gr.HTML("<p><center><a href='https://huggingface.co/stabilityai/stable-diffusion-3-medium'>sd3m</a> text/image-to-image generation<br>Update: img2img, add Strength, T5 long Token</center></p>")
     with gr.Group():
         with gr.Row():
             prompt = gr.MultimodalTextbox(label='Enter Your Prompt (Multi-Languages)', interactive=True, placeholder="Enter prompt, add one image.", file_types=['image'], scale=6)
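
For context, below is a minimal sketch of the setup this commit converges on: loading the T5 text encoder and its tokenizer from their subfolders and passing the negative prompt as negative_prompt rather than negative_prompt_3. It is not the Space's full app.py; the checkpoint id, prompts, and parameter values are illustrative assumptions, and the AutoConfig step from the commit is deliberately omitted here.

# Minimal sketch, not the Space's app.py. Assumes access to the (gated) SD3
# checkpoint below and a CUDA GPU; repo id, prompts, and values are illustrative.
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import StableDiffusion3Pipeline

repo = "stabilityai/stable-diffusion-3-medium-diffusers"  # assumed checkpoint id

# The third text encoder (T5) and its tokenizer live in dedicated subfolders of
# the checkpoint. (The commit also loads an AutoConfig from the tokenizer_3
# subfolder and passes it via config=; that step is left out of this sketch.)
text_encoder_3 = T5EncoderModel.from_pretrained(
    repo, subfolder="text_encoder_3", torch_dtype=torch.float16
)
tokenizer_3 = AutoTokenizer.from_pretrained(repo, subfolder="tokenizer_3")

pipe = StableDiffusion3Pipeline.from_pretrained(
    repo,
    text_encoder_3=text_encoder_3,
    tokenizer_3=tokenizer_3,
    torch_dtype=torch.float16,
).to("cuda")

# After this commit the negative prompt is passed as `negative_prompt`, which
# diffusers reuses for all three text encoders when the per-encoder variants
# (negative_prompt_2 / negative_prompt_3) are not supplied.
image = pipe(
    prompt="a unicorn in a misty forest, cinematic lighting",
    negative_prompt="blurry, low quality, watermark",
    width=1024,
    height=1024,
    guidance_scale=5.0,
    num_inference_steps=28,
).images[0]
image.save("sd3m_sample.png")

# The img2img branch in generate_image uses StableDiffusion3Img2ImgPipeline the
# same way, adding image=init_image and strength=... to the call.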