rphrp1985 committed on
Commit
bc4e38f
·
verified ·
1 Parent(s): 52672b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -11
app.py CHANGED
@@ -3,13 +3,17 @@ import numpy as np
3
  import random
4
  import torch
5
  from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
 
 
6
  import spaces
7
 
8
  device = "cuda" if torch.cuda.is_available() else "cpu"
9
  dtype = torch.float16
10
 
11
  repo = "dataautogpt3/OpenDalleV1.1"
12
- pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16).to(device)
 
 
13
 
14
  MAX_SEED = np.iinfo(np.int32).max
15
  MAX_IMAGE_SIZE = 1344
@@ -20,17 +24,20 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
20
  if randomize_seed:
21
  seed = random.randint(0, MAX_SEED)
22
 
23
- generator = torch.Generator().manual_seed(seed)
 
 
 
24
 
25
- image = pipe(
26
- prompt = prompt,
27
- negative_prompt = negative_prompt,
28
- guidance_scale = guidance_scale,
29
- num_inference_steps = num_inference_steps,
30
- width = width,
31
- height = height,
32
- generator = generator
33
- ).images[0]
34
 
35
  return image, seed
36
 
 
3
  import random
4
  import torch
5
  from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
6
+ from diffusers import AutoPipelineForText2Image
7
+
8
  import spaces
9
 
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
  dtype = torch.float16
12
 
13
  repo = "dataautogpt3/OpenDalleV1.1"
14
+ # pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16).to(device)
15
+ pipeline = AutoPipelineForText2Image.from_pretrained('dataautogpt3/OpenDalleV1.1', torch_dtype=torch.float16).to('cuda')
16
+
17
 
18
  MAX_SEED = np.iinfo(np.int32).max
19
  MAX_IMAGE_SIZE = 1344
 
24
  if randomize_seed:
25
  seed = random.randint(0, MAX_SEED)
26
 
27
+ # generator = torch.Generator().manual_seed(seed)
28
+ image = pipeline(prompt).images[0]
29
+
30
+
31
 
32
+ # image = pipe(
33
+ # prompt = prompt,
34
+ # negative_prompt = negative_prompt,
35
+ # guidance_scale = guidance_scale,
36
+ # num_inference_steps = num_inference_steps,
37
+ # width = width,
38
+ # height = height,
39
+ # generator = generator
40
+ # ).images[0]
41
 
42
  return image, seed
43