ysharma committed
Commit af4ad56
1 Parent(s): 8a2ccc0

update queuing

Files changed (1)
  1. app.py +13 -13
app.py CHANGED
@@ -12,31 +12,31 @@ pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dt
 pipe.to("cuda")
 pipe.enable_attention_slicing()
 
-seed = random.randint(0, 1000000)
+#seed = random.randint(0, 1000000)
 counter = 0
-print(f"SEED IS : {seed}")
+#print(f"SEED IS : {seed}")
 
-def chat(image_in, message, history): #, progress=gr.Progress(track_tqdm=True)):
-    #progress(0, desc="Starting...")
+def chat(image_in, message, history, progress=gr.Progress(track_tqdm=True)):
+    progress(0, desc="Starting...")
     global counter
-    global seed
-    img_nm = f"./edited_image_{seed}.png"
+    #global seed
+    #img_nm = f"./edited_image_{seed}.png"
     counter += 1
-    print(f"seed is : {seed}")
-    print(f"image_in name is :{img_nm}")
+    #print(f"seed is : {seed}")
+    #print(f"image_in name is :{img_nm}")
 
     #if message == "revert": --to add revert functionality later
     if counter > 1:
         # Open the image
-        image_in = Image.open(img_nm) #("./edited_image.png")
+        image_in = Image.open("edited_image.png") #(img_nm)
     prompt = message #eg - "turn him into cyborg"
     edited_image = pipe(prompt, image=image_in, num_inference_steps=20, image_guidance_scale=1).images[0]
-    edited_image.save(img_nm) #("edited_image.png") #("./edited_image.png")
+    edited_image.save("edited_image.png") # (img_nm) #("./edited_image.png")
     history = history or []
     add_text_list = ["There you go ", "Enjoy your image! ", "Nice work! Wonder what you gonna do next! ", "Way to go! ", "Does this work for you? ", "Something like this? "]
     #Resizing the image for better display
-    response = random.choice(add_text_list) + '<img src="/file=' + img_nm[2:] + '" style="width: 200px; height: 200px;">'
-    #response = random.choice(add_text_list) + '<img src="/file=edited_image.png" style="width: 200px; height: 200px;">'
+    #response = random.choice(add_text_list) + '<img src="/file=' + img_nm[2:] + '" style="width: 200px; height: 200px;">'
+    response = random.choice(add_text_list) + '<img src="/file=edited_image.png" style="width: 200px; height: 200px;">'
     history.append((message, response))
     return history, history
 
@@ -50,5 +50,5 @@ with gr.Blocks() as demo:
     chatbot = gr.Chatbot()
     b1.click(chat,[image_in, text_in, state_in], [chatbot, state_in])
 
-#demo.queue()
+demo.queue(concurrency_count=10)
 demo.launch(debug=True, width="80%", height=1500)
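
For reference, the queuing change ties two pieces together: gr.Progress(track_tqdm=True) delivers progress updates through the Gradio queue (so the queue has to be enabled for the bar to appear), and concurrency_count=10 lets up to ten queued requests be processed by parallel workers. Below is a minimal sketch of that pattern, not the full app.py; slow_task and the widget names are placeholders, and it assumes Gradio 3.x, where queue() still accepts concurrency_count.

# Minimal sketch of the queuing + progress pattern (placeholder names, Gradio 3.x assumed)
import time
import gradio as gr

def slow_task(text, progress=gr.Progress(track_tqdm=True)):
    progress(0, desc="Starting...")      # manual update, same call as in chat()
    for _ in progress.tqdm(range(5)):    # tqdm-style loops are tracked automatically
        time.sleep(1)
    return text.upper()

with gr.Blocks() as demo:
    text_in = gr.Textbox()
    text_out = gr.Textbox()
    gr.Button("Run").click(slow_task, text_in, text_out)

demo.queue(concurrency_count=10)  # queue is required for progress updates; up to 10 parallel workers
demo.launch()

Since the diffusers pipeline call inside chat() already reports its denoising steps via tqdm, track_tqdm=True is presumably what surfaces those steps as the progress bar without any extra code in the function.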