ysharma (HF staff) committed
Commit 6fb313b · 1 Parent(s): f5501f6

update desc

Files changed (1)
  1. app.py +8 -12
app.py CHANGED
@@ -17,8 +17,8 @@ counter = 0
 
 help_text = """ Note: I will try to add the functionality to revert your changes to previous/original image in future versions of space. For now only forward editing is available.
 
-From the official Space by the authors [instruct-pix2pix](https://huggingface.co/spaces/timbrooks/instruct-pix2pix)
-and from official [Diffusers docs](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/pix2pix) -
+Some notes from the official [instruct-pix2pix](https://huggingface.co/spaces/timbrooks/instruct-pix2pix) Space by the authors
+and from the official [Diffusers docs](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/pix2pix) -
 
 If you're not getting what you want, there may be a few reasons:
 1. Is the image not changing enough? Your guidance_scale may be too low. It should be >1. Higher guidance scale encourages to generate images
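The two knobs this help text describes are passed straight through to the Diffusers pipeline as `guidance_scale` and `image_guidance_scale`. A minimal sketch of that call, assuming the `timbrooks/instruct-pix2pix` checkpoint this Space builds on; file names and values are illustrative, not taken from the repo:

```python
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

# Load the InstructPix2Pix pipeline (fp16 on GPU; drop torch_dtype to run on CPU).
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

image_in = Image.open("input.png").convert("RGB")  # hypothetical input file

# guidance_scale (>1) pulls the result toward the text instruction;
# image_guidance_scale (>=1) pulls it back toward the input image.
edited = pipe(
    "make it a watercolor painting",
    image=image_in,
    num_inference_steps=20,
    guidance_scale=7.5,        # raise if the image is not changing enough
    image_guidance_scale=1.5,  # raise if the image is changing too much
).images[0]
edited.save("edited_image.png")
```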
@@ -40,24 +40,18 @@ be to the input. This pipeline requires a value of at least `1`. It's possible y
 def chat(image_in, in_steps, in_guidance_scale, in_img_guidance_scale, prompt, history, progress=gr.Progress(track_tqdm=True)):
     progress(0, desc="Starting...")
     global counter
-    #global seed
-    #img_nm = f"./edited_image_{seed}.png"
-    #print(f"seed is:{seed}")
-    #print(f"image name is:{img_nm}")
-
     counter += 1
     #if message == "revert": --to add revert functionality later
     if counter > 1:
         # Open the image
-        image_in = Image.open("edited_image.png") #(img_nm)
-        #prompt = message #eg - "turn him into cyborg"
+        image_in = Image.open("edited_image.png")
     #edited_image = pipe(prompt, image=image_in, num_inference_steps=20, image_guidance_scale=1).images[0]
     edited_image = pipe(prompt, image=image_in, num_inference_steps=int(in_steps), guidance_scale=float(in_guidance_scale), image_guidance_scale=float(in_img_guidance_scale)).images[0]
     edited_image.save("edited_image.png") #("/tmp/edited_image.png") #(img_nm)
     history = history or []
-    #Resizing the image for better display
+    #Fixed supportive text
     add_text_list = ["There you go", "Enjoy your image!", "Nice work! Wonder what you gonna do next!", "Way to go!", "Does this work for you?", "Something like this?"]
-    #response = random.choice(add_text_list) + '<img src="/file=' + img_nm[2:] + '" style="width: 200px; height: 200px;">'
+    #Resize (or not) the image for better display
     #response = random.choice(add_text_list) + '<img src="/file=edited_image.png" style="width: 350px; height: 350px;">'
     response = random.choice(add_text_list) + '<img src="/file=edited_image.png">' # style="width: 350px; height: 350px;">'
     history.append((prompt, response))
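The main change in this hunk is state handling: the seed-based file naming is dropped, and after the first edit every subsequent prompt reopens the single saved `edited_image.png`, so edits only ever move forward. A stripped-down sketch of that loop outside Gradio, reusing the `pipe` object from the previous snippet (assumed, not part of this commit):

```python
from PIL import Image

counter = 0  # module-level state, mirroring the Space's global counter

def edit_step(image_in, prompt, steps, guidance_scale, img_guidance_scale):
    """Apply one InstructPix2Pix edit; after the first call, keep editing the last output."""
    global counter
    counter += 1
    if counter > 1:
        # Forward-only editing: ignore the original upload and reuse the last saved result.
        image_in = Image.open("edited_image.png")
    edited = pipe(
        prompt,
        image=image_in,
        num_inference_steps=int(steps),
        guidance_scale=float(guidance_scale),
        image_guidance_scale=float(img_guidance_scale),
    ).images[0]
    edited.save("edited_image.png")
    return edited
```

Because both the counter and the output file are process-global, concurrent users would share edit state; the sketch keeps that behaviour only to stay close to the diff.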
@@ -65,7 +59,9 @@ def chat(image_in, in_steps, in_guidance_scale, in_img_guidance_scale, prompt, h
 
 with gr.Blocks() as demo:
     gr.Markdown("""<h1><center> Chat Interface with InstructPix2Pix: Give Image Editing Instructions </h1></center>
-    <p>For faster inference without waiting in the queue, you may duplicate the space and upgrade to GPU in settings.<br/>
+    <p>*Apologies for inconvenience, this Space is still very much a work in progress... *
+
+    For faster inference without waiting in the queue, you may duplicate the space and upgrade to GPU in settings.<br/>
     <a href="https://huggingface.co/spaces/ysharma/InstructPix2Pix_Chatbot?duplicate=true">
     <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
     <p/>""")
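For orientation, here is a hedged sketch of how a `chat` function with this signature can be wired into the `gr.Blocks` layout. Component names, slider ranges, and the assumption that `chat` returns the updated history twice (once for the Chatbot, once for a State) are illustrative guesses, not code from this commit:

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Chat Interface with InstructPix2Pix</center></h1>")
    with gr.Row():
        image_in = gr.Image(type="pil", label="Upload an image")
        chatbot = gr.Chatbot(label="Edit history")
    in_steps = gr.Slider(1, 100, value=20, step=1, label="Inference steps")
    in_guidance_scale = gr.Slider(1.0, 15.0, value=7.5, label="Guidance scale")
    in_img_guidance_scale = gr.Slider(1.0, 5.0, value=1.5, label="Image guidance scale")
    prompt = gr.Textbox(label="Edit instruction, e.g. 'turn him into a cyborg'")
    state = gr.State()

    # Assumes chat(...) ends with `return history, history`:
    # one copy refreshes the Chatbot, the other persists in State.
    prompt.submit(
        chat,
        [image_in, in_steps, in_guidance_scale, in_img_guidance_scale, prompt, state],
        [chatbot, state],
    )

demo.queue().launch()
```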
 