anvilarth committed
Commit d968579 · verified · 1 Parent(s): 94859ac

add @spaces.GPU

Files changed (1)
  1. app.py +4 -0
app.py CHANGED
@@ -2,6 +2,7 @@ import os
 import cv2
 import time
 import torch
+import spaces
 import subprocess
 import numpy as np
 import gradio as gr
@@ -292,6 +293,7 @@ class GradioWindow():
         self.concatenated_masks = res
         return res, current_object, True
 
+    @spaces.GPU
     def detect(self, image: Image, prompt: str, is_segmmask: bool,
                box_threshold: float, text_threshold: float):
         detections = self.grounding_dino_model.predict_with_classes(
@@ -362,6 +364,7 @@ class GradioWindow():
         image = cv2.addWeighted(image, 0.7, mask, 0.3, 0)
         return image
 
+    @spaces.GPU
     def segment(self, sam_predictor: SamPredictor, image: np.ndarray, xyxy: np.ndarray) -> np.ndarray:
         sam_predictor.set_image(image)
         result_masks = []
@@ -374,6 +377,7 @@ class GradioWindow():
         result_masks.append(masks[index])
         return np.array(result_masks)
 
+    @spaces.GPU
     def augment_image(self, image: Image,
                       current_object: str, new_objects_list: str,
                       ddim_steps: int, guidance_scale: int, seed: int, return_prompt: str) -> tuple:
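
For context, `spaces.GPU` is the decorator from the Hugging Face `spaces` package used on ZeroGPU Spaces: a GPU is attached only while a decorated function is executing and released afterwards, which is why the GPU-heavy methods above are wrapped. Below is a minimal sketch of that pattern, not code from this repository; the placeholder model and the `predict` function are assumptions for illustration.

# Minimal sketch of the @spaces.GPU pattern (illustrative only; the model and
# predict() are placeholders, not part of app.py).
import spaces
import torch
import gradio as gr

model = torch.nn.Linear(4, 2)   # placeholder model, created on CPU at startup


@spaces.GPU  # a GPU is attached for the duration of this call, then released
def predict(x: float):
    model.to("cuda")                       # move weights onto the borrowed GPU
    with torch.no_grad():
        out = model(torch.full((4,), float(x), device="cuda"))
    return out.cpu().tolist()              # return plain Python data to Gradio


demo = gr.Interface(fn=predict, inputs=gr.Number(), outputs=gr.JSON())

if __name__ == "__main__":
    demo.launch()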