# Hugging Face Space (runs on ZeroGPU): Florence-2 demo app
import gradio as gr
from transformers import AutoProcessor, AutoModelForCausalLM
import spaces
import requests
import copy
from PIL import Image, ImageDraw, ImageFont
import io
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
import numpy as np
import subprocess
# Install flash-attn at runtime. FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE tells pip
# to skip compiling the CUDA kernels and pull a prebuilt wheel instead, the
# usual workaround on ZeroGPU Spaces where no GPU is visible at build time.
# The flag is merged into os.environ so PATH and the rest of the environment
# are preserved for the shell command.
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)
# Note the dict key (the model id shown in the UI) and the checkpoint actually
# loaded differ: the checkpoint is a fine-tune of Florence-2-base.
models = {
    "microsoft/Florence-2-base": AutoModelForCausalLM.from_pretrained(
        "PJMixers-Images/Florence-2-base-gemini-2.0-flash-thinking-exp-1219-v0.2",
        trust_remote_code=True,
    )
    .to("cuda")
    .eval()
}

processors = {
    "microsoft/Florence-2-base": AutoProcessor.from_pretrained(
        "PJMixers-Images/Florence-2-base-gemini-2.0-flash-thinking-exp-1219-v0.2",
        trust_remote_code=True,
    )
}
# Colors sampled at random when drawing boxes, polygons, and labels.
colormap = [
    "blue", "orange", "green", "purple", "brown", "pink", "gray", "olive",
    "cyan", "red", "lime", "indigo", "violet", "aqua", "magenta", "coral",
    "gold", "tan", "skyblue",
]
# Render a Matplotlib figure to a PIL image, then release the figure.
def fig_to_pil(fig):
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    plt.close(fig)  # avoid accumulating open figures across requests
    return Image.open(buf)
# On ZeroGPU Spaces a GPU is only attached while a @spaces.GPU-decorated
# function runs, so inference has to happen inside this decorator.
@spaces.GPU
def run_example(
    task_prompt="<MORE_DETAILED_CAPTION>",
    image=None,
    text_input=None,
    model_id="microsoft/Florence-2-base",
    progress=gr.Progress(track_tqdm=True),
):
    model = models[model_id]
    processor = processors[model_id]
    # Task tokens such as <CAPTION_TO_PHRASE_GROUNDING> may be followed by
    # free-text input (a phrase to ground, a region string, etc.).
    if text_input is None:
        prompt = task_prompt
    else:
        prompt = task_prompt + text_input
    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        early_stopping=False,
        do_sample=False,
        num_beams=3,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    # Florence-2's post-processor turns the raw token string into a task-specific
    # structure (caption text, bboxes + labels, polygons, quad boxes, ...).
    parsed_answer = processor.post_process_generation(
        generated_text, task=task_prompt, image_size=(image.width, image.height)
    )
    return parsed_answer
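
# Illustrative result shapes from run_example (values are made up; the
# structure matches what post_process_generation returns per task):
#   "<CAPTION>"         -> {"<CAPTION>": "A cat sleeping on a sofa."}
#   "<OD>"              -> {"<OD>": {"bboxes": [[x1, y1, x2, y2], ...],
#                                    "labels": ["cat", ...]}}
#   "<OCR_WITH_REGION>" -> {"<OCR_WITH_REGION>": {"quad_boxes": [[8 floats], ...],
#                                                 "labels": ["text", ...]}}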
# Draw labelled bounding boxes on a Matplotlib figure.
def plot_bbox(image, data):
    fig, ax = plt.subplots()
    ax.imshow(image)
    for bbox, label in zip(data["bboxes"], data["labels"]):
        x1, y1, x2, y2 = bbox  # boxes are absolute-pixel [x1, y1, x2, y2]
        rect = patches.Rectangle(
            (x1, y1), x2 - x1, y2 - y1, linewidth=1, edgecolor="r", facecolor="none"
        )
        ax.add_patch(rect)
        ax.text(
            x1,
            y1,
            label,
            color="white",
            fontsize=8,
            bbox=dict(facecolor="red", alpha=0.5),
        )
    ax.axis("off")
    return fig
# Draw (optionally filled) segmentation polygons directly on a PIL image.
def draw_polygons(image, prediction, fill_mask=False):
    draw = ImageDraw.Draw(image)
    scale = 1  # polygons already come back in image coordinates
    for polygons, label in zip(prediction["polygons"], prediction["labels"]):
        color = random.choice(colormap)
        fill_color = random.choice(colormap) if fill_mask else None
        for _polygon in polygons:
            _polygon = np.array(_polygon).reshape(-1, 2)
            if len(_polygon) < 3:
                print("Invalid polygon:", _polygon)  # need >= 3 vertices to draw
                continue
            _polygon = (_polygon * scale).reshape(-1).tolist()
            if fill_mask:
                draw.polygon(_polygon, outline=color, fill=fill_color)
            else:
                draw.polygon(_polygon, outline=color)
            # Place the label next to the polygon's first vertex.
            draw.text((_polygon[0] + 8, _polygon[1] + 2), label, fill=color)
    return image
# <OPEN_VOCABULARY_DETECTION> returns its labels under "bboxes_labels"; rename
# that key to "labels" so the result can be fed to plot_bbox like "<OD>" output.
def convert_to_od_format(data):
    bboxes = data.get("bboxes", [])
    labels = data.get("bboxes_labels", [])
    od_results = {"bboxes": bboxes, "labels": labels}
    return od_results
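
# Example (hypothetical values):
#   convert_to_od_format({"bboxes": [[10, 20, 110, 220]], "bboxes_labels": ["cat"]})
#   -> {"bboxes": [[10, 20, 110, 220]], "labels": ["cat"]}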
# Draw OCR quad boxes (four x/y corner pairs per region) on a PIL image.
def draw_ocr_bboxes(image, prediction):
    scale = 1
    draw = ImageDraw.Draw(image)
    bboxes, labels = prediction["quad_boxes"], prediction["labels"]
    for box, label in zip(bboxes, labels):
        color = random.choice(colormap)
        new_box = (np.array(box) * scale).tolist()
        draw.polygon(new_box, width=3, outline=color)
        draw.text(
            (new_box[0] + 8, new_box[1] + 2),
            "{}".format(label),
            align="right",
            fill=color,
        )
    return image
def process_image(
    image,
    task_prompt="More Detailed Caption",
    text_input=None,
    model_id="microsoft/Florence-2-base",
):
    # The Gradio image component hands over a file path; load and normalize it.
    image = Image.open(image).convert("RGB")
    # Resize to a fixed 512 px height, preserving the aspect ratio.
    base_height = 512
    h_percent = base_height / float(image.size[1])
    w_size = int(float(image.size[0]) * h_percent)
    image = image.resize((w_size, base_height), Image.LANCZOS)
    # Every branch returns a (text_results, image_result) pair so both Gradio
    # output components always receive a value.
    if task_prompt == "Caption":
        task_prompt = "<CAPTION>"
        results = run_example(task_prompt, image, model_id=model_id)
        return results, None
elif task_prompt == "Detailed Caption": | |
task_prompt = "<DETAILED_CAPTION>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
return results | |
elif task_prompt == "More Detailed Caption": | |
task_prompt = "<MORE_DETAILED_CAPTION>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
results = results[task_prompt] | |
return results | |
elif task_prompt == "Caption + Grounding": | |
task_prompt = "<CAPTION>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
text_input = results[task_prompt] | |
task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>" | |
results = run_example(task_prompt, image, text_input, model_id) | |
results["<CAPTION>"] = text_input | |
fig = plot_bbox(image, results["<CAPTION_TO_PHRASE_GROUNDING>"]) | |
return results, fig_to_pil(fig) | |
elif task_prompt == "Detailed Caption + Grounding": | |
task_prompt = "<DETAILED_CAPTION>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
text_input = results[task_prompt] | |
task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>" | |
results = run_example(task_prompt, image, text_input, model_id) | |
results["<DETAILED_CAPTION>"] = text_input | |
fig = plot_bbox(image, results["<CAPTION_TO_PHRASE_GROUNDING>"]) | |
return results, fig_to_pil(fig) | |
elif task_prompt == "More Detailed Caption + Grounding": | |
task_prompt = "<MORE_DETAILED_CAPTION>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
text_input = results[task_prompt] | |
task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>" | |
results = run_example(task_prompt, image, text_input, model_id) | |
results["<MORE_DETAILED_CAPTION>"] = text_input | |
fig = plot_bbox(image, results["<CAPTION_TO_PHRASE_GROUNDING>"]) | |
return results, fig_to_pil(fig) | |
elif task_prompt == "Object Detection": | |
task_prompt = "<OD>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
fig = plot_bbox(image, results["<OD>"]) | |
return results, fig_to_pil(fig) | |
elif task_prompt == "Dense Region Caption": | |
task_prompt = "<DENSE_REGION_CAPTION>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
fig = plot_bbox(image, results["<DENSE_REGION_CAPTION>"]) | |
return results, fig_to_pil(fig) | |
elif task_prompt == "Region Proposal": | |
task_prompt = "<REGION_PROPOSAL>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
fig = plot_bbox(image, results["<REGION_PROPOSAL>"]) | |
return results, fig_to_pil(fig) | |
elif task_prompt == "Caption to Phrase Grounding": | |
task_prompt = "<CAPTION_TO_PHRASE_GROUNDING>" | |
results = run_example(task_prompt, image, text_input, model_id) | |
fig = plot_bbox(image, results["<CAPTION_TO_PHRASE_GROUNDING>"]) | |
return results, fig_to_pil(fig) | |
elif task_prompt == "Referring Expression Segmentation": | |
task_prompt = "<REFERRING_EXPRESSION_SEGMENTATION>" | |
results = run_example(task_prompt, image, text_input, model_id) | |
output_image = copy.deepcopy(image) | |
output_image = draw_polygons( | |
output_image, results["<REFERRING_EXPRESSION_SEGMENTATION>"], fill_mask=True | |
) | |
return results, output_image | |
elif task_prompt == "Region to Segmentation": | |
task_prompt = "<REGION_TO_SEGMENTATION>" | |
results = run_example(task_prompt, image, text_input, model_id) | |
output_image = copy.deepcopy(image) | |
output_image = draw_polygons( | |
output_image, results["<REGION_TO_SEGMENTATION>"], fill_mask=True | |
) | |
return results, output_image | |
elif task_prompt == "Open Vocabulary Detection": | |
task_prompt = "<OPEN_VOCABULARY_DETECTION>" | |
results = run_example(task_prompt, image, text_input, model_id) | |
bbox_results = convert_to_od_format(results["<OPEN_VOCABULARY_DETECTION>"]) | |
fig = plot_bbox(image, bbox_results) | |
return results, fig_to_pil(fig) | |
elif task_prompt == "Region to Category": | |
task_prompt = "<REGION_TO_CATEGORY>" | |
results = run_example(task_prompt, image, text_input, model_id) | |
return results | |
elif task_prompt == "Region to Description": | |
task_prompt = "<REGION_TO_DESCRIPTION>" | |
results = run_example(task_prompt, image, text_input, model_id) | |
return results | |
elif task_prompt == "OCR": | |
task_prompt = "<OCR>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
return results | |
elif task_prompt == "OCR with Region": | |
task_prompt = "<OCR_WITH_REGION>" | |
results = run_example(task_prompt, image, model_id=model_id) | |
output_image = copy.deepcopy(image) | |
output_image = draw_ocr_bboxes(output_image, results["<OCR_WITH_REGION>"]) | |
return results, output_image | |
else: | |
return "", None # Return empty string and None for unknown task prompts | |
# Swap the task dropdown's choices when the task-type radio changes.
# (The "Cascased" spelling is kept as-is: it must match the radio option
# label defined in the UI.)
def update_task_dropdown(choice):
    if choice == "Cascased task":
        return gr.Dropdown(choices=cascased_task_list, value="Caption + Grounding")
    else:
        return gr.Dropdown(choices=single_task_list, value="Caption")
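
# A minimal sketch of the Gradio wiring these functions assume. The Space's
# actual interface is not included in this listing and will differ in layout
# and labels, but process_image's (text, image) return pairs and
# update_task_dropdown's signature imply components along these lines:
with gr.Blocks() as demo:
    gr.Markdown("# Florence-2 Demo")
    with gr.Row():
        with gr.Column():
            input_img = gr.Image(type="filepath", label="Input Picture")
            task_type = gr.Radio(
                choices=["Single task", "Cascased task"],
                value="Single task",
                label="Task type",
            )
            task_selector = gr.Dropdown(
                choices=single_task_list,
                value="More Detailed Caption",
                label="Task Prompt",
            )
            text_input = gr.Textbox(label="Text Input (optional)")
            submit_btn = gr.Button(value="Submit")
        with gr.Column():
            output_text = gr.Textbox(label="Output Text")
            output_img = gr.Image(label="Output Image")
    task_type.change(fn=update_task_dropdown, inputs=task_type, outputs=task_selector)
    submit_btn.click(
        process_image,
        [input_img, task_selector, text_input],
        [output_text, output_img],
    )

demo.launch()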