import gradio as gr
import cv2
from PIL import Image
import numpy as np
import os
import torch
import torch.nn.functional as F
from torchvision import transforms
from torchvision.transforms import Compose
import tempfile
from functools import partial
import spaces
from zipfile import ZipFile
from vincenty import vincenty
import json
from collections import Counter
import mediapy
#from depth_anything.dpt import DepthAnything
#from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
from huggingface_hub import hf_hub_download
from depth_anything_v2.dpt import DepthAnythingV2
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
model_configs = {
'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
}
encoder2name = {
'vits': 'Small',
'vitb': 'Base',
'vitl': 'Large',
    'vitg': 'Giant', # checkpoint for the giant model has not been released yet (pending company review)
}
blurin = "1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1"
edge = []
gradient = None
params = { "fnum":0 }
pcolors = []
frame_selected = 0
frames = []
backups = []
depths = []
masks = []
locations = []
def zip_files(files_in, subs):
    # bundle the processed frames and the subtitle/VTT files into a single archive for download
    with ZipFile("depth_result.zip", "w") as zipObj:
        for file in files_in:
            zipObj.write(file, os.path.basename(file))
        for file in subs:
            zipObj.write(file, os.path.basename(file))
    return "depth_result.zip"
def create_video(frames, fps, vtype):
    # re-read the saved frames (BGR on disk) and encode them into an MP4 with mediapy
    print("building video result")
    imgs = []
    for img in frames:
        imgs.append(cv2.cvtColor(cv2.imread(img).astype(np.uint8), cv2.COLOR_BGR2RGB))
    mediapy.write_video(vtype + "_result.mp4", imgs, fps=fps)
    return vtype + "_result.mp4"
@torch.no_grad()
#@spaces.GPU
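# single-frame inference with Depth Anything V2; infer_image returns an HxW depth map as a numpy array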
def predict_depth(image, model):
return model.infer_image(image)
#def predict_depth(model, image):
# return model(image)["depth"]
def make_video(video_path, outdir='./vis_video_depth', encoder='vits', blur_data=blurin, o=1, b=32):
if encoder not in ["vitl","vitb","vits","vitg"]:
encoder = "vits"
model_name = encoder2name[encoder]
model = DepthAnythingV2(**model_configs[encoder])
filepath = hf_hub_download(repo_id=f"depth-anything/Depth-Anything-V2-{model_name}", filename=f"depth_anything_v2_{encoder}.pth", repo_type="model")
state_dict = torch.load(filepath, map_location="cpu")
model.load_state_dict(state_dict)
model = model.to(DEVICE).eval()
#mapper = {"vits":"small","vitb":"base","vitl":"large"}
# DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# model = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(DEVICE).eval()
# Define path for temporary processed frames
#temp_frame_dir = tempfile.mkdtemp()
#margin_width = 50
#to_tensor_transform = transforms.ToTensor()
#DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_{}14'.format(encoder)).to(DEVICE).eval()
#depth_anything = pipeline(task = "depth-estimation", model=f"nielsr/depth-anything-{mapper[encoder]}")
# total_params = sum(param.numel() for param in depth_anything.parameters())
# print('Total parameters: {:.2f}M'.format(total_params / 1e6))
#transform = Compose([
# Resize(
# width=518,
# height=518,
# resize_target=False,
# keep_aspect_ratio=True,
# ensure_multiple_of=14,
# resize_method='lower_bound',
# image_interpolation_method=cv2.INTER_CUBIC,
# ),
# NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# PrepareForNet(),
#])
if os.path.isfile(video_path):
if video_path.endswith('txt'):
with open(video_path, 'r') as f:
                filenames = f.read().splitlines()
else:
filenames = [video_path]
else:
filenames = os.listdir(video_path)
filenames = [os.path.join(video_path, filename) for filename in filenames if not filename.startswith('.')]
filenames.sort()
# os.makedirs(outdir, exist_ok=True)
global masks
for k, filename in enumerate(filenames):
file_size = os.path.getsize(filename)/1024/1024
if file_size > 128.0:
            print(f'File size of {filename} is larger than 128 MB, sorry!')
return filename
print('Progress {:}/{:},'.format(k+1, len(filenames)), 'Processing', filename)
raw_video = cv2.VideoCapture(filename)
frame_width, frame_height = int(raw_video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(raw_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_rate = int(raw_video.get(cv2.CAP_PROP_FPS))
if frame_rate < 1:
frame_rate = 1
cframes = int(raw_video.get(cv2.CAP_PROP_FRAME_COUNT))
print(f'frames: {cframes}, fps: {frame_rate}')
# output_width = frame_width * 2 + margin_width
#filename = os.path.basename(filename)
# output_path = os.path.join(outdir, filename[:filename.rfind('.')] + '_video_depth.mp4')
#with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmpfile:
# output_path = tmpfile.name
#out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"avc1"), frame_rate, (output_width, frame_height))
#fourcc = cv2.VideoWriter_fourcc(*'mp4v')
#out = cv2.VideoWriter(output_path, fourcc, frame_rate, (output_width, frame_height))
count = 0
n = 0
depth_frames = []
orig_frames = []
backup_frames = []
comb_frames = []
thumbnail_old = []
while raw_video.isOpened():
ret, raw_frame = raw_video.read()
if not ret:
break
else:
print(count)
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2RGB) / 255.0
frame_pil = Image.fromarray((frame * 255).astype(np.uint8))
#frame = transform({'image': frame})['image']
#frame = torch.from_numpy(frame).unsqueeze(0).to(DEVICE)
#raw_frame_bg = cv2.medianBlur(raw_frame, 255)
#
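                # predict depth for the current frame and normalize it to an 8-bit grayscale map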
depth = predict_depth(raw_frame[:, :, ::-1], model)
depth_gray = ((depth - depth.min()) / (depth.max() - depth.min()) * 255.0).astype(np.uint8)
#
#depth = to_tensor_transform(predict_depth(depth_anything, frame_pil))
#depth = F.interpolate(depth[None], (frame_height, frame_width), mode='bilinear', align_corners=False)[0, 0]
#depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
#depth = depth.cpu().numpy().astype(np.uint8)
#depth_color = cv2.applyColorMap(depth, cv2.COLORMAP_BONE)
#depth_gray = cv2.cvtColor(depth_color, cv2.COLOR_RGBA2GRAY)
# Remove white border around map:
# define lower and upper limits of white
#white_lo = np.array([250,250,250])
#white_hi = np.array([255,255,255])
# mask image to only select white
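                # clamp near-white depth values: zero them in the upper 6/7 of the map and cap the bottom strip at 180 (removes the white border noted above)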
mask = cv2.inRange(depth_gray[0:int(depth_gray.shape[0]/7*6)-1, 0:depth_gray.shape[1]], 250, 255)
# change image to black where we found white
depth_gray[0:int(depth_gray.shape[0]/7*6)-1, 0:depth_gray.shape[1]][mask>0] = 0
mask = cv2.inRange(depth_gray[int(depth_gray.shape[0]/7*6):depth_gray.shape[0], 0:depth_gray.shape[1]], 180, 255)
depth_gray[int(depth_gray.shape[0]/7*6):depth_gray.shape[0], 0:depth_gray.shape[1]][mask>0] = 180
depth_color = cv2.cvtColor(depth_gray, cv2.COLOR_GRAY2BGRA)
# split_region = np.ones((frame_height, margin_width, 3), dtype=np.uint8) * 255
# combined_frame = cv2.hconcat([raw_frame, split_region, depth_color])
# out.write(combined_frame)
# frame_path = os.path.join(temp_frame_dir, f"frame_{count:05d}.png")
# cv2.imwrite(frame_path, combined_frame)
#raw_frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2BGRA)
#raw_frame[:, :, 3] = 255
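                # for short clips (fewer than 16 frames), skip near-duplicate frames: compare 16x32 grayscale thumbnails and drop the frame when more than ~80% of pixels are unchanged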
if cframes < 16:
thumbnail = cv2.cvtColor(cv2.resize(raw_frame, (16,32)), cv2.COLOR_BGR2GRAY).flatten()
if len(thumbnail_old) > 0:
diff = thumbnail - thumbnail_old
#print(diff)
c = Counter(diff)
value, cc = c.most_common()[0]
if value == 0 and cc > int(16*32*0.8):
count += 1
continue
thumbnail_old = thumbnail
blur_frame = blur_image(cv2.cvtColor(raw_frame, cv2.COLOR_BGR2BGRA), depth_color, blur_data)
comb_frame = np.concatenate((blur_frame, depth_color), axis=0)
cv2.imwrite(f"f{count}_comb.png", comb_frame)
comb_frames.append(f"f{count}_comb.png")
cv2.imwrite(f"f{count}.png", blur_frame)
orig_frames.append(f"f{count}.png")
cv2.imwrite(f"f{count}_.png", blur_frame)
backup_frames.append(f"f{count}_.png")
cv2.imwrite(f"f{count}_dmap.png", depth_color)
depth_frames.append(f"f{count}_dmap.png")
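                # build a GrabCut trimap from the depth map (0 = background, 1 = sure foreground, 3 = probable foreground) and shift it by +128 so it can be stored as a grayscale PNG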
depth_gray = seg_frame(depth_gray, o, b) + 128
#print(depth_gray[depth_gray>128]-128)
cv2.imwrite(f"f{count}_mask.png", depth_gray)
masks.append(f"f{count}_mask.png")
count += 1
final_vid = create_video(comb_frames, frame_rate, "orig")
final_zip = zip_files(comb_frames, ["orig_result.vtt"])
raw_video.release()
# out.release()
cv2.destroyAllWindows()
global gradient
global frame_selected
global depths
global frames
global backups
frames = orig_frames
backups = backup_frames
depths = depth_frames
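    # cache the gradient image matching the panorama height (seg_frame loads the same file to suppress depth values near this reference)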
if depth_color.shape[0] == 2048: #height
gradient = cv2.imread('./gradient_large.png').astype(np.uint8)
elif depth_color.shape[0] == 1024:
gradient = cv2.imread('./gradient.png').astype(np.uint8)
else:
gradient = cv2.imread('./gradient_small.png').astype(np.uint8)
return final_vid, final_zip, frames, masks[frame_selected], depths #output_path
def depth_edges_mask(depth):
"""Returns a mask of edges in the depth map.
Args:
depth: 2D numpy array of shape (H, W) with dtype float32.
Returns:
mask: 2D numpy array of shape (H, W) with dtype bool.
"""
# Compute the x and y gradients of the depth map.
depth_dx, depth_dy = np.gradient(depth)
# Compute the gradient magnitude.
depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2)
# Compute the edge mask.
mask = depth_grad > 0.05
return mask
def blur_image(image, depth, blur_data):
blur_a = blur_data.split()
print(f'blur data {blur_data}')
blur_frame = image.copy()
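    # apply a depth-dependent blur: each of the 256 gray levels gets its own Gaussian kernel size from blur_data (index 0 corresponds to depth value 255)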
j = 0
while j < 256:
i = 255 - j
blur_lo = np.array([i,i,i,255])
blur_hi = np.array([i+1,i+1,i+1,255])
blur_mask = cv2.inRange(depth, blur_lo, blur_hi)
#print(f'kernel size {int(blur_a[j])}')
blur = cv2.GaussianBlur(image, (int(blur_a[j]), int(blur_a[j])), 0)
blur_frame[blur_mask>0] = blur[blur_mask>0]
j = j + 1
return blur_frame
def update_blur(blur):
global blurin
blurin = blur
return None
def loadfile(f):
return f
def show_json(txt):
data = json.loads(txt)
print(txt)
i=0
while i < len(data[2]):
data[2][i] = data[2][i]["image"]["path"]
data[4][i] = data[4][i]["path"]
i=i+1
return data[0]["video"]["path"], data[1]["path"], data[2], data[3]["background"]["path"], data[4]
def seg_frame(newmask, b, d):
if newmask.shape[0] == 2048: #height
gd = cv2.imread('./gradient_large.png', cv2.IMREAD_GRAYSCALE).astype(np.uint8)
elif newmask.shape[0] == 1024:
gd = cv2.imread('./gradient.png', cv2.IMREAD_GRAYSCALE).astype(np.uint8)
else:
gd = cv2.imread('./gradient_small.png', cv2.IMREAD_GRAYSCALE).astype(np.uint8)
newmask[np.absolute(newmask.astype(np.int16)-gd.astype(np.int16))<16] = 0
ret,newmask = cv2.threshold(newmask,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#b = 1
#d = 32
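    # erode the thresholded mask to a sure-foreground core (GrabCut label 1) and dilate it to a probable-foreground envelope (label 3)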
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * b + 1, 2 * b + 1), (b, b))
bd = cv2.erode(newmask, element)
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * d + 1, 2 * d + 1), (d, d))
bg = cv2.dilate(newmask, element)
bg[bg.shape[0]-64:bg.shape[0],0:bg.shape[1]] = 0
mask = np.zeros(newmask.shape[:2],np.uint8)
# https://docs.opencv.org/4.x/d8/d83/tutorial_py_grabcut.html
# wherever it is marked white (sure foreground), change mask=1
# wherever it is marked black (sure background), change mask=0
mask[bg == 255] = 3
mask[bd == 255] = 1 #2: probable bg, 3: probable fg
return mask
def select_frame(d, evt: gr.SelectData):
global frame_selected
global depths
global masks
global edge
if evt.index != frame_selected:
edge = []
frame_selected = evt.index
return depths[frame_selected], frame_selected
def switch_rows(v):
global frames
global depths
if v == True:
print(depths[0])
return depths
else:
print(frames[0])
return frames
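# helper: most frequent RGB colour in an image, via a flattened 24-bit histogram (np.ravel_multi_index + np.bincount)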
def bincount(a):
a2D = a.reshape(-1,a.shape[-1])
col_range = (256, 256, 256) # generically : a2D.max(0)+1
a1D = np.ravel_multi_index(a2D.T, col_range)
return list(reversed(np.unravel_index(np.bincount(a1D).argmax(), col_range)))
def reset_mask(d):
global frame_selected
global frames
global backups
global masks
global depths
global edge
edge = []
backup = cv2.imread(backups[frame_selected]).astype(np.uint8)
cv2.imwrite(frames[frame_selected], backup)
d["layers"][0][0:d["layers"][0].shape[0], 0:d["layers"][0].shape[1]] = (0,0,0,0)
return gr.ImageEditor(value=d)
def draw_mask(o, b, v, d, evt: gr.EventData):
global frames
global depths
global params
global frame_selected
global masks
global gradient
global edge
points = json.loads(v)
pts = np.array(points, np.int32)
pts = pts.reshape((-1,1,2))
if len(edge) == 0 or params["fnum"] != frame_selected:
if params["fnum"] != frame_selected:
d["background"] = cv2.imread(depths[frame_selected]).astype(np.uint8)
params["fnum"] = frame_selected
bg = cv2.cvtColor(d["background"], cv2.COLOR_RGBA2GRAY)
bg[bg==255] = 0
edge = bg.copy()
else:
bg = edge.copy()
x = points[len(points)-1][0]
y = points[len(points)-1][1]
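    # flood-fill the clicked region of the stored trimap, refine the selection with GrabCut, then clear the selected pixels in both the frame and its depth map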
mask = cv2.imread(masks[frame_selected], cv2.IMREAD_GRAYSCALE).astype(np.uint8)
mask[mask==128] = 0
print(mask[mask>0]-128)
d["layers"][0] = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGBA)
sel = cv2.floodFill(mask, None, (x, y), 1, 2, 2, (4 | cv2.FLOODFILL_FIXED_RANGE))[2] #(4 | cv2.FLOODFILL_FIXED_RANGE | cv2.FLOODFILL_MASK_ONLY | 255 << 8)
    # (with cv2.FLOODFILL_MASK_ONLY, 255 << 8 would tell floodFill to write the value 255 into the mask)
sel = sel[1:sel.shape[0]-1, 1:sel.shape[1]-1]
d["layers"][0][sel==0] = (0,0,0,0)
mask = cv2.cvtColor(d["layers"][0], cv2.COLOR_RGBA2GRAY)
mask[mask==0] = 128
print(mask[mask>128]-128)
mask, bgdModel, fgdModel = cv2.grabCut(cv2.cvtColor(d["background"], cv2.COLOR_RGBA2RGB), mask-128, None,None,None,15, cv2.GC_INIT_WITH_MASK)
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
frame = cv2.imread(frames[frame_selected], cv2.IMREAD_UNCHANGED).astype(np.uint8)
frame[mask>0] = (0,0,0,0)
cv2.imwrite(frames[frame_selected], frame)
depth = cv2.imread(depths[frame_selected], cv2.IMREAD_UNCHANGED).astype(np.uint8)
depth[mask>0] = (0,0,0,0)
cv2.imwrite(depths[frame_selected], depth)
return gr.ImageEditor(value=d)
js = """
async()=>{
const chart = document.getElementById('chart');
const blur_in = document.getElementById('blur_in').getElementsByTagName('textarea')[0];
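// the chart is 256 <hr> bars (one per depth level); dragging redraws the curve and writes the kernel sizes into the blur_in textbox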
var md = false;
var xold = 128;
var yold = 32;
var a = new Array(256);
var l;
for (var i=0; i<256; i++) {
const hr = document.createElement('hr');
hr.style.backgroundColor = 'hsl(0,0%,' + (100-i/256*100) + '%)';
chart.appendChild(hr);
}
function resetLine() {
a.fill(1);
for (var i=0; i<256; i++) {
chart.childNodes[i].style.height = a[i] + 'px';
chart.childNodes[i].style.marginTop = '32px';
}
}
resetLine();
window.resetLine = resetLine;
function pointerDown(x, y) {
md = true;
xold = parseInt(x - chart.getBoundingClientRect().x);
yold = parseInt(y - chart.getBoundingClientRect().y);
chart.title = xold + ',' + yold;
}
window.pointerDown = pointerDown;
function pointerUp() {
md = false;
var evt = document.createEvent('Event');
evt.initEvent('input', true, false);
blur_in.dispatchEvent(evt);
chart.title = '';
}
window.pointerUp = pointerUp;
function lerp(y1, y2, mu) { return y1*(1-mu)+y2*mu; }
function drawLine(x, y) {
x = parseInt(x - chart.getBoundingClientRect().x);
y = parseInt(y - chart.getBoundingClientRect().y);
if (md === true && y >= 0 && y < 64 && x >= 0 && x < 256) {
if (y < 32) {
a[x] = Math.abs(32-y)*2 + 1;
chart.childNodes[x].style.height = a[x] + 'px';
chart.childNodes[x].style.marginTop = y + 'px';
for (var i=Math.min(xold, x)+1; i<Math.max(xold, x); i++) {
l = parseInt(lerp( yold, y, (i-xold)/(x-xold) ));
if (l < 32) {
a[i] = Math.abs(32-l)*2 + 1;
chart.childNodes[i].style.height = a[i] + 'px';
chart.childNodes[i].style.marginTop = l + 'px';
} else if (l < 64) {
a[i] = Math.abs(l-32)*2 + 1;
chart.childNodes[i].style.height = a[i] + 'px';
chart.childNodes[i].style.marginTop = (64-l) + 'px';
}
}
} else if (y < 64) {
a[x] = Math.abs(y-32)*2 + 1;
chart.childNodes[x].style.height = a[x] + 'px';
chart.childNodes[x].style.marginTop = (64-y) + 'px';
for (var i=Math.min(xold, x)+1; i<Math.max(xold, x); i++) {
l = parseInt(lerp( yold, y, (i-xold)/(x-xold) ));
if (l < 32) {
a[i] = Math.abs(32-l)*2 + 1;
chart.childNodes[i].style.height = a[i] + 'px';
chart.childNodes[i].style.marginTop = l + 'px';
} else if (l < 64) {
a[i] = Math.abs(l-32)*2 + 1;
chart.childNodes[i].style.height = a[i] + 'px';
chart.childNodes[i].style.marginTop = (64-l) + 'px';
}
}
}
blur_in.value = a.join(' ');
xold = x;
yold = y;
chart.title = xold + ',' + yold;
}
}
window.drawLine = drawLine;
var intv_ = setInterval(function(){
if (document.getElementById("image_edit") && document.getElementById("image_edit").getElementsByTagName("canvas")) {
document.getElementById("image_edit").getElementsByTagName("canvas")[0].oncontextmenu = function(e){e.preventDefault();}
document.getElementById("image_edit").getElementsByTagName("canvas")[0].ondrag = function(e){e.preventDefault();}
document.getElementById("image_edit").getElementsByTagName("canvas")[0].onclick = function(e) {
var x = parseInt((e.clientX-e.target.getBoundingClientRect().x)*e.target.width/e.target.getBoundingClientRect().width);
var y = parseInt((e.clientY-e.target.getBoundingClientRect().y)*e.target.height/e.target.getBoundingClientRect().height);
var p = document.getElementById("mouse").getElementsByTagName("textarea")[0].value.slice(1, -1);
if (p != "") { p += ", "; }
p += "[" + x + ", " + y + "]";
document.getElementById("mouse").getElementsByTagName("textarea")[0].value = "[" + p + "]";
var evt = document.createEvent("Event");
evt.initEvent("input", true, false);
document.getElementById("mouse").getElementsByTagName("textarea")[0].dispatchEvent(evt);
}
document.getElementById("image_edit").getElementsByTagName("canvas")[0].onfocus = function(e) {
document.getElementById("mouse").getElementsByTagName("textarea")[0].value = "[]";
}
document.getElementById("image_edit").getElementsByTagName("canvas")[0].onblur = function(e) {
document.getElementById("mouse").getElementsByTagName("textarea")[0].value = "[]";
}
clearInterval(intv_);
}
}, 40);
}
"""
css = """
#img-display-container {
max-height: 100vh;
}
#img-display-input {
max-height: 80vh;
}
#img-display-output {
max-height: 80vh;
}
"""
head = """
"""
title = "# Depth Anything V2 Video"
description = """**Depth Anything V2** on full video files, intended for Google Street View panorama slideshows.
Please refer to the [paper](https://arxiv.org/abs/2406.09414), [project page](https://depth-anything-v2.github.io), and [github](https://github.com/DepthAnything/Depth-Anything-V2) for more details."""
with gr.Blocks(css=css, js=js, head=head) as demo:
gr.Markdown(title)
gr.Markdown(description)
gr.Markdown("### Video Depth Prediction demo")
with gr.Row():
with gr.Column():
with gr.Group():
input_json = gr.Textbox(elem_id="json_in", value="{}", label="JSON", interactive=False)
input_url = gr.Textbox(elem_id="url_in", value="./examples/streetview.mp4", label="URL")
input_video = gr.Video(label="Input Video", format="mp4")
input_url.input(fn=loadfile, inputs=[input_url], outputs=[input_video])
submit = gr.Button("Submit")
with gr.Group():
output_frame = gr.Gallery(label="Frames", preview=True, columns=8192, interactive=False)
output_switch = gr.Checkbox(label="Show depths")
output_switch.input(fn=switch_rows, inputs=[output_switch], outputs=[output_frame])
selected = gr.Number(label="Selected frame", visible=False, elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False)
with gr.Accordion(label="Depths", open=False):
output_depth = gr.Files(label="Depth files", interactive=False)
with gr.Group():
output_mask = gr.ImageEditor(layers=False, sources=('clipboard'), show_download_button=True, type="numpy", interactive=True, transforms=(None,), eraser=gr.Eraser(), brush=gr.Brush(default_size=0, colors=['black', '#505050', '#a0a0a0', 'white']), elem_id="image_edit")
with gr.Accordion(label="Border", open=False):
boffset = gr.Slider(label="Inner", value=1, maximum=256, minimum=0, step=1)
bsize = gr.Slider(label="Outer", value=32, maximum=256, minimum=0, step=1)
mouse = gr.Textbox(label="Mouse x,y", elem_id="mouse", value="""[]""", interactive=False)
reset = gr.Button("Reset", size='sm')
mouse.input(fn=draw_mask, show_progress="minimal", inputs=[boffset, bsize, mouse, output_mask], outputs=[output_mask])
reset.click(fn=reset_mask, inputs=[output_mask], outputs=[output_mask])
output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected])
with gr.Column():
model_type = gr.Dropdown([("small", "vits"), ("base", "vitb"), ("large", "vitl"), ("giant", "vitg")], type="value", value="vits", label='Model Type')
processed_video = gr.Video(label="Output Video", format="mp4", elem_id="output_video", interactive=False)
processed_zip = gr.File(label="Output Archive", interactive=False)
with gr.Tab("Blur"):
chart_c = gr.HTML(elem_id="chart_c", value="""<div id='chart' onpointermove='window.drawLine(event.clientX, event.clientY);' onpointerdown='window.pointerDown(event.clientX, event.clientY);' onpointerup='window.pointerUp();' onpointerleave='window.pointerUp();' onpointercancel='window.pointerUp();' onclick='window.resetLine();'></div>
<style>
* {
user-select: none;
}
html, body {
user-select: none;
}
#chart hr {
width: 1px;
height: 1px;
clear: none;
border: 0;
padding:0;
display: inline-block;
position: relative;
vertical-align: top;
margin-top:32px;
}
#chart {
padding:0;
margin:0;
width:256px;
height:64px;
background-color:#808080;
touch-action: none;
}
</style>
""")
average = gr.HTML(value="""<label for='average'>Average</label><input id='average' type='range' style='width:256px;height:1em;' value='1' min='1' max='15' step='2' onclick='
var pts_a = document.getElementById(\"blur_in\").getElementsByTagName(\"textarea\")[0].value.split(\" \");
for (var i=0; i<256; i++) {
var avg = 0;
var div = this.value;
for (var j = i-parseInt(this.value/2); j <= i+parseInt(this.value/2); j++) {
if (pts_a[j]) {
avg += parseInt(pts_a[j]);
} else if (div > 1) {
div--;
}
}
pts_a[i] = Math.round((avg / div - 1) / 2) * 2 + 1;
document.getElementById(\"chart\").childNodes[i].style.height = pts_a[i] + \"px\";
document.getElementById(\"chart\").childNodes[i].style.marginTop = (64-pts_a[i])/2 + \"px\";
}
document.getElementById(\"blur_in\").getElementsByTagName(\"textarea\")[0].value = pts_a.join(\" \");
var evt = document.createEvent(\"Event\");
evt.initEvent(\"input\", true, false);
document.getElementById(\"blur_in\").getElementsByTagName(\"textarea\")[0].dispatchEvent(evt);
' oninput='
this.parentNode.childNodes[2].innerText = this.value;
' onchange='this.click();'/><span>1</span>""")
with gr.Accordion(label="Levels", open=False):
blur_in = gr.Textbox(elem_id="blur_in", label="Kernel size", show_label=False, interactive=False, value=blurin)
blur_in.input(fn=update_blur, inputs=[blur_in], outputs=None)
with gr.Group():
with gr.Accordion(label="Locations", open=False):
example_coords = """[
{"lat": 50.07379596793083, "lng": 14.437146122950555, "heading": 152.70303, "pitch": 2.607833999999997},
{"lat": 50.073799567020004, "lng": 14.437146774240507, "heading": 151.12973, "pitch": 2.8672300000000064},
{"lat": 50.07377647505558, "lng": 14.437161000659017, "heading": 151.41025, "pitch": 3.4802200000000028},
{"lat": 50.07379496839027, "lng": 14.437148958238538, "heading": 151.93391, "pitch": 2.843050000000005},
{"lat": 50.073823157821664, "lng": 14.437124189538856, "heading": 152.95769, "pitch": 4.233024999999998}
]"""
coords = gr.Textbox(elem_id="coords", value=example_coords, label="Coordinates", interactive=False)
model3d = gr.HTML(value="""
<a style='color:white;font-weight:bold' href='https://freeali.se/freealise/transparent_video/' target='_blank'>Open renderer in new tab and upload your video there</a><br/>
<a style='color:white;font-weight:bold' href='https://freeali.se/image-morph-js/' target='_blank'>Warp the images to fix the holes from removed objects</a><br/>
Adjust the saturation, lightness and contrast for inpainting
""")
input_json.input(show_json, inputs=[input_json], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth])
def on_submit(uploaded_video,model_type,blur_in,boffset,bsize,coordinates):
global locations
locations = []
avg = [0, 0]
locations = json.loads(coordinates)
for k, location in enumerate(locations):
if "tiles" in locations[k]:
locations[k]["heading"] = locations[k]["tiles"]["originHeading"]
locations[k]["pitch"] = locations[k]["tiles"]["originPitch"]
elif not "heading" in locations[k] or not "pitch" in locations[k]:
locations[k]["heading"] = 0.0
locations[k]["pitch"] = 0.0
if "location" in locations[k]:
locations[k] = locations[k]["location"]["latLng"]
elif not "lat" in locations[k] or not "lng" in locations[k]:
locations[k]["lat"] = 0.0
locations[k]["lng"] = 0.0
avg[0] = avg[0] + locations[k]["lat"]
avg[1] = avg[1] + locations[k]["lng"]
if len(locations) > 0:
avg[0] = avg[0] / len(locations)
avg[1] = avg[1] / len(locations)
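        # build a WEBVTT track with one one-second cue per location; each cue payload is "lat,lng heading,pitch" for the matching frame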
vtt = "WEBVTT\n\n"
h = [0,0]
m = [0,0]
s = [-1,0]
for k, location in enumerate(locations):
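            # convert geodesic offsets from the average position (metres via vincenty) into renderer units, and headings/pitches to radians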
lat = vincenty((location["lat"], 0), (avg[0], 0)) * 1000
lng = vincenty((0, location["lng"]), (0, avg[1])) * 1000
locations[k]["lat"] = float(lat / 2.5 * 111 * np.sign(location["lat"]-avg[0]))
locations[k]["lng"] = float(lng / 2.5 * 111 * np.sign(location["lng"]-avg[1]))
# 2.5m is height of camera on google street view car,
# distance from center of sphere to pavement roughly 255 - 144 = 111 units
locations[k]["heading"] = locations[k]["heading"] / 180 * np.pi
locations[k]["pitch"] = locations[k]["pitch"] / 180 * np.pi
print(locations)
for l, sec in enumerate(s):
s[l] = s[l] + 1
if s[l] == 60:
s[l] = 0
m[l] = m[l] + 1
if m[l] == 60:
m[l] = 0
h[l] = h[l] + 1
vtt = vtt + str(k+1) + "\n" + str(h[0]).zfill(2) + ":" + str(m[0]).zfill(2) + ":" + str(s[0]).zfill(2) + ".000 --> " + str(h[1]).zfill(2) + ":" + str(m[1]).zfill(2) + ":" + str(s[1]).zfill(2) + ".000\n" + str(locations[k]["lat"]) + "," + str(locations[k]["lng"]) + " " + str(locations[k]["heading"]) + "," + str(locations[k]["pitch"]) + "\n\n"
        # trim the trailing blank line and write the WEBVTT cue file (zipped together with the frames in make_video)
        with open("orig_result.vtt", "w") as f:
            f.write(vtt[0:-2])
        # Process the video; make_video returns (output video, zip archive, frame gallery, selected mask, depth files).
        # Note: the global blurin (kept in sync by update_blur) is passed rather than the blur_in argument.
        return make_video(uploaded_video, encoder=model_type, blur_data=blurin, o=boffset, b=bsize)
submit.click(on_submit, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth])
example_files = [["./examples/streetview.mp4", "vits", blurin, 1, 32, example_coords]]
examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, blur_in, boffset, bsize, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth])
if __name__ == '__main__':
demo.queue().launch()