import os
import gradio as gr
from tqdm import tqdm
from stability_sdk.api import Context
from stability_sdk.utils import create_video_from_frames
from stability_sdk.animation import AnimationArgs, Animator

STABILITY_HOST = "grpc.stability.ai:443"
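
# anim() fills an AnimationArgs object from the UI values defined below, renders the
# requested number of frames between the two prompts with the Stability animation SDK,
# assembles the frames into an mp4, and returns its path for Gradio to display.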
def anim(f_prompt, s_prompt, stability_key, cadence_interp, width, height, sampler, model, custom_model,
         seed, cfg_scale, clip_guidance, init_image, init_sizing, mask_path, mask_invert, preset,
         animation_mode, max_frames, border, noise_add_curve, noise_scale_curve, strength_curve,
         steps_curve, steps_strength_adj, interpolate_prompts, locked_seed, angle, zoom,
         translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z,
         diffusion_cadence_curve, cadence_spans, color_coherence, brightness_curve, contrast_curve,
         hue_curve, saturation_curve, lightness_curve, color_match_animate, depth_model_weight,
         near_plane, far_plane, fov_curve, depth_blur_curve, depth_warp_curve, save_depth_maps,
         camera_type, render_mode, mask_power, use_inpainting_model, inpaint_border, mask_min_value,
         mask_binarization_thr, save_inpaint_masks, video_init_path, extract_nth_frame,
         video_mix_in_curve, video_flow_warp, fps, reverse):
    # Connect to the Stability API
    context = Context(STABILITY_HOST, stability_key)

    # Test the connection
    context.get_user_info()
    print("Connection successful!")

    # Configure the animation
    args = AnimationArgs()
    args.cadence_interp = cadence_interp
    args.width = width
    args.height = height
    args.sampler = sampler
    args.model = model
    args.custom_model = custom_model
    args.seed = seed
    args.cfg_scale = cfg_scale
    args.clip_guidance = clip_guidance
    args.init_image = init_image
    args.init_sizing = init_sizing
    args.mask_path = mask_path
    args.mask_invert = mask_invert
    args.preset = preset
    args.animation_mode = animation_mode
    args.max_frames = max_frames
    args.border = border
    args.noise_add_curve = noise_add_curve
    args.noise_scale_curve = noise_scale_curve
    args.strength_curve = strength_curve
    args.steps_curve = steps_curve
    args.steps_strength_adj = steps_strength_adj
    args.interpolate_prompts = interpolate_prompts
    args.locked_seed = locked_seed
    args.angle = angle
    args.zoom = zoom
    args.translation_x = translation_x
    args.translation_y = translation_y
    args.translation_z = translation_z
    args.rotation_x = rotation_x
    args.rotation_y = rotation_y
    args.rotation_z = rotation_z
    args.diffusion_cadence_curve = diffusion_cadence_curve
    args.cadence_spans = cadence_spans
    args.color_coherence = color_coherence
    args.brightness_curve = brightness_curve
    args.contrast_curve = contrast_curve
    args.hue_curve = hue_curve
    args.saturation_curve = saturation_curve
    args.lightness_curve = lightness_curve
    args.color_match_animate = color_match_animate
    args.depth_model_weight = depth_model_weight
    args.near_plane = near_plane
    args.far_plane = far_plane
    args.fov_curve = fov_curve
    args.depth_blur_curve = depth_blur_curve
    args.depth_warp_curve = depth_warp_curve
    args.save_depth_maps = save_depth_maps
    args.camera_type = camera_type
    args.render_mode = render_mode
    args.mask_power = mask_power
    args.use_inpainting_model = use_inpainting_model
    args.inpaint_border = inpaint_border
    args.mask_min_value = mask_min_value
    args.mask_binarization_thr = mask_binarization_thr
    args.save_inpaint_masks = save_inpaint_masks
    args.video_init_path = video_init_path
    args.extract_nth_frame = extract_nth_frame
    args.video_mix_in_curve = video_mix_in_curve
    args.video_flow_warp = video_flow_warp
    args.fps = fps
    args.reverse = reverse
    # Prompts keyed by the frame index at which they take effect
    animation_prompts = {
        0: f_prompt,
        2: s_prompt,
    }
    negative_prompt = ""
    print(args)

    # Create an Animator object to orchestrate the rendering
    animator = Animator(
        api_context=context,
        animation_prompts=animation_prompts,
        negative_prompt=negative_prompt,
        args=args,
        out_dir="video_01"
    )

    # Render frame by frame, then assemble the frames into an mp4 at the requested frame rate
    for _ in tqdm(animator.render(), total=args.max_frames):
        pass
    video_path = os.path.join(animator.out_dir, "video.mp4")
    create_video_from_frames(animator.out_dir, video_path, fps=fps)
    return video_path

with gr.Blocks() as demo:
    gr.Markdown("Stability Animation 1")
    f_prompt = gr.Textbox(label="First Prompt", value="a photo of a cute cat")
    s_prompt = gr.Textbox(label="Second Prompt", value="a photo of a cute dog")
    stability_key = gr.Textbox(label="Stability Key", value="")

    # Advanced animation settings, collapsed by default
    with gr.Accordion("Advanced Settings", open=False):
        width = gr.Slider(minimum=100, maximum=1000, value=512, label="Width")
        height = gr.Slider(minimum=100, maximum=1000, value=512, label="Height")
        sampler = gr.Dropdown(choices=['K_dpmpp_2m'], value='K_dpmpp_2m', label="Sampler")
        model = gr.Dropdown(choices=[
            "stable-diffusion-512-v2-1", "stable-diffusion-xl-beta-v2-2-2", "stable-diffusion-xl-1024-v0-9",
            "stable-diffusion-xl-1024-v1-0", "custom"
        ], value="stable-diffusion-xl-1024-v1-0", label="Model")
        custom_model = gr.Textbox(value="", label="Custom Model")
        seed = gr.Slider(minimum=-1, maximum=100, value=-1, label="Seed")
        cfg_scale = gr.Slider(minimum=1, maximum=10, value=7, label="Cfg Scale")
        clip_guidance = gr.Dropdown(choices=["None", "Simple", "FastBlue", "FastGreen"], value="None", label="Clip Guidance")
        init_image = gr.Textbox(value='', label="Init Image")
        init_sizing = gr.Dropdown(choices=['stretch'], value='stretch', label="Init Sizing")
        mask_path = gr.Textbox(value="", label="Mask Path")
        mask_invert = gr.Checkbox(value=False, label="Mask Invert")
        preset = gr.Dropdown(value="cinematic", label="Preset", choices=[
            'None', '3d-model', 'analog-film', 'anime', 'cinematic', 'comic-book', 'digital-art',
            'enhance', 'fantasy-art', 'isometric', 'line-art', 'low-poly', 'modeling-compound',
            'neon-punk', 'origami', 'photographic', 'pixel-art',
        ])
        animation_mode = gr.Dropdown(choices=['3D warp'], value='3D warp', label="Animation Mode")
        max_frames = gr.Slider(minimum=1, maximum=100, value=5, label="Max Frames")
        border = gr.Dropdown(choices=['replicate'], value='replicate', label="Border")
        noise_add_curve = gr.Textbox(value="0:(0.02)", label="Noise Add Curve")
        noise_scale_curve = gr.Textbox(value="0:(0.99)", label="Noise Scale Curve")
        strength_curve = gr.Textbox(value="0:(0.65)", label="Strength Curve")
        steps_curve = gr.Textbox(value="0:(30)", label="Steps Curve")
        steps_strength_adj = gr.Checkbox(value=False, label="Steps Strength Adj")
        interpolate_prompts = gr.Checkbox(value=False, label="Interpolate Prompts")
        locked_seed = gr.Checkbox(value=False, label="Locked Seed")
        angle = gr.Textbox(value="0:(0)", label="Angle")
        zoom = gr.Textbox(value="0:(1)", label="Zoom")
        translation_x = gr.Textbox(value="0:(0)", label="Translation X")
        translation_y = gr.Textbox(value="0:(0)", label="Translation Y")
        translation_z = gr.Textbox(value="0:(0)", label="Translation Z")
        rotation_x = gr.Textbox(value="0:(0)", label="Rotation X")
        rotation_y = gr.Textbox(value="0:(0)", label="Rotation Y")
        rotation_z = gr.Textbox(value="0:(0)", label="Rotation Z")
        diffusion_cadence_curve = gr.Textbox(value="0:(1)", label="Diffusion Cadence Curve")
        cadence_interp = gr.Dropdown(choices=['film', 'mix', 'rife', 'vae-lerp', 'vae-slerp'], value='mix', label="Cadence Interp")
        cadence_spans = gr.Checkbox(value=False, label="Cadence Spans")
        color_coherence = gr.Dropdown(choices=['None', 'HSV', 'LAB', 'RGB'], value='LAB', label="Color Coherence")
        brightness_curve = gr.Textbox(value="0:(1.0)", label="Brightness Curve")
        contrast_curve = gr.Textbox(value="0:(1.0)", label="Contrast Curve")
        hue_curve = gr.Textbox(value="0:(0.0)", label="Hue Curve")
        saturation_curve = gr.Textbox(value="0:(1.0)", label="Saturation Curve")
        lightness_curve = gr.Textbox(value="0:(0.0)", label="Lightness Curve")
        color_match_animate = gr.Checkbox(value=True, label="Color Match Animate")
        depth_model_weight = gr.Slider(minimum=0, maximum=1, value=0.3, step=0.1, label="Depth Model Weight")
        near_plane = gr.Slider(minimum=1, maximum=1000, value=200, label="Near Plane")
        far_plane = gr.Slider(minimum=1, maximum=10000, value=10000, label="Far Plane")
        fov_curve = gr.Textbox(value="0:(25)", label="Fov Curve")
        depth_blur_curve = gr.Textbox(value="0:(0.0)", label="Depth Blur Curve")
        depth_warp_curve = gr.Textbox(value="0:(1.0)", label="Depth Warp Curve")
        save_depth_maps = gr.Checkbox(value=False, label="Save Depth Maps")
        camera_type = gr.Dropdown(choices=['perspective'], value='perspective', label="Camera Type")
        render_mode = gr.Dropdown(choices=['mesh'], value='mesh', label="Render Mode")
        mask_power = gr.Slider(minimum=0, maximum=1, value=0.3, step=0.1, label="Mask Power")
        use_inpainting_model = gr.Checkbox(value=False, label="Use Inpainting Model")
        inpaint_border = gr.Checkbox(value=False, label="Inpaint Border")
        mask_min_value = gr.Textbox(value="0:(0.25)", label="Mask Min Value")
        mask_binarization_thr = gr.Slider(minimum=0, maximum=1, value=0.5, step=0.1, label="Mask Binarization Threshold")
        save_inpaint_masks = gr.Checkbox(value=False, label="Save Inpaint Masks")
        video_init_path = gr.Textbox(value="", label="Video Init Path")
        extract_nth_frame = gr.Slider(minimum=1, maximum=10, value=1, label="Extract Nth Frame")
        video_mix_in_curve = gr.Textbox(value="0:(0.02)", label="Video Mix In Curve")
        video_flow_warp = gr.Checkbox(value=True, label="Video Flow Warp")
        fps = gr.Slider(minimum=1, maximum=60, value=12, label="FPS")
        reverse = gr.Checkbox(value=False, label="Reverse")

    vid = gr.Video(label='video')
    btn = gr.Button('Anim')
    # The inputs list must stay in the same order as the parameters of anim()
    btn.click(
        fn=anim,
        inputs=[
            f_prompt, s_prompt, stability_key, cadence_interp, width, height, sampler, model, custom_model,
            seed, cfg_scale, clip_guidance, init_image, init_sizing, mask_path, mask_invert, preset,
            animation_mode, max_frames, border, noise_add_curve, noise_scale_curve, strength_curve,
            steps_curve, steps_strength_adj, interpolate_prompts, locked_seed, angle, zoom,
            translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z,
            diffusion_cadence_curve, cadence_spans, color_coherence, brightness_curve, contrast_curve,
            hue_curve, saturation_curve, lightness_curve, color_match_animate, depth_model_weight,
            near_plane, far_plane, fov_curve, depth_blur_curve, depth_warp_curve, save_depth_maps,
            camera_type, render_mode, mask_power, use_inpainting_model, inpaint_border, mask_min_value,
            mask_binarization_thr, save_inpaint_masks, video_init_path, extract_nth_frame,
            video_mix_in_curve, video_flow_warp, fps, reverse,
        ],
        outputs=[vid],
        api_name="AnimAPI",
    )

demo.launch()
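
# The click handler above also exposes an HTTP endpoint named "AnimAPI". A minimal
# client-side sketch (hypothetical, not part of this Space) for inspecting and calling
# it with gradio_client, assuming the app is running locally on the default port:
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   client.view_api()  # lists the /AnimAPI parameters in order
#   # client.predict(<values in the same order as the inputs list>, api_name="/AnimAPI")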