import os

import gradio as gr
from stability_sdk.api import Context
from stability_sdk.animation import AnimationArgs, Animator

STABILITY_HOST = "grpc.stability.ai:443"
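
# The key is supplied per request from the "Stability Key" textbox below. As an
# alternative (illustrative sketch, not part of the original app), it could be
# pre-filled from an environment variable, e.g.:
#
#   DEFAULT_STABILITY_KEY = os.environ.get("STABILITY_KEY", "")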

def anim(f_promt, s_promt, stability_key, cadence_interp, width, height, sampler,
         model, custom_model, seed, cfg_scale, clip_guidance, init_image, init_sizing,
         mask_path, mask_invert, preset, animation_mode, max_frames, border,
         noise_add_curve, noise_scale_curve, strength_curve, steps_curve,
         steps_strength_adj, interpolate_prompts, locked_seed, angle, zoom,
         translation_x, translation_y, translation_z, rotation_x, rotation_y,
         rotation_z, diffusion_cadence_curve, cadence_spans, color_coherence,
         brightness_curve, contrast_curve, hue_curve, saturation_curve,
         lightness_curve, color_match_animate, depth_model_weight, near_plane,
         far_plane, fov_curve, depth_blur_curve, depth_warp_curve, save_depth_maps,
         camera_type, render_mode, mask_power, use_inpainting_model, inpaint_border,
         mask_min_value, mask_binarization_thr, save_inpaint_masks, video_init_path,
         extract_nth_frame, video_mix_in_curve, video_flow_warp, fps, reverse):
    # Connect to the Stability API
    context = Context(STABILITY_HOST, stability_key)

    # Test the connection
    context.get_user_info()
    print("Connection successful!")

    # Configure the animation
    args = AnimationArgs()
    args.width = width
    args.height = height
    args.sampler = sampler
    args.model = model
    args.custom_model = custom_model
    args.seed = seed
    args.cfg_scale = cfg_scale
    args.clip_guidance = clip_guidance
    args.init_image = init_image
    args.init_sizing = init_sizing
    args.mask_path = mask_path
    args.mask_invert = mask_invert
    args.preset = preset
    args.animation_mode = animation_mode
    args.max_frames = max_frames
    args.border = border
    args.noise_add_curve = noise_add_curve
    args.noise_scale_curve = noise_scale_curve
    args.strength_curve = strength_curve
    args.steps_curve = steps_curve
    args.steps_strength_adj = steps_strength_adj
    args.interpolate_prompts = interpolate_prompts
    args.locked_seed = locked_seed
    args.angle = angle
    args.zoom = zoom
    args.translation_x = translation_x
    args.translation_y = translation_y
    args.translation_z = translation_z
    args.rotation_x = rotation_x
    args.rotation_y = rotation_y
    args.rotation_z = rotation_z
    args.diffusion_cadence_curve = diffusion_cadence_curve
    args.cadence_interp = cadence_interp
    args.cadence_spans = cadence_spans
    args.color_coherence = color_coherence
    args.brightness_curve = brightness_curve
    args.contrast_curve = contrast_curve
    args.hue_curve = hue_curve
    args.saturation_curve = saturation_curve
    args.lightness_curve = lightness_curve
    args.color_match_animate = color_match_animate
    args.depth_model_weight = depth_model_weight
    args.near_plane = near_plane
    args.far_plane = far_plane
    args.fov_curve = fov_curve
    args.depth_blur_curve = depth_blur_curve
    args.depth_warp_curve = depth_warp_curve
    args.save_depth_maps = save_depth_maps
    args.camera_type = camera_type
    args.render_mode = render_mode
    args.mask_power = mask_power
    args.use_inpainting_model = use_inpainting_model
    args.inpaint_border = inpaint_border
    args.mask_min_value = mask_min_value
    args.mask_binarization_thr = mask_binarization_thr
    args.save_inpaint_masks = save_inpaint_masks
    args.video_init_path = video_init_path
    args.extract_nth_frame = extract_nth_frame
    args.video_mix_in_curve = video_mix_in_curve
    args.video_flow_warp = video_flow_warp
    args.fps = fps
    args.reverse = reverse
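
    # Curve-valued settings ("0:(0.65)" etc.) are keyframed strings that map a
    # frame number to a value, with intermediate frames interpolated. For example
    # (illustrative only), a slow zoom-in over the first 24 frames could be
    # expressed as:
    #
    #   args.zoom = "0:(1.0), 24:(1.05)"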

    animation_prompts = {
        0: f_promt,
        2: s_promt,
    }
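
    # Keys are the frame numbers at which each prompt takes effect; with
    # interpolate_prompts enabled, prompts are blended between keyframes. A third
    # keyframe could be added the same way (illustrative, not in the original):
    #
    #   animation_prompts[max_frames - 1] = "a photo of a cute rabbit"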

    negative_prompt = ""

    # Create an Animator object to orchestrate the rendering
    animator = Animator(
        api_context=context,
        animation_prompts=animation_prompts,
        negative_prompt=negative_prompt,
        args=args,
    )
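
    # animator.render() yields frames one at a time as they finish, which is why
    # they can be saved to disk inside the loop below.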

    # Define the output folder path
    image_path = "/tmp/frames/"
    output_dir = os.path.join(image_path, "output")
    os.makedirs(output_dir, exist_ok=True)

    # Render each frame of the animation
    images = []
    for idx, frame in enumerate(animator.render()):
        file_path = os.path.join(output_dir, f"frame_{idx:05d}.png")
        frame.save(file_path)
        print("Created frame at: " + file_path)
        images.append(file_path)
    return images
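
# The handler returns individual PNG frames. A minimal sketch for muxing them
# into an MP4 (assumes ffmpeg is installed on the host; the helper name and
# output path are illustrative, not part of the original app):
#
#   import subprocess
#
#   def frames_to_video(frame_dir, fps, out_path="/tmp/out.mp4"):
#       subprocess.run([
#           "ffmpeg", "-y", "-framerate", str(fps),
#           "-i", os.path.join(frame_dir, "frame_%05d.png"),
#           "-c:v", "libx264", "-pix_fmt", "yuv420p", out_path,
#       ], check=True)
#       return out_path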

with gr.Blocks() as demo:
    gr.Markdown("Stability Animation")
    f_promt = gr.Textbox(label="First Prompt", value="a photo of a cute cat")
    s_promt = gr.Textbox(label="Second Prompt", value="a photo of a cute dog")
    stability_key = gr.Textbox(label="Stability Key", value="")
    with gr.Accordion("Advanced Settings", open=False):
        width = gr.Number(label="width", value=512, precision=0)
        height = gr.Number(label="height", value=512, precision=0)
        sampler = gr.Textbox(label="sampler", value="K_dpmpp_2m")
        model = gr.Textbox(label="model", value="stable-diffusion-xl-1024-v1-0")
        custom_model = gr.Textbox(label="custom_model", value="")
        seed = gr.Number(label="seed", value=-1, precision=0)
        cfg_scale = gr.Number(label="cfg_scale", value=7)
        clip_guidance = gr.Textbox(label="clip_guidance", value="None")
        init_image = gr.Textbox(label="init_image", value="")
        init_sizing = gr.Textbox(label="init_sizing", value="stretch")
        mask_path = gr.Textbox(label="mask_path", value="")
        mask_invert = gr.Checkbox(label="mask_invert", value=False)
        preset = gr.Textbox(label="preset", value="None")
        animation_mode = gr.Textbox(label="animation_mode", value="3D warp")
        max_frames = gr.Number(label="max_frames", value=72, precision=0)
        border = gr.Textbox(label="border", value="replicate")
        noise_add_curve = gr.Textbox(label="noise_add_curve", value="0:(0.02)")
        noise_scale_curve = gr.Textbox(label="noise_scale_curve", value="0:(0.99)")
        strength_curve = gr.Textbox(label="strength_curve", value="0:(0.65)")
        steps_curve = gr.Textbox(label="steps_curve", value="0:(30)")
        steps_strength_adj = gr.Checkbox(label="steps_strength_adj", value=False)
        interpolate_prompts = gr.Checkbox(label="interpolate_prompts", value=False)
        locked_seed = gr.Checkbox(label="locked_seed", value=False)
        angle = gr.Textbox(label="angle", value="0:(0)")
        zoom = gr.Textbox(label="zoom", value="0:(1)")
        translation_x = gr.Textbox(label="translation_x", value="0:(0)")
        translation_y = gr.Textbox(label="translation_y", value="0:(0)")
        translation_z = gr.Textbox(label="translation_z", value="0:(0)")
        rotation_x = gr.Textbox(label="rotation_x", value="0:(0)")
        rotation_y = gr.Textbox(label="rotation_y", value="0:(0)")
        rotation_z = gr.Textbox(label="rotation_z", value="0:(0)")
        diffusion_cadence_curve = gr.Textbox(label="diffusion_cadence_curve", value="0:(1)")
        cadence_interp = gr.Textbox(label="cadence_interp", value="mix")
        cadence_spans = gr.Checkbox(label="cadence_spans", value=False)
        color_coherence = gr.Textbox(label="color_coherence", value="LAB")
        brightness_curve = gr.Textbox(label="brightness_curve", value="0:(1.0)")
        contrast_curve = gr.Textbox(label="contrast_curve", value="0:(1.0)")
        hue_curve = gr.Textbox(label="hue_curve", value="0:(0.0)")
        saturation_curve = gr.Textbox(label="saturation_curve", value="0:(1.0)")
        lightness_curve = gr.Textbox(label="lightness_curve", value="0:(0.0)")
        color_match_animate = gr.Checkbox(label="color_match_animate", value=True)
        depth_model_weight = gr.Number(label="depth_model_weight", value=0.3)
        near_plane = gr.Number(label="near_plane", value=200)
        far_plane = gr.Number(label="far_plane", value=10000)
        fov_curve = gr.Textbox(label="fov_curve", value="0:(25)")
        depth_blur_curve = gr.Textbox(label="depth_blur_curve", value="0:(0.0)")
        depth_warp_curve = gr.Textbox(label="depth_warp_curve", value="0:(1.0)")
        save_depth_maps = gr.Checkbox(label="save_depth_maps", value=False)
        camera_type = gr.Textbox(label="camera_type", value="perspective")
        render_mode = gr.Textbox(label="render_mode", value="mesh")
        mask_power = gr.Number(label="mask_power", value=0.3)
        use_inpainting_model = gr.Checkbox(label="use_inpainting_model", value=False)
        inpaint_border = gr.Checkbox(label="inpaint_border", value=False)
        mask_min_value = gr.Textbox(label="mask_min_value", value="0:(0.25)")
        mask_binarization_thr = gr.Number(label="mask_binarization_thr", value=0.5)
        save_inpaint_masks = gr.Checkbox(label="save_inpaint_masks", value=False)
        video_init_path = gr.Textbox(label="video_init_path", value="")
        extract_nth_frame = gr.Number(label="extract_nth_frame", value=1, precision=0)
        video_mix_in_curve = gr.Textbox(label="video_mix_in_curve", value="0:(0.02)")
        video_flow_warp = gr.Checkbox(label="video_flow_warp", value=True)
        fps = gr.Number(label="fps", value=12, precision=0)
        reverse = gr.Checkbox(label="reverse", value=False)

    outimg = gr.File(label="Generated Files", file_count="multiple")
    btn = gr.Button("Anim")
    btn.click(
        fn=anim,
        inputs=[
            f_promt, s_promt, stability_key, cadence_interp, width, height, sampler,
            model, custom_model, seed, cfg_scale, clip_guidance, init_image,
            init_sizing, mask_path, mask_invert, preset, animation_mode, max_frames,
            border, noise_add_curve, noise_scale_curve, strength_curve, steps_curve,
            steps_strength_adj, interpolate_prompts, locked_seed, angle, zoom,
            translation_x, translation_y, translation_z, rotation_x, rotation_y,
            rotation_z, diffusion_cadence_curve, cadence_spans, color_coherence,
            brightness_curve, contrast_curve, hue_curve, saturation_curve,
            lightness_curve, color_match_animate, depth_model_weight, near_plane,
            far_plane, fov_curve, depth_blur_curve, depth_warp_curve, save_depth_maps,
            camera_type, render_mode, mask_power, use_inpainting_model, inpaint_border,
            mask_min_value, mask_binarization_thr, save_inpaint_masks, video_init_path,
            extract_nth_frame, video_mix_in_curve, video_flow_warp, fps, reverse,
        ],
        outputs=[outimg],
        api_name="AnimAPI",
    )

demo.launch()
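
# Because btn.click() sets api_name="AnimAPI", the same handler is reachable
# programmatically once the app is running. A rough client-side sketch (assumes
# the gradio_client package and a locally running app; one value must be passed
# per input component, in the order of the inputs list above):
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   # client.predict(<one value per input component>, api_name="/AnimAPI")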