import os
import cv2
import gradio as gr
from stability_sdk.api import Context
from stability_sdk.animation import AnimationArgs, Animator
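
# gRPC endpoint of the Stability AI API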
STABILITY_HOST = "grpc.stability.ai:443"
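

# anim() connects to the Stability API, renders animation frames for the two prompts,
# saves them under /tmp/frames/output, and assembles them into an MP4 for the Gradio output.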
def anim(f_promt, s_promt, stability_key, cadence_interp, width, height, sampler, model, custom_model, seed, cfg_scale, clip_guidance, init_image, init_sizing, mask_path, mask_invert, preset, animation_mode, max_frames, border, noise_add_curve, noise_scale_curve, strength_curve, steps_curve, steps_strength_adj, interpolate_prompts, locked_seed, angle, zoom, translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z, diffusion_cadence_curve, cadence_spans, color_coherence, brightness_curve, contrast_curve, hue_curve, saturation_curve, lightness_curve, color_match_animate, depth_model_weight, near_plane, far_plane, fov_curve, depth_blur_curve, depth_warp_curve, save_depth_maps, camera_type, render_mode, mask_power, use_inpainting_model, inpaint_border, mask_min_value, mask_binarization_thr, save_inpaint_masks, video_init_path, extract_nth_frame, video_mix_in_curve, video_flow_warp, fps, reverse):
    # Connect to Stability API
    context = Context(STABILITY_HOST, stability_key)

    # Test the connection
    context.get_user_info()
    print("Connection successful!")

    # Configure the animation
    args = AnimationArgs()
    args.width = width
    args.height = height
    args.sampler = sampler
    args.model = model
    args.custom_model = custom_model
    args.seed = seed
    args.cfg_scale = cfg_scale
    args.clip_guidance = clip_guidance
    args.init_image = init_image
    args.init_sizing = init_sizing
    args.mask_path = mask_path
    args.mask_invert = mask_invert
    args.preset = preset
    args.animation_mode = animation_mode
    args.max_frames = max_frames
    args.border = border
    args.noise_add_curve = noise_add_curve
    args.noise_scale_curve = noise_scale_curve
    args.strength_curve = strength_curve
    args.steps_curve = steps_curve
    args.steps_strength_adj = steps_strength_adj
    args.interpolate_prompts = interpolate_prompts
    args.locked_seed = locked_seed
    args.angle = angle
    args.zoom = zoom
    args.translation_x = translation_x
    args.translation_y = translation_y
    args.translation_z = translation_z
    args.rotation_x = rotation_x
    args.rotation_y = rotation_y
    args.rotation_z = rotation_z
    args.diffusion_cadence_curve = diffusion_cadence_curve
    args.cadence_interp = cadence_interp
    args.cadence_spans = cadence_spans
    args.color_coherence = color_coherence
    args.brightness_curve = brightness_curve
    args.contrast_curve = contrast_curve
    args.hue_curve = hue_curve
    args.saturation_curve = saturation_curve
    args.lightness_curve = lightness_curve
    args.color_match_animate = color_match_animate
    args.depth_model_weight = depth_model_weight
    args.near_plane = near_plane
    args.far_plane = far_plane
    args.fov_curve = fov_curve
    args.depth_blur_curve = depth_blur_curve
    args.depth_warp_curve = depth_warp_curve
    args.save_depth_maps = save_depth_maps
    args.camera_type = camera_type
    args.render_mode = render_mode
    args.mask_power = mask_power
    args.use_inpainting_model = use_inpainting_model
    args.inpaint_border = inpaint_border
    args.mask_min_value = mask_min_value
    args.mask_binarization_thr = mask_binarization_thr
    args.save_inpaint_masks = save_inpaint_masks
    args.video_init_path = video_init_path
    args.extract_nth_frame = extract_nth_frame
    args.video_mix_in_curve = video_mix_in_curve
    args.video_flow_warp = video_flow_warp
    args.fps = fps
    args.reverse = reverse
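
    # Prompts keyed by the frame at which they start to apply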
    animation_prompts = {
        0: f_promt,
        2: s_promt,
    }
    negative_prompt = ""

    # Create Animator object to orchestrate the rendering
    animator = Animator(
        api_context=context,
        animation_prompts=animation_prompts,
        negative_prompt=negative_prompt,
        args=args
    )

    # Define output folder path
    image_path = "/tmp/frames/"
    output_dir = os.path.join(image_path, "output")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Render each frame of the animation
    images = []
    for idx, frame in enumerate(animator.render()):
        file_path = os.path.join(output_dir, f"frame_{idx:05d}.png")
        frame.save(file_path)
        print("Created frame at: " + file_path)
        images.append(file_path)

    # Define the codec using VideoWriter_fourcc() and create a VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # or use 'XVID'
    height, width, _ = cv2.imread(images[0]).shape  # get the frame size from the first image
    video_path = os.path.join(output_dir, 'output.mp4')  # output video file path
    out = cv2.VideoWriter(video_path, fourcc, fps, (width, height))

    # Write each frame to the video file
    for image_file in images:
        frame = cv2.imread(image_file)
        out.write(frame)

    # Release the VideoWriter
    out.release()

    # Return the rendered video so the Gradio File output can serve it
    return video_path
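

# Gradio UI: prompt and API key inputs, plus an accordion of advanced animation settings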
with gr.Blocks() as demo:
    gr.Markdown("Stability Animation 1")
    f_promt = gr.Textbox(label="First Prompt", value="a photo of a cute cat")
    s_promt = gr.Textbox(label="Second Prompt", value="a photo of a cute dog")
    stability_key = gr.Textbox(label="Stability Key", value="", type="password")
    with gr.Accordion(label="Advanced Settings", open=False):
        width = gr.Slider(minimum=100, maximum=1000, value=512, label="Width")
        height = gr.Slider(minimum=100, maximum=1000, value=512, label="Height")
        sampler = gr.Dropdown(choices=['K_dpmpp_2m'], value='K_dpmpp_2m', label="Sampler")
        model = gr.Dropdown(choices=[
            "stable-diffusion-512-v2-1", "stable-diffusion-xl-beta-v2-2-2", "stable-diffusion-xl-1024-v0-9",
            "stable-diffusion-xl-1024-v1-0", "custom"
        ], value="stable-diffusion-xl-1024-v1-0", label="Model")
        custom_model = gr.Textbox(value="", label="Custom Model")
        seed = gr.Slider(minimum=-1, maximum=100, value=-1, label="Seed")
        cfg_scale = gr.Slider(minimum=1, maximum=10, value=7, label="Cfg Scale")
        clip_guidance = gr.Dropdown(choices=["None", "Simple", "FastBlue", "FastGreen"], value="None", label="Clip Guidance")
        init_image = gr.Textbox(value='', label="Init Image")
        init_sizing = gr.Dropdown(choices=['stretch'], value='stretch', label="Init Sizing")
        mask_path = gr.Textbox(value="", label="Mask Path")
        mask_invert = gr.Checkbox(value=False, label="Mask Invert")
        preset = gr.Dropdown(value="cinematic", label="Preset", choices=[
            'None', '3d-model', 'analog-film', 'anime', 'cinematic', 'comic-book', 'digital-art',
            'enhance', 'fantasy-art', 'isometric', 'line-art', 'low-poly', 'modeling-compound',
            'neon-punk', 'origami', 'photographic', 'pixel-art',
        ])
        animation_mode = gr.Dropdown(choices=['3D warp'], value='3D warp', label="Animation Mode")
        max_frames = gr.Slider(minimum=1, maximum=100, value=5, label="Max Frames")
        border = gr.Dropdown(choices=['replicate'], value='replicate', label="Border")
        noise_add_curve = gr.Textbox(value="0:(0.02)", label="Noise Add Curve")
        noise_scale_curve = gr.Textbox(value="0:(0.99)", label="Noise Scale Curve")
        strength_curve = gr.Textbox(value="0:(0.65)", label="Strength Curve")
        steps_curve = gr.Textbox(value="0:(30)", label="Steps Curve")
        steps_strength_adj = gr.Checkbox(value=False, label="Steps Strength Adj")
        interpolate_prompts = gr.Checkbox(value=False, label="Interpolate Prompts")
        locked_seed = gr.Checkbox(value=False, label="Locked Seed")
        angle = gr.Textbox(value="0:(0)", label="Angle")
        zoom = gr.Textbox(value="0:(1)", label="Zoom")
        translation_x = gr.Textbox(value="0:(0)", label="Translation X")
        translation_y = gr.Textbox(value="0:(0)", label="Translation Y")
        translation_z = gr.Textbox(value="0:(0)", label="Translation Z")
        rotation_x = gr.Textbox(value="0:(0)", label="Rotation X")
        rotation_y = gr.Textbox(value="0:(0)", label="Rotation Y")
        rotation_z = gr.Textbox(value="0:(0)", label="Rotation Z")
        diffusion_cadence_curve = gr.Textbox(value="0:(1)", label="Diffusion Cadence Curve")
        cadence_interp = gr.Dropdown(choices=['film', 'mix', 'rife', 'vae-lerp', 'vae-slerp'], value='mix', label="Cadence Interp")
        cadence_spans = gr.Checkbox(value=False, label="Cadence Spans")
        color_coherence = gr.Dropdown(choices=['None', 'HSV', 'LAB', 'RGB'], value='LAB', label="Color Coherence")
        brightness_curve = gr.Textbox(value="0:(1.0)", label="Brightness Curve")
        contrast_curve = gr.Textbox(value="0:(1.0)", label="Contrast Curve")
        hue_curve = gr.Textbox(value="0:(0.0)", label="Hue Curve")
        saturation_curve = gr.Textbox(value="0:(1.0)", label="Saturation Curve")
        lightness_curve = gr.Textbox(value="0:(0.0)", label="Lightness Curve")
        color_match_animate = gr.Checkbox(value=True, label="Color Match Animate")
        depth_model_weight = gr.Slider(minimum=0, maximum=1, value=0.3, step=0.1, label="Depth Model Weight")
        near_plane = gr.Slider(minimum=1, maximum=1000, value=200, label="Near Plane")
        far_plane = gr.Slider(minimum=1, maximum=10000, value=10000, label="Far Plane")
        fov_curve = gr.Textbox(value="0:(25)", label="Fov Curve")
        depth_blur_curve = gr.Textbox(value="0:(0.0)", label="Depth Blur Curve")
        depth_warp_curve = gr.Textbox(value="0:(1.0)", label="Depth Warp Curve")
        save_depth_maps = gr.Checkbox(value=False, label="Save Depth Maps")
        camera_type = gr.Dropdown(choices=['perspective'], value='perspective', label="Camera Type")
        render_mode = gr.Dropdown(choices=['mesh'], value='mesh', label="Render Mode")
        mask_power = gr.Slider(minimum=0, maximum=1, value=0.3, step=0.1, label="Mask Power")
        use_inpainting_model = gr.Checkbox(value=False, label="Use Inpainting Model")
        inpaint_border = gr.Checkbox(value=False, label="Inpaint Border")
        mask_min_value = gr.Textbox(value="0:(0.25)", label="Mask Min Value")
        mask_binarization_thr = gr.Slider(minimum=0, maximum=1, value=0.5, step=0.1, label="Mask Binarization Threshold")
        save_inpaint_masks = gr.Checkbox(value=False, label="Save Inpaint Masks")
        video_init_path = gr.Textbox(value="", label="Video Init Path")
        extract_nth_frame = gr.Slider(minimum=1, maximum=10, value=1, label="Extract Nth Frame")
        video_mix_in_curve = gr.Textbox(value="0:(0.02)", label="Video Mix In Curve")
        video_flow_warp = gr.Checkbox(value=True, label="Video Flow Warp")
        fps = gr.Slider(minimum=1, maximum=60, value=12, label="FPS")
        reverse = gr.Checkbox(value=False, label="Reverse")
    outimg = gr.File(label="Generated Files")
    btn = gr.Button('Anim')
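    # api_name="AnimAPI" also exposes this click handler as a named endpoint for the Gradio client API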
    btn.click(fn=anim, inputs=[f_promt, s_promt, stability_key, cadence_interp, width, height, sampler, model, custom_model, seed, cfg_scale, clip_guidance, init_image, init_sizing, mask_path, mask_invert, preset, animation_mode, max_frames, border, noise_add_curve, noise_scale_curve, strength_curve, steps_curve, steps_strength_adj, interpolate_prompts, locked_seed, angle, zoom, translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z, diffusion_cadence_curve, cadence_spans, color_coherence, brightness_curve, contrast_curve, hue_curve, saturation_curve, lightness_curve, color_match_animate, depth_model_weight, near_plane, far_plane, fov_curve, depth_blur_curve, depth_warp_curve, save_depth_maps, camera_type, render_mode, mask_power, use_inpainting_model, inpaint_border, mask_min_value, mask_binarization_thr, save_inpaint_masks, video_init_path, extract_nth_frame, video_mix_in_curve, video_flow_warp, fps, reverse], outputs=[outimg], api_name="AnimAPI")
demo.launch()