[DEFAULTS]
# name of the run
name = stable_audio_tools
# the batch size
batch_size = 8
# number of GPUs to use for training
num_gpus = 4
# number of nodes to use for training
num_nodes = 1
# Multi-GPU strategy for PyTorch Lightning
strategy = ""
# Precision to use for training
precision = "16-mixed"
# number of CPU workers for the DataLoader
num_workers = 8
# the random seed
seed = 42
# Batches for gradient accumulation
accum_batches = 1
# Number of steps between checkpoints
checkpoint_every = 20
# trainer checkpoint file to restart training from
ckpt_path = ''
# model checkpoint file to start a new training run from
pretrained_ckpt_path = ''
# Checkpoint path for the pretransform model if needed
pretransform_ckpt_path = ''
# configuration file specifying model hyperparameters
model_config = ''
# configuration for datasets
dataset_config = ''
# directory to save the checkpoints in
save_dir = ''
# gradient_clip_val passed into PyTorch Lightning Trainer
gradient_clip_val = 0.0
# remove the weight norm from the pretransform model
remove_pretransform_weight_norm = ''