#!/usr/bin/env bash
# Knowledge-distillation run: a Flax Whisper student (initialised from
# ./nb-distil-large-init-0811) is distilled against the NbAiLab/nb-whisper-large
# teacher on the streamed NCC speech dataset, with metrics logged to Weights & Biases
# and checkpoints pushed to the Hugging Face Hub.
TOKENIZERS_PARALLELISM=false python3 run_distillation_nodes.py \
  --model_name_or_path "./nb-distil-large-init-0811" \
  --teacher_model_name_or_path "NbAiLab/nb-whisper-large" \
  --train_dataset_name "NbAiLab/annotated_distil_raw_ncc_speech_v7_large" \
  --train_dataset_config_name "" \
  --train_split_name "train" \
  --eval_dataset_name "NbAiLab/annotated_distil_raw_ncc_speech_v7_large" \
  --eval_dataset_config_name "" \
  --eval_split_name "validation" \
  --eval_steps 500 \
  --save_steps 1000 \
  --warmup_steps 1000 \
  --learning_rate 0.0001 \
  --lr_scheduler_type "linear" \
  --logging_steps 200 \
  --save_total_limit 1 \
  --max_steps 50000 \
  --wer_threshold 10 \
  --per_device_train_batch_size 16 \
  --per_device_eval_batch_size 16 \
  --dataloader_num_workers 16 \
  --dtype "bfloat16" \
  --output_dir "./" \
  --do_train \
  --do_eval \
  --use_scan \
  --gradient_checkpointing \
  --overwrite_output_dir \
  --predict_with_generate \
  --freeze_encoder \
  --streaming \
  --use_auth_token \
  --report_to "wandb" \
  --wandb_project "nb-distil-whisper-large-flax2" \
  --wandb_name "flax lr1e4" \
  --save_code_to_wandb \
  --save_train_state \
  --hub_model_id "NbAiLab/nb-distil-whisper-large-flax3" \
  --push_to_hub
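
# Note: assuming the script forwards --use_auth_token to the Hugging Face datasets
# and hub clients (as in the upstream distil-whisper recipe), it reads the token
# cached by a prior `huggingface-cli login`. That token is needed for --push_to_hub
# uploads to NbAiLab/nb-distil-whisper-large-flax3 and, if the dataset is private,
# for streaming it.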