leixa committed (verified)
Commit ecce98b · 1 Parent(s): 63e2446

Training in progress, step 130, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5cb15a74b3702f66007f2eb3fb5d517e7d45ca36b21e019f87cd567d4eeca531
+oid sha256:d3bc0f4fd031baa461a08a73c9d0b5c6557be7235ea59fbd23db3fc7dcaeaf52
 size 167832240
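
This file and the other checkpoint binaries below are tracked with Git LFS, so the diff only touches the pointer: the oid sha256 line is the SHA-256 digest of the real file contents and size is its byte count. A minimal sketch for verifying a fetched adapter file against the new pointer (assuming git lfs pull has already materialized it locally):

import hashlib

# Hash the materialized LFS object in 1 MiB chunks and compare with the
# pointer's oid; a mismatch means the local copy is stale or corrupted.
digest = hashlib.sha256()
with open("last-checkpoint/adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == "d3bc0f4fd031baa461a08a73c9d0b5c6557be7235ea59fbd23db3fc7dcaeaf52"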
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f5888324401df92ecd87b8c160db131f2352fa716b1dc5c258a5e358f39586d3
+oid sha256:bffac84eaac97cbaa77d1adc85cb7ccd4ffae53d401e54121aef1789235dda6a
 size 85723284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93bd630772456bf478ad7859a7b0ea3f36dda482f354ce665d158b5f4b742076
+oid sha256:e8f50bcb282a535c8d60e0cd286b37ee0fb35f76262156b1484dc636b0a4dd7d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:96bbb5cc30267ffbb6fcba5b12ec3b2b908df1e2e422c7d381d682f082ea4159
+oid sha256:507bfe4270aab5b2aa070e93d9c3404af996914dd61586771e0cc504e5b0252c
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.03640040444893832,
+  "epoch": 0.04044489383215369,
   "eval_steps": 13,
-  "global_step": 117,
+  "global_step": 130,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -360,6 +360,42 @@
       "eval_samples_per_second": 13.876,
       "eval_steps_per_second": 1.735,
       "step": 117
+    },
+    {
+      "epoch": 0.03733374815275725,
+      "grad_norm": 1.1355212926864624,
+      "learning_rate": 5.454212938299255e-06,
+      "loss": 1.4929,
+      "step": 120
+    },
+    {
+      "epoch": 0.03826709185657618,
+      "grad_norm": 1.2554372549057007,
+      "learning_rate": 4.4499481138022544e-06,
+      "loss": 1.4357,
+      "step": 123
+    },
+    {
+      "epoch": 0.03920043556039512,
+      "grad_norm": 1.289537787437439,
+      "learning_rate": 3.5387801599533475e-06,
+      "loss": 1.4257,
+      "step": 126
+    },
+    {
+      "epoch": 0.04013377926421405,
+      "grad_norm": 1.439562201499939,
+      "learning_rate": 2.7248368952908053e-06,
+      "loss": 1.4607,
+      "step": 129
+    },
+    {
+      "epoch": 0.04044489383215369,
+      "eval_loss": 1.4785248041152954,
+      "eval_runtime": 390.0687,
+      "eval_samples_per_second": 13.88,
+      "eval_steps_per_second": 1.736,
+      "step": 130
     }
   ],
   "logging_steps": 3,
@@ -379,7 +415,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.7360162435314483e+17,
+  "total_flos": 1.9289069372571648e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null