WangXFng committed on
Commit 7b8ddf5 · verified · 1 parent: ba51430

Model save

README.md CHANGED
@@ -41,6 +41,7 @@ The following hyperparameters were used during training:
 - total_train_batch_size: 256
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
+- lr_scheduler_warmup_steps: 2
 - num_epochs: 4
 
 ### Training results
@@ -49,7 +50,8 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- PEFT 0.13.2
+- PEFT 0.13.0
 - Transformers 4.45.2
-- Pytorch 2.4.1
-- Tokenizers 0.20.1
+- Pytorch 2.4.0
+- Datasets 2.21.0
+- Tokenizers 0.20.0
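The warmup step count is the only hyperparameter this commit adds. As a rough illustration, a minimal sketch of how the values above could map onto `transformers.TrainingArguments`; only the values shown in the diff are real, while `output_dir` and the per-device/accumulation split behind the total batch size of 256 are assumptions:

```python
# Minimal sketch, assuming the standard transformers Trainer API.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./output",            # assumption: not shown in the diff
    per_device_train_batch_size=32,   # assumption: 32 x 8 devices = 256 total
    num_train_epochs=4,
    lr_scheduler_type="linear",
    warmup_steps=2,                   # the lr_scheduler_warmup_steps added here
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```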
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "o_proj",
     "down_proj",
-    "v_proj",
-    "up_proj",
     "k_proj",
-    "gate_proj"
+    "up_proj",
+    "o_proj",
+    "gate_proj",
+    "q_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6367bd7f0fb3ab036311a0aab0599e1ed32039ee2f89d86153d7b2b9be39edba
+oid sha256:1a62fcaa260a8997f02feab49617d5deee6d1dec793e9405094db6fea534bf21
 size 1635969696
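The file above is a Git LFS pointer: the sha256 oid changed while the payload size stayed identical, i.e. the adapter weights were retrained but have the same shape. A small sketch for checking a downloaded object against the pointer; the local path is hypothetical:

```python
# Verify a downloaded LFS object against the sha256 oid and size recorded
# in the pointer file above.
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file matches the LFS pointer's sha256 and size."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

print(verify_lfs_object(
    "adapter_model.safetensors",  # hypothetical local path
    "1a62fcaa260a8997f02feab49617d5deee6d1dec793e9405094db6fea534bf21",
    1635969696,
))
```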
trainer_state.json CHANGED
@@ -10,68 +10,68 @@
   "log_history": [
     {
       "epoch": 0.4854368932038835,
-      "grad_norm": 0.9188441634178162,
-      "learning_rate": 8.786407766990292e-05,
-      "loss": 0.8368,
+      "grad_norm": 1.1003823280334473,
+      "learning_rate": 8.794946550048592e-05,
+      "loss": 0.8551,
       "step": 250
     },
     {
       "epoch": 0.970873786407767,
-      "grad_norm": 0.6881112456321716,
-      "learning_rate": 7.572815533980583e-05,
-      "loss": 0.4246,
+      "grad_norm": 0.7180963158607483,
+      "learning_rate": 7.580174927113704e-05,
+      "loss": 0.4137,
       "step": 500
     },
     {
       "epoch": 1.4563106796116505,
-      "grad_norm": 0.6344935297966003,
-      "learning_rate": 6.359223300970875e-05,
-      "loss": 0.3804,
+      "grad_norm": 0.6523966789245605,
+      "learning_rate": 6.365403304178815e-05,
+      "loss": 0.3735,
       "step": 750
     },
     {
       "epoch": 1.941747572815534,
-      "grad_norm": 0.6141209006309509,
-      "learning_rate": 5.145631067961165e-05,
-      "loss": 0.3646,
+      "grad_norm": 0.6745087504386902,
+      "learning_rate": 5.150631681243926e-05,
+      "loss": 0.3583,
       "step": 1000
     },
     {
       "epoch": 2.4271844660194173,
-      "grad_norm": 0.5896692276000977,
-      "learning_rate": 3.9320388349514564e-05,
-      "loss": 0.3538,
+      "grad_norm": 0.6706854104995728,
+      "learning_rate": 3.9358600583090386e-05,
+      "loss": 0.345,
       "step": 1250
     },
     {
       "epoch": 2.912621359223301,
-      "grad_norm": 0.6480565667152405,
-      "learning_rate": 2.7184466019417475e-05,
-      "loss": 0.3462,
+      "grad_norm": 0.7672198414802551,
+      "learning_rate": 2.72108843537415e-05,
+      "loss": 0.333,
       "step": 1500
     },
     {
       "epoch": 3.3980582524271843,
-      "grad_norm": 0.6381719708442688,
-      "learning_rate": 1.5048543689320387e-05,
-      "loss": 0.3338,
+      "grad_norm": 0.7942991256713867,
+      "learning_rate": 1.5063168124392615e-05,
+      "loss": 0.3142,
       "step": 1750
     },
     {
       "epoch": 3.883495145631068,
-      "grad_norm": 0.610920250415802,
-      "learning_rate": 2.912621359223301e-06,
-      "loss": 0.3259,
+      "grad_norm": 0.7696407437324524,
+      "learning_rate": 2.915451895043732e-06,
+      "loss": 0.302,
       "step": 2000
     },
     {
       "epoch": 4.0,
       "step": 2060,
       "total_flos": 1.4445804612483994e+18,
-      "train_loss": 0.4179296539825143,
-      "train_runtime": 17434.171,
-      "train_samples_per_second": 30.248,
-      "train_steps_per_second": 0.118
+      "train_loss": 0.40855656966422366,
+      "train_runtime": 23649.5822,
+      "train_samples_per_second": 22.298,
+      "train_steps_per_second": 0.087
     }
   ],
   "logging_steps": 250,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7484e26a2443ab001dada28afc5e4336045ee6f01f36bb190af4ea081f0e6eb8
+oid sha256:0b5fb93126c6b5a18f8421a4eeddaac11f98a23d05e6f786996af63a656ca025
 size 5240
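training_args.bin is typically a torch-pickled TrainingArguments object rather than a tensor file, so inspecting the new arguments could look like the sketch below, assuming a compatible transformers install:

```python
# Sketch: unpickle training_args.bin. weights_only=False is required because
# it is a pickled Python object, not a tensor checkpoint.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.lr_scheduler_type, args.warmup_steps, args.num_train_epochs)
```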