Add files using upload-large-folder tool
- adapter_config.json +34 -0
- checkpoint-1000/adapter_config.json +34 -0
- checkpoint-2000/adapter_config.json +34 -0
- checkpoint-2000/added_tokens.json +24 -0
- checkpoint-2000/latest +1 -0
- checkpoint-2000/merges.txt +0 -0
- checkpoint-2000/special_tokens_map.json +31 -0
- checkpoint-2000/tokenizer_config.json +208 -0
- checkpoint-2000/trainer_state.json +1457 -0
- checkpoint-2000/vocab.json +0 -0
- checkpoint-2000/zero_to_fp32.py +674 -0
- checkpoint-2500/README.md +202 -0
- checkpoint-2500/adapter_config.json +34 -0
- checkpoint-2500/added_tokens.json +24 -0
- checkpoint-2500/latest +1 -0
- checkpoint-2500/merges.txt +0 -0
- checkpoint-2500/special_tokens_map.json +31 -0
- checkpoint-2500/tokenizer_config.json +208 -0
- checkpoint-2500/trainer_state.json +1815 -0
- checkpoint-2500/vocab.json +0 -0
- checkpoint-2500/zero_to_fp32.py +674 -0
- checkpoint-3000/README.md +202 -0
- checkpoint-4000/adapter_config.json +34 -0
- checkpoint-4000/added_tokens.json +24 -0
- checkpoint-4000/latest +1 -0
- checkpoint-4000/merges.txt +0 -0
- checkpoint-4000/special_tokens_map.json +31 -0
- checkpoint-4000/tokenizer_config.json +208 -0
- checkpoint-4000/trainer_state.json +2881 -0
- checkpoint-4000/vocab.json +0 -0
- checkpoint-4000/zero_to_fp32.py +674 -0
- checkpoint-4500/README.md +202 -0
- checkpoint-4500/adapter_config.json +34 -0
- checkpoint-4500/added_tokens.json +24 -0
- checkpoint-4500/latest +1 -0
- checkpoint-4500/merges.txt +0 -0
- checkpoint-4500/special_tokens_map.json +31 -0
- checkpoint-4500/tokenizer_config.json +208 -0
- checkpoint-4500/trainer_state.json +3239 -0
- checkpoint-4500/vocab.json +0 -0
- checkpoint-4500/zero_to_fp32.py +674 -0
- checkpoint-5000/README.md +202 -0
- checkpoint-5000/adapter_config.json +34 -0
- checkpoint-5000/added_tokens.json +24 -0
- checkpoint-5000/latest +1 -0
- checkpoint-5000/merges.txt +0 -0
- checkpoint-5000/special_tokens_map.json +31 -0
- checkpoint-5000/tokenizer_config.json +208 -0
- checkpoint-5000/trainer_state.json +3597 -0
- checkpoint-5000/vocab.json +0 -0
adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Qwen/Qwen2.5-72B",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 128,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "k_proj",
+    "up_proj",
+    "gate_proj",
+    "o_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": true
+}
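The config above describes a rank-128 rsLoRA adapter over all attention and MLP projections of Qwen/Qwen2.5-72B. A minimal loading sketch with Transformers + PEFT follows; the local adapter path is a hypothetical placeholder, and device_map="auto" is an assumption for spreading the 72B base model across available devices.

# Minimal sketch, assuming a local copy of this adapter; not the uploader's own script.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-72B",   # matches "base_model_name_or_path" above
    torch_dtype="auto",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-72B")
# PeftModel reads adapter_config.json (r=128, lora_alpha=32, rsLoRA scaling).
model = PeftModel.from_pretrained(base, "./my-adapter")  # hypothetical path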
checkpoint-1000/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Qwen/Qwen2.5-72B",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 128,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "k_proj",
+    "up_proj",
+    "gate_proj",
+    "o_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": true
+}
checkpoint-2000/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Qwen/Qwen2.5-72B",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 128,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "k_proj",
+    "up_proj",
+    "gate_proj",
+    "o_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": true
+}
checkpoint-2000/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "</tool_call>": 151658,
+  "<tool_call>": 151657,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
checkpoint-2000/latest
ADDED
@@ -0,0 +1 @@
+global_step2000
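The "latest" file is DeepSpeed's pointer to the most recent checkpoint tag (global_step2000); the bundled zero_to_fp32.py uses it to consolidate ZeRO-partitioned shards into a single fp32 state dict. A sketch of the programmatic route, assuming DeepSpeed is installed; this is not part of the upload itself.

# Sketch: consolidate ZeRO shards into an fp32 state dict (assumes deepspeed installed).
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Reads checkpoint-2000/latest to resolve the tag "global_step2000".
state_dict = get_fp32_state_dict_from_zero_checkpoint("checkpoint-2000")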
checkpoint-2000/merges.txt
ADDED
The diff for this file is too large to render.
checkpoint-2000/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-2000/tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+    "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+    "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+    "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+    "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+    "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+    "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+    "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 131072,
+  "pad_token": "<|endoftext|>",
+  "padding_side": "right",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
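The "chat_template" above is the Qwen2.5 ChatML template with tool-calling support. A short sketch of rendering it through the tokenizer; the message list is illustrative, not from the upload.

# Sketch: render the ChatML template defined in tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-2000")  # loads this tokenizer_config.json
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# -> "<|im_start|>system\n...<|im_end|>\n<|im_start|>user\nHello!<|im_end|>\n<|im_start|>assistant\n"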
checkpoint-2000/trainer_state.json
ADDED
@@ -0,0 +1,1457 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.697072549851506,
+  "eval_steps": 600,
+  "global_step": 2000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {"epoch": 0.00848536274925753, "grad_norm": 0.4898678891363344, "learning_rate": 8.488964346349746e-07, "loss": 1.8056, "step": 10},
+    {"epoch": 0.01697072549851506, "grad_norm": 0.3537473179717183, "learning_rate": 1.6977928692699491e-06, "loss": 1.7621, "step": 20},
+    {"epoch": 0.025456088247772592, "grad_norm": 0.28215953004159977, "learning_rate": 2.546689303904924e-06, "loss": 1.7571, "step": 30},
+    {"epoch": 0.03394145099703012, "grad_norm": 0.27446565146764923, "learning_rate": 3.3955857385398982e-06, "loss": 1.7136, "step": 40},
+    {"epoch": 0.04242681374628765, "grad_norm": 0.17051549768176558, "learning_rate": 4.244482173174873e-06, "loss": 1.6767, "step": 50},
+    {"epoch": 0.050912176495545185, "grad_norm": 0.17763882467320422, "learning_rate": 5.093378607809848e-06, "loss": 1.6371, "step": 60},
+    {"epoch": 0.05939753924480271, "grad_norm": 0.14311462596290048, "learning_rate": 5.942275042444822e-06, "loss": 1.6324, "step": 70},
+    {"epoch": 0.06788290199406025, "grad_norm": 0.1659540846071645, "learning_rate": 6.7911714770797965e-06, "loss": 1.6062, "step": 80},
+    {"epoch": 0.07636826474331777, "grad_norm": 0.20064072815620043, "learning_rate": 7.640067911714771e-06, "loss": 1.5832, "step": 90},
+    {"epoch": 0.0848536274925753, "grad_norm": 0.2179045681711979, "learning_rate": 8.488964346349745e-06, "loss": 1.5898, "step": 100},
+    {"epoch": 0.09333899024183284, "grad_norm": 0.23866012053128668, "learning_rate": 9.337860780984721e-06, "loss": 1.5924, "step": 110},
+    {"epoch": 0.10182435299109037, "grad_norm": 0.18578051776430282, "learning_rate": 1.0186757215619695e-05, "loss": 1.5877, "step": 120},
+    {"epoch": 0.1103097157403479, "grad_norm": 0.2216509707409362, "learning_rate": 1.103565365025467e-05, "loss": 1.5947, "step": 130},
+    {"epoch": 0.11879507848960542, "grad_norm": 0.20427142255694086, "learning_rate": 1.1884550084889643e-05, "loss": 1.5841, "step": 140},
+    {"epoch": 0.12728044123886295, "grad_norm": 0.1765851415675038, "learning_rate": 1.2733446519524619e-05, "loss": 1.5878, "step": 150},
+    {"epoch": 0.1357658039881205, "grad_norm": 0.1769355117060811, "learning_rate": 1.3582342954159593e-05, "loss": 1.5795, "step": 160},
+    {"epoch": 0.14425116673737803, "grad_norm": 0.1617675663096666, "learning_rate": 1.4431239388794569e-05, "loss": 1.5549, "step": 170},
+    {"epoch": 0.15273652948663555, "grad_norm": 0.17302259072151574, "learning_rate": 1.5280135823429543e-05, "loss": 1.5808, "step": 180},
+    {"epoch": 0.1612218922358931, "grad_norm": 0.16876039012432806, "learning_rate": 1.6129032258064517e-05, "loss": 1.5676, "step": 190},
+    {"epoch": 0.1697072549851506, "grad_norm": 0.19627360154037596, "learning_rate": 1.697792869269949e-05, "loss": 1.5598, "step": 200},
+    {"epoch": 0.17819261773440814, "grad_norm": 0.16078510362361015, "learning_rate": 1.7826825127334465e-05, "loss": 1.5667, "step": 210},
+    {"epoch": 0.18667798048366568, "grad_norm": 0.16044786518959703, "learning_rate": 1.8675721561969442e-05, "loss": 1.5815, "step": 220},
+    {"epoch": 0.1951633432329232, "grad_norm": 0.15656958873834717, "learning_rate": 1.9524617996604416e-05, "loss": 1.5576, "step": 230},
+    {"epoch": 0.20364870598218074, "grad_norm": 0.1687290471357602, "learning_rate": 2.037351443123939e-05, "loss": 1.5453, "step": 240},
+    {"epoch": 0.21213406873143828, "grad_norm": 0.1519017348276184, "learning_rate": 2.1222410865874364e-05, "loss": 1.5554, "step": 250},
+    {"epoch": 0.2206194314806958, "grad_norm": 0.15761892005160086, "learning_rate": 2.207130730050934e-05, "loss": 1.5494, "step": 260},
+    {"epoch": 0.22910479422995333, "grad_norm": 0.16857088482977495, "learning_rate": 2.2920203735144312e-05, "loss": 1.5794, "step": 270},
+    {"epoch": 0.23759015697921085, "grad_norm": 0.1678705209913503, "learning_rate": 2.3769100169779286e-05, "loss": 1.5373, "step": 280},
+    {"epoch": 0.2460755197284684, "grad_norm": 0.14812649566587394, "learning_rate": 2.461799660441426e-05, "loss": 1.5504, "step": 290},
+    {"epoch": 0.2545608824777259, "grad_norm": 0.17651916734325857, "learning_rate": 2.5466893039049238e-05, "loss": 1.5607, "step": 300},
+    {"epoch": 0.26304624522698344, "grad_norm": 0.14883055338507856, "learning_rate": 2.6315789473684212e-05, "loss": 1.5311, "step": 310},
+    {"epoch": 0.271531607976241, "grad_norm": 0.15787522753231265, "learning_rate": 2.7164685908319186e-05, "loss": 1.5656, "step": 320},
+    {"epoch": 0.2800169707254985, "grad_norm": 0.1625232940237689, "learning_rate": 2.801358234295416e-05, "loss": 1.5686, "step": 330},
+    {"epoch": 0.28850233347475607, "grad_norm": 0.18505951289343867, "learning_rate": 2.8862478777589137e-05, "loss": 1.5474, "step": 340},
+    {"epoch": 0.29698769622401355, "grad_norm": 0.13785772316349984, "learning_rate": 2.9711375212224108e-05, "loss": 1.5696, "step": 350},
+    {"epoch": 0.3054730589732711, "grad_norm": 0.13531274658248552, "learning_rate": 3.0560271646859086e-05, "loss": 1.5551, "step": 360},
+    {"epoch": 0.31395842172252864, "grad_norm": 0.1366381415368909, "learning_rate": 3.140916808149406e-05, "loss": 1.524, "step": 370},
+    {"epoch": 0.3224437844717862, "grad_norm": 0.14587220569353926, "learning_rate": 3.2258064516129034e-05, "loss": 1.5515, "step": 380},
+    {"epoch": 0.3309291472210437, "grad_norm": 0.13336349383744864, "learning_rate": 3.310696095076401e-05, "loss": 1.5457, "step": 390},
+    {"epoch": 0.3394145099703012, "grad_norm": 0.1772016947970983, "learning_rate": 3.395585738539898e-05, "loss": 1.5582, "step": 400},
+    {"epoch": 0.34789987271955874, "grad_norm": 0.13819420575084573, "learning_rate": 3.4804753820033956e-05, "loss": 1.5326, "step": 410},
+    {"epoch": 0.3563852354688163, "grad_norm": 0.12729862167862188, "learning_rate": 3.565365025466893e-05, "loss": 1.5387, "step": 420},
+    {"epoch": 0.3648705982180738, "grad_norm": 0.11777082851399363, "learning_rate": 3.6502546689303904e-05, "loss": 1.5587, "step": 430},
+    {"epoch": 0.37335596096733137, "grad_norm": 0.15372268131323022, "learning_rate": 3.7351443123938885e-05, "loss": 1.5362, "step": 440},
+    {"epoch": 0.3818413237165889, "grad_norm": 0.12616185572252248, "learning_rate": 3.820033955857386e-05, "loss": 1.5548, "step": 450},
+    {"epoch": 0.3903266864658464, "grad_norm": 0.1311200786303391, "learning_rate": 3.904923599320883e-05, "loss": 1.5409, "step": 460},
+    {"epoch": 0.39881204921510394, "grad_norm": 0.1707919112561785, "learning_rate": 3.989813242784381e-05, "loss": 1.5509, "step": 470},
+    {"epoch": 0.4072974119643615, "grad_norm": 0.14660149264284913, "learning_rate": 4.074702886247878e-05, "loss": 1.5433, "step": 480},
+    {"epoch": 0.415782774713619, "grad_norm": 0.12478895483834351, "learning_rate": 4.1595925297113755e-05, "loss": 1.5382, "step": 490},
+    {"epoch": 0.42426813746287656, "grad_norm": 0.12327957445795817, "learning_rate": 4.244482173174873e-05, "loss": 1.5515, "step": 500},
+    {"epoch": 0.43275350021213405, "grad_norm": 0.12922777738650987, "learning_rate": 4.32937181663837e-05, "loss": 1.5688, "step": 510},
+    {"epoch": 0.4412388629613916, "grad_norm": 0.12486802189783415, "learning_rate": 4.414261460101868e-05, "loss": 1.5452, "step": 520},
+    {"epoch": 0.44972422571064913, "grad_norm": 0.1360610874577123, "learning_rate": 4.499151103565366e-05, "loss": 1.5493, "step": 530},
+    {"epoch": 0.45820958845990667, "grad_norm": 0.1884897685356775, "learning_rate": 4.5840407470288625e-05, "loss": 1.5511, "step": 540},
+    {"epoch": 0.4666949512091642, "grad_norm": 0.12446302384809525, "learning_rate": 4.6689303904923606e-05, "loss": 1.5458, "step": 550},
+    {"epoch": 0.4751803139584217, "grad_norm": 0.13169591804768588, "learning_rate": 4.753820033955857e-05, "loss": 1.5569, "step": 560},
+    {"epoch": 0.48366567670767924, "grad_norm": 0.1343809247449631, "learning_rate": 4.8387096774193554e-05, "loss": 1.5408, "step": 570},
+    {"epoch": 0.4921510394569368, "grad_norm": 0.14024589853602, "learning_rate": 4.923599320882852e-05, "loss": 1.5487, "step": 580},
+    {"epoch": 0.5006364022061943, "grad_norm": 0.16240429253875313, "learning_rate": 4.999999560970061e-05, "loss": 1.5488, "step": 590},
+    {"epoch": 0.5091217649554518, "grad_norm": 0.12575424857894482, "learning_rate": 4.999946877563971e-05, "loss": 1.532, "step": 600},
+    {"epoch": 0.5091217649554518, "eval_loss": 1.519254446029663, "eval_runtime": 53.3242, "eval_samples_per_second": 7.145, "eval_steps_per_second": 0.9, "step": 600},
+    {"epoch": 0.5176071277047094, "grad_norm": 0.18688482756329736, "learning_rate": 4.999806390290309e-05, "loss": 1.5544, "step": 610},
+    {"epoch": 0.5260924904539669, "grad_norm": 0.12425469431830571, "learning_rate": 4.999578104083307e-05, "loss": 1.5443, "step": 620},
+    {"epoch": 0.5345778532032245, "grad_norm": 0.1299027485420099, "learning_rate": 4.999262026960902e-05, "loss": 1.5569, "step": 630},
+    {"epoch": 0.543063215952482, "grad_norm": 0.11441754852508934, "learning_rate": 4.998858170024449e-05, "loss": 1.5316, "step": 640},
+    {"epoch": 0.5515485787017395, "grad_norm": 0.14888547248976478, "learning_rate": 4.998366547458326e-05, "loss": 1.5177, "step": 650},
+    {"epoch": 0.560033941450997, "grad_norm": 0.14859292774768867, "learning_rate": 4.997787176529449e-05, "loss": 1.5394, "step": 660},
+    {"epoch": 0.5685193042002545, "grad_norm": 0.12499154376539734, "learning_rate": 4.997120077586651e-05, "loss": 1.5554, "step": 670},
+    {"epoch": 0.5770046669495121, "grad_norm": 0.1218974898058821, "learning_rate": 4.9963652740599774e-05, "loss": 1.5335, "step": 680},
+    {"epoch": 0.5854900296987696, "grad_norm": 0.1273110498715124, "learning_rate": 4.995522792459859e-05, "loss": 1.5349, "step": 690},
+    {"epoch": 0.5939753924480271, "grad_norm": 0.12115412881719101, "learning_rate": 4.994592662376183e-05, "loss": 1.5419, "step": 700},
+    {"epoch": 0.6024607551972847, "grad_norm": 0.14855096330233286, "learning_rate": 4.99357491647725e-05, "loss": 1.513, "step": 710},
+    {"epoch": 0.6109461179465422, "grad_norm": 0.11407988659327956, "learning_rate": 4.992469590508628e-05, "loss": 1.5243, "step": 720},
+    {"epoch": 0.6194314806957998, "grad_norm": 0.1197712643781127, "learning_rate": 4.9912767232919035e-05, "loss": 1.5177, "step": 730},
+    {"epoch": 0.6279168434450573, "grad_norm": 0.12400515877262065, "learning_rate": 4.9899963567233074e-05, "loss": 1.5619, "step": 740},
+    {"epoch": 0.6364022061943148, "grad_norm": 0.12250385257708406, "learning_rate": 4.988628535772249e-05, "loss": 1.539, "step": 750},
+    {"epoch": 0.6448875689435724, "grad_norm": 0.1262441090496857, "learning_rate": 4.987173308479738e-05, "loss": 1.5195, "step": 760},
+    {"epoch": 0.6533729316928298, "grad_norm": 0.12459694416473029, "learning_rate": 4.985630725956694e-05, "loss": 1.5462, "step": 770},
+    {"epoch": 0.6618582944420874, "grad_norm": 0.12985189006106762, "learning_rate": 4.9840008423821527e-05, "loss": 1.5113, "step": 780},
+    {"epoch": 0.6703436571913449, "grad_norm": 0.12689306141471304, "learning_rate": 4.9822837150013636e-05, "loss": 1.5201, "step": 790},
+    {"epoch": 0.6788290199406024, "grad_norm": 0.15393156370587963, "learning_rate": 4.980479404123778e-05, "loss": 1.5121, "step": 800},
+    {"epoch": 0.68731438268986, "grad_norm": 0.13213701895207608, "learning_rate": 4.978587973120931e-05, "loss": 1.5307, "step": 810},
+    {"epoch": 0.6957997454391175, "grad_norm": 0.11561354931316294, "learning_rate": 4.9766094884242184e-05, "loss": 1.5316, "step": 820},
+    {"epoch": 0.7042851081883751, "grad_norm": 0.12414772399330044, "learning_rate": 4.974544019522559e-05, "loss": 1.5148, "step": 830},
+    {"epoch": 0.7127704709376326, "grad_norm": 0.1171652849153521, "learning_rate": 4.972391638959959e-05, "loss": 1.5096, "step": 840},
+    {"epoch": 0.7212558336868902, "grad_norm": 0.12868937349582316, "learning_rate": 4.9701524223329585e-05, "loss": 1.5282, "step": 850},
+    {"epoch": 0.7297411964361477, "grad_norm": 0.1200015077117309, "learning_rate": 4.967826448287981e-05, "loss": 1.5512, "step": 860},
+    {"epoch": 0.7382265591854051, "grad_norm": 0.12340885660045105, "learning_rate": 4.96541379851857e-05, "loss": 1.5394, "step": 870},
+    {"epoch": 0.7467119219346627, "grad_norm": 0.12976937691467555, "learning_rate": 4.962914557762517e-05, "loss": 1.51, "step": 880},
+    {"epoch": 0.7551972846839202, "grad_norm": 0.11912878476038466, "learning_rate": 4.9603288137988905e-05, "loss": 1.5294, "step": 890},
+    {"epoch": 0.7636826474331778, "grad_norm": 0.1299625480337927, "learning_rate": 4.957656657444947e-05, "loss": 1.507, "step": 900},
+    {"epoch": 0.7721680101824353, "grad_norm": 0.12380144459698468, "learning_rate": 4.954898182552946e-05, "loss": 1.5376, "step": 910},
+    {"epoch": 0.7806533729316928, "grad_norm": 0.13139339643682763, "learning_rate": 4.9520534860068535e-05, "loss": 1.5291, "step": 920},
+    {"epoch": 0.7891387356809504, "grad_norm": 0.13088956203983898, "learning_rate": 4.949122667718935e-05, "loss": 1.5239, "step": 930},
+    {"epoch": 0.7976240984302079, "grad_norm": 0.12586052988453703, "learning_rate": 4.94610583062625e-05, "loss": 1.5525, "step": 940},
+    {"epoch": 0.8061094611794655, "grad_norm": 0.12020996031652877, "learning_rate": 4.943003080687035e-05, "loss": 1.5525, "step": 950},
+    {"epoch": 0.814594823928723, "grad_norm": 0.12866375954060869, "learning_rate": 4.9398145268769856e-05, "loss": 1.5266, "step": 960},
+    {"epoch": 0.8230801866779804, "grad_norm": 0.13166136756817035, "learning_rate": 4.936540281185423e-05, "loss": 1.5041, "step": 970},
+    {"epoch": 0.831565549427238, "grad_norm": 0.12481946698483787, "learning_rate": 4.933180458611364e-05, "loss": 1.5271, "step": 980},
+    {"epoch": 0.8400509121764955, "grad_norm": 0.12264463761209114, "learning_rate": 4.9297351771594844e-05, "loss": 1.5354, "step": 990},
+    {"epoch": 0.8485362749257531, "grad_norm": 0.11985452856537594, "learning_rate": 4.926204557835968e-05, "loss": 1.5167, "step": 1000},
+    {"epoch": 0.8570216376750106, "grad_norm": 0.13125396521190327, "learning_rate": 4.9225887246442634e-05, "loss": 1.5282, "step": 1010},
+    {"epoch": 0.8655070004242681, "grad_norm": 0.12730192328072554, "learning_rate": 4.918887804580725e-05, "loss": 1.5089, "step": 1020},
+    {"epoch": 0.8739923631735257, "grad_norm": 0.12724644219344786, "learning_rate": 4.915101927630153e-05, "loss": 1.4964, "step": 1030},
+    {"epoch": 0.8824777259227832, "grad_norm": 0.13578611501833232, "learning_rate": 4.911231226761227e-05, "loss": 1.5189, "step": 1040},
+    {"epoch": 0.8909630886720408, "grad_norm": 0.13577513964986457, "learning_rate": 4.90727583792184e-05, "loss": 1.5149, "step": 1050},
+    {"epoch": 0.8994484514212983, "grad_norm": 0.1269735011676505, "learning_rate": 4.903235900034317e-05, "loss": 1.5066, "step": 1060},
+    {"epoch": 0.9079338141705557, "grad_norm": 0.13250058214235566, "learning_rate": 4.899111554990543e-05, "loss": 1.5129, "step": 1070},
+    {"epoch": 0.9164191769198133, "grad_norm": 0.13130735246433495, "learning_rate": 4.894902947646975e-05, "loss": 1.5156, "step": 1080},
+    {"epoch": 0.9249045396690708, "grad_norm": 0.1273580180253049, "learning_rate": 4.890610225819553e-05, "loss": 1.5324, "step": 1090},
+    {"epoch": 0.9333899024183284, "grad_norm": 0.13155314243939242, "learning_rate": 4.8862335402785136e-05, "loss": 1.5106, "step": 1100},
+    {"epoch": 0.9418752651675859, "grad_norm": 0.13564895211984299, "learning_rate": 4.88177304474309e-05, "loss": 1.5067, "step": 1110},
+    {"epoch": 0.9503606279168434, "grad_norm": 0.12774735587114736, "learning_rate": 4.877228895876115e-05, "loss": 1.5182, "step": 1120},
+    {"epoch": 0.958845990666101, "grad_norm": 0.1307997709537685, "learning_rate": 4.872601253278517e-05, "loss": 1.4969, "step": 1130},
+    {"epoch": 0.9673313534153585, "grad_norm": 0.1304794845040634, "learning_rate": 4.867890279483717e-05, "loss": 1.5264, "step": 1140},
+    {"epoch": 0.9758167161646161, "grad_norm": 0.13666141796489684, "learning_rate": 4.8630961399519206e-05, "loss": 1.5467, "step": 1150},
+    {"epoch": 0.9843020789138736, "grad_norm": 0.1370278303190263, "learning_rate": 4.8582190030643e-05, "loss": 1.5127, "step": 1160},
+    {"epoch": 0.9927874416631312, "grad_norm": 0.1390936629299565, "learning_rate": 4.8532590401170894e-05, "loss": 1.5058, "step": 1170},
+    {"epoch": 1.0012728044123886, "grad_norm": 0.12934475548108287, "learning_rate": 4.848216425315561e-05, "loss": 1.5202, "step": 1180},
+    {"epoch": 1.0097581671616462, "grad_norm": 0.13898591683370803, "learning_rate": 4.843091335767913e-05, "loss": 1.4563, "step": 1190},
+    {"epoch": 1.0182435299109036, "grad_norm": 0.17488231535826249, "learning_rate": 4.837883951479043e-05, "loss": 1.4402, "step": 1200},
+    {"epoch": 1.0182435299109036, "eval_loss": 1.4955657720565796, "eval_runtime": 52.424, "eval_samples_per_second": 7.268, "eval_steps_per_second": 0.916, "step": 1200},
+    {"epoch": 1.0267288926601612, "grad_norm": 0.1536036344095855, "learning_rate": 4.832594455344229e-05, "loss": 1.4848, "step": 1210},
+    {"epoch": 1.0352142554094188, "grad_norm": 0.15762414421336599, "learning_rate": 4.827223033142706e-05, "loss": 1.4567, "step": 1220},
+    {"epoch": 1.0436996181586762, "grad_norm": 0.15058229398130366, "learning_rate": 4.8217698735311414e-05, "loss": 1.4672, "step": 1230},
+    {"epoch": 1.0521849809079338, "grad_norm": 0.16010992835678386, "learning_rate": 4.8162351680370044e-05, "loss": 1.4458, "step": 1240},
+    {"epoch": 1.0606703436571914, "grad_norm": 0.16758816000341356, "learning_rate": 4.810619111051847e-05, "loss": 1.4842, "step": 1250},
+    {"epoch": 1.069155706406449, "grad_norm": 0.16559260972674986, "learning_rate": 4.8049218998244696e-05, "loss": 1.4556, "step": 1260},
+    {"epoch": 1.0776410691557063, "grad_norm": 0.17237632034416966, "learning_rate": 4.7991437344539966e-05, "loss": 1.4813, "step": 1270},
+    {"epoch": 1.086126431904964, "grad_norm": 0.17112756741722487, "learning_rate": 4.793284817882845e-05, "loss": 1.4535, "step": 1280},
+    {"epoch": 1.0946117946542215, "grad_norm": 0.16828572707718548, "learning_rate": 4.787345355889604e-05, "loss": 1.4344, "step": 1290},
+    {"epoch": 1.103097157403479, "grad_norm": 0.15709986047041227, "learning_rate": 4.7813255570817985e-05, "loss": 1.4744, "step": 1300},
+    {"epoch": 1.1115825201527365, "grad_norm": 0.16651547128146313, "learning_rate": 4.775225632888568e-05, "loss": 1.4561, "step": 1310},
+    {"epoch": 1.120067882901994, "grad_norm": 0.16750176017515714, "learning_rate": 4.76904579755324e-05, "loss": 1.4616, "step": 1320},
+    {"epoch": 1.1285532456512515, "grad_norm": 0.1608016567554825, "learning_rate": 4.7627862681258037e-05, "loss": 1.4593, "step": 1330},
+    {"epoch": 1.137038608400509, "grad_norm": 0.21390766919038295, "learning_rate": 4.756447264455287e-05, "loss": 1.4484, "step": 1340},
+    {"epoch": 1.1455239711497667, "grad_norm": 0.16826883293172662, "learning_rate": 4.750029009182038e-05, "loss": 1.4703, "step": 1350},
+    {"epoch": 1.1540093338990243, "grad_norm": 0.17431508867079595, "learning_rate": 4.7435317277299e-05, "loss": 1.4701, "step": 1360},
+    {"epoch": 1.1624946966482816, "grad_norm": 0.15973851467570443, "learning_rate": 4.736955648298299e-05, "loss": 1.4503, "step": 1370},
+    {"epoch": 1.1709800593975392, "grad_norm": 0.1887713767970947, "learning_rate": 4.730301001854225e-05, "loss": 1.4624, "step": 1380},
+    {"epoch": 1.1794654221467968, "grad_norm": 0.16898695344997974, "learning_rate": 4.7235680221241216e-05, "loss": 1.4452, "step": 1390},
+    {"epoch": 1.1879507848960542, "grad_norm": 0.20014553287073528, "learning_rate": 4.716756945585681e-05, "loss": 1.4717, "step": 1400},
+    {"epoch": 1.1964361476453118, "grad_norm": 0.17137954325200072, "learning_rate": 4.709868011459528e-05, "loss": 1.4403, "step": 1410},
+    {"epoch": 1.2049215103945694, "grad_norm": 0.17801721751888322, "learning_rate": 4.7029014617008294e-05, "loss": 1.4339, "step": 1420},
+    {"epoch": 1.213406873143827, "grad_norm": 0.17139613676642362, "learning_rate": 4.695857540990789e-05, "loss": 1.4573, "step": 1430},
+    {"epoch": 1.2218922358930844, "grad_norm": 0.16971403514498054, "learning_rate": 4.688736496728058e-05, "loss": 1.4282, "step": 1440},
+    {"epoch": 1.230377598642342, "grad_norm": 0.17200272420880428, "learning_rate": 4.681538579020038e-05, "loss": 1.4434, "step": 1450},
+    {"epoch": 1.2388629613915996, "grad_norm": 0.17208160407432616, "learning_rate": 4.6742640406741106e-05, "loss": 1.45, "step": 1460},
+    {"epoch": 1.247348324140857, "grad_norm": 0.1939626212901777, "learning_rate": 4.666913137188743e-05, "loss": 1.4608, "step": 1470},
+    {"epoch": 1.2558336868901145, "grad_norm": 0.17291794493304186, "learning_rate": 4.6594861267445236e-05, "loss": 1.4671, "step": 1480},
+    {"epoch": 1.2643190496393721, "grad_norm": 0.18219792041638924, "learning_rate": 4.651983270195093e-05, "loss": 1.4262, "step": 1490},
+    {"epoch": 1.2728044123886297, "grad_norm": 0.18086437830489926, "learning_rate": 4.644404831057979e-05, "loss": 1.4455, "step": 1500},
+    {"epoch": 1.281289775137887, "grad_norm": 0.17417619624549402, "learning_rate": 4.636751075505344e-05, "loss": 1.4873, "step": 1510},
+    {"epoch": 1.2897751378871447, "grad_norm": 0.18354282411845188, "learning_rate": 4.629022272354637e-05, "loss": 1.4525, "step": 1520},
+    {"epoch": 1.298260500636402, "grad_norm": 0.17985617345325455, "learning_rate": 4.621218693059149e-05, "loss": 1.4303, "step": 1530},
+    {"epoch": 1.3067458633856597, "grad_norm": 0.1809708317849863, "learning_rate": 4.6133406116984795e-05, "loss": 1.4631, "step": 1540},
+    {"epoch": 1.3152312261349173, "grad_norm": 0.17487374671212322, "learning_rate": 4.6053883049689145e-05, "loss": 1.4482, "step": 1550},
+    {"epoch": 1.3237165888841749, "grad_norm": 0.19912807671077193, "learning_rate": 4.5973620521737036e-05, "loss": 1.4497, "step": 1560},
+    {"epoch": 1.3322019516334322, "grad_norm": 0.17853627546912074, "learning_rate": 4.5892621352132514e-05, "loss": 1.4456, "step": 1570},
+    {"epoch": 1.3406873143826898, "grad_norm": 0.18252596927754394, "learning_rate": 4.581088838575218e-05, "loss": 1.4328, "step": 1580},
+    {"epoch": 1.3491726771319474, "grad_norm": 0.17604951053556211, "learning_rate": 4.572842449324525e-05, "loss": 1.4442, "step": 1590},
+    {"epoch": 1.3576580398812048, "grad_norm": 0.18358942463311748, "learning_rate": 4.564523257093275e-05, "loss": 1.4338, "step": 1600},
+    {"epoch": 1.3661434026304624, "grad_norm": 0.20508703236267142, "learning_rate": 4.5561315540705774e-05, "loss": 1.4445, "step": 1610},
+    {"epoch": 1.37462876537972, "grad_norm": 0.18486352550747187, "learning_rate": 4.547667634992288e-05, "loss": 1.4261, "step": 1620},
+    {"epoch": 1.3831141281289776, "grad_norm": 0.17492766465456316, "learning_rate": 4.539131797130656e-05, "loss": 1.4258, "step": 1630},
+    {"epoch": 1.391599490878235, "grad_norm": 0.19692876587833674, "learning_rate": 4.530524340283881e-05, "loss": 1.4349, "step": 1640},
+    {"epoch": 1.4000848536274926, "grad_norm": 0.19155373430892478, "learning_rate": 4.521845566765589e-05, "loss": 1.4536, "step": 1650},
+    {"epoch": 1.4085702163767502, "grad_norm": 0.18544325977459192, "learning_rate": 4.513095781394208e-05, "loss": 1.4363, "step": 1660},
+    {"epoch": 1.4170555791260075, "grad_norm": 0.177828004720666, "learning_rate": 4.504275291482267e-05, "loss": 1.4595, "step": 1670},
+    {"epoch": 1.4255409418752651, "grad_norm": 0.17855432230356816, "learning_rate": 4.495384406825601e-05, "loss": 1.4211, "step": 1680},
+    {"epoch": 1.4340263046245227, "grad_norm": 0.20232492538380317, "learning_rate": 4.486423439692469e-05, "loss": 1.4189, "step": 1690},
+    {"epoch": 1.4425116673737803, "grad_norm": 0.1975109303350431, "learning_rate": 4.477392704812585e-05, "loss": 1.4565, "step": 1700},
+    {"epoch": 1.4509970301230377, "grad_norm": 0.19619010830399825, "learning_rate": 4.468292519366071e-05, "loss": 1.4382, "step": 1710},
+    {"epoch": 1.4594823928722953, "grad_norm": 0.18168826428246143, "learning_rate": 4.459123202972308e-05, "loss": 1.4471, "step": 1720},
+    {"epoch": 1.4679677556215527, "grad_norm": 0.1923264062362399, "learning_rate": 4.449885077678717e-05, "loss": 1.4153, "step": 1730},
+    {"epoch": 1.4764531183708103, "grad_norm": 0.1907937313040222, "learning_rate": 4.440578467949445e-05, "loss": 1.4432, "step": 1740},
+    {"epoch": 1.4849384811200679, "grad_norm": 0.19107457667767244, "learning_rate": 4.431203700653968e-05, "loss": 1.4285, "step": 1750},
+    {"epoch": 1.4934238438693255, "grad_norm": 0.19847350429107552, "learning_rate": 4.421761105055613e-05, "loss": 1.4383, "step": 1760},
+    {"epoch": 1.501909206618583, "grad_norm": 0.18536475556610216, "learning_rate": 4.4122510127999937e-05, "loss": 1.42, "step": 1770},
+    {"epoch": 1.5103945693678404, "grad_norm": 0.18481023473586697, "learning_rate": 4.4026737579033584e-05, "loss": 1.4384, "step": 1780},
+    {"epoch": 1.518879932117098, "grad_norm": 0.20863867505874642, "learning_rate": 4.393029676740864e-05, "loss": 1.4543, "step": 1790},
+    {"epoch": 1.5273652948663554, "grad_norm": 0.1816036870853105, "learning_rate": 4.3833191080347575e-05, "loss": 1.434, "step": 1800}
|
1286 |
+
},
|
1287 |
+
{
|
1288 |
+
"epoch": 1.5273652948663554,
|
1289 |
+
"eval_loss": 1.4622184038162231,
|
1290 |
+
"eval_runtime": 52.4041,
|
1291 |
+
"eval_samples_per_second": 7.27,
|
1292 |
+
"eval_steps_per_second": 0.916,
|
1293 |
+
"step": 1800
|
1294 |
+
},
|
1295 |
+
{
|
1296 |
+
"epoch": 1.535850657615613,
|
1297 |
+
"grad_norm": 0.19378252368958881,
|
1298 |
+
"learning_rate": 4.3735423928424815e-05,
|
1299 |
+
"loss": 1.4275,
|
1300 |
+
"step": 1810
|
1301 |
+
},
|
1302 |
+
{
|
1303 |
+
"epoch": 1.5443360203648706,
|
1304 |
+
"grad_norm": 0.20453331251433848,
|
1305 |
+
"learning_rate": 4.363699874544697e-05,
|
1306 |
+
"loss": 1.4203,
|
1307 |
+
"step": 1820
|
1308 |
+
},
|
1309 |
+
{
|
1310 |
+
"epoch": 1.5528213831141282,
|
1311 |
+
"grad_norm": 0.26684319417219377,
|
1312 |
+
"learning_rate": 4.3537918988332156e-05,
|
1313 |
+
"loss": 1.4372,
|
1314 |
+
"step": 1830
|
1315 |
+
},
|
1316 |
+
{
|
1317 |
+
"epoch": 1.5613067458633858,
|
1318 |
+
"grad_norm": 0.25745160303419773,
|
1319 |
+
"learning_rate": 4.343818813698868e-05,
|
1320 |
+
"loss": 1.4082,
|
1321 |
+
"step": 1840
|
1322 |
+
},
|
1323 |
+
{
|
1324 |
+
"epoch": 1.5697921086126432,
|
1325 |
+
"grad_norm": 0.19969727996700776,
|
1326 |
+
"learning_rate": 4.3337809694192765e-05,
|
1327 |
+
"loss": 1.4314,
|
1328 |
+
"step": 1850
|
1329 |
+
},
|
1330 |
+
{
|
1331 |
+
"epoch": 1.5782774713619008,
|
1332 |
+
"grad_norm": 0.20117210832277968,
|
1333 |
+
"learning_rate": 4.3236787185465525e-05,
|
1334 |
+
"loss": 1.4293,
|
1335 |
+
"step": 1860
|
1336 |
+
},
|
1337 |
+
{
|
1338 |
+
"epoch": 1.5867628341111581,
|
1339 |
+
"grad_norm": 0.20173003641028897,
|
1340 |
+
"learning_rate": 4.313512415894913e-05,
|
1341 |
+
"loss": 1.4406,
|
1342 |
+
"step": 1870
|
1343 |
+
},
|
1344 |
+
{
|
1345 |
+
"epoch": 1.5952481968604157,
|
1346 |
+
"grad_norm": 0.20304770794371527,
|
1347 |
+
"learning_rate": 4.303282418528224e-05,
|
1348 |
+
"loss": 1.4286,
|
1349 |
+
"step": 1880
|
1350 |
+
},
|
1351 |
+
{
|
1352 |
+
"epoch": 1.6037335596096733,
|
1353 |
+
"grad_norm": 0.19126658907738198,
|
1354 |
+
"learning_rate": 4.292989085747452e-05,
|
1355 |
+
"loss": 1.4184,
|
1356 |
+
"step": 1890
|
1357 |
+
},
|
1358 |
+
{
|
1359 |
+
"epoch": 1.612218922358931,
|
1360 |
+
"grad_norm": 0.20069554966453027,
|
1361 |
+
"learning_rate": 4.282632779078051e-05,
|
1362 |
+
"loss": 1.4133,
|
1363 |
+
"step": 1900
|
1364 |
+
},
|
1365 |
+
{
|
1366 |
+
"epoch": 1.6207042851081885,
|
1367 |
+
"grad_norm": 0.1952881519566686,
|
1368 |
+
"learning_rate": 4.2722138622572624e-05,
|
1369 |
+
"loss": 1.4432,
|
1370 |
+
"step": 1910
|
1371 |
+
},
|
1372 |
+
{
|
1373 |
+
"epoch": 1.629189647857446,
|
1374 |
+
"grad_norm": 0.19763704668680288,
|
1375 |
+
"learning_rate": 4.261732701221339e-05,
|
1376 |
+
"loss": 1.3921,
|
1377 |
+
"step": 1920
|
1378 |
+
},
|
1379 |
+
{
|
1380 |
+
"epoch": 1.6376750106067033,
|
1381 |
+
"grad_norm": 0.19821464294464497,
|
1382 |
+
"learning_rate": 4.2511896640926925e-05,
|
1383 |
+
"loss": 1.4454,
|
1384 |
+
"step": 1930
|
1385 |
+
},
|
1386 |
+
{
|
1387 |
+
"epoch": 1.6461603733559609,
|
1388 |
+
"grad_norm": 0.20456545626297834,
|
1389 |
+
"learning_rate": 4.240585121166966e-05,
|
1390 |
+
"loss": 1.4147,
|
1391 |
+
"step": 1940
|
1392 |
+
},
|
1393 |
+
{
|
1394 |
+
"epoch": 1.6546457361052185,
|
1395 |
+
"grad_norm": 0.2119092529186395,
|
1396 |
+
"learning_rate": 4.229919444900027e-05,
|
1397 |
+
"loss": 1.3969,
|
1398 |
+
"step": 1950
|
1399 |
+
},
|
1400 |
+
{
|
1401 |
+
"epoch": 1.663131098854476,
|
1402 |
+
"grad_norm": 0.20330157582122357,
|
1403 |
+
"learning_rate": 4.2191930098948865e-05,
|
1404 |
+
"loss": 1.426,
|
1405 |
+
"step": 1960
|
1406 |
+
},
|
1407 |
+
{
|
1408 |
+
"epoch": 1.6716164616037337,
|
1409 |
+
"grad_norm": 0.21761164739298738,
|
1410 |
+
"learning_rate": 4.2084061928885406e-05,
|
1411 |
+
"loss": 1.4246,
|
1412 |
+
"step": 1970
|
1413 |
+
},
|
1414 |
+
{
|
1415 |
+
"epoch": 1.680101824352991,
|
1416 |
+
"grad_norm": 0.19331588142071401,
|
1417 |
+
"learning_rate": 4.197559372738741e-05,
|
1418 |
+
"loss": 1.4305,
|
1419 |
+
"step": 1980
|
1420 |
+
},
|
1421 |
+
{
|
1422 |
+
"epoch": 1.6885871871022486,
|
1423 |
+
"grad_norm": 0.20188460724329996,
|
1424 |
+
"learning_rate": 4.186652930410685e-05,
|
1425 |
+
"loss": 1.4153,
|
1426 |
+
"step": 1990
|
1427 |
+
},
|
1428 |
+
{
|
1429 |
+
"epoch": 1.697072549851506,
|
1430 |
+
"grad_norm": 0.20988950033571588,
|
1431 |
+
"learning_rate": 4.1756872489636425e-05,
|
1432 |
+
"loss": 1.3894,
|
1433 |
+
"step": 2000
|
1434 |
+
}
|
1435 |
+
],
|
1436 |
+
"logging_steps": 10,
|
1437 |
+
"max_steps": 5890,
|
1438 |
+
"num_input_tokens_seen": 0,
|
1439 |
+
"num_train_epochs": 5,
|
1440 |
+
"save_steps": 500,
|
1441 |
+
"stateful_callbacks": {
|
1442 |
+
"TrainerControl": {
|
1443 |
+
"args": {
|
1444 |
+
"should_epoch_stop": false,
|
1445 |
+
"should_evaluate": false,
|
1446 |
+
"should_log": false,
|
1447 |
+
"should_save": true,
|
1448 |
+
"should_training_stop": false
|
1449 |
+
},
|
1450 |
+
"attributes": {}
|
1451 |
+
}
|
1452 |
+
},
|
1453 |
+
"total_flos": 1681059326459904.0,
|
1454 |
+
"train_batch_size": 2,
|
1455 |
+
"trial_name": null,
|
1456 |
+
"trial_params": null
|
1457 |
+
}
|
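The trainer_state.json fragment above is the tail of the Trainer's log history: a `loss`/`grad_norm`/`learning_rate` entry every 10 steps (`logging_steps`: 10), an `eval_loss` entry at step 1800, and the run metadata (`max_steps`: 5890 over 5 epochs, `save_steps`: 500, per-device train batch size 2). Below is a minimal sketch for pulling those curves out of such a file, assuming the standard Hugging Face Trainer layout in which the entries shown here sit under a top-level `log_history` list; the file path is illustrative.

```python
# Minimal sketch, assuming the usual Trainer layout where the entries shown above
# live under a top-level "log_history" list; the path below is illustrative.
import json

with open("checkpoint-2000/trainer_state.json") as f:
    state = json.load(f)

train_log = [e for e in state["log_history"] if "loss" in e]       # per-10-step training entries
eval_log  = [e for e in state["log_history"] if "eval_loss" in e]  # periodic eval entries

steps  = [e["step"] for e in train_log]
losses = [e["loss"] for e in train_log]
lrs    = [e["learning_rate"] for e in train_log]

print(f"{len(train_log)} train points, {len(eval_log)} eval points")
print("last logged step:", steps[-1], "loss:", losses[-1], "lr:", lrs[-1])
for e in eval_log:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}")
```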
checkpoint-2000/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
checkpoint-2000/zero_to_fp32.py
ADDED
@@ -0,0 +1,674 @@
1 |
+
#!/usr/bin/env python
|
2 |
+
|
3 |
+
# Copyright (c) Microsoft Corporation.
|
4 |
+
# SPDX-License-Identifier: Apache-2.0
|
5 |
+
|
6 |
+
# DeepSpeed Team
|
7 |
+
|
8 |
+
# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
|
9 |
+
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
|
10 |
+
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
|
11 |
+
# application.
|
12 |
+
#
|
13 |
+
# example:
|
14 |
+
# python zero_to_fp32.py . output_dir/
|
15 |
+
# or
|
16 |
+
# python zero_to_fp32.py . output_dir/ --safe_serialization
|
17 |
+
|
18 |
+
import argparse
|
19 |
+
import torch
|
20 |
+
import glob
|
21 |
+
import math
|
22 |
+
import os
|
23 |
+
import re
|
24 |
+
import json
|
25 |
+
from tqdm import tqdm
|
26 |
+
from collections import OrderedDict
|
27 |
+
from dataclasses import dataclass
|
28 |
+
|
29 |
+
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
|
30 |
+
# DeepSpeed data structures it has to be available in the current python environment.
|
31 |
+
from deepspeed.utils import logger
|
32 |
+
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
|
33 |
+
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
|
34 |
+
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
|
35 |
+
|
36 |
+
|
37 |
+
@dataclass
|
38 |
+
class zero_model_state:
|
39 |
+
buffers: dict()
|
40 |
+
param_shapes: dict()
|
41 |
+
shared_params: list
|
42 |
+
ds_version: int
|
43 |
+
frozen_param_shapes: dict()
|
44 |
+
frozen_param_fragments: dict()
|
45 |
+
|
46 |
+
|
47 |
+
debug = 0
|
48 |
+
|
49 |
+
# load to cpu
|
50 |
+
device = torch.device('cpu')
|
51 |
+
|
52 |
+
|
53 |
+
def atoi(text):
|
54 |
+
return int(text) if text.isdigit() else text
|
55 |
+
|
56 |
+
|
57 |
+
def natural_keys(text):
|
58 |
+
'''
|
59 |
+
alist.sort(key=natural_keys) sorts in human order
|
60 |
+
http://nedbatchelder.com/blog/200712/human_sorting.html
|
61 |
+
(See Toothy's implementation in the comments)
|
62 |
+
'''
|
63 |
+
return [atoi(c) for c in re.split(r'(\d+)', text)]
|
64 |
+
|
65 |
+
|
66 |
+
def get_model_state_file(checkpoint_dir, zero_stage):
|
67 |
+
if not os.path.isdir(checkpoint_dir):
|
68 |
+
raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
|
69 |
+
|
70 |
+
# there should be only one file
|
71 |
+
if zero_stage <= 2:
|
72 |
+
file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
|
73 |
+
elif zero_stage == 3:
|
74 |
+
file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
|
75 |
+
|
76 |
+
if not os.path.exists(file):
|
77 |
+
raise FileNotFoundError(f"can't find model states file at '{file}'")
|
78 |
+
|
79 |
+
return file
|
80 |
+
|
81 |
+
|
82 |
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
|
83 |
+
# XXX: need to test that this simple glob rule works for multi-node setup too
|
84 |
+
ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
|
85 |
+
|
86 |
+
if len(ckpt_files) == 0:
|
87 |
+
raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
|
88 |
+
|
89 |
+
return ckpt_files
|
90 |
+
|
91 |
+
|
92 |
+
def get_optim_files(checkpoint_dir):
|
93 |
+
return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
|
94 |
+
|
95 |
+
|
96 |
+
def get_model_state_files(checkpoint_dir):
|
97 |
+
return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
|
98 |
+
|
99 |
+
|
100 |
+
def parse_model_states(files):
|
101 |
+
zero_model_states = []
|
102 |
+
for file in files:
|
103 |
+
state_dict = torch.load(file, map_location=device)
|
104 |
+
|
105 |
+
if BUFFER_NAMES not in state_dict:
|
106 |
+
raise ValueError(f"{file} is not a model state checkpoint")
|
107 |
+
buffer_names = state_dict[BUFFER_NAMES]
|
108 |
+
if debug:
|
109 |
+
print("Found buffers:", buffer_names)
|
110 |
+
|
111 |
+
# recover just the buffers while restoring them to fp32 if they were saved in fp16
|
112 |
+
buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
|
113 |
+
param_shapes = state_dict[PARAM_SHAPES]
|
114 |
+
|
115 |
+
# collect parameters that are included in param_shapes
|
116 |
+
param_names = []
|
117 |
+
for s in param_shapes:
|
118 |
+
for name in s.keys():
|
119 |
+
param_names.append(name)
|
120 |
+
|
121 |
+
# update with frozen parameters
|
122 |
+
frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
|
123 |
+
if frozen_param_shapes is not None:
|
124 |
+
if debug:
|
125 |
+
print(f"Found frozen_param_shapes: {frozen_param_shapes}")
|
126 |
+
param_names += list(frozen_param_shapes.keys())
|
127 |
+
|
128 |
+
# handle shared params
|
129 |
+
shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
|
130 |
+
|
131 |
+
ds_version = state_dict.get(DS_VERSION, None)
|
132 |
+
|
133 |
+
frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
|
134 |
+
|
135 |
+
z_model_state = zero_model_state(buffers=buffers,
|
136 |
+
param_shapes=param_shapes,
|
137 |
+
shared_params=shared_params,
|
138 |
+
ds_version=ds_version,
|
139 |
+
frozen_param_shapes=frozen_param_shapes,
|
140 |
+
frozen_param_fragments=frozen_param_fragments)
|
141 |
+
zero_model_states.append(z_model_state)
|
142 |
+
|
143 |
+
return zero_model_states
|
144 |
+
|
145 |
+
|
146 |
+
def parse_optim_states(files, ds_checkpoint_dir):
|
147 |
+
total_files = len(files)
|
148 |
+
state_dicts = []
|
149 |
+
for f in files:
|
150 |
+
state_dict = torch.load(f, map_location=device)
|
151 |
+
# immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
|
152 |
+
# and also handle the case where it was already removed by another helper script
|
153 |
+
state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
|
154 |
+
state_dicts.append(state_dict)
|
155 |
+
|
156 |
+
if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
|
157 |
+
raise ValueError(f"{files[0]} is not a zero checkpoint")
|
158 |
+
zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
|
159 |
+
world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
|
160 |
+
|
161 |
+
# For ZeRO-2 each param group can have different partition_count as data parallelism for expert
|
162 |
+
# parameters can be different from data parallelism for non-expert parameters. So we can just
|
163 |
+
# use the max of the partition_count to get the dp world_size.
|
164 |
+
|
165 |
+
if type(world_size) is list:
|
166 |
+
world_size = max(world_size)
|
167 |
+
|
168 |
+
if world_size != total_files:
|
169 |
+
raise ValueError(
|
170 |
+
f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
|
171 |
+
"Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
|
172 |
+
)
|
173 |
+
|
174 |
+
# the groups are named differently in each stage
|
175 |
+
if zero_stage <= 2:
|
176 |
+
fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
|
177 |
+
elif zero_stage == 3:
|
178 |
+
fp32_groups_key = FP32_FLAT_GROUPS
|
179 |
+
else:
|
180 |
+
raise ValueError(f"unknown zero stage {zero_stage}")
|
181 |
+
|
182 |
+
if zero_stage <= 2:
|
183 |
+
fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
|
184 |
+
elif zero_stage == 3:
|
185 |
+
# if there is more than one param group, there will be multiple flattened tensors - one
|
186 |
+
# flattened tensor per group - for simplicity merge them into a single tensor
|
187 |
+
#
|
188 |
+
# XXX: could make the script more memory efficient for when there are multiple groups - it
|
189 |
+
# will require matching the sub-lists of param_shapes for each param group flattened tensor
|
190 |
+
|
191 |
+
fp32_flat_groups = [
|
192 |
+
torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
|
193 |
+
]
|
194 |
+
|
195 |
+
return zero_stage, world_size, fp32_flat_groups
|
196 |
+
|
197 |
+
|
198 |
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
|
199 |
+
"""
|
200 |
+
Returns fp32 state_dict reconstructed from ds checkpoint
|
201 |
+
|
202 |
+
Args:
|
203 |
+
- ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
|
204 |
+
|
205 |
+
"""
|
206 |
+
print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
|
207 |
+
|
208 |
+
optim_files = get_optim_files(ds_checkpoint_dir)
|
209 |
+
zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
|
210 |
+
print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
|
211 |
+
|
212 |
+
model_files = get_model_state_files(ds_checkpoint_dir)
|
213 |
+
|
214 |
+
zero_model_states = parse_model_states(model_files)
|
215 |
+
print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
|
216 |
+
|
217 |
+
if zero_stage <= 2:
|
218 |
+
return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
|
219 |
+
exclude_frozen_parameters)
|
220 |
+
elif zero_stage == 3:
|
221 |
+
return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
|
222 |
+
exclude_frozen_parameters)
|
223 |
+
|
224 |
+
|
225 |
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
|
226 |
+
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
|
227 |
+
return
|
228 |
+
|
229 |
+
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
|
230 |
+
frozen_param_fragments = zero_model_states[0].frozen_param_fragments
|
231 |
+
|
232 |
+
if debug:
|
233 |
+
num_elem = sum(s.numel() for s in frozen_param_shapes.values())
|
234 |
+
print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
|
235 |
+
|
236 |
+
wanted_params = len(frozen_param_shapes)
|
237 |
+
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
|
238 |
+
avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
|
239 |
+
print(f'Frozen params: Have {avail_numel} numels to process.')
|
240 |
+
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
|
241 |
+
|
242 |
+
total_params = 0
|
243 |
+
total_numel = 0
|
244 |
+
for name, shape in frozen_param_shapes.items():
|
245 |
+
total_params += 1
|
246 |
+
unpartitioned_numel = shape.numel()
|
247 |
+
total_numel += unpartitioned_numel
|
248 |
+
|
249 |
+
state_dict[name] = frozen_param_fragments[name]
|
250 |
+
|
251 |
+
if debug:
|
252 |
+
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
|
253 |
+
|
254 |
+
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
|
255 |
+
|
256 |
+
|
257 |
+
def _has_callable(obj, fn):
|
258 |
+
attr = getattr(obj, fn, None)
|
259 |
+
return callable(attr)
|
260 |
+
|
261 |
+
|
262 |
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
|
263 |
+
param_shapes = zero_model_states[0].param_shapes
|
264 |
+
|
265 |
+
# Reconstruction protocol:
|
266 |
+
#
|
267 |
+
# XXX: document this
|
268 |
+
|
269 |
+
if debug:
|
270 |
+
for i in range(world_size):
|
271 |
+
for j in range(len(fp32_flat_groups[0])):
|
272 |
+
print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
|
273 |
+
|
274 |
+
# XXX: memory usage doubles here (zero2)
|
275 |
+
num_param_groups = len(fp32_flat_groups[0])
|
276 |
+
merged_single_partition_of_fp32_groups = []
|
277 |
+
for i in range(num_param_groups):
|
278 |
+
merged_partitions = [sd[i] for sd in fp32_flat_groups]
|
279 |
+
full_single_fp32_vector = torch.cat(merged_partitions, 0)
|
280 |
+
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
|
281 |
+
avail_numel = sum(
|
282 |
+
[full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
|
283 |
+
|
284 |
+
if debug:
|
285 |
+
wanted_params = sum([len(shapes) for shapes in param_shapes])
|
286 |
+
wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
|
287 |
+
# not asserting if there is a mismatch due to possible padding
|
288 |
+
print(f"Have {avail_numel} numels to process.")
|
289 |
+
print(f"Need {wanted_numel} numels in {wanted_params} params.")
|
290 |
+
|
291 |
+
# params
|
292 |
+
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
|
293 |
+
# out-of-core computing solution
|
294 |
+
total_numel = 0
|
295 |
+
total_params = 0
|
296 |
+
for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
|
297 |
+
offset = 0
|
298 |
+
avail_numel = full_single_fp32_vector.numel()
|
299 |
+
for name, shape in shapes.items():
|
300 |
+
|
301 |
+
unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
|
302 |
+
total_numel += unpartitioned_numel
|
303 |
+
total_params += 1
|
304 |
+
|
305 |
+
if debug:
|
306 |
+
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
|
307 |
+
state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
|
308 |
+
offset += unpartitioned_numel
|
309 |
+
|
310 |
+
# Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
|
311 |
+
# avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
|
312 |
+
# paddings performed in the code it's almost impossible to predict the exact numbers w/o the
|
313 |
+
# live optimizer object, so we are checking that the numbers are within the right range
|
314 |
+
align_to = 2 * world_size
|
315 |
+
|
316 |
+
def zero2_align(x):
|
317 |
+
return align_to * math.ceil(x / align_to)
|
318 |
+
|
319 |
+
if debug:
|
320 |
+
print(f"original offset={offset}, avail_numel={avail_numel}")
|
321 |
+
|
322 |
+
offset = zero2_align(offset)
|
323 |
+
avail_numel = zero2_align(avail_numel)
|
324 |
+
|
325 |
+
if debug:
|
326 |
+
print(f"aligned offset={offset}, avail_numel={avail_numel}")
|
327 |
+
|
328 |
+
# Sanity check
|
329 |
+
if offset != avail_numel:
|
330 |
+
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
|
331 |
+
|
332 |
+
print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
|
333 |
+
|
334 |
+
|
335 |
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
|
336 |
+
exclude_frozen_parameters):
|
337 |
+
state_dict = OrderedDict()
|
338 |
+
|
339 |
+
# buffers
|
340 |
+
buffers = zero_model_states[0].buffers
|
341 |
+
state_dict.update(buffers)
|
342 |
+
if debug:
|
343 |
+
print(f"added {len(buffers)} buffers")
|
344 |
+
|
345 |
+
if not exclude_frozen_parameters:
|
346 |
+
_zero2_merge_frozen_params(state_dict, zero_model_states)
|
347 |
+
|
348 |
+
_zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
|
349 |
+
|
350 |
+
# recover shared parameters
|
351 |
+
for pair in zero_model_states[0].shared_params:
|
352 |
+
if pair[1] in state_dict:
|
353 |
+
state_dict[pair[0]] = state_dict[pair[1]]
|
354 |
+
|
355 |
+
return state_dict
|
356 |
+
|
357 |
+
|
358 |
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
|
359 |
+
remainder = unpartitioned_numel % world_size
|
360 |
+
padding_numel = (world_size - remainder) if remainder else 0
|
361 |
+
partitioned_numel = math.ceil(unpartitioned_numel / world_size)
|
362 |
+
return partitioned_numel, padding_numel
|
363 |
+
|
364 |
+
|
365 |
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
|
366 |
+
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
|
367 |
+
return
|
368 |
+
|
369 |
+
if debug:
|
370 |
+
for i in range(world_size):
|
371 |
+
num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
|
372 |
+
print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
|
373 |
+
|
374 |
+
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
|
375 |
+
wanted_params = len(frozen_param_shapes)
|
376 |
+
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
|
377 |
+
avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
|
378 |
+
print(f'Frozen params: Have {avail_numel} numels to process.')
|
379 |
+
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
|
380 |
+
|
381 |
+
total_params = 0
|
382 |
+
total_numel = 0
|
383 |
+
for name, shape in zero_model_states[0].frozen_param_shapes.items():
|
384 |
+
total_params += 1
|
385 |
+
unpartitioned_numel = shape.numel()
|
386 |
+
total_numel += unpartitioned_numel
|
387 |
+
|
388 |
+
param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
|
389 |
+
state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
|
390 |
+
|
391 |
+
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
|
392 |
+
|
393 |
+
if debug:
|
394 |
+
print(
|
395 |
+
f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
|
396 |
+
)
|
397 |
+
|
398 |
+
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
|
399 |
+
|
400 |
+
|
401 |
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
|
402 |
+
param_shapes = zero_model_states[0].param_shapes
|
403 |
+
avail_numel = fp32_flat_groups[0].numel() * world_size
|
404 |
+
# Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
|
405 |
+
# param, re-consolidating each param, while dealing with padding if any
|
406 |
+
|
407 |
+
# merge list of dicts, preserving order
|
408 |
+
param_shapes = {k: v for d in param_shapes for k, v in d.items()}
|
409 |
+
|
410 |
+
if debug:
|
411 |
+
for i in range(world_size):
|
412 |
+
print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
|
413 |
+
|
414 |
+
wanted_params = len(param_shapes)
|
415 |
+
wanted_numel = sum(shape.numel() for shape in param_shapes.values())
|
416 |
+
# not asserting if there is a mismatch due to possible padding
|
417 |
+
avail_numel = fp32_flat_groups[0].numel() * world_size
|
418 |
+
print(f"Trainable params: Have {avail_numel} numels to process.")
|
419 |
+
print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
|
420 |
+
|
421 |
+
# params
|
422 |
+
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
|
423 |
+
# out-of-core computing solution
|
424 |
+
offset = 0
|
425 |
+
total_numel = 0
|
426 |
+
total_params = 0
|
427 |
+
for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
|
428 |
+
unpartitioned_numel = shape.numel()
|
429 |
+
total_numel += unpartitioned_numel
|
430 |
+
total_params += 1
|
431 |
+
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
|
432 |
+
|
433 |
+
if debug:
|
434 |
+
print(
|
435 |
+
f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
|
436 |
+
)
|
437 |
+
|
438 |
+
# XXX: memory usage doubles here
|
439 |
+
state_dict[name] = torch.cat(
|
440 |
+
tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
|
441 |
+
0).narrow(0, 0, unpartitioned_numel).view(shape)
|
442 |
+
offset += partitioned_numel
|
443 |
+
|
444 |
+
offset *= world_size
|
445 |
+
|
446 |
+
# Sanity check
|
447 |
+
if offset != avail_numel:
|
448 |
+
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
|
449 |
+
|
450 |
+
print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
|
451 |
+
|
452 |
+
|
453 |
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
|
454 |
+
exclude_frozen_parameters):
|
455 |
+
state_dict = OrderedDict()
|
456 |
+
|
457 |
+
# buffers
|
458 |
+
buffers = zero_model_states[0].buffers
|
459 |
+
state_dict.update(buffers)
|
460 |
+
if debug:
|
461 |
+
print(f"added {len(buffers)} buffers")
|
462 |
+
|
463 |
+
if not exclude_frozen_parameters:
|
464 |
+
_zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
|
465 |
+
|
466 |
+
_zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
|
467 |
+
|
468 |
+
# recover shared parameters
|
469 |
+
for pair in zero_model_states[0].shared_params:
|
470 |
+
if pair[1] in state_dict:
|
471 |
+
state_dict[pair[0]] = state_dict[pair[1]]
|
472 |
+
|
473 |
+
return state_dict
|
474 |
+
|
475 |
+
|
476 |
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
|
477 |
+
"""
|
478 |
+
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
|
479 |
+
``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
|
480 |
+
via a model hub.
|
481 |
+
|
482 |
+
Args:
|
483 |
+
- ``checkpoint_dir``: path to the desired checkpoint folder
|
484 |
+
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
|
485 |
+
- ``exclude_frozen_parameters``: exclude frozen parameters
|
486 |
+
|
487 |
+
Returns:
|
488 |
+
- pytorch ``state_dict``
|
489 |
+
|
490 |
+
Note: this approach may not work if your application doesn't have sufficient free CPU memory and
|
491 |
+
you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
|
492 |
+
the checkpoint.
|
493 |
+
|
494 |
+
A typical usage might be ::
|
495 |
+
|
496 |
+
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
|
497 |
+
# do the training and checkpoint saving
|
498 |
+
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
|
499 |
+
model = model.cpu() # move to cpu
|
500 |
+
model.load_state_dict(state_dict)
|
501 |
+
# submit to model hub or save the model to share with others
|
502 |
+
|
503 |
+
In this example the ``model`` will no longer be usable in the deepspeed context of the same
|
504 |
+
application. i.e. you will need to re-initialize the deepspeed engine, since
|
505 |
+
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
|
506 |
+
|
507 |
+
If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
|
508 |
+
|
509 |
+
"""
|
510 |
+
if tag is None:
|
511 |
+
latest_path = os.path.join(checkpoint_dir, 'latest')
|
512 |
+
if os.path.isfile(latest_path):
|
513 |
+
with open(latest_path, 'r') as fd:
|
514 |
+
tag = fd.read().strip()
|
515 |
+
else:
|
516 |
+
raise ValueError(f"Unable to find 'latest' file at {latest_path}")
|
517 |
+
|
518 |
+
ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
|
519 |
+
|
520 |
+
if not os.path.isdir(ds_checkpoint_dir):
|
521 |
+
raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
|
522 |
+
|
523 |
+
return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
|
524 |
+
|
525 |
+
|
526 |
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
|
527 |
+
output_dir,
|
528 |
+
max_shard_size="5GB",
|
529 |
+
safe_serialization=False,
|
530 |
+
tag=None,
|
531 |
+
exclude_frozen_parameters=False):
|
532 |
+
"""
|
533 |
+
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
|
534 |
+
loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
|
535 |
+
|
536 |
+
Args:
|
537 |
+
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
|
538 |
+
- ``output_dir``: directory to the pytorch fp32 state_dict output files
|
539 |
+
- ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
|
540 |
+
- ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
|
541 |
+
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
|
542 |
+
- ``exclude_frozen_parameters``: exclude frozen parameters
|
543 |
+
"""
|
544 |
+
# Dependency pre-check
|
545 |
+
if safe_serialization:
|
546 |
+
try:
|
547 |
+
from safetensors.torch import save_file
|
548 |
+
except ImportError:
|
549 |
+
print('If you want to use `safe_serialization`, please `pip install safetensors`')
|
550 |
+
raise
|
551 |
+
if max_shard_size is not None:
|
552 |
+
try:
|
553 |
+
from huggingface_hub import split_torch_state_dict_into_shards
|
554 |
+
except ImportError:
|
555 |
+
print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
|
556 |
+
raise
|
557 |
+
|
558 |
+
# Convert zero checkpoint to state_dict
|
559 |
+
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
|
560 |
+
|
561 |
+
# Shard the model if it is too big.
|
562 |
+
weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
|
563 |
+
if max_shard_size is not None:
|
564 |
+
filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
|
565 |
+
state_dict_split = split_torch_state_dict_into_shards(state_dict,
|
566 |
+
filename_pattern=filename_pattern,
|
567 |
+
max_shard_size=max_shard_size)
|
568 |
+
else:
|
569 |
+
from collections import namedtuple
|
570 |
+
StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
|
571 |
+
state_dict_split = StateDictSplit(is_sharded=False,
|
572 |
+
filename_to_tensors={weights_name: list(state_dict.keys())})
|
573 |
+
|
574 |
+
# Save the model
|
575 |
+
filename_to_tensors = state_dict_split.filename_to_tensors.items()
|
576 |
+
for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
|
577 |
+
shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
|
578 |
+
output_path = os.path.join(output_dir, shard_file)
|
579 |
+
if safe_serialization:
|
580 |
+
save_file(shard, output_path, metadata={"format": "pt"})
|
581 |
+
else:
|
582 |
+
torch.save(shard, output_path)
|
583 |
+
|
584 |
+
# Save index if sharded
|
585 |
+
if state_dict_split.is_sharded:
|
586 |
+
index = {
|
587 |
+
"metadata": state_dict_split.metadata,
|
588 |
+
"weight_map": state_dict_split.tensor_to_filename,
|
589 |
+
}
|
590 |
+
save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
|
591 |
+
save_index_file = os.path.join(output_dir, save_index_file)
|
592 |
+
with open(save_index_file, "w", encoding="utf-8") as f:
|
593 |
+
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
|
594 |
+
f.write(content)
|
595 |
+
|
596 |
+
|
597 |
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
|
598 |
+
"""
|
599 |
+
1. Put the provided model to cpu
|
600 |
+
2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
|
601 |
+
3. Load it into the provided model
|
602 |
+
|
603 |
+
Args:
|
604 |
+
- ``model``: the model object to update
|
605 |
+
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
|
606 |
+
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
|
607 |
+
|
608 |
+
Returns:
|
609 |
+
- ``model``: modified model
|
610 |
+
|
611 |
+
Make sure you have plenty of CPU memory available before you call this function. If you don't
|
612 |
+
have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
|
613 |
+
conveniently placed for you in the checkpoint folder.
|
614 |
+
|
615 |
+
A typical usage might be ::
|
616 |
+
|
617 |
+
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
|
618 |
+
model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
|
619 |
+
# submit to model hub or save the model to share with others
|
620 |
+
|
621 |
+
Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
|
622 |
+
of the same application. i.e. you will need to re-initialize the deepspeed engine, since
|
623 |
+
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
|
624 |
+
|
625 |
+
"""
|
626 |
+
logger.info(f"Extracting fp32 weights")
|
627 |
+
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
|
628 |
+
|
629 |
+
logger.info(f"Overwriting model with fp32 weights")
|
630 |
+
model = model.cpu()
|
631 |
+
model.load_state_dict(state_dict, strict=False)
|
632 |
+
|
633 |
+
return model
|
634 |
+
|
635 |
+
|
636 |
+
if __name__ == "__main__":
|
637 |
+
parser = argparse.ArgumentParser()
|
638 |
+
parser.add_argument("checkpoint_dir",
|
639 |
+
type=str,
|
640 |
+
help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
|
641 |
+
parser.add_argument("output_dir",
|
642 |
+
type=str,
|
643 |
+
help="directory to the pytorch fp32 state_dict output files"
|
644 |
+
"(e.g. path/checkpoint-12-output/)")
|
645 |
+
parser.add_argument(
|
646 |
+
"--max_shard_size",
|
647 |
+
type=str,
|
648 |
+
default="5GB",
|
649 |
+
help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
|
650 |
+
"lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
|
651 |
+
"We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
|
652 |
+
"without CPU OOM issues.")
|
653 |
+
parser.add_argument(
|
654 |
+
"--safe_serialization",
|
655 |
+
default=False,
|
656 |
+
action='store_true',
|
657 |
+
help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
|
658 |
+
parser.add_argument("-t",
|
659 |
+
"--tag",
|
660 |
+
type=str,
|
661 |
+
default=None,
|
662 |
+
help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
|
663 |
+
parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
|
664 |
+
parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
|
665 |
+
args = parser.parse_args()
|
666 |
+
|
667 |
+
debug = args.debug
|
668 |
+
|
669 |
+
convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
|
670 |
+
args.output_dir,
|
671 |
+
max_shard_size=args.max_shard_size,
|
672 |
+
safe_serialization=args.safe_serialization,
|
673 |
+
tag=args.tag,
|
674 |
+
exclude_frozen_parameters=args.exclude_frozen_parameters)
|
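zero_to_fp32.py is the DeepSpeed helper copied into each checkpoint folder; it reads the `latest` tag file (e.g. `global_step2500` for checkpoint-2500), gathers the per-rank `*_optim_states.pt` partitions, and writes a consolidated fp32 state_dict. A minimal offline-conversion sketch using the `convert_zero_checkpoint_to_fp32_state_dict` entry point defined above is shown below; the output directory name and the `sys.path` handling are assumptions, and the same conversion can be run from the shell with `python checkpoint-2500/zero_to_fp32.py checkpoint-2500 <output_dir> --safe_serialization`.

```python
# Minimal sketch, assuming DeepSpeed (plus safetensors/huggingface_hub) is installed
# and that this runs from the repository root; the output directory is illustrative.
import sys

sys.path.insert(0, "checkpoint-2500")  # the script is copied into each checkpoint dir
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-2500",        # folder holding `latest` (-> global_step2500) and the rank files
    "checkpoint-2500-fp32",   # where the consolidated fp32 shards are written
    max_shard_size="5GB",     # default sharding threshold used by the script
    safe_serialization=True,  # write .safetensors instead of pytorch_model.bin
)
```

Since this run trains a LoRA adapter on a frozen base model, passing `exclude_frozen_parameters=True` should keep only the adapter weights in the consolidated output.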
checkpoint-2500/README.md
ADDED
@@ -0,0 +1,202 @@
1 |
+
---
|
2 |
+
base_model: Qwen/Qwen2.5-72B
|
3 |
+
library_name: peft
|
4 |
+
---
|
5 |
+
|
6 |
+
# Model Card for Model ID
|
7 |
+
|
8 |
+
<!-- Provide a quick summary of what the model is/does. -->
|
9 |
+
|
10 |
+
|
11 |
+
|
12 |
+
## Model Details
|
13 |
+
|
14 |
+
### Model Description
|
15 |
+
|
16 |
+
<!-- Provide a longer summary of what this model is. -->
|
17 |
+
|
18 |
+
|
19 |
+
|
20 |
+
- **Developed by:** [More Information Needed]
|
21 |
+
- **Funded by [optional]:** [More Information Needed]
|
22 |
+
- **Shared by [optional]:** [More Information Needed]
|
23 |
+
- **Model type:** [More Information Needed]
|
24 |
+
- **Language(s) (NLP):** [More Information Needed]
|
25 |
+
- **License:** [More Information Needed]
|
26 |
+
- **Finetuned from model [optional]:** [More Information Needed]
|
27 |
+
|
28 |
+
### Model Sources [optional]
|
29 |
+
|
30 |
+
<!-- Provide the basic links for the model. -->
|
31 |
+
|
32 |
+
- **Repository:** [More Information Needed]
|
33 |
+
- **Paper [optional]:** [More Information Needed]
|
34 |
+
- **Demo [optional]:** [More Information Needed]
|
35 |
+
|
36 |
+
## Uses
|
37 |
+
|
38 |
+
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
|
39 |
+
|
40 |
+
### Direct Use
|
41 |
+
|
42 |
+
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
|
43 |
+
|
44 |
+
[More Information Needed]
|
45 |
+
|
46 |
+
### Downstream Use [optional]
|
47 |
+
|
48 |
+
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
|
49 |
+
|
50 |
+
[More Information Needed]
|
51 |
+
|
52 |
+
### Out-of-Scope Use
|
53 |
+
|
54 |
+
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
55 |
+
|
56 |
+
[More Information Needed]
|
57 |
+
|
58 |
+
## Bias, Risks, and Limitations
|
59 |
+
|
60 |
+
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
|
61 |
+
|
62 |
+
[More Information Needed]
|
63 |
+
|
64 |
+
### Recommendations
|
65 |
+
|
66 |
+
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
|
67 |
+
|
68 |
+
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
|
69 |
+
|
70 |
+
## How to Get Started with the Model
|
71 |
+
|
72 |
+
Use the code below to get started with the model.
|
73 |
+
|
74 |
+
[More Information Needed]
|
75 |
+
|
76 |
+
## Training Details
|
77 |
+
|
78 |
+
### Training Data
|
79 |
+
|
80 |
+
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
|
81 |
+
|
82 |
+
[More Information Needed]
|
83 |
+
|
84 |
+
### Training Procedure
|
85 |
+
|
86 |
+
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
|
87 |
+
|
88 |
+
#### Preprocessing [optional]
|
89 |
+
|
90 |
+
[More Information Needed]
|
91 |
+
|
92 |
+
|
93 |
+
#### Training Hyperparameters
|
94 |
+
|
95 |
+
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
|
96 |
+
|
97 |
+
#### Speeds, Sizes, Times [optional]
|
98 |
+
|
99 |
+
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
|
100 |
+
|
101 |
+
[More Information Needed]
|
102 |
+
|
103 |
+
## Evaluation
|
104 |
+
|
105 |
+
<!-- This section describes the evaluation protocols and provides the results. -->
|
106 |
+
|
107 |
+
### Testing Data, Factors & Metrics
|
108 |
+
|
109 |
+
#### Testing Data
|
110 |
+
|
111 |
+
<!-- This should link to a Dataset Card if possible. -->
|
112 |
+
|
113 |
+
[More Information Needed]
|
114 |
+
|
115 |
+
#### Factors
|
116 |
+
|
117 |
+
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
|
118 |
+
|
119 |
+
[More Information Needed]
|
120 |
+
|
121 |
+
#### Metrics
|
122 |
+
|
123 |
+
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
|
124 |
+
|
125 |
+
[More Information Needed]
|
126 |
+
|
127 |
+
### Results
|
128 |
+
|
129 |
+
[More Information Needed]
|
130 |
+
|
131 |
+
#### Summary
|
132 |
+
|
133 |
+
|
134 |
+
|
135 |
+
## Model Examination [optional]
|
136 |
+
|
137 |
+
<!-- Relevant interpretability work for the model goes here -->
|
138 |
+
|
139 |
+
[More Information Needed]
|
140 |
+
|
141 |
+
## Environmental Impact
|
142 |
+
|
143 |
+
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
|
144 |
+
|
145 |
+
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
|
146 |
+
|
147 |
+
- **Hardware Type:** [More Information Needed]
|
148 |
+
- **Hours used:** [More Information Needed]
|
149 |
+
- **Cloud Provider:** [More Information Needed]
|
150 |
+
- **Compute Region:** [More Information Needed]
|
151 |
+
- **Carbon Emitted:** [More Information Needed]
|
152 |
+
|
153 |
+
## Technical Specifications [optional]
|
154 |
+
|
155 |
+
### Model Architecture and Objective
|
156 |
+
|
157 |
+
[More Information Needed]
|
158 |
+
|
159 |
+
### Compute Infrastructure
|
160 |
+
|
161 |
+
[More Information Needed]
|
162 |
+
|
163 |
+
#### Hardware
|
164 |
+
|
165 |
+
[More Information Needed]
|
166 |
+
|
167 |
+
#### Software
|
168 |
+
|
169 |
+
[More Information Needed]
|
170 |
+
|
171 |
+
## Citation [optional]
|
172 |
+
|
173 |
+
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
174 |
+
|
175 |
+
**BibTeX:**
|
176 |
+
|
177 |
+
[More Information Needed]
|
178 |
+
|
179 |
+
**APA:**
|
180 |
+
|
181 |
+
[More Information Needed]
|
182 |
+
|
183 |
+
## Glossary [optional]
|
184 |
+
|
185 |
+
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
186 |
+
|
187 |
+
[More Information Needed]
|
188 |
+
|
189 |
+
## More Information [optional]
|
190 |
+
|
191 |
+
[More Information Needed]
|
192 |
+
|
193 |
+
## Model Card Authors [optional]
|
194 |
+
|
195 |
+
[More Information Needed]
|
196 |
+
|
197 |
+
## Model Card Contact
|
198 |
+
|
199 |
+
[More Information Needed]
|
200 |
+
### Framework versions
|
201 |
+
|
202 |
+
- PEFT 0.12.0
|
checkpoint-2500/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
1 |
+
{
|
2 |
+
"alpha_pattern": {},
|
3 |
+
"auto_mapping": null,
|
4 |
+
"base_model_name_or_path": "Qwen/Qwen2.5-72B",
|
5 |
+
"bias": "none",
|
6 |
+
"fan_in_fan_out": false,
|
7 |
+
"inference_mode": true,
|
8 |
+
"init_lora_weights": true,
|
9 |
+
"layer_replication": null,
|
10 |
+
"layers_pattern": null,
|
11 |
+
"layers_to_transform": null,
|
12 |
+
"loftq_config": {},
|
13 |
+
"lora_alpha": 32,
|
14 |
+
"lora_dropout": 0.0,
|
15 |
+
"megatron_config": null,
|
16 |
+
"megatron_core": "megatron.core",
|
17 |
+
"modules_to_save": null,
|
18 |
+
"peft_type": "LORA",
|
19 |
+
"r": 128,
|
20 |
+
"rank_pattern": {},
|
21 |
+
"revision": null,
|
22 |
+
"target_modules": [
|
23 |
+
"q_proj",
|
24 |
+
"k_proj",
|
25 |
+
"up_proj",
|
26 |
+
"gate_proj",
|
27 |
+
"o_proj",
|
28 |
+
"v_proj",
|
29 |
+
"down_proj"
|
30 |
+
],
|
31 |
+
"task_type": "CAUSAL_LM",
|
32 |
+
"use_dora": false,
|
33 |
+
"use_rslora": true
|
34 |
+
}
|
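This adapter_config.json describes a rank-128 LoRA adapter (lora_alpha 32, rsLoRA scaling, no dropout) over every attention and MLP projection of the Qwen/Qwen2.5-72B base model. Below is a minimal loading sketch with PEFT; the adapter path, dtype and device_map are illustrative choices, and note that with `use_rslora: true` the adapter update is scaled by `lora_alpha / sqrt(r)` ≈ 2.83 rather than `lora_alpha / r` = 0.25.

```python
# Minimal sketch, assuming the adapter weights sit next to this adapter_config.json
# (e.g. in checkpoint-2500/) and that enough memory is available for the 72B base;
# path, dtype and device_map are illustrative, not part of the repo.
import math
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-72B", torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, "checkpoint-2500")   # reads adapter_config.json
tokenizer = AutoTokenizer.from_pretrained("checkpoint-2500")

# use_rslora=true -> update scaled by alpha / sqrt(r) instead of alpha / r
print("rsLoRA scaling:", 32 / math.sqrt(128))  # ~2.83

# model = model.merge_and_unload()  # optionally fold the adapter into the base weights
```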
checkpoint-2500/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
1 |
+
{
|
2 |
+
"</tool_call>": 151658,
|
3 |
+
"<tool_call>": 151657,
|
4 |
+
"<|box_end|>": 151649,
|
5 |
+
"<|box_start|>": 151648,
|
6 |
+
"<|endoftext|>": 151643,
|
7 |
+
"<|file_sep|>": 151664,
|
8 |
+
"<|fim_middle|>": 151660,
|
9 |
+
"<|fim_pad|>": 151662,
|
10 |
+
"<|fim_prefix|>": 151659,
|
11 |
+
"<|fim_suffix|>": 151661,
|
12 |
+
"<|im_end|>": 151645,
|
13 |
+
"<|im_start|>": 151644,
|
14 |
+
"<|image_pad|>": 151655,
|
15 |
+
"<|object_ref_end|>": 151647,
|
16 |
+
"<|object_ref_start|>": 151646,
|
17 |
+
"<|quad_end|>": 151651,
|
18 |
+
"<|quad_start|>": 151650,
|
19 |
+
"<|repo_name|>": 151663,
|
20 |
+
"<|video_pad|>": 151656,
|
21 |
+
"<|vision_end|>": 151653,
|
22 |
+
"<|vision_pad|>": 151654,
|
23 |
+
"<|vision_start|>": 151652
|
24 |
+
}
|
checkpoint-2500/latest
ADDED
@@ -0,0 +1 @@
1 |
+
global_step2500
|
checkpoint-2500/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
checkpoint-2500/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
1 |
+
{
|
2 |
+
"additional_special_tokens": [
|
3 |
+
"<|im_start|>",
|
4 |
+
"<|im_end|>",
|
5 |
+
"<|object_ref_start|>",
|
6 |
+
"<|object_ref_end|>",
|
7 |
+
"<|box_start|>",
|
8 |
+
"<|box_end|>",
|
9 |
+
"<|quad_start|>",
|
10 |
+
"<|quad_end|>",
|
11 |
+
"<|vision_start|>",
|
12 |
+
"<|vision_end|>",
|
13 |
+
"<|vision_pad|>",
|
14 |
+
"<|image_pad|>",
|
15 |
+
"<|video_pad|>"
|
16 |
+
],
|
17 |
+
"eos_token": {
|
18 |
+
"content": "<|endoftext|>",
|
19 |
+
"lstrip": false,
|
20 |
+
"normalized": false,
|
21 |
+
"rstrip": false,
|
22 |
+
"single_word": false
|
23 |
+
},
|
24 |
+
"pad_token": {
|
25 |
+
"content": "<|endoftext|>",
|
26 |
+
"lstrip": false,
|
27 |
+
"normalized": false,
|
28 |
+
"rstrip": false,
|
29 |
+
"single_word": false
|
30 |
+
}
|
31 |
+
}
|
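added_tokens.json and special_tokens_map.json pin the Qwen2.5 extra vocabulary (token IDs 151643–151664: ChatML markers, vision placeholders, tool-call and FIM tags) and reuse `<|endoftext|>` as both eos and pad token. A small round-trip check, assuming the tokenizer files shown here were saved together under checkpoint-2500/:

```python
# Minimal sketch, assuming the tokenizer files above live together in checkpoint-2500/.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-2500")

# a few of the fixed IDs from added_tokens.json
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tok.convert_tokens_to_ids("<|im_start|>") == 151644
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645

# special_tokens_map.json maps both eos and pad to <|endoftext|>
print("eos:", tok.eos_token, "| pad:", tok.pad_token)
```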
checkpoint-2500/tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": { "content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151644": { "content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151645": { "content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151646": { "content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151647": { "content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151648": { "content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151649": { "content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151650": { "content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151651": { "content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151652": { "content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151653": { "content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151654": { "content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151655": { "content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151656": { "content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "151657": { "content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
+     "151658": { "content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
+     "151659": { "content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
+     "151660": { "content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
+     "151661": { "content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
+     "151662": { "content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
+     "151663": { "content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
+     "151664": { "content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false }
+   },
+   "additional_special_tokens": ["<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>", "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>", "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>"],
+   "bos_token": null,
+   "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
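The chat_template field above is a Jinja template implementing ChatML-style turns, with optional tool signatures injected into the system message inside <tools></tools> tags and tool calls/responses wrapped in <tool_call>/<tool_response>. A minimal usage sketch (the local path and message content are illustrative; apply_chat_template is the standard transformers entry point that consumes this field):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-2500")  # illustrative local path
messages = [{"role": "user", "content": "Hello"}]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Expected shape of the rendered prompt, per the template above:
#   <|im_start|>system\nYou are a helpful assistant.<|im_end|>\n
#   <|im_start|>user\nHello<|im_end|>\n
#   <|im_start|>assistant\n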
checkpoint-2500/trainer_state.json
ADDED
@@ -0,0 +1,1815 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.1213406873143827,
+   "eval_steps": 600,
+   "global_step": 2500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     { "epoch": 0.00848536274925753, "grad_norm": 0.4898678891363344, "learning_rate": 8.488964346349746e-07, "loss": 1.8056, "step": 10 },
+     { "epoch": 0.01697072549851506, "grad_norm": 0.3537473179717183, "learning_rate": 1.6977928692699491e-06, "loss": 1.7621, "step": 20 },
+     { "epoch": 0.025456088247772592, "grad_norm": 0.28215953004159977, "learning_rate": 2.546689303904924e-06, "loss": 1.7571, "step": 30 },
+     { "epoch": 0.03394145099703012, "grad_norm": 0.27446565146764923, "learning_rate": 3.3955857385398982e-06, "loss": 1.7136, "step": 40 },
+     { "epoch": 0.04242681374628765, "grad_norm": 0.17051549768176558, "learning_rate": 4.244482173174873e-06, "loss": 1.6767, "step": 50 },
+     { "epoch": 0.050912176495545185, "grad_norm": 0.17763882467320422, "learning_rate": 5.093378607809848e-06, "loss": 1.6371, "step": 60 },
+     { "epoch": 0.05939753924480271, "grad_norm": 0.14311462596290048, "learning_rate": 5.942275042444822e-06, "loss": 1.6324, "step": 70 },
+     { "epoch": 0.06788290199406025, "grad_norm": 0.1659540846071645, "learning_rate": 6.7911714770797965e-06, "loss": 1.6062, "step": 80 },
+     { "epoch": 0.07636826474331777, "grad_norm": 0.20064072815620043, "learning_rate": 7.640067911714771e-06, "loss": 1.5832, "step": 90 },
+     { "epoch": 0.0848536274925753, "grad_norm": 0.2179045681711979, "learning_rate": 8.488964346349745e-06, "loss": 1.5898, "step": 100 },
+     { "epoch": 0.09333899024183284, "grad_norm": 0.23866012053128668, "learning_rate": 9.337860780984721e-06, "loss": 1.5924, "step": 110 },
+     { "epoch": 0.10182435299109037, "grad_norm": 0.18578051776430282, "learning_rate": 1.0186757215619695e-05, "loss": 1.5877, "step": 120 },
+     { "epoch": 0.1103097157403479, "grad_norm": 0.2216509707409362, "learning_rate": 1.103565365025467e-05, "loss": 1.5947, "step": 130 },
+     { "epoch": 0.11879507848960542, "grad_norm": 0.20427142255694086, "learning_rate": 1.1884550084889643e-05, "loss": 1.5841, "step": 140 },
+     { "epoch": 0.12728044123886295, "grad_norm": 0.1765851415675038, "learning_rate": 1.2733446519524619e-05, "loss": 1.5878, "step": 150 },
+     { "epoch": 0.1357658039881205, "grad_norm": 0.1769355117060811, "learning_rate": 1.3582342954159593e-05, "loss": 1.5795, "step": 160 },
+     { "epoch": 0.14425116673737803, "grad_norm": 0.1617675663096666, "learning_rate": 1.4431239388794569e-05, "loss": 1.5549, "step": 170 },
+     { "epoch": 0.15273652948663555, "grad_norm": 0.17302259072151574, "learning_rate": 1.5280135823429543e-05, "loss": 1.5808, "step": 180 },
+     { "epoch": 0.1612218922358931, "grad_norm": 0.16876039012432806, "learning_rate": 1.6129032258064517e-05, "loss": 1.5676, "step": 190 },
+     { "epoch": 0.1697072549851506, "grad_norm": 0.19627360154037596, "learning_rate": 1.697792869269949e-05, "loss": 1.5598, "step": 200 },
+     { "epoch": 0.17819261773440814, "grad_norm": 0.16078510362361015, "learning_rate": 1.7826825127334465e-05, "loss": 1.5667, "step": 210 },
+     { "epoch": 0.18667798048366568, "grad_norm": 0.16044786518959703, "learning_rate": 1.8675721561969442e-05, "loss": 1.5815, "step": 220 },
+     { "epoch": 0.1951633432329232, "grad_norm": 0.15656958873834717, "learning_rate": 1.9524617996604416e-05, "loss": 1.5576, "step": 230 },
+     { "epoch": 0.20364870598218074, "grad_norm": 0.1687290471357602, "learning_rate": 2.037351443123939e-05, "loss": 1.5453, "step": 240 },
+     { "epoch": 0.21213406873143828, "grad_norm": 0.1519017348276184, "learning_rate": 2.1222410865874364e-05, "loss": 1.5554, "step": 250 },
+     { "epoch": 0.2206194314806958, "grad_norm": 0.15761892005160086, "learning_rate": 2.207130730050934e-05, "loss": 1.5494, "step": 260 },
+     { "epoch": 0.22910479422995333, "grad_norm": 0.16857088482977495, "learning_rate": 2.2920203735144312e-05, "loss": 1.5794, "step": 270 },
+     { "epoch": 0.23759015697921085, "grad_norm": 0.1678705209913503, "learning_rate": 2.3769100169779286e-05, "loss": 1.5373, "step": 280 },
+     { "epoch": 0.2460755197284684, "grad_norm": 0.14812649566587394, "learning_rate": 2.461799660441426e-05, "loss": 1.5504, "step": 290 },
+     { "epoch": 0.2545608824777259, "grad_norm": 0.17651916734325857, "learning_rate": 2.5466893039049238e-05, "loss": 1.5607, "step": 300 },
+     { "epoch": 0.26304624522698344, "grad_norm": 0.14883055338507856, "learning_rate": 2.6315789473684212e-05, "loss": 1.5311, "step": 310 },
+     { "epoch": 0.271531607976241, "grad_norm": 0.15787522753231265, "learning_rate": 2.7164685908319186e-05, "loss": 1.5656, "step": 320 },
+     { "epoch": 0.2800169707254985, "grad_norm": 0.1625232940237689, "learning_rate": 2.801358234295416e-05, "loss": 1.5686, "step": 330 },
+     { "epoch": 0.28850233347475607, "grad_norm": 0.18505951289343867, "learning_rate": 2.8862478777589137e-05, "loss": 1.5474, "step": 340 },
+     { "epoch": 0.29698769622401355, "grad_norm": 0.13785772316349984, "learning_rate": 2.9711375212224108e-05, "loss": 1.5696, "step": 350 },
+     { "epoch": 0.3054730589732711, "grad_norm": 0.13531274658248552, "learning_rate": 3.0560271646859086e-05, "loss": 1.5551, "step": 360 },
+     { "epoch": 0.31395842172252864, "grad_norm": 0.1366381415368909, "learning_rate": 3.140916808149406e-05, "loss": 1.524, "step": 370 },
+     { "epoch": 0.3224437844717862, "grad_norm": 0.14587220569353926, "learning_rate": 3.2258064516129034e-05, "loss": 1.5515, "step": 380 },
+     { "epoch": 0.3309291472210437, "grad_norm": 0.13336349383744864, "learning_rate": 3.310696095076401e-05, "loss": 1.5457, "step": 390 },
+     { "epoch": 0.3394145099703012, "grad_norm": 0.1772016947970983, "learning_rate": 3.395585738539898e-05, "loss": 1.5582, "step": 400 },
+     { "epoch": 0.34789987271955874, "grad_norm": 0.13819420575084573, "learning_rate": 3.4804753820033956e-05, "loss": 1.5326, "step": 410 },
+     { "epoch": 0.3563852354688163, "grad_norm": 0.12729862167862188, "learning_rate": 3.565365025466893e-05, "loss": 1.5387, "step": 420 },
+     { "epoch": 0.3648705982180738, "grad_norm": 0.11777082851399363, "learning_rate": 3.6502546689303904e-05, "loss": 1.5587, "step": 430 },
+     { "epoch": 0.37335596096733137, "grad_norm": 0.15372268131323022, "learning_rate": 3.7351443123938885e-05, "loss": 1.5362, "step": 440 },
+     { "epoch": 0.3818413237165889, "grad_norm": 0.12616185572252248, "learning_rate": 3.820033955857386e-05, "loss": 1.5548, "step": 450 },
+     { "epoch": 0.3903266864658464, "grad_norm": 0.1311200786303391, "learning_rate": 3.904923599320883e-05, "loss": 1.5409, "step": 460 },
+     { "epoch": 0.39881204921510394, "grad_norm": 0.1707919112561785, "learning_rate": 3.989813242784381e-05, "loss": 1.5509, "step": 470 },
+     { "epoch": 0.4072974119643615, "grad_norm": 0.14660149264284913, "learning_rate": 4.074702886247878e-05, "loss": 1.5433, "step": 480 },
+     { "epoch": 0.415782774713619, "grad_norm": 0.12478895483834351, "learning_rate": 4.1595925297113755e-05, "loss": 1.5382, "step": 490 },
+     { "epoch": 0.42426813746287656, "grad_norm": 0.12327957445795817, "learning_rate": 4.244482173174873e-05, "loss": 1.5515, "step": 500 },
+     { "epoch": 0.43275350021213405, "grad_norm": 0.12922777738650987, "learning_rate": 4.32937181663837e-05, "loss": 1.5688, "step": 510 },
+     { "epoch": 0.4412388629613916, "grad_norm": 0.12486802189783415, "learning_rate": 4.414261460101868e-05, "loss": 1.5452, "step": 520 },
+     { "epoch": 0.44972422571064913, "grad_norm": 0.1360610874577123, "learning_rate": 4.499151103565366e-05, "loss": 1.5493, "step": 530 },
+     { "epoch": 0.45820958845990667, "grad_norm": 0.1884897685356775, "learning_rate": 4.5840407470288625e-05, "loss": 1.5511, "step": 540 },
+     { "epoch": 0.4666949512091642, "grad_norm": 0.12446302384809525, "learning_rate": 4.6689303904923606e-05, "loss": 1.5458, "step": 550 },
+     { "epoch": 0.4751803139584217, "grad_norm": 0.13169591804768588, "learning_rate": 4.753820033955857e-05, "loss": 1.5569, "step": 560 },
+     { "epoch": 0.48366567670767924, "grad_norm": 0.1343809247449631, "learning_rate": 4.8387096774193554e-05, "loss": 1.5408, "step": 570 },
+     { "epoch": 0.4921510394569368, "grad_norm": 0.14024589853602, "learning_rate": 4.923599320882852e-05, "loss": 1.5487, "step": 580 },
+     { "epoch": 0.5006364022061943, "grad_norm": 0.16240429253875313, "learning_rate": 4.999999560970061e-05, "loss": 1.5488, "step": 590 },
+     { "epoch": 0.5091217649554518, "grad_norm": 0.12575424857894482, "learning_rate": 4.999946877563971e-05, "loss": 1.532, "step": 600 },
+     { "epoch": 0.5091217649554518, "eval_loss": 1.519254446029663, "eval_runtime": 53.3242, "eval_samples_per_second": 7.145, "eval_steps_per_second": 0.9, "step": 600 },
+     { "epoch": 0.5176071277047094, "grad_norm": 0.18688482756329736, "learning_rate": 4.999806390290309e-05, "loss": 1.5544, "step": 610 },
+     { "epoch": 0.5260924904539669, "grad_norm": 0.12425469431830571, "learning_rate": 4.999578104083307e-05, "loss": 1.5443, "step": 620 },
+     { "epoch": 0.5345778532032245, "grad_norm": 0.1299027485420099, "learning_rate": 4.999262026960902e-05, "loss": 1.5569, "step": 630 },
+     { "epoch": 0.543063215952482, "grad_norm": 0.11441754852508934, "learning_rate": 4.998858170024449e-05, "loss": 1.5316, "step": 640 },
+     { "epoch": 0.5515485787017395, "grad_norm": 0.14888547248976478, "learning_rate": 4.998366547458326e-05, "loss": 1.5177, "step": 650 },
+     { "epoch": 0.560033941450997, "grad_norm": 0.14859292774768867, "learning_rate": 4.997787176529449e-05, "loss": 1.5394, "step": 660 },
+     { "epoch": 0.5685193042002545, "grad_norm": 0.12499154376539734, "learning_rate": 4.997120077586651e-05, "loss": 1.5554, "step": 670 },
+     { "epoch": 0.5770046669495121, "grad_norm": 0.1218974898058821, "learning_rate": 4.9963652740599774e-05, "loss": 1.5335, "step": 680 },
+     { "epoch": 0.5854900296987696, "grad_norm": 0.1273110498715124, "learning_rate": 4.995522792459859e-05, "loss": 1.5349, "step": 690 },
+     { "epoch": 0.5939753924480271, "grad_norm": 0.12115412881719101, "learning_rate": 4.994592662376183e-05, "loss": 1.5419, "step": 700 },
+     { "epoch": 0.6024607551972847, "grad_norm": 0.14855096330233286, "learning_rate": 4.99357491647725e-05, "loss": 1.513, "step": 710 },
+     { "epoch": 0.6109461179465422, "grad_norm": 0.11407988659327956, "learning_rate": 4.992469590508628e-05, "loss": 1.5243, "step": 720 },
+     { "epoch": 0.6194314806957998, "grad_norm": 0.1197712643781127, "learning_rate": 4.9912767232919035e-05, "loss": 1.5177, "step": 730 },
+     { "epoch": 0.6279168434450573, "grad_norm": 0.12400515877262065, "learning_rate": 4.9899963567233074e-05, "loss": 1.5619, "step": 740 },
+     { "epoch": 0.6364022061943148, "grad_norm": 0.12250385257708406, "learning_rate": 4.988628535772249e-05, "loss": 1.539, "step": 750 },
+     { "epoch": 0.6448875689435724, "grad_norm": 0.1262441090496857, "learning_rate": 4.987173308479738e-05, "loss": 1.5195, "step": 760 },
+     { "epoch": 0.6533729316928298, "grad_norm": 0.12459694416473029, "learning_rate": 4.985630725956694e-05, "loss": 1.5462, "step": 770 },
+     { "epoch": 0.6618582944420874, "grad_norm": 0.12985189006106762, "learning_rate": 4.9840008423821527e-05, "loss": 1.5113, "step": 780 },
+     { "epoch": 0.6703436571913449, "grad_norm": 0.12689306141471304, "learning_rate": 4.9822837150013636e-05, "loss": 1.5201, "step": 790 },
+     { "epoch": 0.6788290199406024, "grad_norm": 0.15393156370587963, "learning_rate": 4.980479404123778e-05, "loss": 1.5121, "step": 800 },
+     { "epoch": 0.68731438268986, "grad_norm": 0.13213701895207608, "learning_rate": 4.978587973120931e-05, "loss": 1.5307, "step": 810 },
+     { "epoch": 0.6957997454391175, "grad_norm": 0.11561354931316294, "learning_rate": 4.9766094884242184e-05, "loss": 1.5316, "step": 820 },
+     { "epoch": 0.7042851081883751, "grad_norm": 0.12414772399330044, "learning_rate": 4.974544019522559e-05, "loss": 1.5148, "step": 830 },
+     { "epoch": 0.7127704709376326, "grad_norm": 0.1171652849153521, "learning_rate": 4.972391638959959e-05, "loss": 1.5096, "step": 840 },
+     { "epoch": 0.7212558336868902, "grad_norm": 0.12868937349582316, "learning_rate": 4.9701524223329585e-05, "loss": 1.5282, "step": 850 },
+     { "epoch": 0.7297411964361477, "grad_norm": 0.1200015077117309, "learning_rate": 4.967826448287981e-05, "loss": 1.5512, "step": 860 },
+     { "epoch": 0.7382265591854051, "grad_norm": 0.12340885660045105, "learning_rate": 4.96541379851857e-05, "loss": 1.5394, "step": 870 },
+     { "epoch": 0.7467119219346627, "grad_norm": 0.12976937691467555, "learning_rate": 4.962914557762517e-05, "loss": 1.51, "step": 880 },
+     { "epoch": 0.7551972846839202, "grad_norm": 0.11912878476038466, "learning_rate": 4.9603288137988905e-05, "loss": 1.5294, "step": 890 },
+     { "epoch": 0.7636826474331778, "grad_norm": 0.1299625480337927, "learning_rate": 4.957656657444947e-05, "loss": 1.507, "step": 900 },
+     { "epoch": 0.7721680101824353, "grad_norm": 0.12380144459698468, "learning_rate": 4.954898182552946e-05, "loss": 1.5376, "step": 910 },
+     { "epoch": 0.7806533729316928, "grad_norm": 0.13139339643682763, "learning_rate": 4.9520534860068535e-05, "loss": 1.5291, "step": 920 },
+     { "epoch": 0.7891387356809504, "grad_norm": 0.13088956203983898, "learning_rate": 4.949122667718935e-05, "loss": 1.5239, "step": 930 },
+     { "epoch": 0.7976240984302079, "grad_norm": 0.12586052988453703, "learning_rate": 4.94610583062625e-05, "loss": 1.5525, "step": 940 },
+     { "epoch": 0.8061094611794655, "grad_norm": 0.12020996031652877, "learning_rate": 4.943003080687035e-05, "loss": 1.5525, "step": 950 },
+     { "epoch": 0.814594823928723, "grad_norm": 0.12866375954060869, "learning_rate": 4.9398145268769856e-05, "loss": 1.5266, "step": 960 },
+     { "epoch": 0.8230801866779804, "grad_norm": 0.13166136756817035, "learning_rate": 4.936540281185423e-05, "loss": 1.5041, "step": 970 },
+     { "epoch": 0.831565549427238, "grad_norm": 0.12481946698483787, "learning_rate": 4.933180458611364e-05, "loss": 1.5271, "step": 980 },
+     { "epoch": 0.8400509121764955, "grad_norm": 0.12264463761209114, "learning_rate": 4.9297351771594844e-05, "loss": 1.5354, "step": 990 },
+     { "epoch": 0.8485362749257531, "grad_norm": 0.11985452856537594, "learning_rate": 4.926204557835968e-05, "loss": 1.5167, "step": 1000 },
+     { "epoch": 0.8570216376750106, "grad_norm": 0.13125396521190327, "learning_rate": 4.9225887246442634e-05, "loss": 1.5282, "step": 1010 },
+     { "epoch": 0.8655070004242681, "grad_norm": 0.12730192328072554, "learning_rate": 4.918887804580725e-05, "loss": 1.5089, "step": 1020 },
+     { "epoch": 0.8739923631735257, "grad_norm": 0.12724644219344786, "learning_rate": 4.915101927630153e-05, "loss": 1.4964, "step": 1030 },
+     { "epoch": 0.8824777259227832, "grad_norm": 0.13578611501833232, "learning_rate": 4.911231226761227e-05, "loss": 1.5189, "step": 1040 },
+     { "epoch": 0.8909630886720408, "grad_norm": 0.13577513964986457, "learning_rate": 4.90727583792184e-05, "loss": 1.5149, "step": 1050 },
+     { "epoch": 0.8994484514212983, "grad_norm": 0.1269735011676505, "learning_rate": 4.903235900034317e-05, "loss": 1.5066, "step": 1060 },
+     { "epoch": 0.9079338141705557, "grad_norm": 0.13250058214235566, "learning_rate": 4.899111554990543e-05, "loss": 1.5129, "step": 1070 },
+     { "epoch": 0.9164191769198133, "grad_norm": 0.13130735246433495, "learning_rate": 4.894902947646975e-05, "loss": 1.5156, "step": 1080 },
+     { "epoch": 0.9249045396690708, "grad_norm": 0.1273580180253049, "learning_rate": 4.890610225819553e-05, "loss": 1.5324, "step": 1090 },
+     { "epoch": 0.9333899024183284, "grad_norm": 0.13155314243939242, "learning_rate": 4.8862335402785136e-05, "loss": 1.5106, "step": 1100 },
+     { "epoch": 0.9418752651675859, "grad_norm": 0.13564895211984299, "learning_rate": 4.88177304474309e-05, "loss": 1.5067, "step": 1110 },
+     { "epoch": 0.9503606279168434, "grad_norm": 0.12774735587114736, "learning_rate": 4.877228895876115e-05, "loss": 1.5182, "step": 1120 },
+     { "epoch": 0.958845990666101, "grad_norm": 0.1307997709537685, "learning_rate": 4.872601253278517e-05, "loss": 1.4969, "step": 1130 },
+     { "epoch": 0.9673313534153585, "grad_norm": 0.1304794845040634, "learning_rate": 4.867890279483717e-05, "loss": 1.5264, "step": 1140 },
+     { "epoch": 0.9758167161646161, "grad_norm": 0.13666141796489684, "learning_rate": 4.8630961399519206e-05, "loss": 1.5467, "step": 1150 },
+     { "epoch": 0.9843020789138736, "grad_norm": 0.1370278303190263, "learning_rate": 4.8582190030643e-05, "loss": 1.5127, "step": 1160 },
+     { "epoch": 0.9927874416631312, "grad_norm": 0.1390936629299565, "learning_rate": 4.8532590401170894e-05, "loss": 1.5058, "step": 1170 },
+     { "epoch": 1.0012728044123886, "grad_norm": 0.12934475548108287, "learning_rate": 4.848216425315561e-05, "loss": 1.5202, "step": 1180 },
+     { "epoch": 1.0097581671616462, "grad_norm": 0.13898591683370803, "learning_rate": 4.843091335767913e-05, "loss": 1.4563, "step": 1190 },
+     { "epoch": 1.0182435299109036, "grad_norm": 0.17488231535826249, "learning_rate": 4.837883951479043e-05, "loss": 1.4402, "step": 1200 },
+     { "epoch": 1.0182435299109036, "eval_loss": 1.4955657720565796, "eval_runtime": 52.424, "eval_samples_per_second": 7.268, "eval_steps_per_second": 0.916, "step": 1200 },
+     { "epoch": 1.0267288926601612, "grad_norm": 0.1536036344095855, "learning_rate": 4.832594455344229e-05, "loss": 1.4848, "step": 1210 },
+     { "epoch": 1.0352142554094188, "grad_norm": 0.15762414421336599, "learning_rate": 4.827223033142706e-05, "loss": 1.4567, "step": 1220 },
+     { "epoch": 1.0436996181586762, "grad_norm": 0.15058229398130366, "learning_rate": 4.8217698735311414e-05, "loss": 1.4672, "step": 1230 },
+     { "epoch": 1.0521849809079338, "grad_norm": 0.16010992835678386, "learning_rate": 4.8162351680370044e-05, "loss": 1.4458, "step": 1240 },
+     { "epoch": 1.0606703436571914, "grad_norm": 0.16758816000341356, "learning_rate": 4.810619111051847e-05, "loss": 1.4842, "step": 1250 },
+     { "epoch": 1.069155706406449, "grad_norm": 0.16559260972674986, "learning_rate": 4.8049218998244696e-05, "loss": 1.4556, "step": 1260 },
+     { "epoch": 1.0776410691557063, "grad_norm": 0.17237632034416966, "learning_rate": 4.7991437344539966e-05, "loss": 1.4813, "step": 1270 },
+     { "epoch": 1.086126431904964, "grad_norm": 0.17112756741722487, "learning_rate": 4.793284817882845e-05, "loss": 1.4535, "step": 1280 },
+     { "epoch": 1.0946117946542215, "grad_norm": 0.16828572707718548, "learning_rate": 4.787345355889604e-05, "loss": 1.4344, "step": 1290 },
+     { "epoch": 1.103097157403479, "grad_norm": 0.15709986047041227, "learning_rate": 4.7813255570817985e-05, "loss": 1.4744, "step": 1300 },
+     { "epoch": 1.1115825201527365, "grad_norm": 0.16651547128146313, "learning_rate": 4.775225632888568e-05, "loss": 1.4561, "step": 1310 },
+     { "epoch": 1.120067882901994, "grad_norm": 0.16750176017515714, "learning_rate": 4.76904579755324e-05, "loss": 1.4616, "step": 1320 },
+     { "epoch": 1.1285532456512515, "grad_norm": 0.1608016567554825, "learning_rate": 4.7627862681258037e-05, "loss": 1.4593, "step": 1330 },
+     { "epoch": 1.137038608400509, "grad_norm": 0.21390766919038295, "learning_rate": 4.756447264455287e-05, "loss": 1.4484, "step": 1340 },
+     { "epoch": 1.1455239711497667, "grad_norm": 0.16826883293172662, "learning_rate": 4.750029009182038e-05, "loss": 1.4703, "step": 1350 },
+     { "epoch": 1.1540093338990243, "grad_norm": 0.17431508867079595, "learning_rate": 4.7435317277299e-05, "loss": 1.4701, "step": 1360 },
+     { "epoch": 1.1624946966482816, "grad_norm": 0.15973851467570443, "learning_rate": 4.736955648298299e-05, "loss": 1.4503, "step": 1370 },
+     { "epoch": 1.1709800593975392, "grad_norm": 0.1887713767970947, "learning_rate": 4.730301001854225e-05, "loss": 1.4624, "step": 1380 },
+     { "epoch": 1.1794654221467968, "grad_norm": 0.16898695344997974, "learning_rate": 4.7235680221241216e-05, "loss": 1.4452, "step": 1390 },
+     { "epoch": 1.1879507848960542, "grad_norm": 0.20014553287073528, "learning_rate": 4.716756945585681e-05, "loss": 1.4717, "step": 1400 },
+     { "epoch": 1.1964361476453118, "grad_norm": 0.17137954325200072, "learning_rate": 4.709868011459528e-05, "loss": 1.4403, "step": 1410 },
+     { "epoch": 1.2049215103945694, "grad_norm": 0.17801721751888322, "learning_rate": 4.7029014617008294e-05, "loss": 1.4339, "step": 1420 },
+     { "epoch": 1.213406873143827, "grad_norm": 0.17139613676642362, "learning_rate": 4.695857540990789e-05, "loss": 1.4573, "step": 1430 },
+     { "epoch": 1.2218922358930844, "grad_norm": 0.16971403514498054, "learning_rate": 4.688736496728058e-05, "loss": 1.4282, "step": 1440 },
+     { "epoch": 1.230377598642342, "grad_norm": 0.17200272420880428, "learning_rate": 4.681538579020038e-05, "loss": 1.4434, "step": 1450 },
+     { "epoch": 1.2388629613915996, "grad_norm": 0.17208160407432616, "learning_rate": 4.6742640406741106e-05, "loss": 1.45, "step": 1460 },
+     { "epoch": 1.247348324140857, "grad_norm": 0.1939626212901777, "learning_rate": 4.666913137188743e-05, "loss": 1.4608, "step": 1470 },
+     { "epoch": 1.2558336868901145, "grad_norm": 0.17291794493304186, "learning_rate": 4.6594861267445236e-05, "loss": 1.4671, "step": 1480 },
+     { "epoch": 1.2643190496393721, "grad_norm": 0.18219792041638924, "learning_rate": 4.651983270195093e-05, "loss": 1.4262, "step": 1490 },
+     { "epoch": 1.2728044123886297, "grad_norm": 0.18086437830489926, "learning_rate": 4.644404831057979e-05, "loss": 1.4455, "step": 1500 },
+     { "epoch": 1.281289775137887, "grad_norm": 0.17417619624549402, "learning_rate": 4.636751075505344e-05, "loss": 1.4873, "step": 1510 },
+     { "epoch": 1.2897751378871447, "grad_norm": 0.18354282411845188, "learning_rate": 4.629022272354637e-05, "loss": 1.4525, "step": 1520 },
+     { "epoch": 1.298260500636402, "grad_norm": 0.17985617345325455, "learning_rate": 4.621218693059149e-05, "loss": 1.4303, "step": 1530 },
+     { "epoch": 1.3067458633856597, "grad_norm": 0.1809708317849863, "learning_rate": 4.6133406116984795e-05, "loss": 1.4631, "step": 1540 },
+     { "epoch": 1.3152312261349173, "grad_norm": 0.17487374671212322, "learning_rate": 4.6053883049689145e-05, "loss": 1.4482, "step": 1550 },
+     { "epoch": 1.3237165888841749, "grad_norm": 0.19912807671077193, "learning_rate": 4.5973620521737036e-05, "loss": 1.4497, "step": 1560 },
+     { "epoch": 1.3322019516334322, "grad_norm": 0.17853627546912074, "learning_rate": 4.5892621352132514e-05, "loss": 1.4456, "step": 1570 },
+     { "epoch": 1.3406873143826898, "grad_norm": 0.18252596927754394, "learning_rate": 4.581088838575218e-05, "loss": 1.4328, "step": 1580 },
+     { "epoch": 1.3491726771319474, "grad_norm": 0.17604951053556211, "learning_rate": 4.572842449324525e-05, "loss": 1.4442, "step": 1590 },
+     { "epoch": 1.3576580398812048, "grad_norm": 0.18358942463311748, "learning_rate": 4.564523257093275e-05, "loss": 1.4338, "step": 1600 },
+     { "epoch": 1.3661434026304624, "grad_norm": 0.20508703236267142, "learning_rate": 4.5561315540705774e-05, "loss": 1.4445, "step": 1610 },
+     { "epoch": 1.37462876537972, "grad_norm": 0.18486352550747187, "learning_rate": 4.547667634992288e-05, "loss": 1.4261, "step": 1620 },
+     { "epoch": 1.3831141281289776, "grad_norm": 0.17492766465456316, "learning_rate": 4.539131797130656e-05, "loss": 1.4258, "step": 1630 },
+     { "epoch": 1.391599490878235, "grad_norm": 0.19692876587833674, "learning_rate": 4.530524340283881e-05, "loss": 1.4349, "step": 1640 },
+     { "epoch": 1.4000848536274926, "grad_norm": 0.19155373430892478, "learning_rate": 4.521845566765589e-05, "loss": 1.4536, "step": 1650 },
+     { "epoch": 1.4085702163767502, "grad_norm": 0.18544325977459192, "learning_rate": 4.513095781394208e-05, "loss": 1.4363, "step": 1660 },
+     { "epoch": 1.4170555791260075, "grad_norm": 0.177828004720666, "learning_rate": 4.504275291482267e-05, "loss": 1.4595, "step": 1670 },
+     { "epoch": 1.4255409418752651, "grad_norm": 0.17855432230356816, "learning_rate": 4.495384406825601e-05, "loss": 1.4211, "step": 1680 },
+     { "epoch": 1.4340263046245227, "grad_norm": 0.20232492538380317, "learning_rate": 4.486423439692469e-05, "loss": 1.4189, "step": 1690 },
+     { "epoch": 1.4425116673737803, "grad_norm": 0.1975109303350431, "learning_rate": 4.477392704812585e-05, "loss": 1.4565, "step": 1700 },
+     { "epoch": 1.4509970301230377, "grad_norm": 0.19619010830399825, "learning_rate": 4.468292519366071e-05, "loss": 1.4382, "step": 1710 },
+     { "epoch": 1.4594823928722953, "grad_norm": 0.18168826428246143, "learning_rate": 4.459123202972308e-05, "loss": 1.4471, "step": 1720 },
+     { "epoch": 1.4679677556215527, "grad_norm": 0.1923264062362399, "learning_rate": 4.449885077678717e-05, "loss": 1.4153, "step": 1730 },
+     { "epoch": 1.4764531183708103, "grad_norm": 0.1907937313040222, "learning_rate": 4.440578467949445e-05, "loss": 1.4432, "step": 1740 },
+     { "epoch": 1.4849384811200679, "grad_norm": 0.19107457667767244, "learning_rate": 4.431203700653968e-05, "loss": 1.4285, "step": 1750 },
+     { "epoch": 1.4934238438693255, "grad_norm": 0.19847350429107552, "learning_rate": 4.421761105055613e-05, "loss": 1.4383, "step": 1760 },
+     { "epoch": 1.501909206618583, "grad_norm": 0.18536475556610216, "learning_rate": 4.4122510127999937e-05, "loss": 1.42, "step": 1770 },
+     { "epoch": 1.5103945693678404, "grad_norm": 0.18481023473586697, "learning_rate": 4.4026737579033584e-05, "loss": 1.4384, "step": 1780 },
+     { "epoch": 1.518879932117098, "grad_norm": 0.20863867505874642, "learning_rate": 4.393029676740864e-05, "loss": 1.4543, "step": 1790 },
+     { "epoch": 1.5273652948663554, "grad_norm": 0.1816036870853105, "learning_rate": 4.3833191080347575e-05, "loss": 1.434, "step": 1800 },
+     { "epoch": 1.5273652948663554, "eval_loss": 1.4622184038162231, "eval_runtime": 52.4041, "eval_samples_per_second": 7.27, "eval_steps_per_second": 0.916, "step": 1800 },
+     { "epoch": 1.535850657615613, "grad_norm": 0.19378252368958881, "learning_rate": 4.3735423928424815e-05, "loss": 1.4275, "step": 1810 },
+     { "epoch": 1.5443360203648706, "grad_norm": 0.20453331251433848, "learning_rate": 4.363699874544697e-05, "loss": 1.4203, "step": 1820 },
+     { "epoch": 1.5528213831141282, "grad_norm": 0.26684319417219377, "learning_rate": 4.3537918988332156e-05, "loss": 1.4372, "step": 1830 },
+     { "epoch": 1.5613067458633858, "grad_norm": 0.25745160303419773, "learning_rate": 4.343818813698868e-05, "loss": 1.4082, "step": 1840 },
+     { "epoch": 1.5697921086126432, "grad_norm": 0.19969727996700776, "learning_rate": 4.3337809694192765e-05, "loss": 1.4314, "step": 1850 },
+     { "epoch": 1.5782774713619008, "grad_norm": 0.20117210832277968, "learning_rate": 4.3236787185465525e-05, "loss": 1.4293, "step": 1860 },
+     { "epoch": 1.5867628341111581, "grad_norm": 0.20173003641028897, "learning_rate": 4.313512415894913e-05, "loss": 1.4406, "step": 1870 },
+     { "epoch": 1.5952481968604157, "grad_norm": 0.20304770794371527, "learning_rate": 4.303282418528224e-05, "loss": 1.4286, "step": 1880 },
+     { "epoch": 1.6037335596096733, "grad_norm": 0.19126658907738198, "learning_rate": 4.292989085747452e-05, "loss": 1.4184, "step": 1890 },
+     { "epoch": 1.612218922358931, "grad_norm": 0.20069554966453027, "learning_rate": 4.282632779078051e-05, "loss": 1.4133, "step": 1900 },
+     { "epoch": 1.6207042851081885, "grad_norm": 0.1952881519566686, "learning_rate": 4.2722138622572624e-05, "loss": 1.4432, "step": 1910 },
+     { "epoch": 1.629189647857446, "grad_norm": 0.19763704668680288, "learning_rate": 4.261732701221339e-05, "loss": 1.3921, "step": 1920 },
+     { "epoch": 1.6376750106067033, "grad_norm": 0.19821464294464497, "learning_rate": 4.2511896640926925e-05, "loss": 1.4454, "step": 1930 },
+     { "epoch": 1.6461603733559609, "grad_norm": 0.20456545626297834, "learning_rate": 4.240585121166966e-05, "loss": 1.4147, "step": 1940 },
+     { "epoch": 1.6546457361052185, "grad_norm": 0.2119092529186395, "learning_rate": 4.229919444900027e-05, "loss": 1.3969, "step": 1950 },
+     { "epoch": 1.663131098854476, "grad_norm": 0.20330157582122357, "learning_rate": 4.2191930098948865e-05, "loss": 1.426, "step": 1960 },
+     { "epoch": 1.6716164616037337, "grad_norm": 0.21761164739298738, "learning_rate": 4.2084061928885406e-05, "loss": 1.4246, "step": 1970 },
+     { "epoch": 1.680101824352991, "grad_norm": 0.19331588142071401, "learning_rate": 4.197559372738741e-05, "loss": 1.4305, "step": 1980 },
+     { "epoch": 1.6885871871022486, "grad_norm": 0.20188460724329996, "learning_rate": 4.186652930410685e-05, "loss": 1.4153, "step": 1990 },
+     { "epoch": 1.697072549851506, "grad_norm": 0.20988950033571588, "learning_rate": 4.1756872489636425e-05, "loss": 1.3894, "step": 2000 },
+     { "epoch": 1.7055579126007636, "grad_norm": 0.1966475893123187, "learning_rate": 4.1646627135374916e-05, "loss": 1.3962, "step": 2010 },
+     { "epoch": 1.7140432753500212, "grad_norm": 0.20785207367991768, "learning_rate": 4.1535797113392004e-05, "loss": 1.4037, "step": 2020 },
+     { "epoch": 1.7225286380992788, "grad_norm": 0.2029940281663133, "learning_rate": 4.1424386316292224e-05, "loss": 1.4011, "step": 2030 },
+     { "epoch": 1.7310140008485364, "grad_norm": 0.2247844551379277, "learning_rate": 4.131239865707829e-05, "loss": 1.4084, "step": 2040 },
+     { "epoch": 1.7394993635977938, "grad_norm": 0.20900441746105022, "learning_rate": 4.11998380690136e-05, "loss": 1.4235, "step": 2050 },
+     { "epoch": 1.7479847263470514, "grad_norm": 0.20362408546889926, "learning_rate": 4.108670850548416e-05, "loss": 1.4204, "step": 2060 },
+     { "epoch": 1.7564700890963088, "grad_norm": 0.22281567946240438, "learning_rate": 4.097301393985968e-05, "loss": 1.4023, "step": 2070 },
+     { "epoch": 1.7649554518455663, "grad_norm": 0.20867113178797225, "learning_rate": 4.085875836535404e-05, "loss": 1.3895, "step": 2080 },
+     { "epoch": 1.773440814594824, "grad_norm": 0.22113231886160947, "learning_rate": 4.0743945794885063e-05, "loss": 1.3963, "step": 2090 },
+     { "epoch": 1.7819261773440815, "grad_norm": 0.22334563577844263, "learning_rate": 4.062858026093351e-05, "loss": 1.3988, "step": 2100 },
+     { "epoch": 1.7904115400933391, "grad_norm": 0.23218581668265403, "learning_rate": 4.051266581540152e-05, "loss": 1.4068, "step": 2110 },
+     { "epoch": 1.7988969028425965, "grad_norm": 0.20295589384571033, "learning_rate": 4.0396206529470234e-05, "loss": 1.3883, "step": 2120 },
+     { "epoch": 1.8073822655918539, "grad_norm": 0.22861611442392848, "learning_rate": 4.027920649345687e-05, "loss": 1.4043, "step": 2130 },
+     { "epoch": 1.8158676283411115, "grad_norm": 0.2083012771089638, "learning_rate": 4.0161669816671e-05, "loss": 1.398, "step": 2140 },
+     { "epoch": 1.824352991090369, "grad_norm": 0.21936173231840464, "learning_rate": 4.004360062727028e-05, "loss": 1.4142, "step": 2150 },
+     { "epoch": 1.8328383538396267, "grad_norm": 0.21383435796328337, "learning_rate": 3.9925003072115406e-05, "loss": 1.4138, "step": 2160 },
+     { "epoch": 1.8413237165888843, "grad_norm": 0.23301608248270392, "learning_rate": 3.9805881316624506e-05, "loss": 1.4195, "step": 2170 },
+     { "epoch": 1.8498090793381419, "grad_norm": 0.22424766656883474, "learning_rate": 3.968623954462681e-05, "loss": 1.4011, "step": 2180 },
+     { "epoch": 1.8582944420873992, "grad_norm": 0.21286417342881453, "learning_rate": 3.9566081958215734e-05, "loss": 1.409, "step": 2190 },
+     { "epoch": 1.8667798048366566, "grad_norm": 0.21944800687444807, "learning_rate": 3.9445412777601284e-05, "loss": 1.3877, "step": 2200 },
+     { "epoch": 1.8752651675859142, "grad_norm": 0.23113173625974803, "learning_rate": 3.932423624096181e-05, "loss": 1.4089, "step": 2210 },
|
1581 |
+
},
|
1582 |
+
{
|
1583 |
+
"epoch": 1.8837505303351718,
|
1584 |
+
"grad_norm": 0.2081941699587778,
|
1585 |
+
"learning_rate": 3.920255660429517e-05,
|
1586 |
+
"loss": 1.4024,
|
1587 |
+
"step": 2220
|
1588 |
+
},
|
1589 |
+
{
|
1590 |
+
"epoch": 1.8922358930844294,
|
1591 |
+
"grad_norm": 0.2188685806654701,
|
1592 |
+
"learning_rate": 3.908037814126927e-05,
|
1593 |
+
"loss": 1.3878,
|
1594 |
+
"step": 2230
|
1595 |
+
},
|
1596 |
+
{
|
1597 |
+
"epoch": 1.900721255833687,
|
1598 |
+
"grad_norm": 0.22761843244757962,
|
1599 |
+
"learning_rate": 3.895770514307193e-05,
|
1600 |
+
"loss": 1.4004,
|
1601 |
+
"step": 2240
|
1602 |
+
},
|
1603 |
+
{
|
1604 |
+
"epoch": 1.9092066185829444,
|
1605 |
+
"grad_norm": 0.23309183623120422,
|
1606 |
+
"learning_rate": 3.883454191826017e-05,
|
1607 |
+
"loss": 1.4188,
|
1608 |
+
"step": 2250
|
1609 |
+
},
|
1610 |
+
{
|
1611 |
+
"epoch": 1.917691981332202,
|
1612 |
+
"grad_norm": 0.20329785843911802,
|
1613 |
+
"learning_rate": 3.871089279260891e-05,
|
1614 |
+
"loss": 1.3893,
|
1615 |
+
"step": 2260
|
1616 |
+
},
|
1617 |
+
{
|
1618 |
+
"epoch": 1.9261773440814594,
|
1619 |
+
"grad_norm": 0.23470973193726366,
|
1620 |
+
"learning_rate": 3.8586762108958995e-05,
|
1621 |
+
"loss": 1.3974,
|
1622 |
+
"step": 2270
|
1623 |
+
},
|
1624 |
+
{
|
1625 |
+
"epoch": 1.934662706830717,
|
1626 |
+
"grad_norm": 0.22779136837044714,
|
1627 |
+
"learning_rate": 3.8462154227064725e-05,
|
1628 |
+
"loss": 1.4115,
|
1629 |
+
"step": 2280
|
1630 |
+
},
|
1631 |
+
{
|
1632 |
+
"epoch": 1.9431480695799745,
|
1633 |
+
"grad_norm": 0.22338952315651892,
|
1634 |
+
"learning_rate": 3.833707352344068e-05,
|
1635 |
+
"loss": 1.3873,
|
1636 |
+
"step": 2290
|
1637 |
+
},
|
1638 |
+
{
|
1639 |
+
"epoch": 1.9516334323292321,
|
1640 |
+
"grad_norm": 0.23069304025882129,
|
1641 |
+
"learning_rate": 3.821152439120801e-05,
|
1642 |
+
"loss": 1.3944,
|
1643 |
+
"step": 2300
|
1644 |
+
},
|
1645 |
+
{
|
1646 |
+
"epoch": 1.9601187950784897,
|
1647 |
+
"grad_norm": 0.23590596270163203,
|
1648 |
+
"learning_rate": 3.808551123994018e-05,
|
1649 |
+
"loss": 1.3857,
|
1650 |
+
"step": 2310
|
1651 |
+
},
|
1652 |
+
{
|
1653 |
+
"epoch": 1.9686041578277471,
|
1654 |
+
"grad_norm": 0.22545661808214923,
|
1655 |
+
"learning_rate": 3.795903849550805e-05,
|
1656 |
+
"loss": 1.3628,
|
1657 |
+
"step": 2320
|
1658 |
+
},
|
1659 |
+
{
|
1660 |
+
"epoch": 1.9770895205770047,
|
1661 |
+
"grad_norm": 0.2450769875954842,
|
1662 |
+
"learning_rate": 3.7832110599924455e-05,
|
1663 |
+
"loss": 1.4079,
|
1664 |
+
"step": 2330
|
1665 |
+
},
|
1666 |
+
{
|
1667 |
+
"epoch": 1.985574883326262,
|
1668 |
+
"grad_norm": 0.22931499326784313,
|
1669 |
+
"learning_rate": 3.7704732011188166e-05,
|
1670 |
+
"loss": 1.379,
|
1671 |
+
"step": 2340
|
1672 |
+
},
|
1673 |
+
{
|
1674 |
+
"epoch": 1.9940602460755197,
|
1675 |
+
"grad_norm": 0.22417244507397657,
|
1676 |
+
"learning_rate": 3.7576907203127346e-05,
|
1677 |
+
"loss": 1.4035,
|
1678 |
+
"step": 2350
|
1679 |
+
},
|
1680 |
+
{
|
1681 |
+
"epoch": 2.0025456088247773,
|
1682 |
+
"grad_norm": 0.24496197221575314,
|
1683 |
+
"learning_rate": 3.7448640665242406e-05,
|
1684 |
+
"loss": 1.442,
|
1685 |
+
"step": 2360
|
1686 |
+
},
|
1687 |
+
{
|
1688 |
+
"epoch": 2.011030971574035,
|
1689 |
+
"grad_norm": 0.2532740296990078,
|
1690 |
+
"learning_rate": 3.73199369025483e-05,
|
1691 |
+
"loss": 1.2672,
|
1692 |
+
"step": 2370
|
1693 |
+
},
|
1694 |
+
{
|
1695 |
+
"epoch": 2.0195163343232925,
|
1696 |
+
"grad_norm": 0.2890155987968593,
|
1697 |
+
"learning_rate": 3.7190800435416355e-05,
|
1698 |
+
"loss": 1.246,
|
1699 |
+
"step": 2380
|
1700 |
+
},
|
1701 |
+
{
|
1702 |
+
"epoch": 2.02800169707255,
|
1703 |
+
"grad_norm": 0.2541972565696406,
|
1704 |
+
"learning_rate": 3.706123579941545e-05,
|
1705 |
+
"loss": 1.2603,
|
1706 |
+
"step": 2390
|
1707 |
+
},
|
1708 |
+
{
|
1709 |
+
"epoch": 2.036487059821807,
|
1710 |
+
"grad_norm": 0.2530140862527023,
|
1711 |
+
"learning_rate": 3.693124754515272e-05,
|
1712 |
+
"loss": 1.2638,
|
1713 |
+
"step": 2400
|
1714 |
+
},
|
1715 |
+
{
|
1716 |
+
"epoch": 2.036487059821807,
|
1717 |
+
"eval_loss": 1.435962438583374,
|
1718 |
+
"eval_runtime": 52.582,
|
1719 |
+
"eval_samples_per_second": 7.246,
|
1720 |
+
"eval_steps_per_second": 0.913,
|
1721 |
+
"step": 2400
|
1722 |
+
},
|
1723 |
+
{
|
1724 |
+
"epoch": 2.044972422571065,
|
1725 |
+
"grad_norm": 0.25100458343337734,
|
1726 |
+
"learning_rate": 3.680084023811377e-05,
|
1727 |
+
"loss": 1.2711,
|
1728 |
+
"step": 2410
|
1729 |
+
},
|
1730 |
+
{
|
1731 |
+
"epoch": 2.0534577853203224,
|
1732 |
+
"grad_norm": 0.2695727673292618,
|
1733 |
+
"learning_rate": 3.66700184585023e-05,
|
1734 |
+
"loss": 1.2578,
|
1735 |
+
"step": 2420
|
1736 |
+
},
|
1737 |
+
{
|
1738 |
+
"epoch": 2.06194314806958,
|
1739 |
+
"grad_norm": 0.2605068415443213,
|
1740 |
+
"learning_rate": 3.6538786801079226e-05,
|
1741 |
+
"loss": 1.2506,
|
1742 |
+
"step": 2430
|
1743 |
+
},
|
1744 |
+
{
|
1745 |
+
"epoch": 2.0704285108188376,
|
1746 |
+
"grad_norm": 0.27415607207865045,
|
1747 |
+
"learning_rate": 3.64071498750013e-05,
|
1748 |
+
"loss": 1.2852,
|
1749 |
+
"step": 2440
|
1750 |
+
},
|
1751 |
+
{
|
1752 |
+
"epoch": 2.078913873568095,
|
1753 |
+
"grad_norm": 0.2688900338206285,
|
1754 |
+
"learning_rate": 3.627511230365928e-05,
|
1755 |
+
"loss": 1.2695,
|
1756 |
+
"step": 2450
|
1757 |
+
},
|
1758 |
+
{
|
1759 |
+
"epoch": 2.0873992363173524,
|
1760 |
+
"grad_norm": 0.2750825805336503,
|
1761 |
+
"learning_rate": 3.614267872451546e-05,
|
1762 |
+
"loss": 1.2643,
|
1763 |
+
"step": 2460
|
1764 |
+
},
|
1765 |
+
{
|
1766 |
+
"epoch": 2.09588459906661,
|
1767 |
+
"grad_norm": 0.2659269066581903,
|
1768 |
+
"learning_rate": 3.600985378894086e-05,
|
1769 |
+
"loss": 1.2868,
|
1770 |
+
"step": 2470
|
1771 |
+
},
|
1772 |
+
{
|
1773 |
+
"epoch": 2.1043699618158676,
|
1774 |
+
"grad_norm": 0.24411151291321526,
|
1775 |
+
"learning_rate": 3.587664216205183e-05,
|
1776 |
+
"loss": 1.2571,
|
1777 |
+
"step": 2480
|
1778 |
+
},
|
1779 |
+
{
|
1780 |
+
"epoch": 2.112855324565125,
|
1781 |
+
"grad_norm": 0.2574194755634052,
|
1782 |
+
"learning_rate": 3.574304852254621e-05,
|
1783 |
+
"loss": 1.2769,
|
1784 |
+
"step": 2490
|
1785 |
+
},
|
1786 |
+
{
|
1787 |
+
"epoch": 2.1213406873143827,
|
1788 |
+
"grad_norm": 0.2894545074998905,
|
1789 |
+
"learning_rate": 3.5609077562538997e-05,
|
1790 |
+
"loss": 1.2469,
|
1791 |
+
"step": 2500
|
1792 |
+
}
|
1793 |
+
],
|
1794 |
+
"logging_steps": 10,
|
1795 |
+
"max_steps": 5890,
|
1796 |
+
"num_input_tokens_seen": 0,
|
1797 |
+
"num_train_epochs": 5,
|
1798 |
+
"save_steps": 500,
|
1799 |
+
"stateful_callbacks": {
|
1800 |
+
"TrainerControl": {
|
1801 |
+
"args": {
|
1802 |
+
"should_epoch_stop": false,
|
1803 |
+
"should_evaluate": false,
|
1804 |
+
"should_log": false,
|
1805 |
+
"should_save": true,
|
1806 |
+
"should_training_stop": false
|
1807 |
+
},
|
1808 |
+
"attributes": {}
|
1809 |
+
}
|
1810 |
+
},
|
1811 |
+
"total_flos": 2101429250555904.0,
|
1812 |
+
"train_batch_size": 2,
|
1813 |
+
"trial_name": null,
|
1814 |
+
"trial_params": null
|
1815 |
+
}
|
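Since `trainer_state.json` is plain JSON, the loss curve logged above can be pulled out without any Hugging Face tooling. A minimal sketch, assuming it is run from the repo root (only the standard library is used):

```python
import json

# Load the trainer state saved with this checkpoint.
with open("checkpoint-2500/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"last train log: step {train[-1][0]}, loss {train[-1][1]}")
print(f"eval points: {evals}")  # e.g. (2400, 1.435962438583374)
```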
checkpoint-2500/vocab.json
ADDED
The diff for this file is too large to render.
checkpoint-2500/zero_to_fp32.py
ADDED
@@ -0,0 +1,674 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from zero 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example:
# python zero_to_fp32.py . output_dir/
# or
# python zero_to_fp32.py . output_dir/ --safe_serialization

import argparse
import torch
import glob
import math
import os
import re
import json
from tqdm import tqdm
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    # Dependency pre-check
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        state_dict_split = split_torch_state_dict_into_shards(state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard, output_path)

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory to the pytorch fp32 state_dict output files "
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size "
        "lower than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
        "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances "
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
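A copy of this script is saved into every checkpoint folder next to the `latest` tag file, so the ZeRO shards of, say, `checkpoint-2500` can be consolidated via the CLI shown in the header comment or from Python. A minimal sketch, assuming it is run from inside the checkpoint folder so the module resolves (the output folder name is an assumption); note that for a LoRA run like this one, the consolidated fp32 state dict contains the trained adapter weights (plus any stored frozen parameters), not a merged 72B model:

```python
import os

# zero_to_fp32.py lives in the same folder, so it imports directly
# when this is run from inside checkpoint-2500/.
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

os.makedirs("fp32_out", exist_ok=True)  # assumed output folder name
convert_zero_checkpoint_to_fp32_state_dict(
    ".",                      # checkpoint folder containing the `latest` tag file
    "fp32_out",               # where the consolidated weights are written
    safe_serialization=True,  # write model.safetensors instead of pickle
)
```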
checkpoint-3000/README.md
ADDED
@@ -0,0 +1,202 @@
---
base_model: Qwen/Qwen2.5-72B
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
### Framework versions

- PEFT 0.12.0
checkpoint-4000/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Qwen/Qwen2.5-72B",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
  "lora_dropout": 0.0,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 128,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "q_proj",
    "k_proj",
    "up_proj",
    "gate_proj",
    "o_proj",
    "v_proj",
    "down_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": true
}
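The config above describes a rank-128 rsLoRA adapter (alpha 32, no dropout) over every attention and MLP projection of Qwen2.5-72B. A minimal loading sketch with PEFT, assuming the checkpoint folder also holds the adapter weights and that enough memory is available for the 72B base model:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the frozen base model, then attach the LoRA adapter from this checkpoint.
base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-72B", torch_dtype="auto", device_map="auto"
)
model = PeftModel.from_pretrained(base, "checkpoint-4000")  # assumed local path
tokenizer = AutoTokenizer.from_pretrained("checkpoint-4000")

# Optionally fold the adapter into the base weights for adapter-free inference.
merged = model.merge_and_unload()
```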
checkpoint-4000/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
checkpoint-4000/latest
ADDED
@@ -0,0 +1 @@
global_step4000
checkpoint-4000/merges.txt
ADDED
The diff for this file is too large to render.
checkpoint-4000/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
checkpoint-4000/tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
  },
  "additional_special_tokens": ["<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>", "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>", "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>"],
  "bos_token": null,
  "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
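Note: the `chat_template` above is a Jinja template for the ChatML-style prompt format, with optional <tools> function-calling blocks. As a sketch of assumed usage (the path and message contents below are illustrative placeholders), the template is normally rendered through the tokenizer rather than by hand:

```python
# Sketch: render the chat template shipped in tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint-4000")   # placeholder path

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# With add_generation_prompt=True the rendered string ends with "<|im_start|>assistant\n",
# ready to be passed to the model for generation.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
```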
checkpoint-4000/trainer_state.json
ADDED
@@ -0,0 +1,2881 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.3941450997030125,
  "eval_steps": 600,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.00848536274925753, "grad_norm": 0.4898678891363344, "learning_rate": 8.488964346349746e-07, "loss": 1.8056, "step": 10},
    {"epoch": 0.01697072549851506, "grad_norm": 0.3537473179717183, "learning_rate": 1.6977928692699491e-06, "loss": 1.7621, "step": 20},
    {"epoch": 0.025456088247772592, "grad_norm": 0.28215953004159977, "learning_rate": 2.546689303904924e-06, "loss": 1.7571, "step": 30},
    {"epoch": 0.03394145099703012, "grad_norm": 0.27446565146764923, "learning_rate": 3.3955857385398982e-06, "loss": 1.7136, "step": 40},
    {"epoch": 0.04242681374628765, "grad_norm": 0.17051549768176558, "learning_rate": 4.244482173174873e-06, "loss": 1.6767, "step": 50},
    {"epoch": 0.050912176495545185, "grad_norm": 0.17763882467320422, "learning_rate": 5.093378607809848e-06, "loss": 1.6371, "step": 60},
    {"epoch": 0.05939753924480271, "grad_norm": 0.14311462596290048, "learning_rate": 5.942275042444822e-06, "loss": 1.6324, "step": 70},
    {"epoch": 0.06788290199406025, "grad_norm": 0.1659540846071645, "learning_rate": 6.7911714770797965e-06, "loss": 1.6062, "step": 80},
    {"epoch": 0.07636826474331777, "grad_norm": 0.20064072815620043, "learning_rate": 7.640067911714771e-06, "loss": 1.5832, "step": 90},
    {"epoch": 0.0848536274925753, "grad_norm": 0.2179045681711979, "learning_rate": 8.488964346349745e-06, "loss": 1.5898, "step": 100},
    {"epoch": 0.09333899024183284, "grad_norm": 0.23866012053128668, "learning_rate": 9.337860780984721e-06, "loss": 1.5924, "step": 110},
    {"epoch": 0.10182435299109037, "grad_norm": 0.18578051776430282, "learning_rate": 1.0186757215619695e-05, "loss": 1.5877, "step": 120},
    {"epoch": 0.1103097157403479, "grad_norm": 0.2216509707409362, "learning_rate": 1.103565365025467e-05, "loss": 1.5947, "step": 130},
    {"epoch": 0.11879507848960542, "grad_norm": 0.20427142255694086, "learning_rate": 1.1884550084889643e-05, "loss": 1.5841, "step": 140},
    {"epoch": 0.12728044123886295, "grad_norm": 0.1765851415675038, "learning_rate": 1.2733446519524619e-05, "loss": 1.5878, "step": 150},
    {"epoch": 0.1357658039881205, "grad_norm": 0.1769355117060811, "learning_rate": 1.3582342954159593e-05, "loss": 1.5795, "step": 160},
    {"epoch": 0.14425116673737803, "grad_norm": 0.1617675663096666, "learning_rate": 1.4431239388794569e-05, "loss": 1.5549, "step": 170},
    {"epoch": 0.15273652948663555, "grad_norm": 0.17302259072151574, "learning_rate": 1.5280135823429543e-05, "loss": 1.5808, "step": 180},
    {"epoch": 0.1612218922358931, "grad_norm": 0.16876039012432806, "learning_rate": 1.6129032258064517e-05, "loss": 1.5676, "step": 190},
    {"epoch": 0.1697072549851506, "grad_norm": 0.19627360154037596, "learning_rate": 1.697792869269949e-05, "loss": 1.5598, "step": 200},
    {"epoch": 0.17819261773440814, "grad_norm": 0.16078510362361015, "learning_rate": 1.7826825127334465e-05, "loss": 1.5667, "step": 210},
    {"epoch": 0.18667798048366568, "grad_norm": 0.16044786518959703, "learning_rate": 1.8675721561969442e-05, "loss": 1.5815, "step": 220},
    {"epoch": 0.1951633432329232, "grad_norm": 0.15656958873834717, "learning_rate": 1.9524617996604416e-05, "loss": 1.5576, "step": 230},
    {"epoch": 0.20364870598218074, "grad_norm": 0.1687290471357602, "learning_rate": 2.037351443123939e-05, "loss": 1.5453, "step": 240},
    {"epoch": 0.21213406873143828, "grad_norm": 0.1519017348276184, "learning_rate": 2.1222410865874364e-05, "loss": 1.5554, "step": 250},
    {"epoch": 0.2206194314806958, "grad_norm": 0.15761892005160086, "learning_rate": 2.207130730050934e-05, "loss": 1.5494, "step": 260},
    {"epoch": 0.22910479422995333, "grad_norm": 0.16857088482977495, "learning_rate": 2.2920203735144312e-05, "loss": 1.5794, "step": 270},
    {"epoch": 0.23759015697921085, "grad_norm": 0.1678705209913503, "learning_rate": 2.3769100169779286e-05, "loss": 1.5373, "step": 280},
    {"epoch": 0.2460755197284684, "grad_norm": 0.14812649566587394, "learning_rate": 2.461799660441426e-05, "loss": 1.5504, "step": 290},
    {"epoch": 0.2545608824777259, "grad_norm": 0.17651916734325857, "learning_rate": 2.5466893039049238e-05, "loss": 1.5607, "step": 300},
    {"epoch": 0.26304624522698344, "grad_norm": 0.14883055338507856, "learning_rate": 2.6315789473684212e-05, "loss": 1.5311, "step": 310},
    {"epoch": 0.271531607976241, "grad_norm": 0.15787522753231265, "learning_rate": 2.7164685908319186e-05, "loss": 1.5656, "step": 320},
    {"epoch": 0.2800169707254985, "grad_norm": 0.1625232940237689, "learning_rate": 2.801358234295416e-05, "loss": 1.5686, "step": 330},
    {"epoch": 0.28850233347475607, "grad_norm": 0.18505951289343867, "learning_rate": 2.8862478777589137e-05, "loss": 1.5474, "step": 340},
    {"epoch": 0.29698769622401355, "grad_norm": 0.13785772316349984, "learning_rate": 2.9711375212224108e-05, "loss": 1.5696, "step": 350},
    {"epoch": 0.3054730589732711, "grad_norm": 0.13531274658248552, "learning_rate": 3.0560271646859086e-05, "loss": 1.5551, "step": 360},
    {"epoch": 0.31395842172252864, "grad_norm": 0.1366381415368909, "learning_rate": 3.140916808149406e-05, "loss": 1.524, "step": 370},
    {"epoch": 0.3224437844717862, "grad_norm": 0.14587220569353926, "learning_rate": 3.2258064516129034e-05, "loss": 1.5515, "step": 380},
    {"epoch": 0.3309291472210437, "grad_norm": 0.13336349383744864, "learning_rate": 3.310696095076401e-05, "loss": 1.5457, "step": 390},
    {"epoch": 0.3394145099703012, "grad_norm": 0.1772016947970983, "learning_rate": 3.395585738539898e-05, "loss": 1.5582, "step": 400},
    {"epoch": 0.34789987271955874, "grad_norm": 0.13819420575084573, "learning_rate": 3.4804753820033956e-05, "loss": 1.5326, "step": 410},
    {"epoch": 0.3563852354688163, "grad_norm": 0.12729862167862188, "learning_rate": 3.565365025466893e-05, "loss": 1.5387, "step": 420},
    {"epoch": 0.3648705982180738, "grad_norm": 0.11777082851399363, "learning_rate": 3.6502546689303904e-05, "loss": 1.5587, "step": 430},
    {"epoch": 0.37335596096733137, "grad_norm": 0.15372268131323022, "learning_rate": 3.7351443123938885e-05, "loss": 1.5362, "step": 440},
    {"epoch": 0.3818413237165889, "grad_norm": 0.12616185572252248, "learning_rate": 3.820033955857386e-05, "loss": 1.5548, "step": 450},
    {"epoch": 0.3903266864658464, "grad_norm": 0.1311200786303391, "learning_rate": 3.904923599320883e-05, "loss": 1.5409, "step": 460},
    {"epoch": 0.39881204921510394, "grad_norm": 0.1707919112561785, "learning_rate": 3.989813242784381e-05, "loss": 1.5509, "step": 470},
    {"epoch": 0.4072974119643615, "grad_norm": 0.14660149264284913, "learning_rate": 4.074702886247878e-05, "loss": 1.5433, "step": 480},
    {"epoch": 0.415782774713619, "grad_norm": 0.12478895483834351, "learning_rate": 4.1595925297113755e-05, "loss": 1.5382, "step": 490},
    {"epoch": 0.42426813746287656, "grad_norm": 0.12327957445795817, "learning_rate": 4.244482173174873e-05, "loss": 1.5515, "step": 500},
    {"epoch": 0.43275350021213405, "grad_norm": 0.12922777738650987, "learning_rate": 4.32937181663837e-05, "loss": 1.5688, "step": 510},
    {"epoch": 0.4412388629613916, "grad_norm": 0.12486802189783415, "learning_rate": 4.414261460101868e-05, "loss": 1.5452, "step": 520},
    {"epoch": 0.44972422571064913, "grad_norm": 0.1360610874577123, "learning_rate": 4.499151103565366e-05, "loss": 1.5493, "step": 530},
    {"epoch": 0.45820958845990667, "grad_norm": 0.1884897685356775, "learning_rate": 4.5840407470288625e-05, "loss": 1.5511, "step": 540},
    {"epoch": 0.4666949512091642, "grad_norm": 0.12446302384809525, "learning_rate": 4.6689303904923606e-05, "loss": 1.5458, "step": 550},
    {"epoch": 0.4751803139584217, "grad_norm": 0.13169591804768588, "learning_rate": 4.753820033955857e-05, "loss": 1.5569, "step": 560},
    {"epoch": 0.48366567670767924, "grad_norm": 0.1343809247449631, "learning_rate": 4.8387096774193554e-05, "loss": 1.5408, "step": 570},
    {"epoch": 0.4921510394569368, "grad_norm": 0.14024589853602, "learning_rate": 4.923599320882852e-05, "loss": 1.5487, "step": 580},
    {"epoch": 0.5006364022061943, "grad_norm": 0.16240429253875313, "learning_rate": 4.999999560970061e-05, "loss": 1.5488, "step": 590},
    {"epoch": 0.5091217649554518, "grad_norm": 0.12575424857894482, "learning_rate": 4.999946877563971e-05, "loss": 1.532, "step": 600},
    {"epoch": 0.5091217649554518, "eval_loss": 1.519254446029663, "eval_runtime": 53.3242, "eval_samples_per_second": 7.145, "eval_steps_per_second": 0.9, "step": 600},
    {"epoch": 0.5176071277047094, "grad_norm": 0.18688482756329736, "learning_rate": 4.999806390290309e-05, "loss": 1.5544, "step": 610},
    {"epoch": 0.5260924904539669, "grad_norm": 0.12425469431830571, "learning_rate": 4.999578104083307e-05, "loss": 1.5443, "step": 620},
    {"epoch": 0.5345778532032245, "grad_norm": 0.1299027485420099, "learning_rate": 4.999262026960902e-05, "loss": 1.5569, "step": 630},
    {"epoch": 0.543063215952482, "grad_norm": 0.11441754852508934, "learning_rate": 4.998858170024449e-05, "loss": 1.5316, "step": 640},
    {"epoch": 0.5515485787017395, "grad_norm": 0.14888547248976478, "learning_rate": 4.998366547458326e-05, "loss": 1.5177, "step": 650},
    {"epoch": 0.560033941450997, "grad_norm": 0.14859292774768867, "learning_rate": 4.997787176529449e-05, "loss": 1.5394, "step": 660},
    {"epoch": 0.5685193042002545, "grad_norm": 0.12499154376539734, "learning_rate": 4.997120077586651e-05, "loss": 1.5554, "step": 670},
    {"epoch": 0.5770046669495121, "grad_norm": 0.1218974898058821, "learning_rate": 4.9963652740599774e-05, "loss": 1.5335, "step": 680},
    {"epoch": 0.5854900296987696, "grad_norm": 0.1273110498715124, "learning_rate": 4.995522792459859e-05, "loss": 1.5349, "step": 690},
    {"epoch": 0.5939753924480271, "grad_norm": 0.12115412881719101, "learning_rate": 4.994592662376183e-05, "loss": 1.5419, "step": 700},
    {"epoch": 0.6024607551972847, "grad_norm": 0.14855096330233286, "learning_rate": 4.99357491647725e-05, "loss": 1.513, "step": 710},
    {"epoch": 0.6109461179465422, "grad_norm": 0.11407988659327956, "learning_rate": 4.992469590508628e-05, "loss": 1.5243, "step": 720},
    {"epoch": 0.6194314806957998, "grad_norm": 0.1197712643781127, "learning_rate": 4.9912767232919035e-05, "loss": 1.5177, "step": 730},
    {"epoch": 0.6279168434450573, "grad_norm": 0.12400515877262065, "learning_rate": 4.9899963567233074e-05, "loss": 1.5619, "step": 740},
    {"epoch": 0.6364022061943148, "grad_norm": 0.12250385257708406, "learning_rate": 4.988628535772249e-05, "loss": 1.539, "step": 750},
    {"epoch": 0.6448875689435724, "grad_norm": 0.1262441090496857, "learning_rate": 4.987173308479738e-05, "loss": 1.5195, "step": 760},
    {"epoch": 0.6533729316928298, "grad_norm": 0.12459694416473029, "learning_rate": 4.985630725956694e-05, "loss": 1.5462, "step": 770},
    {"epoch": 0.6618582944420874, "grad_norm": 0.12985189006106762, "learning_rate": 4.9840008423821527e-05, "loss": 1.5113, "step": 780},
    {"epoch": 0.6703436571913449, "grad_norm": 0.12689306141471304, "learning_rate": 4.9822837150013636e-05, "loss": 1.5201, "step": 790},
    {"epoch": 0.6788290199406024, "grad_norm": 0.15393156370587963, "learning_rate": 4.980479404123778e-05, "loss": 1.5121, "step": 800},
    {"epoch": 0.68731438268986, "grad_norm": 0.13213701895207608, "learning_rate": 4.978587973120931e-05, "loss": 1.5307, "step": 810},
    {"epoch": 0.6957997454391175, "grad_norm": 0.11561354931316294, "learning_rate": 4.9766094884242184e-05, "loss": 1.5316, "step": 820},
    {"epoch": 0.7042851081883751, "grad_norm": 0.12414772399330044, "learning_rate": 4.974544019522559e-05, "loss": 1.5148, "step": 830},
    {"epoch": 0.7127704709376326, "grad_norm": 0.1171652849153521, "learning_rate": 4.972391638959959e-05, "loss": 1.5096, "step": 840},
    {"epoch": 0.7212558336868902, "grad_norm": 0.12868937349582316, "learning_rate": 4.9701524223329585e-05, "loss": 1.5282, "step": 850},
    {"epoch": 0.7297411964361477, "grad_norm": 0.1200015077117309, "learning_rate": 4.967826448287981e-05, "loss": 1.5512, "step": 860},
    {"epoch": 0.7382265591854051, "grad_norm": 0.12340885660045105, "learning_rate": 4.96541379851857e-05, "loss": 1.5394, "step": 870},
    {"epoch": 0.7467119219346627, "grad_norm": 0.12976937691467555, "learning_rate": 4.962914557762517e-05, "loss": 1.51, "step": 880},
    {"epoch": 0.7551972846839202, "grad_norm": 0.11912878476038466, "learning_rate": 4.9603288137988905e-05, "loss": 1.5294, "step": 890},
    {"epoch": 0.7636826474331778, "grad_norm": 0.1299625480337927, "learning_rate": 4.957656657444947e-05, "loss": 1.507, "step": 900},
    {"epoch": 0.7721680101824353, "grad_norm": 0.12380144459698468, "learning_rate": 4.954898182552946e-05, "loss": 1.5376, "step": 910},
    {"epoch": 0.7806533729316928, "grad_norm": 0.13139339643682763, "learning_rate": 4.9520534860068535e-05, "loss": 1.5291, "step": 920},
    {"epoch": 0.7891387356809504, "grad_norm": 0.13088956203983898, "learning_rate": 4.949122667718935e-05, "loss": 1.5239, "step": 930},
    {"epoch": 0.7976240984302079, "grad_norm": 0.12586052988453703, "learning_rate": 4.94610583062625e-05, "loss": 1.5525, "step": 940},
    {"epoch": 0.8061094611794655, "grad_norm": 0.12020996031652877, "learning_rate": 4.943003080687035e-05, "loss": 1.5525, "step": 950},
    {"epoch": 0.814594823928723, "grad_norm": 0.12866375954060869, "learning_rate": 4.9398145268769856e-05, "loss": 1.5266, "step": 960},
    {"epoch": 0.8230801866779804, "grad_norm": 0.13166136756817035, "learning_rate": 4.936540281185423e-05, "loss": 1.5041, "step": 970},
    {"epoch": 0.831565549427238, "grad_norm": 0.12481946698483787, "learning_rate": 4.933180458611364e-05, "loss": 1.5271, "step": 980},
    {"epoch": 0.8400509121764955, "grad_norm": 0.12264463761209114, "learning_rate": 4.9297351771594844e-05, "loss": 1.5354, "step": 990},
    {"epoch": 0.8485362749257531, "grad_norm": 0.11985452856537594, "learning_rate": 4.926204557835968e-05, "loss": 1.5167, "step": 1000},
    {"epoch": 0.8570216376750106, "grad_norm": 0.13125396521190327, "learning_rate": 4.9225887246442634e-05, "loss": 1.5282, "step": 1010},
    {"epoch": 0.8655070004242681, "grad_norm": 0.12730192328072554, "learning_rate": 4.918887804580725e-05, "loss": 1.5089, "step": 1020},
    {"epoch": 0.8739923631735257, "grad_norm": 0.12724644219344786, "learning_rate": 4.915101927630153e-05, "loss": 1.4964, "step": 1030},
    {"epoch": 0.8824777259227832, "grad_norm": 0.13578611501833232, "learning_rate": 4.911231226761227e-05, "loss": 1.5189, "step": 1040},
    {"epoch": 0.8909630886720408, "grad_norm": 0.13577513964986457, "learning_rate": 4.90727583792184e-05, "loss": 1.5149, "step": 1050},
    {"epoch": 0.8994484514212983, "grad_norm": 0.1269735011676505, "learning_rate": 4.903235900034317e-05, "loss": 1.5066, "step": 1060},
    {"epoch": 0.9079338141705557, "grad_norm": 0.13250058214235566, "learning_rate": 4.899111554990543e-05, "loss": 1.5129, "step": 1070},
    {"epoch": 0.9164191769198133, "grad_norm": 0.13130735246433495, "learning_rate": 4.894902947646975e-05, "loss": 1.5156, "step": 1080},
    {"epoch": 0.9249045396690708, "grad_norm": 0.1273580180253049, "learning_rate": 4.890610225819553e-05, "loss": 1.5324, "step": 1090},
    {"epoch": 0.9333899024183284, "grad_norm": 0.13155314243939242, "learning_rate": 4.8862335402785136e-05, "loss": 1.5106, "step": 1100},
    {"epoch": 0.9418752651675859, "grad_norm": 0.13564895211984299, "learning_rate": 4.88177304474309e-05, "loss": 1.5067, "step": 1110},
    {"epoch": 0.9503606279168434, "grad_norm": 0.12774735587114736, "learning_rate": 4.877228895876115e-05, "loss": 1.5182, "step": 1120},
    {"epoch": 0.958845990666101, "grad_norm": 0.1307997709537685, "learning_rate": 4.872601253278517e-05, "loss": 1.4969, "step": 1130},
    {"epoch": 0.9673313534153585, "grad_norm": 0.1304794845040634, "learning_rate": 4.867890279483717e-05, "loss": 1.5264, "step": 1140},
    {"epoch": 0.9758167161646161, "grad_norm": 0.13666141796489684, "learning_rate": 4.8630961399519206e-05, "loss": 1.5467, "step": 1150},
    {"epoch": 0.9843020789138736, "grad_norm": 0.1370278303190263, "learning_rate": 4.8582190030643e-05, "loss": 1.5127, "step": 1160},
    {"epoch": 0.9927874416631312, "grad_norm": 0.1390936629299565, "learning_rate": 4.8532590401170894e-05, "loss": 1.5058, "step": 1170},
    {"epoch": 1.0012728044123886, "grad_norm": 0.12934475548108287, "learning_rate": 4.848216425315561e-05, "loss": 1.5202, "step": 1180},
    {"epoch": 1.0097581671616462, "grad_norm": 0.13898591683370803, "learning_rate": 4.843091335767913e-05, "loss": 1.4563, "step": 1190},
    {"epoch": 1.0182435299109036, "grad_norm": 0.17488231535826249, "learning_rate": 4.837883951479043e-05, "loss": 1.4402, "step": 1200},
    {"epoch": 1.0182435299109036, "eval_loss": 1.4955657720565796, "eval_runtime": 52.424, "eval_samples_per_second": 7.268, "eval_steps_per_second": 0.916, "step": 1200},
    {"epoch": 1.0267288926601612, "grad_norm": 0.1536036344095855, "learning_rate": 4.832594455344229e-05, "loss": 1.4848, "step": 1210},
    {"epoch": 1.0352142554094188, "grad_norm": 0.15762414421336599, "learning_rate": 4.827223033142706e-05, "loss": 1.4567, "step": 1220},
    {"epoch": 1.0436996181586762, "grad_norm": 0.15058229398130366, "learning_rate": 4.8217698735311414e-05, "loss": 1.4672, "step": 1230},
    {"epoch": 1.0521849809079338, "grad_norm": 0.16010992835678386, "learning_rate": 4.8162351680370044e-05, "loss": 1.4458, "step": 1240},
    {"epoch": 1.0606703436571914, "grad_norm": 0.16758816000341356, "learning_rate": 4.810619111051847e-05, "loss": 1.4842, "step": 1250},
    {"epoch": 1.069155706406449, "grad_norm": 0.16559260972674986, "learning_rate": 4.8049218998244696e-05, "loss": 1.4556, "step": 1260},
    {"epoch": 1.0776410691557063, "grad_norm": 0.17237632034416966, "learning_rate": 4.7991437344539966e-05, "loss": 1.4813, "step": 1270},
    {"epoch": 1.086126431904964, "grad_norm": 0.17112756741722487, "learning_rate": 4.793284817882845e-05, "loss": 1.4535, "step": 1280},
    {"epoch": 1.0946117946542215, "grad_norm": 0.16828572707718548, "learning_rate": 4.787345355889604e-05, "loss": 1.4344, "step": 1290},
    {"epoch": 1.103097157403479, "grad_norm": 0.15709986047041227, "learning_rate": 4.7813255570817985e-05, "loss": 1.4744, "step": 1300},
    {"epoch": 1.1115825201527365, "grad_norm": 0.16651547128146313, "learning_rate": 4.775225632888568e-05, "loss": 1.4561, "step": 1310},
    {"epoch": 1.120067882901994, "grad_norm": 0.16750176017515714, "learning_rate": 4.76904579755324e-05, "loss": 1.4616, "step": 1320},
    {"epoch": 1.1285532456512515, "grad_norm": 0.1608016567554825, "learning_rate": 4.7627862681258037e-05, "loss": 1.4593, "step": 1330},
    {"epoch": 1.137038608400509, "grad_norm": 0.21390766919038295, "learning_rate": 4.756447264455287e-05, "loss": 1.4484, "step": 1340},
    {"epoch": 1.1455239711497667, "grad_norm": 0.16826883293172662, "learning_rate": 4.750029009182038e-05, "loss": 1.4703, "step": 1350},
    {"epoch": 1.1540093338990243, "grad_norm": 0.17431508867079595, "learning_rate": 4.7435317277299e-05, "loss": 1.4701, "step": 1360},
    {"epoch": 1.1624946966482816, "grad_norm": 0.15973851467570443, "learning_rate": 4.736955648298299e-05, "loss": 1.4503, "step": 1370},
    {"epoch": 1.1709800593975392, "grad_norm": 0.1887713767970947, "learning_rate": 4.730301001854225e-05, "loss": 1.4624, "step": 1380},
    {"epoch": 1.1794654221467968, "grad_norm": 0.16898695344997974, "learning_rate": 4.7235680221241216e-05, "loss": 1.4452, "step": 1390},
    {"epoch": 1.1879507848960542, "grad_norm": 0.20014553287073528, "learning_rate": 4.716756945585681e-05, "loss": 1.4717, "step": 1400},
    {"epoch": 1.1964361476453118, "grad_norm": 0.17137954325200072, "learning_rate": 4.709868011459528e-05, "loss": 1.4403, "step": 1410},
    {"epoch": 1.2049215103945694, "grad_norm": 0.17801721751888322, "learning_rate": 4.7029014617008294e-05, "loss": 1.4339, "step": 1420},
    {"epoch": 1.213406873143827, "grad_norm": 0.17139613676642362, "learning_rate": 4.695857540990789e-05, "loss": 1.4573, "step": 1430},
    {"epoch": 1.2218922358930844, "grad_norm": 0.16971403514498054, "learning_rate": 4.688736496728058e-05, "loss": 1.4282, "step": 1440},
    {"epoch": 1.230377598642342, "grad_norm": 0.17200272420880428, "learning_rate": 4.681538579020038e-05, "loss": 1.4434, "step": 1450},
    {"epoch": 1.2388629613915996, "grad_norm": 0.17208160407432616, "learning_rate": 4.6742640406741106e-05, "loss": 1.45, "step": 1460},
    {"epoch": 1.247348324140857, "grad_norm": 0.1939626212901777, "learning_rate": 4.666913137188743e-05, "loss": 1.4608, "step": 1470},
    {"epoch": 1.2558336868901145, "grad_norm": 0.17291794493304186, "learning_rate": 4.6594861267445236e-05, "loss": 1.4671, "step": 1480},
    {"epoch": 1.2643190496393721, "grad_norm": 0.18219792041638924, "learning_rate": 4.651983270195093e-05, "loss": 1.4262, "step": 1490},
    {"epoch": 1.2728044123886297, "grad_norm": 0.18086437830489926, "learning_rate": 4.644404831057979e-05, "loss": 1.4455, "step": 1500},
    {"epoch": 1.281289775137887, "grad_norm": 0.17417619624549402, "learning_rate": 4.636751075505344e-05, "loss": 1.4873, "step": 1510},
    {"epoch": 1.2897751378871447, "grad_norm": 0.18354282411845188, "learning_rate": 4.629022272354637e-05, "loss": 1.4525, "step": 1520},
    {"epoch": 1.298260500636402, "grad_norm": 0.17985617345325455, "learning_rate": 4.621218693059149e-05, "loss": 1.4303, "step": 1530},
    {"epoch": 1.3067458633856597, "grad_norm": 0.1809708317849863, "learning_rate": 4.6133406116984795e-05, "loss": 1.4631, "step": 1540},
    {"epoch": 1.3152312261349173, "grad_norm": 0.17487374671212322, "learning_rate": 4.6053883049689145e-05, "loss": 1.4482, "step": 1550},
    {"epoch": 1.3237165888841749, "grad_norm": 0.19912807671077193, "learning_rate": 4.5973620521737036e-05, "loss": 1.4497, "step": 1560},
    {"epoch": 1.3322019516334322, "grad_norm": 0.17853627546912074, "learning_rate": 4.5892621352132514e-05, "loss": 1.4456, "step": 1570},
    {"epoch": 1.3406873143826898, "grad_norm": 0.18252596927754394, "learning_rate": 4.581088838575218e-05, "loss": 1.4328, "step": 1580},
    {"epoch": 1.3491726771319474, "grad_norm": 0.17604951053556211, "learning_rate": 4.572842449324525e-05, "loss": 1.4442, "step": 1590},
    {"epoch": 1.3576580398812048, "grad_norm": 0.18358942463311748, "learning_rate": 4.564523257093275e-05, "loss": 1.4338, "step": 1600},
    {"epoch": 1.3661434026304624, "grad_norm": 0.20508703236267142, "learning_rate": 4.5561315540705774e-05, "loss": 1.4445, "step": 1610},
    {"epoch": 1.37462876537972, "grad_norm": 0.18486352550747187, "learning_rate": 4.547667634992288e-05, "loss": 1.4261, "step": 1620},
    {"epoch": 1.3831141281289776, "grad_norm": 0.17492766465456316, "learning_rate": 4.539131797130656e-05, "loss": 1.4258, "step": 1630},
    {"epoch": 1.391599490878235, "grad_norm": 0.19692876587833674, "learning_rate": 4.530524340283881e-05, "loss": 1.4349, "step": 1640},
    {"epoch": 1.4000848536274926, "grad_norm": 0.19155373430892478, "learning_rate": 4.521845566765589e-05, "loss": 1.4536, "step": 1650},
    {"epoch": 1.4085702163767502, "grad_norm": 0.18544325977459192, "learning_rate": 4.513095781394208e-05, "loss": 1.4363, "step": 1660},
    {"epoch": 1.4170555791260075, "grad_norm": 0.177828004720666, "learning_rate": 4.504275291482267e-05, "loss": 1.4595, "step": 1670},
    {"epoch": 1.4255409418752651, "grad_norm": 0.17855432230356816, "learning_rate": 4.495384406825601e-05, "loss": 1.4211, "step": 1680},
    {"epoch": 1.4340263046245227, "grad_norm": 0.20232492538380317, "learning_rate": 4.486423439692469e-05, "loss": 1.4189, "step": 1690},
    {"epoch": 1.4425116673737803, "grad_norm": 0.1975109303350431, "learning_rate": 4.477392704812585e-05, "loss": 1.4565, "step": 1700},
    {"epoch": 1.4509970301230377, "grad_norm": 0.19619010830399825, "learning_rate": 4.468292519366071e-05, "loss": 1.4382, "step": 1710},
    {"epoch": 1.4594823928722953, "grad_norm": 0.18168826428246143, "learning_rate": 4.459123202972308e-05, "loss": 1.4471, "step": 1720},
    {"epoch": 1.4679677556215527, "grad_norm": 0.1923264062362399, "learning_rate": 4.449885077678717e-05, "loss": 1.4153, "step": 1730},
    {"epoch": 1.4764531183708103, "grad_norm": 0.1907937313040222, "learning_rate": 4.440578467949445e-05, "loss": 1.4432, "step": 1740},
    {"epoch": 1.4849384811200679, "grad_norm": 0.19107457667767244, "learning_rate": 4.431203700653968e-05, "loss": 1.4285, "step": 1750},
    {"epoch": 1.4934238438693255, "grad_norm": 0.19847350429107552, "learning_rate": 4.421761105055613e-05, "loss": 1.4383, "step": 1760},
    {"epoch": 1.501909206618583, "grad_norm": 0.18536475556610216, "learning_rate": 4.4122510127999937e-05, "loss": 1.42, "step": 1770},
    {"epoch": 1.5103945693678404, "grad_norm": 0.18481023473586697, "learning_rate": 4.4026737579033584e-05, "loss": 1.4384, "step": 1780},
    {"epoch": 1.518879932117098, "grad_norm": 0.20863867505874642, "learning_rate": 4.393029676740864e-05, "loss": 1.4543, "step": 1790},
    {"epoch": 1.5273652948663554, "grad_norm": 0.1816036870853105, "learning_rate": 4.3833191080347575e-05, "loss": 1.434, "step": 1800},
    {"epoch": 1.5273652948663554, "eval_loss": 1.4622184038162231, "eval_runtime": 52.4041, "eval_samples_per_second": 7.27, "eval_steps_per_second": 0.916, "step": 1800},
    {"epoch": 1.535850657615613, "grad_norm": 0.19378252368958881, "learning_rate": 4.3735423928424815e-05, "loss": 1.4275, "step": 1810},
    {"epoch": 1.5443360203648706, "grad_norm": 0.20453331251433848, "learning_rate": 4.363699874544697e-05, "loss": 1.4203, "step": 1820},
    {"epoch": 1.5528213831141282, "grad_norm": 0.26684319417219377, "learning_rate": 4.3537918988332156e-05, "loss": 1.4372, "step": 1830},
    {"epoch": 1.5613067458633858, "grad_norm": 0.25745160303419773, "learning_rate": 4.343818813698868e-05, "loss": 1.4082, "step": 1840},
    {"epoch": 1.5697921086126432, "grad_norm": 0.19969727996700776, "learning_rate": 4.3337809694192765e-05, "loss": 1.4314, "step": 1850},
    {"epoch": 1.5782774713619008, "grad_norm": 0.20117210832277968, "learning_rate": 4.3236787185465525e-05, "loss": 1.4293, "step": 1860},
    {"epoch": 1.5867628341111581, "grad_norm": 0.20173003641028897, "learning_rate": 4.313512415894913e-05, "loss": 1.4406, "step": 1870},
    {"epoch": 1.5952481968604157, "grad_norm": 0.20304770794371527, "learning_rate": 4.303282418528224e-05, "loss": 1.4286, "step": 1880},
    {"epoch": 1.6037335596096733, "grad_norm": 0.19126658907738198, "learning_rate": 4.292989085747452e-05, "loss": 1.4184, "step": 1890},
    {"epoch": 1.612218922358931, "grad_norm": 0.20069554966453027, "learning_rate": 4.282632779078051e-05, "loss": 1.4133, "step": 1900},
    {"epoch": 1.6207042851081885, "grad_norm": 0.1952881519566686, "learning_rate": 4.2722138622572624e-05, "loss": 1.4432, "step": 1910},
    {"epoch": 1.629189647857446, "grad_norm": 0.19763704668680288, "learning_rate": 4.261732701221339e-05, "loss": 1.3921, "step": 1920},
    {"epoch": 1.6376750106067033,
|
1381 |
+
"grad_norm": 0.19821464294464497,
|
1382 |
+
"learning_rate": 4.2511896640926925e-05,
|
1383 |
+
"loss": 1.4454,
|
1384 |
+
"step": 1930
|
1385 |
+
},
|
1386 |
+
{
|
1387 |
+
"epoch": 1.6461603733559609,
|
1388 |
+
"grad_norm": 0.20456545626297834,
|
1389 |
+
"learning_rate": 4.240585121166966e-05,
|
1390 |
+
"loss": 1.4147,
|
1391 |
+
"step": 1940
|
1392 |
+
},
|
1393 |
+
{
|
1394 |
+
"epoch": 1.6546457361052185,
|
1395 |
+
"grad_norm": 0.2119092529186395,
|
1396 |
+
"learning_rate": 4.229919444900027e-05,
|
1397 |
+
"loss": 1.3969,
|
1398 |
+
"step": 1950
|
1399 |
+
},
|
1400 |
+
{
|
1401 |
+
"epoch": 1.663131098854476,
|
1402 |
+
"grad_norm": 0.20330157582122357,
|
1403 |
+
"learning_rate": 4.2191930098948865e-05,
|
1404 |
+
"loss": 1.426,
|
1405 |
+
"step": 1960
|
1406 |
+
},
|
1407 |
+
{
|
1408 |
+
"epoch": 1.6716164616037337,
|
1409 |
+
"grad_norm": 0.21761164739298738,
|
1410 |
+
"learning_rate": 4.2084061928885406e-05,
|
1411 |
+
"loss": 1.4246,
|
1412 |
+
"step": 1970
|
1413 |
+
},
|
1414 |
+
{
|
1415 |
+
"epoch": 1.680101824352991,
|
1416 |
+
"grad_norm": 0.19331588142071401,
|
1417 |
+
"learning_rate": 4.197559372738741e-05,
|
1418 |
+
"loss": 1.4305,
|
1419 |
+
"step": 1980
|
1420 |
+
},
|
1421 |
+
{
|
1422 |
+
"epoch": 1.6885871871022486,
|
1423 |
+
"grad_norm": 0.20188460724329996,
|
1424 |
+
"learning_rate": 4.186652930410685e-05,
|
1425 |
+
"loss": 1.4153,
|
1426 |
+
"step": 1990
|
1427 |
+
},
|
1428 |
+
{
|
1429 |
+
"epoch": 1.697072549851506,
|
1430 |
+
"grad_norm": 0.20988950033571588,
|
1431 |
+
"learning_rate": 4.1756872489636425e-05,
|
1432 |
+
"loss": 1.3894,
|
1433 |
+
"step": 2000
|
1434 |
+
},
|
1435 |
+
{
|
1436 |
+
"epoch": 1.7055579126007636,
|
1437 |
+
"grad_norm": 0.1966475893123187,
|
1438 |
+
"learning_rate": 4.1646627135374916e-05,
|
1439 |
+
"loss": 1.3962,
|
1440 |
+
"step": 2010
|
1441 |
+
},
|
1442 |
+
{
|
1443 |
+
"epoch": 1.7140432753500212,
|
1444 |
+
"grad_norm": 0.20785207367991768,
|
1445 |
+
"learning_rate": 4.1535797113392004e-05,
|
1446 |
+
"loss": 1.4037,
|
1447 |
+
"step": 2020
|
1448 |
+
},
|
1449 |
+
{
|
1450 |
+
"epoch": 1.7225286380992788,
|
1451 |
+
"grad_norm": 0.2029940281663133,
|
1452 |
+
"learning_rate": 4.1424386316292224e-05,
|
1453 |
+
"loss": 1.4011,
|
1454 |
+
"step": 2030
|
1455 |
+
},
|
1456 |
+
{
|
1457 |
+
"epoch": 1.7310140008485364,
|
1458 |
+
"grad_norm": 0.2247844551379277,
|
1459 |
+
"learning_rate": 4.131239865707829e-05,
|
1460 |
+
"loss": 1.4084,
|
1461 |
+
"step": 2040
|
1462 |
+
},
|
1463 |
+
{
|
1464 |
+
"epoch": 1.7394993635977938,
|
1465 |
+
"grad_norm": 0.20900441746105022,
|
1466 |
+
"learning_rate": 4.11998380690136e-05,
|
1467 |
+
"loss": 1.4235,
|
1468 |
+
"step": 2050
|
1469 |
+
},
|
1470 |
+
{
|
1471 |
+
"epoch": 1.7479847263470514,
|
1472 |
+
"grad_norm": 0.20362408546889926,
|
1473 |
+
"learning_rate": 4.108670850548416e-05,
|
1474 |
+
"loss": 1.4204,
|
1475 |
+
"step": 2060
|
1476 |
+
},
|
1477 |
+
{
|
1478 |
+
"epoch": 1.7564700890963088,
|
1479 |
+
"grad_norm": 0.22281567946240438,
|
1480 |
+
"learning_rate": 4.097301393985968e-05,
|
1481 |
+
"loss": 1.4023,
|
1482 |
+
"step": 2070
|
1483 |
+
},
|
1484 |
+
{
|
1485 |
+
"epoch": 1.7649554518455663,
|
1486 |
+
"grad_norm": 0.20867113178797225,
|
1487 |
+
"learning_rate": 4.085875836535404e-05,
|
1488 |
+
"loss": 1.3895,
|
1489 |
+
"step": 2080
|
1490 |
+
},
|
1491 |
+
{
|
1492 |
+
"epoch": 1.773440814594824,
|
1493 |
+
"grad_norm": 0.22113231886160947,
|
1494 |
+
"learning_rate": 4.0743945794885063e-05,
|
1495 |
+
"loss": 1.3963,
|
1496 |
+
"step": 2090
|
1497 |
+
},
|
1498 |
+
{
|
1499 |
+
"epoch": 1.7819261773440815,
|
1500 |
+
"grad_norm": 0.22334563577844263,
|
1501 |
+
"learning_rate": 4.062858026093351e-05,
|
1502 |
+
"loss": 1.3988,
|
1503 |
+
"step": 2100
|
1504 |
+
},
|
1505 |
+
{
|
1506 |
+
"epoch": 1.7904115400933391,
|
1507 |
+
"grad_norm": 0.23218581668265403,
|
1508 |
+
"learning_rate": 4.051266581540152e-05,
|
1509 |
+
"loss": 1.4068,
|
1510 |
+
"step": 2110
|
1511 |
+
},
|
1512 |
+
{
|
1513 |
+
"epoch": 1.7988969028425965,
|
1514 |
+
"grad_norm": 0.20295589384571033,
|
1515 |
+
"learning_rate": 4.0396206529470234e-05,
|
1516 |
+
"loss": 1.3883,
|
1517 |
+
"step": 2120
|
1518 |
+
},
|
1519 |
+
{
|
1520 |
+
"epoch": 1.8073822655918539,
|
1521 |
+
"grad_norm": 0.22861611442392848,
|
1522 |
+
"learning_rate": 4.027920649345687e-05,
|
1523 |
+
"loss": 1.4043,
|
1524 |
+
"step": 2130
|
1525 |
+
},
|
1526 |
+
{
|
1527 |
+
"epoch": 1.8158676283411115,
|
1528 |
+
"grad_norm": 0.2083012771089638,
|
1529 |
+
"learning_rate": 4.0161669816671e-05,
|
1530 |
+
"loss": 1.398,
|
1531 |
+
"step": 2140
|
1532 |
+
},
|
1533 |
+
{
|
1534 |
+
"epoch": 1.824352991090369,
|
1535 |
+
"grad_norm": 0.21936173231840464,
|
1536 |
+
"learning_rate": 4.004360062727028e-05,
|
1537 |
+
"loss": 1.4142,
|
1538 |
+
"step": 2150
|
1539 |
+
},
|
1540 |
+
{
|
1541 |
+
"epoch": 1.8328383538396267,
|
1542 |
+
"grad_norm": 0.21383435796328337,
|
1543 |
+
"learning_rate": 3.9925003072115406e-05,
|
1544 |
+
"loss": 1.4138,
|
1545 |
+
"step": 2160
|
1546 |
+
},
|
1547 |
+
{
|
1548 |
+
"epoch": 1.8413237165888843,
|
1549 |
+
"grad_norm": 0.23301608248270392,
|
1550 |
+
"learning_rate": 3.9805881316624506e-05,
|
1551 |
+
"loss": 1.4195,
|
1552 |
+
"step": 2170
|
1553 |
+
},
|
1554 |
+
{
|
1555 |
+
"epoch": 1.8498090793381419,
|
1556 |
+
"grad_norm": 0.22424766656883474,
|
1557 |
+
"learning_rate": 3.968623954462681e-05,
|
1558 |
+
"loss": 1.4011,
|
1559 |
+
"step": 2180
|
1560 |
+
},
|
1561 |
+
{
|
1562 |
+
"epoch": 1.8582944420873992,
|
1563 |
+
"grad_norm": 0.21286417342881453,
|
1564 |
+
"learning_rate": 3.9566081958215734e-05,
|
1565 |
+
"loss": 1.409,
|
1566 |
+
"step": 2190
|
1567 |
+
},
|
1568 |
+
{
|
1569 |
+
"epoch": 1.8667798048366566,
|
1570 |
+
"grad_norm": 0.21944800687444807,
|
1571 |
+
"learning_rate": 3.9445412777601284e-05,
|
1572 |
+
"loss": 1.3877,
|
1573 |
+
"step": 2200
|
1574 |
+
},
|
1575 |
+
{
|
1576 |
+
"epoch": 1.8752651675859142,
|
1577 |
+
"grad_norm": 0.23113173625974803,
|
1578 |
+
"learning_rate": 3.932423624096181e-05,
|
1579 |
+
"loss": 1.4089,
|
1580 |
+
"step": 2210
|
1581 |
+
},
|
1582 |
+
{
|
1583 |
+
"epoch": 1.8837505303351718,
|
1584 |
+
"grad_norm": 0.2081941699587778,
|
1585 |
+
"learning_rate": 3.920255660429517e-05,
|
1586 |
+
"loss": 1.4024,
|
1587 |
+
"step": 2220
|
1588 |
+
},
|
1589 |
+
{
|
1590 |
+
"epoch": 1.8922358930844294,
|
1591 |
+
"grad_norm": 0.2188685806654701,
|
1592 |
+
"learning_rate": 3.908037814126927e-05,
|
1593 |
+
"loss": 1.3878,
|
1594 |
+
"step": 2230
|
1595 |
+
},
|
1596 |
+
{
|
1597 |
+
"epoch": 1.900721255833687,
|
1598 |
+
"grad_norm": 0.22761843244757962,
|
1599 |
+
"learning_rate": 3.895770514307193e-05,
|
1600 |
+
"loss": 1.4004,
|
1601 |
+
"step": 2240
|
1602 |
+
},
|
1603 |
+
{
|
1604 |
+
"epoch": 1.9092066185829444,
|
1605 |
+
"grad_norm": 0.23309183623120422,
|
1606 |
+
"learning_rate": 3.883454191826017e-05,
|
1607 |
+
"loss": 1.4188,
|
1608 |
+
"step": 2250
|
1609 |
+
},
|
1610 |
+
{
|
1611 |
+
"epoch": 1.917691981332202,
|
1612 |
+
"grad_norm": 0.20329785843911802,
|
1613 |
+
"learning_rate": 3.871089279260891e-05,
|
1614 |
+
"loss": 1.3893,
|
1615 |
+
"step": 2260
|
1616 |
+
},
|
1617 |
+
{
|
1618 |
+
"epoch": 1.9261773440814594,
|
1619 |
+
"grad_norm": 0.23470973193726366,
|
1620 |
+
"learning_rate": 3.8586762108958995e-05,
|
1621 |
+
"loss": 1.3974,
|
1622 |
+
"step": 2270
|
1623 |
+
},
|
1624 |
+
{
|
1625 |
+
"epoch": 1.934662706830717,
|
1626 |
+
"grad_norm": 0.22779136837044714,
|
1627 |
+
"learning_rate": 3.8462154227064725e-05,
|
1628 |
+
"loss": 1.4115,
|
1629 |
+
"step": 2280
|
1630 |
+
},
|
1631 |
+
{
|
1632 |
+
"epoch": 1.9431480695799745,
|
1633 |
+
"grad_norm": 0.22338952315651892,
|
1634 |
+
"learning_rate": 3.833707352344068e-05,
|
1635 |
+
"loss": 1.3873,
|
1636 |
+
"step": 2290
|
1637 |
+
},
|
1638 |
+
{
|
1639 |
+
"epoch": 1.9516334323292321,
|
1640 |
+
"grad_norm": 0.23069304025882129,
|
1641 |
+
"learning_rate": 3.821152439120801e-05,
|
1642 |
+
"loss": 1.3944,
|
1643 |
+
"step": 2300
|
1644 |
+
},
|
1645 |
+
{
|
1646 |
+
"epoch": 1.9601187950784897,
|
1647 |
+
"grad_norm": 0.23590596270163203,
|
1648 |
+
"learning_rate": 3.808551123994018e-05,
|
1649 |
+
"loss": 1.3857,
|
1650 |
+
"step": 2310
|
1651 |
+
},
|
1652 |
+
{
|
1653 |
+
"epoch": 1.9686041578277471,
|
1654 |
+
"grad_norm": 0.22545661808214923,
|
1655 |
+
"learning_rate": 3.795903849550805e-05,
|
1656 |
+
"loss": 1.3628,
|
1657 |
+
"step": 2320
|
1658 |
+
},
|
1659 |
+
{
|
1660 |
+
"epoch": 1.9770895205770047,
|
1661 |
+
"grad_norm": 0.2450769875954842,
|
1662 |
+
"learning_rate": 3.7832110599924455e-05,
|
1663 |
+
"loss": 1.4079,
|
1664 |
+
"step": 2330
|
1665 |
+
},
|
1666 |
+
{
|
1667 |
+
"epoch": 1.985574883326262,
|
1668 |
+
"grad_norm": 0.22931499326784313,
|
1669 |
+
"learning_rate": 3.7704732011188166e-05,
|
1670 |
+
"loss": 1.379,
|
1671 |
+
"step": 2340
|
1672 |
+
},
|
1673 |
+
{
|
1674 |
+
"epoch": 1.9940602460755197,
|
1675 |
+
"grad_norm": 0.22417244507397657,
|
1676 |
+
"learning_rate": 3.7576907203127346e-05,
|
1677 |
+
"loss": 1.4035,
|
1678 |
+
"step": 2350
|
1679 |
+
},
|
1680 |
+
{
|
1681 |
+
"epoch": 2.0025456088247773,
|
1682 |
+
"grad_norm": 0.24496197221575314,
|
1683 |
+
"learning_rate": 3.7448640665242406e-05,
|
1684 |
+
"loss": 1.442,
|
1685 |
+
"step": 2360
|
1686 |
+
},
|
1687 |
+
{
|
1688 |
+
"epoch": 2.011030971574035,
|
1689 |
+
"grad_norm": 0.2532740296990078,
|
1690 |
+
"learning_rate": 3.73199369025483e-05,
|
1691 |
+
"loss": 1.2672,
|
1692 |
+
"step": 2370
|
1693 |
+
},
|
1694 |
+
{
|
1695 |
+
"epoch": 2.0195163343232925,
|
1696 |
+
"grad_norm": 0.2890155987968593,
|
1697 |
+
"learning_rate": 3.7190800435416355e-05,
|
1698 |
+
"loss": 1.246,
|
1699 |
+
"step": 2380
|
1700 |
+
},
|
1701 |
+
{
|
1702 |
+
"epoch": 2.02800169707255,
|
1703 |
+
"grad_norm": 0.2541972565696406,
|
1704 |
+
"learning_rate": 3.706123579941545e-05,
|
1705 |
+
"loss": 1.2603,
|
1706 |
+
"step": 2390
|
1707 |
+
},
|
1708 |
+
{
|
1709 |
+
"epoch": 2.036487059821807,
|
1710 |
+
"grad_norm": 0.2530140862527023,
|
1711 |
+
"learning_rate": 3.693124754515272e-05,
|
1712 |
+
"loss": 1.2638,
|
1713 |
+
"step": 2400
|
1714 |
+
},
|
1715 |
+
{
|
1716 |
+
"epoch": 2.036487059821807,
|
1717 |
+
"eval_loss": 1.435962438583374,
|
1718 |
+
"eval_runtime": 52.582,
|
1719 |
+
"eval_samples_per_second": 7.246,
|
1720 |
+
"eval_steps_per_second": 0.913,
|
1721 |
+
"step": 2400
|
1722 |
+
},
|
1723 |
+
{
|
1724 |
+
"epoch": 2.044972422571065,
|
1725 |
+
"grad_norm": 0.25100458343337734,
|
1726 |
+
"learning_rate": 3.680084023811377e-05,
|
1727 |
+
"loss": 1.2711,
|
1728 |
+
"step": 2410
|
1729 |
+
},
|
1730 |
+
{
|
1731 |
+
"epoch": 2.0534577853203224,
|
1732 |
+
"grad_norm": 0.2695727673292618,
|
1733 |
+
"learning_rate": 3.66700184585023e-05,
|
1734 |
+
"loss": 1.2578,
|
1735 |
+
"step": 2420
|
1736 |
+
},
|
1737 |
+
{
|
1738 |
+
"epoch": 2.06194314806958,
|
1739 |
+
"grad_norm": 0.2605068415443213,
|
1740 |
+
"learning_rate": 3.6538786801079226e-05,
|
1741 |
+
"loss": 1.2506,
|
1742 |
+
"step": 2430
|
1743 |
+
},
|
1744 |
+
{
|
1745 |
+
"epoch": 2.0704285108188376,
|
1746 |
+
"grad_norm": 0.27415607207865045,
|
1747 |
+
"learning_rate": 3.64071498750013e-05,
|
1748 |
+
"loss": 1.2852,
|
1749 |
+
"step": 2440
|
1750 |
+
},
|
1751 |
+
{
|
1752 |
+
"epoch": 2.078913873568095,
|
1753 |
+
"grad_norm": 0.2688900338206285,
|
1754 |
+
"learning_rate": 3.627511230365928e-05,
|
1755 |
+
"loss": 1.2695,
|
1756 |
+
"step": 2450
|
1757 |
+
},
|
1758 |
+
{
|
1759 |
+
"epoch": 2.0873992363173524,
|
1760 |
+
"grad_norm": 0.2750825805336503,
|
1761 |
+
"learning_rate": 3.614267872451546e-05,
|
1762 |
+
"loss": 1.2643,
|
1763 |
+
"step": 2460
|
1764 |
+
},
|
1765 |
+
{
|
1766 |
+
"epoch": 2.09588459906661,
|
1767 |
+
"grad_norm": 0.2659269066581903,
|
1768 |
+
"learning_rate": 3.600985378894086e-05,
|
1769 |
+
"loss": 1.2868,
|
1770 |
+
"step": 2470
|
1771 |
+
},
|
1772 |
+
{
|
1773 |
+
"epoch": 2.1043699618158676,
|
1774 |
+
"grad_norm": 0.24411151291321526,
|
1775 |
+
"learning_rate": 3.587664216205183e-05,
|
1776 |
+
"loss": 1.2571,
|
1777 |
+
"step": 2480
|
1778 |
+
},
|
1779 |
+
{
|
1780 |
+
"epoch": 2.112855324565125,
|
1781 |
+
"grad_norm": 0.2574194755634052,
|
1782 |
+
"learning_rate": 3.574304852254621e-05,
|
1783 |
+
"loss": 1.2769,
|
1784 |
+
"step": 2490
|
1785 |
+
},
|
1786 |
+
{
|
1787 |
+
"epoch": 2.1213406873143827,
|
1788 |
+
"grad_norm": 0.2894545074998905,
|
1789 |
+
"learning_rate": 3.5609077562538997e-05,
|
1790 |
+
"loss": 1.2469,
|
1791 |
+
"step": 2500
|
1792 |
+
},
|
1793 |
+
{
|
1794 |
+
"epoch": 2.1298260500636403,
|
1795 |
+
"grad_norm": 0.2828176429904294,
|
1796 |
+
"learning_rate": 3.547473398739754e-05,
|
1797 |
+
"loss": 1.2527,
|
1798 |
+
"step": 2510
|
1799 |
+
},
|
1800 |
+
{
|
1801 |
+
"epoch": 2.138311412812898,
|
1802 |
+
"grad_norm": 0.25886029771650565,
|
1803 |
+
"learning_rate": 3.5340022515576294e-05,
|
1804 |
+
"loss": 1.2578,
|
1805 |
+
"step": 2520
|
1806 |
+
},
|
1807 |
+
{
|
1808 |
+
"epoch": 2.146796775562155,
|
1809 |
+
"grad_norm": 0.2783799371621383,
|
1810 |
+
"learning_rate": 3.52049478784511e-05,
|
1811 |
+
"loss": 1.2489,
|
1812 |
+
"step": 2530
|
1813 |
+
},
|
1814 |
+
{
|
1815 |
+
"epoch": 2.1552821383114127,
|
1816 |
+
"grad_norm": 0.2753116113218978,
|
1817 |
+
"learning_rate": 3.506951482015297e-05,
|
1818 |
+
"loss": 1.275,
|
1819 |
+
"step": 2540
|
1820 |
+
},
|
1821 |
+
{
|
1822 |
+
"epoch": 2.1637675010606703,
|
1823 |
+
"grad_norm": 0.28115792079727675,
|
1824 |
+
"learning_rate": 3.493372809740152e-05,
|
1825 |
+
"loss": 1.2554,
|
1826 |
+
"step": 2550
|
1827 |
+
},
|
1828 |
+
{
|
1829 |
+
"epoch": 2.172252863809928,
|
1830 |
+
"grad_norm": 0.27954425325951715,
|
1831 |
+
"learning_rate": 3.479759247933785e-05,
|
1832 |
+
"loss": 1.2618,
|
1833 |
+
"step": 2560
|
1834 |
+
},
|
1835 |
+
{
|
1836 |
+
"epoch": 2.1807382265591855,
|
1837 |
+
"grad_norm": 0.27555174232347995,
|
1838 |
+
"learning_rate": 3.466111274735707e-05,
|
1839 |
+
"loss": 1.2598,
|
1840 |
+
"step": 2570
|
1841 |
+
},
|
1842 |
+
{
|
1843 |
+
"epoch": 2.189223589308443,
|
1844 |
+
"grad_norm": 0.27280827991301104,
|
1845 |
+
"learning_rate": 3.452429369494037e-05,
|
1846 |
+
"loss": 1.262,
|
1847 |
+
"step": 2580
|
1848 |
+
},
|
1849 |
+
{
|
1850 |
+
"epoch": 2.1977089520577007,
|
1851 |
+
"grad_norm": 0.2749685805551003,
|
1852 |
+
"learning_rate": 3.438714012748664e-05,
|
1853 |
+
"loss": 1.2683,
|
1854 |
+
"step": 2590
|
1855 |
+
},
|
1856 |
+
{
|
1857 |
+
"epoch": 2.206194314806958,
|
1858 |
+
"grad_norm": 0.2780594302788235,
|
1859 |
+
"learning_rate": 3.424965686214371e-05,
|
1860 |
+
"loss": 1.2462,
|
1861 |
+
"step": 2600
|
1862 |
+
},
|
1863 |
+
{
|
1864 |
+
"epoch": 2.2146796775562154,
|
1865 |
+
"grad_norm": 0.2942257416636676,
|
1866 |
+
"learning_rate": 3.411184872763915e-05,
|
1867 |
+
"loss": 1.2581,
|
1868 |
+
"step": 2610
|
1869 |
+
},
|
1870 |
+
{
|
1871 |
+
"epoch": 2.223165040305473,
|
1872 |
+
"grad_norm": 0.27000377333423803,
|
1873 |
+
"learning_rate": 3.39737205641107e-05,
|
1874 |
+
"loss": 1.2412,
|
1875 |
+
"step": 2620
|
1876 |
+
},
|
1877 |
+
{
|
1878 |
+
"epoch": 2.2316504030547306,
|
1879 |
+
"grad_norm": 0.28187507810449336,
|
1880 |
+
"learning_rate": 3.383527722293622e-05,
|
1881 |
+
"loss": 1.2659,
|
1882 |
+
"step": 2630
|
1883 |
+
},
|
1884 |
+
{
|
1885 |
+
"epoch": 2.240135765803988,
|
1886 |
+
"grad_norm": 0.2736213940552268,
|
1887 |
+
"learning_rate": 3.369652356656336e-05,
|
1888 |
+
"loss": 1.2553,
|
1889 |
+
"step": 2640
|
1890 |
+
},
|
1891 |
+
{
|
1892 |
+
"epoch": 2.248621128553246,
|
1893 |
+
"grad_norm": 0.29698834543438446,
|
1894 |
+
"learning_rate": 3.355746446833873e-05,
|
1895 |
+
"loss": 1.2714,
|
1896 |
+
"step": 2650
|
1897 |
+
},
|
1898 |
+
{
|
1899 |
+
"epoch": 2.257106491302503,
|
1900 |
+
"grad_norm": 0.2875128112484735,
|
1901 |
+
"learning_rate": 3.3418104812336786e-05,
|
1902 |
+
"loss": 1.2508,
|
1903 |
+
"step": 2660
|
1904 |
+
},
|
1905 |
+
{
|
1906 |
+
"epoch": 2.2655918540517606,
|
1907 |
+
"grad_norm": 0.3016647299373059,
|
1908 |
+
"learning_rate": 3.327844949318824e-05,
|
1909 |
+
"loss": 1.2451,
|
1910 |
+
"step": 2670
|
1911 |
+
},
|
1912 |
+
{
|
1913 |
+
"epoch": 2.274077216801018,
|
1914 |
+
"grad_norm": 0.27371321581702696,
|
1915 |
+
"learning_rate": 3.3138503415908176e-05,
|
1916 |
+
"loss": 1.2467,
|
1917 |
+
"step": 2680
|
1918 |
+
},
|
1919 |
+
{
|
1920 |
+
"epoch": 2.2825625795502757,
|
1921 |
+
"grad_norm": 0.28374547760120017,
|
1922 |
+
"learning_rate": 3.299827149572376e-05,
|
1923 |
+
"loss": 1.2452,
|
1924 |
+
"step": 2690
|
1925 |
+
},
|
1926 |
+
{
|
1927 |
+
"epoch": 2.2910479422995333,
|
1928 |
+
"grad_norm": 0.2805999278165284,
|
1929 |
+
"learning_rate": 3.285775865790166e-05,
|
1930 |
+
"loss": 1.2595,
|
1931 |
+
"step": 2700
|
1932 |
+
},
|
1933 |
+
{
|
1934 |
+
"epoch": 2.299533305048791,
|
1935 |
+
"grad_norm": 0.2758019804125597,
|
1936 |
+
"learning_rate": 3.271696983757496e-05,
|
1937 |
+
"loss": 1.2583,
|
1938 |
+
"step": 2710
|
1939 |
+
},
|
1940 |
+
{
|
1941 |
+
"epoch": 2.3080186677980485,
|
1942 |
+
"grad_norm": 0.27211127699988974,
|
1943 |
+
"learning_rate": 3.2575909979569906e-05,
|
1944 |
+
"loss": 1.2255,
|
1945 |
+
"step": 2720
|
1946 |
+
},
|
1947 |
+
{
|
1948 |
+
"epoch": 2.316504030547306,
|
1949 |
+
"grad_norm": 0.2741831859110416,
|
1950 |
+
"learning_rate": 3.243458403823223e-05,
|
1951 |
+
"loss": 1.2335,
|
1952 |
+
"step": 2730
|
1953 |
+
},
|
1954 |
+
{
|
1955 |
+
"epoch": 2.3249893932965633,
|
1956 |
+
"grad_norm": 0.287074507507,
|
1957 |
+
"learning_rate": 3.2292996977253075e-05,
|
1958 |
+
"loss": 1.2555,
|
1959 |
+
"step": 2740
|
1960 |
+
},
|
1961 |
+
{
|
1962 |
+
"epoch": 2.333474756045821,
|
1963 |
+
"grad_norm": 0.2760197579958247,
|
1964 |
+
"learning_rate": 3.215115376949474e-05,
|
1965 |
+
"loss": 1.2574,
|
1966 |
+
"step": 2750
|
1967 |
+
},
|
1968 |
+
{
|
1969 |
+
"epoch": 2.3419601187950785,
|
1970 |
+
"grad_norm": 0.29917391348714156,
|
1971 |
+
"learning_rate": 3.200905939681599e-05,
|
1972 |
+
"loss": 1.2232,
|
1973 |
+
"step": 2760
|
1974 |
+
},
|
1975 |
+
{
|
1976 |
+
"epoch": 2.350445481544336,
|
1977 |
+
"grad_norm": 0.2863180346672473,
|
1978 |
+
"learning_rate": 3.1866718849897044e-05,
|
1979 |
+
"loss": 1.2341,
|
1980 |
+
"step": 2770
|
1981 |
+
},
|
1982 |
+
{
|
1983 |
+
"epoch": 2.3589308442935937,
|
1984 |
+
"grad_norm": 0.2760526831444543,
|
1985 |
+
"learning_rate": 3.172413712806435e-05,
|
1986 |
+
"loss": 1.253,
|
1987 |
+
"step": 2780
|
1988 |
+
},
|
1989 |
+
{
|
1990 |
+
"epoch": 2.3674162070428513,
|
1991 |
+
"grad_norm": 0.29286413736773825,
|
1992 |
+
"learning_rate": 3.158131923911498e-05,
|
1993 |
+
"loss": 1.2617,
|
1994 |
+
"step": 2790
|
1995 |
+
},
|
1996 |
+
{
|
1997 |
+
"epoch": 2.3759015697921084,
|
1998 |
+
"grad_norm": 0.27643034174892955,
|
1999 |
+
"learning_rate": 3.143827019914072e-05,
|
2000 |
+
"loss": 1.2152,
|
2001 |
+
"step": 2800
|
2002 |
+
},
|
2003 |
+
{
|
2004 |
+
"epoch": 2.384386932541366,
|
2005 |
+
"grad_norm": 0.2939949433037669,
|
2006 |
+
"learning_rate": 3.12949950323519e-05,
|
2007 |
+
"loss": 1.2354,
|
2008 |
+
"step": 2810
|
2009 |
+
},
|
2010 |
+
{
|
2011 |
+
"epoch": 2.3928722952906236,
|
2012 |
+
"grad_norm": 0.2864245267570891,
|
2013 |
+
"learning_rate": 3.115149877090097e-05,
|
2014 |
+
"loss": 1.2447,
|
2015 |
+
"step": 2820
|
2016 |
+
},
|
2017 |
+
{
|
2018 |
+
"epoch": 2.401357658039881,
|
2019 |
+
"grad_norm": 0.2952829920235313,
|
2020 |
+
"learning_rate": 3.1007786454705724e-05,
|
2021 |
+
"loss": 1.2462,
|
2022 |
+
"step": 2830
|
2023 |
+
},
|
2024 |
+
{
|
2025 |
+
"epoch": 2.409843020789139,
|
2026 |
+
"grad_norm": 0.3032080033620836,
|
2027 |
+
"learning_rate": 3.0863863131272265e-05,
|
2028 |
+
"loss": 1.2317,
|
2029 |
+
"step": 2840
|
2030 |
+
},
|
2031 |
+
{
|
2032 |
+
"epoch": 2.4183283835383964,
|
2033 |
+
"grad_norm": 0.2678380639415362,
|
2034 |
+
"learning_rate": 3.07197338555178e-05,
|
2035 |
+
"loss": 1.2466,
|
2036 |
+
"step": 2850
|
2037 |
+
},
|
2038 |
+
{
|
2039 |
+
"epoch": 2.426813746287654,
|
2040 |
+
"grad_norm": 0.3000338098809928,
|
2041 |
+
"learning_rate": 3.0575403689593016e-05,
|
2042 |
+
"loss": 1.2469,
|
2043 |
+
"step": 2860
|
2044 |
+
},
|
2045 |
+
{
|
2046 |
+
"epoch": 2.435299109036911,
|
2047 |
+
"grad_norm": 0.2885428511714088,
|
2048 |
+
"learning_rate": 3.043087770270435e-05,
|
2049 |
+
"loss": 1.241,
|
2050 |
+
"step": 2870
|
2051 |
+
},
|
2052 |
+
{
|
2053 |
+
"epoch": 2.4437844717861688,
|
2054 |
+
"grad_norm": 0.2902606566366597,
|
2055 |
+
"learning_rate": 3.0286160970935906e-05,
|
2056 |
+
"loss": 1.2498,
|
2057 |
+
"step": 2880
|
2058 |
+
},
|
2059 |
+
{
|
2060 |
+
"epoch": 2.4522698345354264,
|
2061 |
+
"grad_norm": 0.2930924599960876,
|
2062 |
+
"learning_rate": 3.0141258577071184e-05,
|
2063 |
+
"loss": 1.2508,
|
2064 |
+
"step": 2890
|
2065 |
+
},
|
2066 |
+
{
|
2067 |
+
"epoch": 2.460755197284684,
|
2068 |
+
"grad_norm": 0.28761403953538467,
|
2069 |
+
"learning_rate": 2.9996175610414572e-05,
|
2070 |
+
"loss": 1.2379,
|
2071 |
+
"step": 2900
|
2072 |
+
},
|
2073 |
+
{
|
2074 |
+
"epoch": 2.4692405600339415,
|
2075 |
+
"grad_norm": 0.28888693356528744,
|
2076 |
+
"learning_rate": 2.9850917166612586e-05,
|
2077 |
+
"loss": 1.2383,
|
2078 |
+
"step": 2910
|
2079 |
+
},
|
2080 |
+
{
|
2081 |
+
"epoch": 2.477725922783199,
|
2082 |
+
"grad_norm": 0.29714323219094924,
|
2083 |
+
"learning_rate": 2.9705488347474896e-05,
|
2084 |
+
"loss": 1.2221,
|
2085 |
+
"step": 2920
|
2086 |
+
},
|
2087 |
+
{
|
2088 |
+
"epoch": 2.4862112855324563,
|
2089 |
+
"grad_norm": 0.3024332099011336,
|
2090 |
+
"learning_rate": 2.9559894260795144e-05,
|
2091 |
+
"loss": 1.2417,
|
2092 |
+
"step": 2930
|
2093 |
+
},
|
2094 |
+
{
|
2095 |
+
"epoch": 2.494696648281714,
|
2096 |
+
"grad_norm": 0.2900123354730048,
|
2097 |
+
"learning_rate": 2.9414140020171554e-05,
|
2098 |
+
"loss": 1.2543,
|
2099 |
+
"step": 2940
|
2100 |
+
},
|
2101 |
+
{
|
2102 |
+
"epoch": 2.5031820110309715,
|
2103 |
+
"grad_norm": 0.30122390943433014,
|
2104 |
+
"learning_rate": 2.926823074482733e-05,
|
2105 |
+
"loss": 1.2542,
|
2106 |
+
"step": 2950
|
2107 |
+
},
|
2108 |
+
{
|
2109 |
+
"epoch": 2.511667373780229,
|
2110 |
+
"grad_norm": 0.2860208265471049,
|
2111 |
+
"learning_rate": 2.912217155943083e-05,
|
2112 |
+
"loss": 1.2335,
|
2113 |
+
"step": 2960
|
2114 |
+
},
|
2115 |
+
{
|
2116 |
+
"epoch": 2.5201527365294867,
|
2117 |
+
"grad_norm": 0.28980498979259595,
|
2118 |
+
"learning_rate": 2.897596759391561e-05,
|
2119 |
+
"loss": 1.2458,
|
2120 |
+
"step": 2970
|
2121 |
+
},
|
2122 |
+
{
|
2123 |
+
"epoch": 2.5286380992787443,
|
2124 |
+
"grad_norm": 0.30074882444504475,
|
2125 |
+
"learning_rate": 2.8829623983300242e-05,
|
2126 |
+
"loss": 1.2498,
|
2127 |
+
"step": 2980
|
2128 |
+
},
|
2129 |
+
{
|
2130 |
+
"epoch": 2.537123462028002,
|
2131 |
+
"grad_norm": 0.2929721105596463,
|
2132 |
+
"learning_rate": 2.868314586750794e-05,
|
2133 |
+
"loss": 1.2686,
|
2134 |
+
"step": 2990
|
2135 |
+
},
|
2136 |
+
{
|
2137 |
+
"epoch": 2.5456088247772595,
|
2138 |
+
"grad_norm": 0.291755235343187,
|
2139 |
+
"learning_rate": 2.853653839118605e-05,
|
2140 |
+
"loss": 1.2456,
|
2141 |
+
"step": 3000
|
2142 |
+
},
|
2143 |
+
{
|
2144 |
+
"epoch": 2.5456088247772595,
|
2145 |
+
"eval_loss": 1.4051239490509033,
|
2146 |
+
"eval_runtime": 52.7875,
|
2147 |
+
"eval_samples_per_second": 7.218,
|
2148 |
+
"eval_steps_per_second": 0.909,
|
2149 |
+
"step": 3000
|
2150 |
+
},
|
2151 |
+
{
|
2152 |
+
"epoch": 2.5540941875265166,
|
2153 |
+
"grad_norm": 0.3056527705148328,
|
2154 |
+
"learning_rate": 2.8389806703525383e-05,
|
2155 |
+
"loss": 1.2321,
|
2156 |
+
"step": 3010
|
2157 |
+
},
|
2158 |
+
{
|
2159 |
+
"epoch": 2.562579550275774,
|
2160 |
+
"grad_norm": 0.29756401069688737,
|
2161 |
+
"learning_rate": 2.8242955958079303e-05,
|
2162 |
+
"loss": 1.2341,
|
2163 |
+
"step": 3020
|
2164 |
+
},
|
2165 |
+
{
|
2166 |
+
"epoch": 2.571064913025032,
|
2167 |
+
"grad_norm": 0.3077048874608071,
|
2168 |
+
"learning_rate": 2.809599131258276e-05,
|
2169 |
+
"loss": 1.2475,
|
2170 |
+
"step": 3030
|
2171 |
+
},
|
2172 |
+
{
|
2173 |
+
"epoch": 2.5795502757742894,
|
2174 |
+
"grad_norm": 0.31006088313098146,
|
2175 |
+
"learning_rate": 2.7948917928771158e-05,
|
2176 |
+
"loss": 1.2381,
|
2177 |
+
"step": 3040
|
2178 |
+
},
|
2179 |
+
{
|
2180 |
+
"epoch": 2.588035638523547,
|
2181 |
+
"grad_norm": 0.3086227102652305,
|
2182 |
+
"learning_rate": 2.7801740972199014e-05,
|
2183 |
+
"loss": 1.2386,
|
2184 |
+
"step": 3050
|
2185 |
+
},
|
2186 |
+
{
|
2187 |
+
"epoch": 2.596521001272804,
|
2188 |
+
"grad_norm": 0.2909420805400902,
|
2189 |
+
"learning_rate": 2.7654465612058573e-05,
|
2190 |
+
"loss": 1.2071,
|
2191 |
+
"step": 3060
|
2192 |
+
},
|
2193 |
+
{
|
2194 |
+
"epoch": 2.6050063640220618,
|
2195 |
+
"grad_norm": 0.30310956499188235,
|
2196 |
+
"learning_rate": 2.7507097020998246e-05,
|
2197 |
+
"loss": 1.2206,
|
2198 |
+
"step": 3070
|
2199 |
+
},
|
2200 |
+
{
|
2201 |
+
"epoch": 2.6134917267713194,
|
2202 |
+
"grad_norm": 0.2873915382033808,
|
2203 |
+
"learning_rate": 2.7359640374940904e-05,
|
2204 |
+
"loss": 1.2346,
|
2205 |
+
"step": 3080
|
2206 |
+
},
|
2207 |
+
{
|
2208 |
+
"epoch": 2.621977089520577,
|
2209 |
+
"grad_norm": 0.29404028686651285,
|
2210 |
+
"learning_rate": 2.7212100852902133e-05,
|
2211 |
+
"loss": 1.2209,
|
2212 |
+
"step": 3090
|
2213 |
+
},
|
2214 |
+
{
|
2215 |
+
"epoch": 2.6304624522698346,
|
2216 |
+
"grad_norm": 0.2967558623710032,
|
2217 |
+
"learning_rate": 2.7064483636808313e-05,
|
2218 |
+
"loss": 1.2471,
|
2219 |
+
"step": 3100
|
2220 |
+
},
|
2221 |
+
{
|
2222 |
+
"epoch": 2.638947815019092,
|
2223 |
+
"grad_norm": 0.28348844201193973,
|
2224 |
+
"learning_rate": 2.6916793911314593e-05,
|
2225 |
+
"loss": 1.2271,
|
2226 |
+
"step": 3110
|
2227 |
+
},
|
2228 |
+
{
|
2229 |
+
"epoch": 2.6474331777683497,
|
2230 |
+
"grad_norm": 0.30908939180701456,
|
2231 |
+
"learning_rate": 2.6769036863622842e-05,
|
2232 |
+
"loss": 1.2348,
|
2233 |
+
"step": 3120
|
2234 |
+
},
|
2235 |
+
{
|
2236 |
+
"epoch": 2.6559185405176073,
|
2237 |
+
"grad_norm": 0.296064586506253,
|
2238 |
+
"learning_rate": 2.6621217683299437e-05,
|
2239 |
+
"loss": 1.2118,
|
2240 |
+
"step": 3130
|
2241 |
+
},
|
2242 |
+
{
|
2243 |
+
"epoch": 2.6644039032668645,
|
2244 |
+
"grad_norm": 0.29223118946191284,
|
2245 |
+
"learning_rate": 2.647334156209299e-05,
|
2246 |
+
"loss": 1.2368,
|
2247 |
+
"step": 3140
|
2248 |
+
},
|
2249 |
+
{
|
2250 |
+
"epoch": 2.672889266016122,
|
2251 |
+
"grad_norm": 0.2974562276968823,
|
2252 |
+
"learning_rate": 2.6325413693752004e-05,
|
2253 |
+
"loss": 1.2392,
|
2254 |
+
"step": 3150
|
2255 |
+
},
|
2256 |
+
{
|
2257 |
+
"epoch": 2.6813746287653797,
|
2258 |
+
"grad_norm": 0.30862646184519243,
|
2259 |
+
"learning_rate": 2.6177439273842463e-05,
|
2260 |
+
"loss": 1.244,
|
2261 |
+
"step": 3160
|
2262 |
+
},
|
2263 |
+
{
|
2264 |
+
"epoch": 2.6898599915146373,
|
2265 |
+
"grad_norm": 0.2958164221091078,
|
2266 |
+
"learning_rate": 2.602942349956536e-05,
|
2267 |
+
"loss": 1.2377,
|
2268 |
+
"step": 3170
|
2269 |
+
},
|
2270 |
+
{
|
2271 |
+
"epoch": 2.698345354263895,
|
2272 |
+
"grad_norm": 0.2941370782364945,
|
2273 |
+
"learning_rate": 2.5881371569574125e-05,
|
2274 |
+
"loss": 1.2296,
|
2275 |
+
"step": 3180
|
2276 |
+
},
|
2277 |
+
{
|
2278 |
+
"epoch": 2.7068307170131525,
|
2279 |
+
"grad_norm": 0.2949825785995608,
|
2280 |
+
"learning_rate": 2.5733288683792084e-05,
|
2281 |
+
"loss": 1.2292,
|
2282 |
+
"step": 3190
|
2283 |
+
},
|
2284 |
+
{
|
2285 |
+
"epoch": 2.7153160797624096,
|
2286 |
+
"grad_norm": 0.3020705479686205,
|
2287 |
+
"learning_rate": 2.558518004322979e-05,
|
2288 |
+
"loss": 1.2371,
|
2289 |
+
"step": 3200
|
2290 |
+
},
|
2291 |
+
{
|
2292 |
+
"epoch": 2.7238014425116672,
|
2293 |
+
"grad_norm": 0.3134012668403533,
|
2294 |
+
"learning_rate": 2.5437050849802356e-05,
|
2295 |
+
"loss": 1.2257,
|
2296 |
+
"step": 3210
|
2297 |
+
},
|
2298 |
+
{
|
2299 |
+
"epoch": 2.732286805260925,
|
2300 |
+
"grad_norm": 0.320012175903156,
|
2301 |
+
"learning_rate": 2.528890630614677e-05,
|
2302 |
+
"loss": 1.215,
|
2303 |
+
"step": 3220
|
2304 |
+
},
|
2305 |
+
{
|
2306 |
+
"epoch": 2.7407721680101824,
|
2307 |
+
"grad_norm": 0.2936053513063533,
|
2308 |
+
"learning_rate": 2.514075161543915e-05,
|
2309 |
+
"loss": 1.2364,
|
2310 |
+
"step": 3230
|
2311 |
+
},
|
2312 |
+
{
|
2313 |
+
"epoch": 2.74925753075944,
|
2314 |
+
"grad_norm": 0.30515854484741317,
|
2315 |
+
"learning_rate": 2.499259198121201e-05,
|
2316 |
+
"loss": 1.2117,
|
2317 |
+
"step": 3240
|
2318 |
+
},
|
2319 |
+
{
|
2320 |
+
"epoch": 2.7577428935086976,
|
2321 |
+
"grad_norm": 0.29763910785937486,
|
2322 |
+
"learning_rate": 2.484443260717147e-05,
|
2323 |
+
"loss": 1.2583,
|
2324 |
+
"step": 3250
|
2325 |
+
},
|
2326 |
+
{
|
2327 |
+
"epoch": 2.766228256257955,
|
2328 |
+
"grad_norm": 0.2975895109826329,
|
2329 |
+
"learning_rate": 2.4696278697014538e-05,
|
2330 |
+
"loss": 1.2153,
|
2331 |
+
"step": 3260
|
2332 |
+
},
|
2333 |
+
{
|
2334 |
+
"epoch": 2.774713619007213,
|
2335 |
+
"grad_norm": 0.2778934357454163,
|
2336 |
+
"learning_rate": 2.4548135454246306e-05,
|
2337 |
+
"loss": 1.2291,
|
2338 |
+
"step": 3270
|
2339 |
+
},
|
2340 |
+
{
|
2341 |
+
"epoch": 2.78319898175647,
|
2342 |
+
"grad_norm": 0.32172302391314234,
|
2343 |
+
"learning_rate": 2.4400008081997196e-05,
|
2344 |
+
"loss": 1.214,
|
2345 |
+
"step": 3280
|
2346 |
+
},
|
2347 |
+
{
|
2348 |
+
"epoch": 2.7916843445057276,
|
2349 |
+
"grad_norm": 0.3008060855751439,
|
2350 |
+
"learning_rate": 2.425190178284024e-05,
|
2351 |
+
"loss": 1.234,
|
2352 |
+
"step": 3290
|
2353 |
+
},
|
2354 |
+
{
|
2355 |
+
"epoch": 2.800169707254985,
|
2356 |
+
"grad_norm": 0.3093107834664431,
|
2357 |
+
"learning_rate": 2.4103821758608307e-05,
|
2358 |
+
"loss": 1.2492,
|
2359 |
+
"step": 3300
|
2360 |
+
},
|
2361 |
+
{
|
2362 |
+
"epoch": 2.8086550700042427,
|
2363 |
+
"grad_norm": 0.30554454329766617,
|
2364 |
+
"learning_rate": 2.3955773210211465e-05,
|
2365 |
+
"loss": 1.2401,
|
2366 |
+
"step": 3310
|
2367 |
+
},
|
2368 |
+
{
|
2369 |
+
"epoch": 2.8171404327535003,
|
2370 |
+
"grad_norm": 0.2997812898388053,
|
2371 |
+
"learning_rate": 2.380776133745425e-05,
|
2372 |
+
"loss": 1.2089,
|
2373 |
+
"step": 3320
|
2374 |
+
},
|
2375 |
+
{
|
2376 |
+
"epoch": 2.8256257955027575,
|
2377 |
+
"grad_norm": 0.2962123179493644,
|
2378 |
+
"learning_rate": 2.3659791338853066e-05,
|
2379 |
+
"loss": 1.2324,
|
2380 |
+
"step": 3330
|
2381 |
+
},
|
2382 |
+
{
|
2383 |
+
"epoch": 2.834111158252015,
|
2384 |
+
"grad_norm": 0.31328021490753843,
|
2385 |
+
"learning_rate": 2.3511868411453623e-05,
|
2386 |
+
"loss": 1.214,
|
2387 |
+
"step": 3340
|
2388 |
+
},
|
2389 |
+
{
|
2390 |
+
"epoch": 2.8425965210012727,
|
2391 |
+
"grad_norm": 0.30414013736166967,
|
2392 |
+
"learning_rate": 2.3363997750648357e-05,
|
2393 |
+
"loss": 1.2142,
|
2394 |
+
"step": 3350
|
2395 |
+
},
|
2396 |
+
{
|
2397 |
+
"epoch": 2.8510818837505303,
|
2398 |
+
"grad_norm": 0.3079683108015467,
|
2399 |
+
"learning_rate": 2.3216184549994006e-05,
|
2400 |
+
"loss": 1.2137,
|
2401 |
+
"step": 3360
|
2402 |
+
},
|
2403 |
+
{
|
2404 |
+
"epoch": 2.859567246499788,
|
2405 |
+
"grad_norm": 0.31165537422962203,
|
2406 |
+
"learning_rate": 2.3068434001029173e-05,
|
2407 |
+
"loss": 1.1915,
|
2408 |
+
"step": 3370
|
2409 |
+
},
|
2410 |
+
{
|
2411 |
+
"epoch": 2.8680526092490455,
|
2412 |
+
"grad_norm": 0.3190451157121095,
|
2413 |
+
"learning_rate": 2.2920751293091948e-05,
|
2414 |
+
"loss": 1.2193,
|
2415 |
+
"step": 3380
|
2416 |
+
},
|
2417 |
+
{
|
2418 |
+
"epoch": 2.876537971998303,
|
2419 |
+
"grad_norm": 0.30247955079343214,
|
2420 |
+
"learning_rate": 2.277314161313774e-05,
|
2421 |
+
"loss": 1.2253,
|
2422 |
+
"step": 3390
|
2423 |
+
},
|
2424 |
+
{
|
2425 |
+
"epoch": 2.8850233347475607,
|
2426 |
+
"grad_norm": 0.2936629891547958,
|
2427 |
+
"learning_rate": 2.262561014555703e-05,
|
2428 |
+
"loss": 1.2136,
|
2429 |
+
"step": 3400
|
2430 |
+
},
|
2431 |
+
{
|
2432 |
+
"epoch": 2.893508697496818,
|
2433 |
+
"grad_norm": 0.3001872886250926,
|
2434 |
+
"learning_rate": 2.2478162071993298e-05,
|
2435 |
+
"loss": 1.2061,
|
2436 |
+
"step": 3410
|
2437 |
+
},
|
2438 |
+
{
|
2439 |
+
"epoch": 2.9019940602460754,
|
2440 |
+
"grad_norm": 0.3111993397471538,
|
2441 |
+
"learning_rate": 2.233080257116103e-05,
|
2442 |
+
"loss": 1.2193,
|
2443 |
+
"step": 3420
|
2444 |
+
},
|
2445 |
+
{
|
2446 |
+
"epoch": 2.910479422995333,
|
2447 |
+
"grad_norm": 0.3119972602479391,
|
2448 |
+
"learning_rate": 2.2183536818663856e-05,
|
2449 |
+
"loss": 1.2125,
|
2450 |
+
"step": 3430
|
2451 |
+
},
|
2452 |
+
{
|
2453 |
+
"epoch": 2.9189647857445906,
|
2454 |
+
"grad_norm": 0.30787460159489605,
|
2455 |
+
"learning_rate": 2.2036369986812713e-05,
|
2456 |
+
"loss": 1.215,
|
2457 |
+
"step": 3440
|
2458 |
+
},
|
2459 |
+
{
|
2460 |
+
"epoch": 2.927450148493848,
|
2461 |
+
"grad_norm": 0.3175794611301324,
|
2462 |
+
"learning_rate": 2.1889307244444252e-05,
|
2463 |
+
"loss": 1.2202,
|
2464 |
+
"step": 3450
|
2465 |
+
},
|
2466 |
+
{
|
2467 |
+
"epoch": 2.9359355112431054,
|
2468 |
+
"grad_norm": 0.2956361362699715,
|
2469 |
+
"learning_rate": 2.1742353756739247e-05,
|
2470 |
+
"loss": 1.2125,
|
2471 |
+
"step": 3460
|
2472 |
+
},
|
2473 |
+
{
|
2474 |
+
"epoch": 2.944420873992363,
|
2475 |
+
"grad_norm": 0.33942331815482624,
|
2476 |
+
"learning_rate": 2.1595514685041205e-05,
|
2477 |
+
"loss": 1.2173,
|
2478 |
+
"step": 3470
|
2479 |
+
},
|
2480 |
+
{
|
2481 |
+
"epoch": 2.9529062367416206,
|
2482 |
+
"grad_norm": 0.332180923100718,
|
2483 |
+
"learning_rate": 2.144879518667507e-05,
|
2484 |
+
"loss": 1.2266,
|
2485 |
+
"step": 3480
|
2486 |
+
},
|
2487 |
+
{
|
2488 |
+
"epoch": 2.961391599490878,
|
2489 |
+
"grad_norm": 0.3157083099981729,
|
2490 |
+
"learning_rate": 2.1302200414766123e-05,
|
2491 |
+
"loss": 1.2154,
|
2492 |
+
"step": 3490
|
2493 |
+
},
|
2494 |
+
{
|
2495 |
+
"epoch": 2.9698769622401358,
|
2496 |
+
"grad_norm": 0.3149298205272042,
|
2497 |
+
"learning_rate": 2.1155735518058914e-05,
|
2498 |
+
"loss": 1.2232,
|
2499 |
+
"step": 3500
|
2500 |
+
},
|
2501 |
+
{
|
2502 |
+
"epoch": 2.9783623249893934,
|
2503 |
+
"grad_norm": 0.3132779789709915,
|
2504 |
+
"learning_rate": 2.100940564073653e-05,
|
2505 |
+
"loss": 1.2299,
|
2506 |
+
"step": 3510
|
2507 |
+
},
|
2508 |
+
{
|
2509 |
+
"epoch": 2.986847687738651,
|
2510 |
+
"grad_norm": 0.29387121758746726,
|
2511 |
+
"learning_rate": 2.086321592223984e-05,
|
2512 |
+
"loss": 1.2219,
|
2513 |
+
"step": 3520
|
2514 |
+
},
|
2515 |
+
{
|
2516 |
+
"epoch": 2.9953330504879085,
|
2517 |
+
"grad_norm": 0.2954282506485496,
|
2518 |
+
"learning_rate": 2.0717171497087014e-05,
|
2519 |
+
"loss": 1.2321,
|
2520 |
+
"step": 3530
|
2521 |
+
},
|
2522 |
+
{
|
2523 |
+
"epoch": 3.0038184132371657,
|
2524 |
+
"grad_norm": 0.2991107469221935,
|
2525 |
+
"learning_rate": 2.057127749469321e-05,
|
2526 |
+
"loss": 1.2387,
|
2527 |
+
"step": 3540
|
2528 |
+
},
|
2529 |
+
{
|
2530 |
+
"epoch": 3.0123037759864233,
|
2531 |
+
"grad_norm": 0.3135732189536929,
|
2532 |
+
"learning_rate": 2.042553903919036e-05,
|
2533 |
+
"loss": 1.1058,
|
2534 |
+
"step": 3550
|
2535 |
+
},
|
2536 |
+
{
|
2537 |
+
"epoch": 3.020789138735681,
|
2538 |
+
"grad_norm": 0.3179199807851635,
|
2539 |
+
"learning_rate": 2.0279961249247274e-05,
|
2540 |
+
"loss": 1.0677,
|
2541 |
+
"step": 3560
|
2542 |
+
},
|
2543 |
+
{
|
2544 |
+
"epoch": 3.0292745014849385,
|
2545 |
+
"grad_norm": 0.3217398418422315,
|
2546 |
+
"learning_rate": 2.0134549237889765e-05,
|
2547 |
+
"loss": 1.0978,
|
2548 |
+
"step": 3570
|
2549 |
+
},
|
2550 |
+
{
|
2551 |
+
"epoch": 3.037759864234196,
|
2552 |
+
"grad_norm": 0.32343352041544976,
|
2553 |
+
"learning_rate": 1.9989308112321164e-05,
|
2554 |
+
"loss": 1.0791,
|
2555 |
+
"step": 3580
|
2556 |
+
},
|
2557 |
+
{
|
2558 |
+
"epoch": 3.0462452269834537,
|
2559 |
+
"grad_norm": 0.32321267638520695,
|
2560 |
+
"learning_rate": 1.9844242973742886e-05,
|
2561 |
+
"loss": 1.0991,
|
2562 |
+
"step": 3590
|
2563 |
+
},
|
2564 |
+
{
|
2565 |
+
"epoch": 3.0547305897327113,
|
2566 |
+
"grad_norm": 0.321242777420917,
|
2567 |
+
"learning_rate": 1.9699358917175297e-05,
|
2568 |
+
"loss": 1.104,
|
2569 |
+
"step": 3600
|
2570 |
+
},
|
2571 |
+
{
|
2572 |
+
"epoch": 3.0547305897327113,
|
2573 |
+
"eval_loss": 1.3918192386627197,
|
2574 |
+
"eval_runtime": 52.2671,
|
2575 |
+
"eval_samples_per_second": 7.289,
|
2576 |
+
"eval_steps_per_second": 0.918,
|
2577 |
+
"step": 3600
|
2578 |
+
},
|
2579 |
+
{
|
2580 |
+
"epoch": 3.0632159524819684,
|
2581 |
+
"grad_norm": 0.3313441880623986,
|
2582 |
+
"learning_rate": 1.9554661031278712e-05,
|
2583 |
+
"loss": 1.081,
|
2584 |
+
"step": 3610
|
2585 |
+
},
|
2586 |
+
{
|
2587 |
+
"epoch": 3.071701315231226,
|
2588 |
+
"grad_norm": 0.3866269219140372,
|
2589 |
+
"learning_rate": 1.9410154398174742e-05,
|
2590 |
+
"loss": 1.0826,
|
2591 |
+
"step": 3620
|
2592 |
+
},
|
2593 |
+
{
|
2594 |
+
"epoch": 3.0801866779804836,
|
2595 |
+
"grad_norm": 0.32719602353062216,
|
2596 |
+
"learning_rate": 1.9265844093267728e-05,
|
2597 |
+
"loss": 1.0934,
|
2598 |
+
"step": 3630
|
2599 |
+
},
|
2600 |
+
{
|
2601 |
+
"epoch": 3.088672040729741,
|
2602 |
+
"grad_norm": 0.3301678171750988,
|
2603 |
+
"learning_rate": 1.9121735185066537e-05,
|
2604 |
+
"loss": 1.1047,
|
2605 |
+
"step": 3640
|
2606 |
+
},
|
2607 |
+
{
|
2608 |
+
"epoch": 3.097157403478999,
|
2609 |
+
"grad_norm": 0.35644796539197005,
|
2610 |
+
"learning_rate": 1.8977832735006522e-05,
|
2611 |
+
"loss": 1.0994,
|
2612 |
+
"step": 3650
|
2613 |
+
},
|
2614 |
+
{
|
2615 |
+
"epoch": 3.1056427662282564,
|
2616 |
+
"grad_norm": 0.33733028940251475,
|
2617 |
+
"learning_rate": 1.8834141797271742e-05,
|
2618 |
+
"loss": 1.0972,
|
2619 |
+
"step": 3660
|
2620 |
+
},
|
2621 |
+
{
|
2622 |
+
"epoch": 3.114128128977514,
|
2623 |
+
"grad_norm": 0.32548774712269085,
|
2624 |
+
"learning_rate": 1.8690667418617462e-05,
|
2625 |
+
"loss": 1.1046,
|
2626 |
+
"step": 3670
|
2627 |
+
},
|
2628 |
+
{
|
2629 |
+
"epoch": 3.122613491726771,
|
2630 |
+
"grad_norm": 0.3117109384467469,
|
2631 |
+
"learning_rate": 1.854741463819291e-05,
|
2632 |
+
"loss": 1.0791,
|
2633 |
+
"step": 3680
|
2634 |
+
},
|
2635 |
+
{
|
2636 |
+
"epoch": 3.1310988544760288,
|
2637 |
+
"grad_norm": 0.3240987091142989,
|
2638 |
+
"learning_rate": 1.8404388487364242e-05,
|
2639 |
+
"loss": 1.0824,
|
2640 |
+
"step": 3690
|
2641 |
+
},
|
2642 |
+
{
|
2643 |
+
"epoch": 3.1395842172252864,
|
2644 |
+
"grad_norm": 0.3346401099182515,
|
2645 |
+
"learning_rate": 1.8261593989537895e-05,
|
2646 |
+
"loss": 1.0753,
|
2647 |
+
"step": 3700
|
2648 |
+
},
|
2649 |
+
{
|
2650 |
+
"epoch": 3.148069579974544,
|
2651 |
+
"grad_norm": 0.3270030472928521,
|
2652 |
+
"learning_rate": 1.81190361599841e-05,
|
2653 |
+
"loss": 1.0934,
|
2654 |
+
"step": 3710
|
2655 |
+
},
|
2656 |
+
{
|
2657 |
+
"epoch": 3.1565549427238015,
|
2658 |
+
"grad_norm": 0.34129438803355183,
|
2659 |
+
"learning_rate": 1.797672000566077e-05,
|
2660 |
+
"loss": 1.0766,
|
2661 |
+
"step": 3720
|
2662 |
+
},
|
2663 |
+
{
|
2664 |
+
"epoch": 3.165040305473059,
|
2665 |
+
"grad_norm": 0.37057591586243926,
|
2666 |
+
"learning_rate": 1.783465052503762e-05,
|
2667 |
+
"loss": 1.1049,
|
2668 |
+
"step": 3730
|
2669 |
+
},
|
2670 |
+
{
|
2671 |
+
"epoch": 3.1735256682223163,
|
2672 |
+
"grad_norm": 0.3331237337555744,
|
2673 |
+
"learning_rate": 1.769283270792065e-05,
|
2674 |
+
"loss": 1.0876,
|
2675 |
+
"step": 3740
|
2676 |
+
},
|
2677 |
+
{
|
2678 |
+
"epoch": 3.182011030971574,
|
2679 |
+
"grad_norm": 0.31904083541369294,
|
2680 |
+
"learning_rate": 1.7551271535276792e-05,
|
2681 |
+
"loss": 1.1206,
|
2682 |
+
"step": 3750
|
2683 |
+
},
|
2684 |
+
{
|
2685 |
+
"epoch": 3.1904963937208315,
|
2686 |
+
"grad_norm": 0.35953020953263576,
|
2687 |
+
"learning_rate": 1.74099719790591e-05,
|
2688 |
+
"loss": 1.0736,
|
2689 |
+
"step": 3760
|
2690 |
+
},
|
2691 |
+
{
|
2692 |
+
"epoch": 3.198981756470089,
|
2693 |
+
"grad_norm": 0.33595544857573634,
|
2694 |
+
"learning_rate": 1.7268939002032035e-05,
|
2695 |
+
"loss": 1.0969,
|
2696 |
+
"step": 3770
|
2697 |
+
},
|
2698 |
+
{
|
2699 |
+
"epoch": 3.2074671192193467,
|
2700 |
+
"grad_norm": 0.34180259956288195,
|
2701 |
+
"learning_rate": 1.7128177557597185e-05,
|
2702 |
+
"loss": 1.0972,
|
2703 |
+
"step": 3780
|
2704 |
+
},
|
2705 |
+
{
|
2706 |
+
"epoch": 3.2159524819686043,
|
2707 |
+
"grad_norm": 0.3268247890892541,
|
2708 |
+
"learning_rate": 1.6987692589619304e-05,
|
2709 |
+
"loss": 1.0737,
|
2710 |
+
"step": 3790
|
2711 |
+
},
|
2712 |
+
{
|
2713 |
+
"epoch": 3.224437844717862,
|
2714 |
+
"grad_norm": 0.3207792660130559,
|
2715 |
+
"learning_rate": 1.6847489032252627e-05,
|
2716 |
+
"loss": 1.0797,
|
2717 |
+
"step": 3800
|
2718 |
+
},
|
2719 |
+
{
|
2720 |
+
"epoch": 3.232923207467119,
|
2721 |
+
"grad_norm": 0.3467270107463577,
|
2722 |
+
"learning_rate": 1.6707571809767644e-05,
|
2723 |
+
"loss": 1.1024,
|
2724 |
+
"step": 3810
|
2725 |
+
},
|
2726 |
+
{
|
2727 |
+
"epoch": 3.2414085702163766,
|
2728 |
+
"grad_norm": 0.35579083917156773,
|
2729 |
+
"learning_rate": 1.656794583637807e-05,
|
2730 |
+
"loss": 1.1026,
|
2731 |
+
"step": 3820
|
2732 |
+
},
|
2733 |
+
{
|
2734 |
+
"epoch": 3.2498939329656342,
|
2735 |
+
"grad_norm": 0.3339161504484877,
|
2736 |
+
"learning_rate": 1.6428616016068304e-05,
|
2737 |
+
"loss": 1.0866,
|
2738 |
+
"step": 3830
|
2739 |
+
},
|
2740 |
+
{
|
2741 |
+
"epoch": 3.258379295714892,
|
2742 |
+
"grad_norm": 0.34920082505086103,
|
2743 |
+
"learning_rate": 1.628958724242117e-05,
|
2744 |
+
"loss": 1.0789,
|
2745 |
+
"step": 3840
|
2746 |
+
},
|
2747 |
+
{
|
2748 |
+
"epoch": 3.2668646584641494,
|
2749 |
+
"grad_norm": 0.34847114740056345,
|
2750 |
+
"learning_rate": 1.615086439844604e-05,
|
2751 |
+
"loss": 1.0849,
|
2752 |
+
"step": 3850
|
2753 |
+
},
|
2754 |
+
{
|
2755 |
+
"epoch": 3.275350021213407,
|
2756 |
+
"grad_norm": 0.3164317420955498,
|
2757 |
+
"learning_rate": 1.601245235640733e-05,
|
2758 |
+
"loss": 1.0984,
|
2759 |
+
"step": 3860
|
2760 |
+
},
|
2761 |
+
{
|
2762 |
+
"epoch": 3.283835383962664,
|
2763 |
+
"grad_norm": 0.32036510223283066,
|
2764 |
+
"learning_rate": 1.5874355977653392e-05,
|
2765 |
+
"loss": 1.1122,
|
2766 |
+
"step": 3870
|
2767 |
+
},
|
2768 |
+
{
|
2769 |
+
"epoch": 3.2923207467119218,
|
2770 |
+
"grad_norm": 0.3618051784441363,
|
2771 |
+
"learning_rate": 1.5736580112445738e-05,
|
2772 |
+
"loss": 1.0942,
|
2773 |
+
"step": 3880
|
2774 |
+
},
|
2775 |
+
{
|
2776 |
+
"epoch": 3.3008061094611794,
|
2777 |
+
"grad_norm": 0.3376776049321294,
|
2778 |
+
"learning_rate": 1.559912959978872e-05,
|
2779 |
+
"loss": 1.0898,
|
2780 |
+
"step": 3890
|
2781 |
+
},
|
2782 |
+
{
|
2783 |
+
"epoch": 3.309291472210437,
|
2784 |
+
"grad_norm": 0.3402374603685398,
|
2785 |
+
"learning_rate": 1.546200926725958e-05,
|
2786 |
+
"loss": 1.1061,
|
2787 |
+
"step": 3900
|
2788 |
+
},
|
2789 |
+
{
|
2790 |
+
"epoch": 3.3177768349596946,
|
2791 |
+
"grad_norm": 0.32072793842909514,
|
2792 |
+
"learning_rate": 1.5325223930838838e-05,
|
2793 |
+
"loss": 1.0995,
|
2794 |
+
"step": 3910
|
2795 |
+
},
|
2796 |
+
{
|
2797 |
+
"epoch": 3.326262197708952,
|
2798 |
+
"grad_norm": 0.336633795191635,
|
2799 |
+
"learning_rate": 1.518877839474122e-05,
|
2800 |
+
"loss": 1.0971,
|
2801 |
+
"step": 3920
|
2802 |
+
},
|
2803 |
+
{
|
2804 |
+
"epoch": 3.3347475604582097,
|
2805 |
+
"grad_norm": 0.36554031780441654,
|
2806 |
+
"learning_rate": 1.5052677451246877e-05,
|
2807 |
+
"loss": 1.0867,
|
2808 |
+
"step": 3930
|
2809 |
+
},
|
2810 |
+
{
|
2811 |
+
"epoch": 3.3432329232074673,
|
2812 |
+
"grad_norm": 0.3569408014521597,
|
2813 |
+
"learning_rate": 1.491692588053305e-05,
|
2814 |
+
"loss": 1.0992,
|
2815 |
+
"step": 3940
|
2816 |
+
},
|
2817 |
+
{
|
2818 |
+
"epoch": 3.3517182859567245,
|
2819 |
+
"grad_norm": 0.33844743386504,
|
2820 |
+
"learning_rate": 1.4781528450506232e-05,
|
2821 |
+
"loss": 1.103,
|
2822 |
+
"step": 3950
|
2823 |
+
},
|
2824 |
+
{
|
2825 |
+
"epoch": 3.360203648705982,
|
2826 |
+
"grad_norm": 0.31581742738052115,
|
2827 |
+
"learning_rate": 1.4646489916634687e-05,
|
2828 |
+
"loss": 1.0843,
|
2829 |
+
"step": 3960
|
2830 |
+
},
|
2831 |
+
{
|
2832 |
+
"epoch": 3.3686890114552397,
|
2833 |
+
"grad_norm": 0.3882340052077705,
|
2834 |
+
"learning_rate": 1.4511815021781411e-05,
|
2835 |
+
"loss": 1.1146,
|
2836 |
+
"step": 3970
|
2837 |
+
},
|
2838 |
+
{
|
2839 |
+
"epoch": 3.3771743742044973,
|
2840 |
+
"grad_norm": 0.3309843437274728,
|
2841 |
+
"learning_rate": 1.4377508496037567e-05,
|
2842 |
+
"loss": 1.0751,
|
2843 |
+
"step": 3980
|
2844 |
+
},
|
2845 |
+
{
|
2846 |
+
"epoch": 3.385659736953755,
|
2847 |
+
"grad_norm": 0.34050270685357675,
|
2848 |
+
"learning_rate": 1.4243575056556355e-05,
|
2849 |
+
"loss": 1.0995,
|
2850 |
+
"step": 3990
|
2851 |
+
},
|
2852 |
+
{
|
2853 |
+
"epoch": 3.3941450997030125,
|
2854 |
+
"grad_norm": 0.3459308872000444,
|
2855 |
+
"learning_rate": 1.4110019407387315e-05,
|
2856 |
+
"loss": 1.0817,
|
2857 |
+
"step": 4000
|
2858 |
+
}
|
2859 |
+
],
|
2860 |
+
"logging_steps": 10,
|
2861 |
+
"max_steps": 5890,
|
2862 |
+
"num_input_tokens_seen": 0,
|
2863 |
+
"num_train_epochs": 5,
|
2864 |
+
"save_steps": 500,
|
2865 |
+
"stateful_callbacks": {
|
2866 |
+
"TrainerControl": {
|
2867 |
+
"args": {
|
2868 |
+
"should_epoch_stop": false,
|
2869 |
+
"should_evaluate": false,
|
2870 |
+
"should_log": false,
|
2871 |
+
"should_save": true,
|
2872 |
+
"should_training_stop": false
|
2873 |
+
},
|
2874 |
+
"attributes": {}
|
2875 |
+
}
|
2876 |
+
},
|
2877 |
+
"total_flos": 3362539022843904.0,
|
2878 |
+
"train_batch_size": 2,
|
2879 |
+
"trial_name": null,
|
2880 |
+
"trial_params": null
|
2881 |
+
}
|
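Across the log above the training loss falls from roughly 1.44 at step 1610 to roughly 1.08 at step 4000, with a sharp drop at each epoch boundary, and eval_loss improves monotonically (1.4622 → 1.4360 → 1.4051 → 1.3918 at steps 1800, 2400, 3000, 3600); exp(eval_loss) gives the corresponding perplexity, e.g. exp(1.3918) ≈ 4.02. A minimal sketch for pulling these numbers out of the file once it has been downloaded locally (the path below is illustrative, not part of the upload):

    import json
    import math

    # trainer_state.json holds run metadata plus a "log_history" array that mixes
    # training entries (with "loss") and evaluation entries (with "eval_loss").
    with open("checkpoint-4000/trainer_state.json") as f:
        state = json.load(f)

    train = [e for e in state["log_history"] if "loss" in e]
    evals = [e for e in state["log_history"] if "eval_loss" in e]

    print(f"final train loss @ step {train[-1]['step']}: {train[-1]['loss']}")
    for e in evals:
        # perplexity is the exponential of the cross-entropy eval loss
        print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}  ppl={math.exp(e['eval_loss']):.2f}")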
checkpoint-4000/vocab.json
ADDED
The diff for this file is too large to render. See raw diff
checkpoint-4000/zero_to_fp32.py
ADDED
@@ -0,0 +1,674 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example:
# python zero_to_fp32.py . output_dir/
# or
# python zero_to_fp32.py . output_dir/ --safe_serialization

+
import argparse
|
19 |
+
import torch
|
20 |
+
import glob
|
21 |
+
import math
|
22 |
+
import os
|
23 |
+
import re
|
24 |
+
import json
|
25 |
+
from tqdm import tqdm
|
26 |
+
from collections import OrderedDict
|
27 |
+
from dataclasses import dataclass
|
28 |
+
|
29 |
+
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
|
30 |
+
# DeepSpeed data structures it has to be available in the current python environment.
|
31 |
+
from deepspeed.utils import logger
|
32 |
+
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
|
33 |
+
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
|
34 |
+
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
|
35 |
+
|
36 |
+
|
37 |
+
@dataclass
|
38 |
+
class zero_model_state:
|
39 |
+
buffers: dict()
|
40 |
+
param_shapes: dict()
|
41 |
+
shared_params: list
|
42 |
+
ds_version: int
|
43 |
+
frozen_param_shapes: dict()
|
44 |
+
frozen_param_fragments: dict()
|
45 |
+
|
46 |
+
|
47 |
+
debug = 0
|
48 |
+
|
49 |
+
# load to cpu
|
50 |
+
device = torch.device('cpu')
|
51 |
+
|
52 |
+
|
53 |
+
def atoi(text):
|
54 |
+
return int(text) if text.isdigit() else text
|
55 |
+
|
56 |
+
|
57 |
+
def natural_keys(text):
|
58 |
+
'''
|
59 |
+
alist.sort(key=natural_keys) sorts in human order
|
60 |
+
http://nedbatchelder.com/blog/200712/human_sorting.html
|
61 |
+
(See Toothy's implementation in the comments)
|
62 |
+
'''
|
63 |
+
return [atoi(c) for c in re.split(r'(\d+)', text)]
|
64 |
+
|
65 |
+
|
def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states

def parse_optim_states(files, ds_checkpoint_dir):
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups

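To make the world-size handling above concrete, a small worked sketch with hypothetical numbers (per the comment, with expert parallelism PARTITION_COUNT can be a per-group list):

    # hypothetical PARTITION_COUNT: non-expert params at dp=8, expert params at dp=2
    world_size = [8, 2]
    if type(world_size) is list:
        world_size = max(world_size)
    assert world_size == 8  # must equal the number of '*_optim_states.pt' files found
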
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)

def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

        wanted_params = len(frozen_param_shapes)
        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
        avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
        print(f'Frozen params: Have {avail_numel} numels to process.')
        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)

def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")

def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict

def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel

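A worked check of the padding arithmetic above, with synthetic numbers: 10 elements sharded across 4 ranks means each rank stores ceil(10/4) = 3 elements, and the last partition carries 4 - (10 % 4) = 2 elements of padding:

    partitioned_numel, padding_numel = zero3_partitioned_param_info(10, 4)
    assert (partitioned_numel, padding_numel) == (3, 2)
    assert partitioned_numel * 4 - padding_numel == 10  # padded total recovers the original numel
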
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

        frozen_param_shapes = zero_model_states[0].frozen_param_shapes
        wanted_params = len(frozen_param_shapes)
        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
        avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
        print(f'Frozen params: Have {avail_numel} numels to process.')
        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")

def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")

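The reassembly step above in miniature (synthetic tensors with world_size = 2, not part of the script): a 5-element parameter stored as two flat 3-element shards, the second ending in one padding element; concatenating each rank's slice at the same offset and trimming to the unpartitioned numel recovers the parameter:

    import torch  # repeated so the snippet stands alone

    shards = [torch.tensor([0., 1., 2.]), torch.tensor([3., 4., 0.])]  # rank 0, rank 1 (trailing 0. is padding)
    rebuilt = torch.cat(tuple(s.narrow(0, 0, 3) for s in shards), 0).narrow(0, 0, 5).view(5)
    assert torch.equal(rebuilt, torch.tensor([0., 1., 2., 3., 4.]))
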
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict

def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)

def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    # Dependency pre-check
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        state_dict_split = split_torch_state_dict_into_shards(state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard, output_path)

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

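A minimal sketch of calling the converter directly from Python against one of the checkpoint folders in this repo (the output directory name is hypothetical, and this assumes the script is importable as `zero_to_fp32` from the checkpoint folder):

    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    # consolidate the ZeRO shards under checkpoint-4500/global_step4500 into sharded safetensors files
    convert_zero_checkpoint_to_fp32_state_dict("checkpoint-4500",
                                               "checkpoint-4500-fp32",  # hypothetical output dir
                                               max_shard_size="5GB",
                                               safe_serialization=True)
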
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory to the pytorch fp32 state_dict output files "
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be "
        "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
        "We default it to 5GB so that models can run easily on free-tier Google Colab instances "
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
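The `__main__` block above exposes the same conversion as a CLI; an equivalent invocation (output path illustrative; the tag is read from the `latest` file unless `--tag` is passed):

    python checkpoint-4500/zero_to_fp32.py checkpoint-4500 checkpoint-4500-fp32 --safe_serialization
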
checkpoint-4500/README.md
ADDED
@@ -0,0 +1,202 @@
---
base_model: Qwen/Qwen2.5-72B
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]

### Framework versions

- PEFT 0.12.0
checkpoint-4500/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Qwen/Qwen2.5-72B",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
  "lora_dropout": 0.0,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 128,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "q_proj",
    "k_proj",
    "up_proj",
    "gate_proj",
    "o_proj",
    "v_proj",
    "down_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": true
}
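Taken together this describes a rank-128 LoRA over all attention and MLP projections of Qwen/Qwen2.5-72B, with rank-stabilized scaling (`use_rslora: true` makes the effective scale lora_alpha/sqrt(r) = 32/sqrt(128) ≈ 2.83 rather than the standard lora_alpha/r = 0.25). A minimal sketch of attaching the adapter with PEFT, assuming the adapter weights sit alongside this config in the checkpoint folder:

    import torch
    from transformers import AutoModelForCausalLM
    from peft import PeftModel

    # load the base model, then attach the rank-128 LoRA adapter from this repo
    base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-72B",
                                                torch_dtype=torch.bfloat16,
                                                device_map="auto")
    model = PeftModel.from_pretrained(base, "checkpoint-4500")  # any checkpoint dir with the adapter files
    model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights for inference
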
checkpoint-4500/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
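A quick sanity check of these mappings once a tokenizer is loaded from one of the checkpoint folders (a sketch; any directory containing the tokenizer files works):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("checkpoint-4500")
    assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
    assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643  # also serves as eos and pad here
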
checkpoint-4500/latest
ADDED
@@ -0,0 +1 @@
global_step4500
checkpoint-4500/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
checkpoint-4500/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>",
    "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>",
    "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>"
  ],
  "eos_token": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
  "pad_token": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}
}
checkpoint-4500/tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
  },
  "additional_special_tokens": [
    "<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>",
    "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>",
    "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>"
  ],
  "bos_token": null,
  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
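The `chat_template` above is the standard Qwen2.5 ChatML template with tool-calling support. A sketch of rendering a prompt with it (the message is illustrative):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("checkpoint-4500")
    messages = [{"role": "user", "content": "Hello!"}]
    prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # with no system message, the template injects the default one, yielding:
    # <|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nHello!<|im_end|>\n<|im_start|>assistant\n
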
checkpoint-4500/trainer_state.json
ADDED
@@ -0,0 +1,3239 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.8184132371658888,
  "eval_steps": 600,
  "global_step": 4500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.00848536274925753, "grad_norm": 0.4898678891363344, "learning_rate": 8.488964346349746e-07, "loss": 1.8056, "step": 10},
    {"epoch": 0.01697072549851506, "grad_norm": 0.3537473179717183, "learning_rate": 1.6977928692699491e-06, "loss": 1.7621, "step": 20},
    {"epoch": 0.025456088247772592, "grad_norm": 0.28215953004159977, "learning_rate": 2.546689303904924e-06, "loss": 1.7571, "step": 30},
    {"epoch": 0.03394145099703012, "grad_norm": 0.27446565146764923, "learning_rate": 3.3955857385398982e-06, "loss": 1.7136, "step": 40},
    {"epoch": 0.04242681374628765, "grad_norm": 0.17051549768176558, "learning_rate": 4.244482173174873e-06, "loss": 1.6767, "step": 50},
    {"epoch": 0.050912176495545185, "grad_norm": 0.17763882467320422, "learning_rate": 5.093378607809848e-06, "loss": 1.6371, "step": 60},
    {"epoch": 0.05939753924480271, "grad_norm": 0.14311462596290048, "learning_rate": 5.942275042444822e-06, "loss": 1.6324, "step": 70},
    {"epoch": 0.06788290199406025, "grad_norm": 0.1659540846071645, "learning_rate": 6.7911714770797965e-06, "loss": 1.6062, "step": 80},
    {"epoch": 0.07636826474331777, "grad_norm": 0.20064072815620043, "learning_rate": 7.640067911714771e-06, "loss": 1.5832, "step": 90},
    {"epoch": 0.0848536274925753, "grad_norm": 0.2179045681711979, "learning_rate": 8.488964346349745e-06, "loss": 1.5898, "step": 100},
    {"epoch": 0.09333899024183284, "grad_norm": 0.23866012053128668, "learning_rate": 9.337860780984721e-06, "loss": 1.5924, "step": 110},
    {"epoch": 0.10182435299109037, "grad_norm": 0.18578051776430282, "learning_rate": 1.0186757215619695e-05, "loss": 1.5877, "step": 120},
    {"epoch": 0.1103097157403479, "grad_norm": 0.2216509707409362, "learning_rate": 1.103565365025467e-05, "loss": 1.5947, "step": 130},
    {"epoch": 0.11879507848960542, "grad_norm": 0.20427142255694086, "learning_rate": 1.1884550084889643e-05, "loss": 1.5841, "step": 140},
    {"epoch": 0.12728044123886295, "grad_norm": 0.1765851415675038, "learning_rate": 1.2733446519524619e-05, "loss": 1.5878, "step": 150},
    {"epoch": 0.1357658039881205, "grad_norm": 0.1769355117060811, "learning_rate": 1.3582342954159593e-05, "loss": 1.5795, "step": 160},
    {"epoch": 0.14425116673737803, "grad_norm": 0.1617675663096666, "learning_rate": 1.4431239388794569e-05, "loss": 1.5549, "step": 170},
    {"epoch": 0.15273652948663555, "grad_norm": 0.17302259072151574, "learning_rate": 1.5280135823429543e-05, "loss": 1.5808, "step": 180},
    {"epoch": 0.1612218922358931, "grad_norm": 0.16876039012432806, "learning_rate": 1.6129032258064517e-05, "loss": 1.5676, "step": 190},
    {"epoch": 0.1697072549851506, "grad_norm": 0.19627360154037596, "learning_rate": 1.697792869269949e-05, "loss": 1.5598, "step": 200},
    {"epoch": 0.17819261773440814, "grad_norm": 0.16078510362361015, "learning_rate": 1.7826825127334465e-05, "loss": 1.5667, "step": 210},
    {"epoch": 0.18667798048366568, "grad_norm": 0.16044786518959703, "learning_rate": 1.8675721561969442e-05, "loss": 1.5815, "step": 220},
    {"epoch": 0.1951633432329232, "grad_norm": 0.15656958873834717, "learning_rate": 1.9524617996604416e-05, "loss": 1.5576, "step": 230},
    {"epoch": 0.20364870598218074, "grad_norm": 0.1687290471357602, "learning_rate": 2.037351443123939e-05, "loss": 1.5453, "step": 240},
    {"epoch": 0.21213406873143828, "grad_norm": 0.1519017348276184, "learning_rate": 2.1222410865874364e-05, "loss": 1.5554, "step": 250},
    {"epoch": 0.2206194314806958, "grad_norm": 0.15761892005160086, "learning_rate": 2.207130730050934e-05, "loss": 1.5494, "step": 260},
    {"epoch": 0.22910479422995333, "grad_norm": 0.16857088482977495, "learning_rate": 2.2920203735144312e-05, "loss": 1.5794, "step": 270},
    {"epoch": 0.23759015697921085, "grad_norm": 0.1678705209913503, "learning_rate": 2.3769100169779286e-05, "loss": 1.5373, "step": 280},
    {"epoch": 0.2460755197284684, "grad_norm": 0.14812649566587394, "learning_rate": 2.461799660441426e-05, "loss": 1.5504, "step": 290},
    {"epoch": 0.2545608824777259, "grad_norm": 0.17651916734325857, "learning_rate": 2.5466893039049238e-05, "loss": 1.5607, "step": 300},
    {"epoch": 0.26304624522698344, "grad_norm": 0.14883055338507856, "learning_rate": 2.6315789473684212e-05, "loss": 1.5311, "step": 310},
    {"epoch": 0.271531607976241, "grad_norm": 0.15787522753231265, "learning_rate": 2.7164685908319186e-05, "loss": 1.5656, "step": 320},
    {"epoch": 0.2800169707254985, "grad_norm": 0.1625232940237689, "learning_rate": 2.801358234295416e-05, "loss": 1.5686, "step": 330},
    {"epoch": 0.28850233347475607, "grad_norm": 0.18505951289343867, "learning_rate": 2.8862478777589137e-05, "loss": 1.5474, "step": 340},
    {"epoch": 0.29698769622401355, "grad_norm": 0.13785772316349984, "learning_rate": 2.9711375212224108e-05, "loss": 1.5696, "step": 350},
    {"epoch": 0.3054730589732711, "grad_norm": 0.13531274658248552, "learning_rate": 3.0560271646859086e-05, "loss": 1.5551, "step": 360},
    {"epoch": 0.31395842172252864, "grad_norm": 0.1366381415368909, "learning_rate": 3.140916808149406e-05, "loss": 1.524, "step": 370},
    {"epoch": 0.3224437844717862, "grad_norm": 0.14587220569353926, "learning_rate": 3.2258064516129034e-05, "loss": 1.5515, "step": 380},
    {"epoch": 0.3309291472210437, "grad_norm": 0.13336349383744864, "learning_rate": 3.310696095076401e-05, "loss": 1.5457, "step": 390},
    {"epoch": 0.3394145099703012, "grad_norm": 0.1772016947970983, "learning_rate": 3.395585738539898e-05, "loss": 1.5582, "step": 400},
    {"epoch": 0.34789987271955874, "grad_norm": 0.13819420575084573, "learning_rate": 3.4804753820033956e-05, "loss": 1.5326, "step": 410},
    {"epoch": 0.3563852354688163, "grad_norm": 0.12729862167862188, "learning_rate": 3.565365025466893e-05, "loss": 1.5387, "step": 420},
    {"epoch": 0.3648705982180738, "grad_norm": 0.11777082851399363, "learning_rate": 3.6502546689303904e-05, "loss": 1.5587, "step": 430},
    {"epoch": 0.37335596096733137, "grad_norm": 0.15372268131323022, "learning_rate": 3.7351443123938885e-05, "loss": 1.5362, "step": 440},
    {"epoch": 0.3818413237165889, "grad_norm": 0.12616185572252248, "learning_rate": 3.820033955857386e-05, "loss": 1.5548, "step": 450},
    {"epoch": 0.3903266864658464, "grad_norm": 0.1311200786303391, "learning_rate": 3.904923599320883e-05, "loss": 1.5409, "step": 460},
    {"epoch": 0.39881204921510394, "grad_norm": 0.1707919112561785, "learning_rate": 3.989813242784381e-05, "loss": 1.5509, "step": 470},
    {"epoch": 0.4072974119643615, "grad_norm": 0.14660149264284913, "learning_rate": 4.074702886247878e-05, "loss": 1.5433, "step": 480},
    {"epoch": 0.415782774713619, "grad_norm": 0.12478895483834351, "learning_rate": 4.1595925297113755e-05, "loss": 1.5382, "step": 490},
    {"epoch": 0.42426813746287656, "grad_norm": 0.12327957445795817, "learning_rate": 4.244482173174873e-05, "loss": 1.5515, "step": 500},
    {"epoch": 0.43275350021213405, "grad_norm": 0.12922777738650987, "learning_rate": 4.32937181663837e-05, "loss": 1.5688, "step": 510},
    {"epoch": 0.4412388629613916, "grad_norm": 0.12486802189783415, "learning_rate": 4.414261460101868e-05, "loss": 1.5452, "step": 520},
    {"epoch": 0.44972422571064913, "grad_norm": 0.1360610874577123, "learning_rate": 4.499151103565366e-05, "loss": 1.5493, "step": 530},
    {"epoch": 0.45820958845990667, "grad_norm": 0.1884897685356775, "learning_rate": 4.5840407470288625e-05, "loss": 1.5511, "step": 540},
    {"epoch": 0.4666949512091642, "grad_norm": 0.12446302384809525, "learning_rate": 4.6689303904923606e-05, "loss": 1.5458, "step": 550},
    {"epoch": 0.4751803139584217, "grad_norm": 0.13169591804768588, "learning_rate": 4.753820033955857e-05, "loss": 1.5569, "step": 560},
    {"epoch": 0.48366567670767924, "grad_norm": 0.1343809247449631, "learning_rate": 4.8387096774193554e-05, "loss": 1.5408, "step": 570},
    {"epoch": 0.4921510394569368, "grad_norm": 0.14024589853602, "learning_rate": 4.923599320882852e-05, "loss": 1.5487, "step": 580},
    {"epoch": 0.5006364022061943, "grad_norm": 0.16240429253875313, "learning_rate": 4.999999560970061e-05, "loss": 1.5488, "step": 590},
    {"epoch": 0.5091217649554518, "grad_norm": 0.12575424857894482, "learning_rate": 4.999946877563971e-05, "loss": 1.532, "step": 600},
    {"epoch": 0.5091217649554518, "eval_loss": 1.519254446029663, "eval_runtime": 53.3242, "eval_samples_per_second": 7.145, "eval_steps_per_second": 0.9, "step": 600},
    {"epoch": 0.5176071277047094, "grad_norm": 0.18688482756329736, "learning_rate": 4.999806390290309e-05, "loss": 1.5544, "step": 610},
    {"epoch": 0.5260924904539669, "grad_norm": 0.12425469431830571, "learning_rate": 4.999578104083307e-05, "loss": 1.5443, "step": 620},
    {"epoch": 0.5345778532032245, "grad_norm": 0.1299027485420099, "learning_rate": 4.999262026960902e-05, "loss": 1.5569, "step": 630},
    {"epoch": 0.543063215952482, "grad_norm": 0.11441754852508934, "learning_rate": 4.998858170024449e-05, "loss": 1.5316, "step": 640},
    {"epoch": 0.5515485787017395, "grad_norm": 0.14888547248976478, "learning_rate": 4.998366547458326e-05, "loss": 1.5177, "step": 650},
    {"epoch": 0.560033941450997, "grad_norm": 0.14859292774768867, "learning_rate": 4.997787176529449e-05, "loss": 1.5394, "step": 660},
    {"epoch": 0.5685193042002545, "grad_norm": 0.12499154376539734, "learning_rate": 4.997120077586651e-05, "loss": 1.5554, "step": 670},
    {"epoch": 0.5770046669495121, "grad_norm": 0.1218974898058821, "learning_rate": 4.9963652740599774e-05, "loss": 1.5335, "step": 680},
    {"epoch": 0.5854900296987696, "grad_norm": 0.1273110498715124, "learning_rate": 4.995522792459859e-05, "loss": 1.5349, "step": 690},
    {"epoch": 0.5939753924480271, "grad_norm": 0.12115412881719101, "learning_rate": 4.994592662376183e-05, "loss": 1.5419, "step": 700},
    {"epoch": 0.6024607551972847, "grad_norm": 0.14855096330233286, "learning_rate": 4.99357491647725e-05, "loss": 1.513, "step": 710},
    {"epoch": 0.6109461179465422, "grad_norm": 0.11407988659327956, "learning_rate": 4.992469590508628e-05, "loss": 1.5243, "step": 720},
    {"epoch": 0.6194314806957998, "grad_norm": 0.1197712643781127, "learning_rate": 4.9912767232919035e-05, "loss": 1.5177, "step": 730},
    {"epoch": 0.6279168434450573, "grad_norm": 0.12400515877262065, "learning_rate": 4.9899963567233074e-05, "loss": 1.5619, "step": 740},
    {"epoch": 0.6364022061943148, "grad_norm": 0.12250385257708406, "learning_rate": 4.988628535772249e-05, "loss": 1.539, "step": 750},
    {"epoch": 0.6448875689435724, "grad_norm": 0.1262441090496857, "learning_rate": 4.987173308479738e-05, "loss": 1.5195, "step": 760},
    {"epoch": 0.6533729316928298, "grad_norm": 0.12459694416473029, "learning_rate": 4.985630725956694e-05, "loss": 1.5462, "step": 770},
    {"epoch": 0.6618582944420874, "grad_norm": 0.12985189006106762, "learning_rate": 4.9840008423821527e-05, "loss": 1.5113, "step": 780},
    {"epoch": 0.6703436571913449, "grad_norm": 0.12689306141471304, "learning_rate": 4.9822837150013636e-05, "loss": 1.5201, "step": 790},
    {"epoch": 0.6788290199406024, "grad_norm": 0.15393156370587963, "learning_rate": 4.980479404123778e-05, "loss": 1.5121, "step": 800},
    {"epoch": 0.68731438268986, "grad_norm": 0.13213701895207608, "learning_rate": 4.978587973120931e-05, "loss": 1.5307, "step": 810},
    {"epoch": 0.6957997454391175, "grad_norm": 0.11561354931316294, "learning_rate": 4.9766094884242184e-05, "loss": 1.5316, "step": 820},
    {"epoch": 0.7042851081883751, "grad_norm": 0.12414772399330044, "learning_rate": 4.974544019522559e-05, "loss": 1.5148, "step": 830},
    {"epoch": 0.7127704709376326, "grad_norm": 0.1171652849153521, "learning_rate": 4.972391638959959e-05, "loss": 1.5096, "step": 840},
    {"epoch": 0.7212558336868902, "grad_norm": 0.12868937349582316, "learning_rate": 4.9701524223329585e-05, "loss": 1.5282, "step": 850},
    {"epoch": 0.7297411964361477, "grad_norm": 0.1200015077117309, "learning_rate": 4.967826448287981e-05, "loss": 1.5512, "step": 860},
    {"epoch": 0.7382265591854051, "grad_norm": 0.12340885660045105, "learning_rate": 4.96541379851857e-05, "loss": 1.5394, "step": 870},
    {"epoch": 0.7467119219346627, "grad_norm": 0.12976937691467555, "learning_rate": 4.962914557762517e-05, "loss": 1.51, "step": 880},
    {"epoch": 0.7551972846839202, "grad_norm": 0.11912878476038466, "learning_rate": 4.9603288137988905e-05, "loss": 1.5294, "step": 890},
    {"epoch": 0.7636826474331778, "grad_norm": 0.1299625480337927, "learning_rate": 4.957656657444947e-05, "loss": 1.507, "step": 900},
    {"epoch": 0.7721680101824353, "grad_norm": 0.12380144459698468, "learning_rate": 4.954898182552946e-05, "loss": 1.5376, "step": 910},
    {"epoch": 0.7806533729316928, "grad_norm": 0.13139339643682763, "learning_rate": 4.9520534860068535e-05, "loss": 1.5291, "step": 920},
    {"epoch": 0.7891387356809504, "grad_norm": 0.13088956203983898, "learning_rate": 4.949122667718935e-05, "loss": 1.5239, "step": 930},
    {"epoch": 0.7976240984302079, "grad_norm": 0.12586052988453703, "learning_rate": 4.94610583062625e-05, "loss": 1.5525, "step": 940},
    {"epoch": 0.8061094611794655, "grad_norm": 0.12020996031652877, "learning_rate": 4.943003080687035e-05, "loss": 1.5525, "step": 950},
    {"epoch": 0.814594823928723, "grad_norm": 0.12866375954060869, "learning_rate": 4.9398145268769856e-05, "loss": 1.5266, "step": 960},
    {"epoch": 0.8230801866779804, "grad_norm": 0.13166136756817035, "learning_rate": 4.936540281185423e-05, "loss": 1.5041, "step": 970},
    {"epoch": 0.831565549427238, "grad_norm": 0.12481946698483787, "learning_rate": 4.933180458611364e-05, "loss": 1.5271, "step": 980},
    {"epoch": 0.8400509121764955, "grad_norm": 0.12264463761209114, "learning_rate": 4.9297351771594844e-05, "loss": 1.5354, "step": 990},
    {"epoch": 0.8485362749257531, "grad_norm": 0.11985452856537594, "learning_rate": 4.926204557835968e-05, "loss": 1.5167, "step": 1000},
    {"epoch": 0.8570216376750106, "grad_norm": 0.13125396521190327, "learning_rate": 4.9225887246442634e-05, "loss": 1.5282, "step": 1010},
    {"epoch": 0.8655070004242681, "grad_norm": 0.12730192328072554, "learning_rate": 4.918887804580725e-05, "loss": 1.5089, "step": 1020},
    {"epoch": 0.8739923631735257, "grad_norm": 0.12724644219344786, "learning_rate": 4.915101927630153e-05, "loss": 1.4964, "step": 1030},
    {"epoch": 0.8824777259227832, "grad_norm": 0.13578611501833232, "learning_rate": 4.911231226761227e-05, "loss": 1.5189, "step": 1040},
    {"epoch": 0.8909630886720408, "grad_norm": 0.13577513964986457, "learning_rate": 4.90727583792184e-05, "loss": 1.5149, "step": 1050},
    {"epoch": 0.8994484514212983, "grad_norm": 0.1269735011676505, "learning_rate": 4.903235900034317e-05, "loss": 1.5066, "step": 1060},
    {"epoch": 0.9079338141705557, "grad_norm": 0.13250058214235566, "learning_rate": 4.899111554990543e-05, "loss": 1.5129, "step": 1070},
    {"epoch": 0.9164191769198133, "grad_norm": 0.13130735246433495, "learning_rate": 4.894902947646975e-05, "loss": 1.5156, "step": 1080},
    {"epoch": 0.9249045396690708, "grad_norm": 0.1273580180253049, "learning_rate": 4.890610225819553e-05, "loss": 1.5324, "step": 1090},
    {"epoch": 0.9333899024183284, "grad_norm": 0.13155314243939242, "learning_rate": 4.8862335402785136e-05, "loss": 1.5106, "step": 1100},
    {"epoch": 0.9418752651675859, "grad_norm": 0.13564895211984299, "learning_rate": 4.88177304474309e-05, "loss": 1.5067, "step": 1110},
    {"epoch": 0.9503606279168434, "grad_norm": 0.12774735587114736, "learning_rate": 4.877228895876115e-05, "loss": 1.5182, "step": 1120},
    {"epoch": 0.958845990666101, "grad_norm": 0.1307997709537685, "learning_rate": 4.872601253278517e-05, "loss": 1.4969, "step": 1130},
    {"epoch": 0.9673313534153585, "grad_norm": 0.1304794845040634, "learning_rate": 4.867890279483717e-05, "loss": 1.5264, "step": 1140},
    {"epoch": 0.9758167161646161, "grad_norm": 0.13666141796489684, "learning_rate": 4.8630961399519206e-05, "loss": 1.5467, "step": 1150},
    {"epoch": 0.9843020789138736, "grad_norm": 0.1370278303190263, "learning_rate": 4.8582190030643e-05, "loss": 1.5127, "step": 1160},
    {"epoch": 0.9927874416631312, "grad_norm": 0.1390936629299565, "learning_rate": 4.8532590401170894e-05, "loss": 1.5058, "step": 1170},
    {"epoch": 1.0012728044123886, "grad_norm": 0.12934475548108287, "learning_rate": 4.848216425315561e-05, "loss": 1.5202, "step": 1180},
    {"epoch": 1.0097581671616462, "grad_norm": 0.13898591683370803, "learning_rate": 4.843091335767913e-05, "loss": 1.4563, "step": 1190},
    {"epoch": 1.0182435299109036, "grad_norm": 0.17488231535826249, "learning_rate": 4.837883951479043e-05, "loss": 1.4402, "step": 1200},
    {"epoch": 1.0182435299109036, "eval_loss": 1.4955657720565796, "eval_runtime": 52.424, "eval_samples_per_second": 7.268, "eval_steps_per_second": 0.916, "step": 1200},
    {"epoch": 1.0267288926601612, "grad_norm": 0.1536036344095855, "learning_rate": 4.832594455344229e-05, "loss": 1.4848, "step": 1210},
    {"epoch": 1.0352142554094188, "grad_norm": 0.15762414421336599, "learning_rate": 4.827223033142706e-05, "loss": 1.4567, "step": 1220},
    {"epoch": 1.0436996181586762, "grad_norm": 0.15058229398130366, "learning_rate": 4.8217698735311414e-05, "loss": 1.4672, "step": 1230},
    {"epoch": 1.0521849809079338, "grad_norm": 0.16010992835678386, "learning_rate": 4.8162351680370044e-05, "loss": 1.4458, "step": 1240},
    {"epoch": 1.0606703436571914, "grad_norm": 0.16758816000341356, "learning_rate": 4.810619111051847e-05, "loss": 1.4842, "step": 1250},
    {"epoch": 1.069155706406449, "grad_norm": 0.16559260972674986, "learning_rate": 4.8049218998244696e-05, "loss": 1.4556, "step": 1260},
    {"epoch": 1.0776410691557063, "grad_norm": 0.17237632034416966, "learning_rate": 4.7991437344539966e-05, "loss": 1.4813, "step": 1270},
    {"epoch": 1.086126431904964, "grad_norm": 0.17112756741722487, "learning_rate": 4.793284817882845e-05, "loss": 1.4535, "step": 1280},
    {"epoch": 1.0946117946542215, "grad_norm": 0.16828572707718548, "learning_rate": 4.787345355889604e-05, "loss": 1.4344, "step": 1290},
    {"epoch": 1.103097157403479, "grad_norm": 0.15709986047041227, "learning_rate": 4.7813255570817985e-05, "loss": 1.4744, "step": 1300},
    {"epoch": 1.1115825201527365, "grad_norm": 0.16651547128146313, "learning_rate": 4.775225632888568e-05, "loss": 1.4561, "step": 1310},
    {"epoch": 1.120067882901994, "grad_norm": 0.16750176017515714, "learning_rate": 4.76904579755324e-05, "loss": 1.4616, "step": 1320},
    {"epoch": 1.1285532456512515, "grad_norm": 0.1608016567554825, "learning_rate": 4.7627862681258037e-05, "loss": 1.4593, "step": 1330},
    {"epoch": 1.137038608400509, "grad_norm": 0.21390766919038295, "learning_rate": 4.756447264455287e-05, "loss": 1.4484, "step": 1340},
    {"epoch": 1.1455239711497667, "grad_norm": 0.16826883293172662, "learning_rate": 4.750029009182038e-05, "loss": 1.4703, "step": 1350},
    {"epoch": 1.1540093338990243, "grad_norm": 0.17431508867079595, "learning_rate": 4.7435317277299e-05, "loss": 1.4701, "step": 1360},
    {"epoch": 1.1624946966482816, "grad_norm": 0.15973851467570443, "learning_rate": 4.736955648298299e-05, "loss": 1.4503, "step": 1370},
    {"epoch": 1.1709800593975392, "grad_norm": 0.1887713767970947, "learning_rate": 4.730301001854225e-05, "loss": 1.4624, "step": 1380},
    {"epoch": 1.1794654221467968, "grad_norm": 0.16898695344997974, "learning_rate": 4.7235680221241216e-05, "loss": 1.4452, "step": 1390},
    {"epoch": 1.1879507848960542, "grad_norm": 0.20014553287073528, "learning_rate": 4.716756945585681e-05, "loss": 1.4717, "step": 1400},
    {"epoch": 1.1964361476453118, "grad_norm": 0.17137954325200072, "learning_rate": 4.709868011459528e-05, "loss": 1.4403, "step": 1410},
    {"epoch": 1.2049215103945694, "grad_norm": 0.17801721751888322, "learning_rate": 4.7029014617008294e-05, "loss": 1.4339, "step": 1420},
    {"epoch": 1.213406873143827, "grad_norm": 0.17139613676642362, "learning_rate": 4.695857540990789e-05, "loss": 1.4573, "step": 1430},
    {"epoch": 1.2218922358930844, "grad_norm": 0.16971403514498054, "learning_rate": 4.688736496728058e-05, "loss": 1.4282, "step": 1440},
    {"epoch": 1.230377598642342, "grad_norm": 0.17200272420880428, "learning_rate": 4.681538579020038e-05, "loss": 1.4434, "step": 1450},
    {"epoch": 1.2388629613915996, "grad_norm": 0.17208160407432616, "learning_rate": 4.6742640406741106e-05, "loss": 1.45, "step": 1460},
    {"epoch": 1.247348324140857, "grad_norm": 0.1939626212901777, "learning_rate": 4.666913137188743e-05, "loss": 1.4608, "step": 1470},
    {"epoch": 1.2558336868901145, "grad_norm": 0.17291794493304186, "learning_rate": 4.6594861267445236e-05, "loss": 1.4671, "step": 1480},
    {"epoch": 1.2643190496393721, "grad_norm": 0.18219792041638924, "learning_rate": 4.651983270195093e-05, "loss": 1.4262, "step": 1490},
    {"epoch": 1.2728044123886297, "grad_norm": 0.18086437830489926, "learning_rate": 4.644404831057979e-05, "loss": 1.4455, "step": 1500},
    {"epoch": 1.281289775137887, "grad_norm": 0.17417619624549402, "learning_rate": 4.636751075505344e-05, "loss": 1.4873, "step": 1510},
    {"epoch": 1.2897751378871447, "grad_norm": 0.18354282411845188, "learning_rate": 4.629022272354637e-05, "loss": 1.4525, "step": 1520},
    {"epoch": 1.298260500636402, "grad_norm": 0.17985617345325455, "learning_rate": 4.621218693059149e-05, "loss": 1.4303, "step": 1530},
    {"epoch": 1.3067458633856597, "grad_norm": 0.1809708317849863, "learning_rate": 4.6133406116984795e-05, "loss": 1.4631, "step": 1540},
    {"epoch": 1.3152312261349173, "grad_norm": 0.17487374671212322, "learning_rate": 4.6053883049689145e-05, "loss": 1.4482, "step": 1550},
    {"epoch": 1.3237165888841749, "grad_norm": 0.19912807671077193, "learning_rate": 4.5973620521737036e-05, "loss": 1.4497, "step": 1560},
    {"epoch": 1.3322019516334322, "grad_norm": 0.17853627546912074, "learning_rate": 4.5892621352132514e-05, "loss": 1.4456, "step": 1570},
    {"epoch": 1.3406873143826898, "grad_norm": 0.18252596927754394, "learning_rate": 4.581088838575218e-05, "loss": 1.4328, "step": 1580},
    {"epoch": 1.3491726771319474, "grad_norm": 0.17604951053556211, "learning_rate": 4.572842449324525e-05, "loss": 1.4442, "step": 1590},
    {"epoch": 1.3576580398812048, "grad_norm": 0.18358942463311748, "learning_rate": 4.564523257093275e-05, "loss": 1.4338, "step": 1600},
    {"epoch": 1.3661434026304624, "grad_norm": 0.20508703236267142, "learning_rate": 4.5561315540705774e-05, "loss": 1.4445, "step": 1610},
    {"epoch": 1.37462876537972, "grad_norm": 0.18486352550747187, "learning_rate": 4.547667634992288e-05, "loss": 1.4261, "step": 1620},
    {"epoch": 1.3831141281289776, "grad_norm": 0.17492766465456316, "learning_rate": 4.539131797130656e-05, "loss": 1.4258, "step": 1630},
    {"epoch": 1.391599490878235, "grad_norm": 0.19692876587833674, "learning_rate": 4.530524340283881e-05, "loss": 1.4349, "step": 1640},
    {"epoch": 1.4000848536274926, "grad_norm": 0.19155373430892478, "learning_rate": 4.521845566765589e-05, "loss": 1.4536, "step": 1650},
    {"epoch": 1.4085702163767502, "grad_norm": 0.18544325977459192, "learning_rate": 4.513095781394208e-05, "loss": 1.4363, "step": 1660},
    {"epoch": 1.4170555791260075, "grad_norm": 0.177828004720666, "learning_rate": 4.504275291482267e-05, "loss": 1.4595, "step": 1670},
    {"epoch": 1.4255409418752651, "grad_norm": 0.17855432230356816, "learning_rate": 4.495384406825601e-05, "loss": 1.4211, "step": 1680},
    {"epoch": 1.4340263046245227, "grad_norm": 0.20232492538380317, "learning_rate": 4.486423439692469e-05, "loss": 1.4189, "step": 1690},
    {"epoch": 1.4425116673737803, "grad_norm": 0.1975109303350431, "learning_rate": 4.477392704812585e-05, "loss": 1.4565, "step": 1700},
    {"epoch": 1.4509970301230377, "grad_norm": 0.19619010830399825, "learning_rate": 4.468292519366071e-05, "loss": 1.4382, "step": 1710},
    {"epoch": 1.4594823928722953, "grad_norm": 0.18168826428246143, "learning_rate": 4.459123202972308e-05, "loss": 1.4471, "step": 1720},
    {"epoch": 1.4679677556215527, "grad_norm": 0.1923264062362399, "learning_rate": 4.449885077678717e-05, "loss": 1.4153, "step": 1730},
    {"epoch": 1.4764531183708103, "grad_norm": 0.1907937313040222, "learning_rate": 4.440578467949445e-05, "loss": 1.4432, "step": 1740},
    {"epoch": 1.4849384811200679, "grad_norm": 0.19107457667767244, "learning_rate": 4.431203700653968e-05, "loss": 1.4285, "step": 1750},
    {"epoch": 1.4934238438693255, "grad_norm": 0.19847350429107552, "learning_rate": 4.421761105055613e-05, "loss": 1.4383, "step": 1760},
    {"epoch": 1.501909206618583, "grad_norm": 0.18536475556610216, "learning_rate": 4.4122510127999937e-05, "loss": 1.42, "step": 1770},
    {"epoch": 1.5103945693678404, "grad_norm": 0.18481023473586697, "learning_rate": 4.4026737579033584e-05, "loss": 1.4384, "step": 1780},
    {"epoch": 1.518879932117098, "grad_norm": 0.20863867505874642, "learning_rate": 4.393029676740864e-05, "loss": 1.4543, "step": 1790},
    {"epoch": 1.5273652948663554, "grad_norm": 0.1816036870853105, "learning_rate": 4.3833191080347575e-05, "loss": 1.434, "step": 1800},
    {"epoch": 1.5273652948663554, "eval_loss": 1.4622184038162231, "eval_runtime": 52.4041, "eval_samples_per_second": 7.27, "eval_steps_per_second": 0.916, "step": 1800},
    {"epoch": 1.535850657615613, "grad_norm": 0.19378252368958881, "learning_rate": 4.3735423928424815e-05, "loss": 1.4275, "step": 1810},
    {"epoch": 1.5443360203648706, "grad_norm": 0.20453331251433848, "learning_rate": 4.363699874544697e-05, "loss": 1.4203, "step": 1820},
    {"epoch": 1.5528213831141282, "grad_norm": 0.26684319417219377, "learning_rate": 4.3537918988332156e-05, "loss": 1.4372, "step": 1830},
    {"epoch": 1.5613067458633858, "grad_norm": 0.25745160303419773, "learning_rate": 4.343818813698868e-05, "loss": 1.4082, "step": 1840},
    {"epoch": 1.5697921086126432, "grad_norm": 0.19969727996700776, "learning_rate": 4.3337809694192765e-05, "loss": 1.4314, "step": 1850},
    {"epoch": 1.5782774713619008, "grad_norm": 0.20117210832277968, "learning_rate": 4.3236787185465525e-05, "loss": 1.4293, "step": 1860},
    {"epoch": 1.5867628341111581, "grad_norm": 0.20173003641028897, "learning_rate": 4.313512415894913e-05, "loss": 1.4406, "step": 1870},
    {"epoch": 1.5952481968604157, "grad_norm": 0.20304770794371527, "learning_rate": 4.303282418528224e-05, "loss": 1.4286, "step": 1880},
    {"epoch": 1.6037335596096733, "grad_norm": 0.19126658907738198, "learning_rate": 4.292989085747452e-05, "loss": 1.4184, "step": 1890},
    {"epoch": 1.612218922358931, "grad_norm": 0.20069554966453027, "learning_rate": 4.282632779078051e-05, "loss": 1.4133, "step": 1900},
    {"epoch": 1.6207042851081885, "grad_norm": 0.1952881519566686, "learning_rate": 4.2722138622572624e-05, "loss": 1.4432, "step": 1910},
    {"epoch": 1.629189647857446, "grad_norm": 0.19763704668680288, "learning_rate": 4.261732701221339e-05, "loss": 1.3921, "step": 1920},
    {"epoch": 1.6376750106067033, "grad_norm": 0.19821464294464497, "learning_rate": 4.2511896640926925e-05, "loss": 1.4454, "step": 1930},
    {"epoch": 1.6461603733559609, "grad_norm": 0.20456545626297834, "learning_rate": 4.240585121166966e-05, "loss": 1.4147, "step": 1940},
    {"epoch": 1.6546457361052185, "grad_norm": 0.2119092529186395, "learning_rate": 4.229919444900027e-05, "loss": 1.3969, "step": 1950},
    {"epoch": 1.663131098854476, "grad_norm": 0.20330157582122357, "learning_rate": 4.2191930098948865e-05, "loss": 1.426, "step": 1960},
    {"epoch": 1.6716164616037337, "grad_norm": 0.21761164739298738, "learning_rate": 4.2084061928885406e-05, "loss": 1.4246, "step": 1970},
    {"epoch": 1.680101824352991, "grad_norm": 0.19331588142071401, "learning_rate": 4.197559372738741e-05, "loss": 1.4305, "step": 1980},
    {"epoch": 1.6885871871022486, "grad_norm": 0.20188460724329996, "learning_rate": 4.186652930410685e-05, "loss": 1.4153, "step": 1990},
    {"epoch": 1.697072549851506, "grad_norm": 0.20988950033571588, "learning_rate": 4.1756872489636425e-05, "loss": 1.3894, "step": 2000},
    {"epoch": 1.7055579126007636, "grad_norm": 0.1966475893123187, "learning_rate": 4.1646627135374916e-05, "loss": 1.3962, "step": 2010},
    {"epoch": 1.7140432753500212, "grad_norm": 0.20785207367991768, "learning_rate": 4.1535797113392004e-05, "loss": 1.4037, "step": 2020},
    {"epoch": 1.7225286380992788, "grad_norm": 0.2029940281663133, "learning_rate": 4.1424386316292224e-05, "loss": 1.4011, "step": 2030},
    {"epoch": 1.7310140008485364, "grad_norm": 0.2247844551379277, "learning_rate": 4.131239865707829e-05, "loss": 1.4084, "step": 2040},
    {"epoch": 1.7394993635977938, "grad_norm": 0.20900441746105022, "learning_rate": 4.11998380690136e-05, "loss": 1.4235, "step": 2050},
    {"epoch": 1.7479847263470514, "grad_norm": 0.20362408546889926, "learning_rate": 4.108670850548416e-05, "loss": 1.4204, "step": 2060},
    {"epoch": 1.7564700890963088, "grad_norm": 0.22281567946240438, "learning_rate": 4.097301393985968e-05, "loss": 1.4023, "step": 2070},
    {"epoch": 1.7649554518455663, "grad_norm": 0.20867113178797225, "learning_rate": 4.085875836535404e-05, "loss": 1.3895, "step": 2080},
    {"epoch": 1.773440814594824, "grad_norm": 0.22113231886160947, "learning_rate": 4.0743945794885063e-05, "loss": 1.3963, "step": 2090},
    {"epoch": 1.7819261773440815, "grad_norm": 0.22334563577844263, "learning_rate": 4.062858026093351e-05, "loss": 1.3988, "step": 2100},
    {"epoch": 1.7904115400933391, "grad_norm": 0.23218581668265403, "learning_rate": 4.051266581540152e-05, "loss": 1.4068, "step": 2110},
    {"epoch": 1.7988969028425965, "grad_norm": 0.20295589384571033, "learning_rate": 4.0396206529470234e-05, "loss": 1.3883, "step": 2120},
    {"epoch": 1.8073822655918539, "grad_norm": 0.22861611442392848, "learning_rate": 4.027920649345687e-05, "loss": 1.4043, "step": 2130},
    {"epoch": 1.8158676283411115, "grad_norm": 0.2083012771089638, "learning_rate": 4.0161669816671e-05, "loss": 1.398, "step": 2140},
    {"epoch": 1.824352991090369, "grad_norm": 0.21936173231840464, "learning_rate": 4.004360062727028e-05, "loss": 1.4142, "step": 2150},
    {"epoch": 1.8328383538396267, "grad_norm": 0.21383435796328337, "learning_rate": 3.9925003072115406e-05, "loss": 1.4138, "step": 2160},
    {"epoch": 1.8413237165888843, "grad_norm": 0.23301608248270392, "learning_rate": 3.9805881316624506e-05, "loss": 1.4195, "step": 2170},
    {"epoch": 1.8498090793381419, "grad_norm": 0.22424766656883474, "learning_rate": 3.968623954462681e-05, "loss": 1.4011, "step": 2180},
    {"epoch": 1.8582944420873992, "grad_norm": 0.21286417342881453, "learning_rate": 3.9566081958215734e-05, "loss": 1.409, "step": 2190},
    {"epoch": 1.8667798048366566, "grad_norm": 0.21944800687444807, "learning_rate": 3.9445412777601284e-05, "loss": 1.3877, "step": 2200},
    {"epoch": 1.8752651675859142, "grad_norm": 0.23113173625974803, "learning_rate": 3.932423624096181e-05, "loss": 1.4089, "step": 2210},
    {"epoch": 1.8837505303351718, "grad_norm": 0.2081941699587778, "learning_rate": 3.920255660429517e-05, "loss": 1.4024, "step": 2220},
    {"epoch": 1.8922358930844294, "grad_norm": 0.2188685806654701, "learning_rate": 3.908037814126927e-05, "loss": 1.3878, "step": 2230},
    {"epoch": 1.900721255833687, "grad_norm": 0.22761843244757962, "learning_rate": 3.895770514307193e-05, "loss": 1.4004, "step": 2240},
    {"epoch": 1.9092066185829444, "grad_norm": 0.23309183623120422, "learning_rate": 3.883454191826017e-05, "loss": 1.4188, "step": 2250},
    {"epoch": 1.917691981332202, "grad_norm": 0.20329785843911802, "learning_rate": 3.871089279260891e-05, "loss": 1.3893, "step": 2260},
    {"epoch": 1.9261773440814594, "grad_norm": 0.23470973193726366, "learning_rate": 3.8586762108958995e-05, "loss": 1.3974, "step": 2270},
    {"epoch": 1.934662706830717, "grad_norm": 0.22779136837044714, "learning_rate": 3.8462154227064725e-05, "loss": 1.4115, "step": 2280},
    {"epoch": 1.9431480695799745, "grad_norm": 0.22338952315651892, "learning_rate": 3.833707352344068e-05, "loss": 1.3873, "step": 2290},
    {"epoch": 1.9516334323292321, "grad_norm": 0.23069304025882129, "learning_rate": 3.821152439120801e-05, "loss": 1.3944, "step": 2300},
    {"epoch": 1.9601187950784897, "grad_norm": 0.23590596270163203, "learning_rate": 3.808551123994018e-05, "loss": 1.3857, "step": 2310},
    {"epoch": 1.9686041578277471, "grad_norm": 0.22545661808214923, "learning_rate": 3.795903849550805e-05, "loss": 1.3628, "step": 2320},
    {"epoch": 1.9770895205770047, "grad_norm": 0.2450769875954842, "learning_rate": 3.7832110599924455e-05, "loss": 1.4079, "step": 2330},
    {"epoch": 1.985574883326262, "grad_norm": 0.22931499326784313, "learning_rate": 3.7704732011188166e-05, "loss": 1.379, "step": 2340},
    {"epoch": 1.9940602460755197, "grad_norm": 0.22417244507397657, "learning_rate": 3.7576907203127346e-05, "loss": 1.4035, "step": 2350},
    {"epoch": 2.0025456088247773, "grad_norm": 0.24496197221575314, "learning_rate": 3.7448640665242406e-05, "loss": 1.442, "step": 2360},
    {"epoch": 2.011030971574035, "grad_norm": 0.2532740296990078, "learning_rate": 3.73199369025483e-05, "loss": 1.2672, "step": 2370},
    {"epoch": 2.0195163343232925, "grad_norm": 0.2890155987968593, "learning_rate": 3.7190800435416355e-05, "loss": 1.246, "step": 2380},
    {"epoch": 2.02800169707255, "grad_norm": 0.2541972565696406, "learning_rate": 3.706123579941545e-05, "loss": 1.2603, "step": 2390},
    {"epoch": 2.036487059821807, "grad_norm": 0.2530140862527023, "learning_rate": 3.693124754515272e-05, "loss": 1.2638, "step": 2400},
    {"epoch": 2.036487059821807, "eval_loss": 1.435962438583374, "eval_runtime": 52.582, "eval_samples_per_second": 7.246, "eval_steps_per_second": 0.913, "step": 2400},
    {"epoch": 2.044972422571065, "grad_norm": 0.25100458343337734, "learning_rate": 3.680084023811377e-05, "loss": 1.2711, "step": 2410},
    {"epoch": 2.0534577853203224, "grad_norm": 0.2695727673292618, "learning_rate": 3.66700184585023e-05, "loss": 1.2578, "step": 2420},
    {"epoch": 2.06194314806958, "grad_norm": 0.2605068415443213, "learning_rate": 3.6538786801079226e-05, "loss": 1.2506, "step": 2430},
    {"epoch": 2.0704285108188376, "grad_norm": 0.27415607207865045, "learning_rate": 3.64071498750013e-05, "loss": 1.2852, "step": 2440},
    {"epoch": 2.078913873568095, "grad_norm": 0.2688900338206285, "learning_rate": 3.627511230365928e-05, "loss": 1.2695, "step": 2450},
    {"epoch": 2.0873992363173524, "grad_norm": 0.2750825805336503, "learning_rate": 3.614267872451546e-05, "loss": 1.2643, "step": 2460},
    {"epoch": 2.09588459906661, "grad_norm": 0.2659269066581903, "learning_rate": 3.600985378894086e-05, "loss": 1.2868, "step": 2470},
    {"epoch": 2.1043699618158676, "grad_norm": 0.24411151291321526, "learning_rate": 3.587664216205183e-05, "loss": 1.2571, "step": 2480},
    {"epoch": 2.112855324565125, "grad_norm": 0.2574194755634052, "learning_rate": 3.574304852254621e-05, "loss": 1.2769, "step": 2490},
    {"epoch": 2.1213406873143827, "grad_norm": 0.2894545074998905, "learning_rate": 3.5609077562538997e-05, "loss": 1.2469, "step": 2500},
    {"epoch": 2.1298260500636403, "grad_norm": 0.2828176429904294, "learning_rate": 3.547473398739754e-05, "loss": 1.2527, "step": 2510},
    {"epoch": 2.138311412812898, "grad_norm": 0.25886029771650565, "learning_rate": 3.5340022515576294e-05, "loss": 1.2578, "step": 2520},
    {"epoch": 2.146796775562155, "grad_norm": 0.2783799371621383, "learning_rate": 3.52049478784511e-05, "loss": 1.2489, "step": 2530},
    {"epoch": 2.1552821383114127, "grad_norm": 0.2753116113218978, "learning_rate": 3.506951482015297e-05, "loss": 1.275, "step": 2540},
    {"epoch": 2.1637675010606703, "grad_norm": 0.28115792079727675, "learning_rate": 3.493372809740152e-05, "loss": 1.2554, "step": 2550},
    {"epoch": 2.172252863809928, "grad_norm": 0.27954425325951715, "learning_rate": 3.479759247933785e-05, "loss": 1.2618, "step": 2560},
    {"epoch": 2.1807382265591855, "grad_norm": 0.27555174232347995, "learning_rate": 3.466111274735707e-05, "loss": 1.2598, "step": 2570},
    {"epoch": 2.189223589308443, "grad_norm": 0.27280827991301104, "learning_rate": 3.452429369494037e-05, "loss": 1.262, "step": 2580},
    {"epoch": 2.1977089520577007, "grad_norm": 0.2749685805551003, "learning_rate": 3.438714012748664e-05, "loss": 1.2683, "step": 2590},
    {"epoch": 2.206194314806958, "grad_norm": 0.2780594302788235, "learning_rate": 3.424965686214371e-05, "loss": 1.2462, "step": 2600},
    {"epoch": 2.2146796775562154, "grad_norm": 0.2942257416636676, "learning_rate": 3.411184872763915e-05, "loss": 1.2581, "step": 2610},
    {"epoch": 2.223165040305473, "grad_norm": 0.27000377333423803, "learning_rate": 3.39737205641107e-05, "loss": 1.2412, "step": 2620},
    {"epoch": 2.2316504030547306, "grad_norm": 0.28187507810449336, "learning_rate": 3.383527722293622e-05, "loss": 1.2659, "step": 2630},
    {"epoch": 2.240135765803988, "grad_norm": 0.2736213940552268, "learning_rate": 3.369652356656336e-05, "loss": 1.2553, "step": 2640},
    {"epoch": 2.248621128553246, "grad_norm": 0.29698834543438446, "learning_rate": 3.355746446833873e-05, "loss": 1.2714, "step": 2650},
    {"epoch": 2.257106491302503, "grad_norm": 0.2875128112484735, "learning_rate": 3.3418104812336786e-05, "loss": 1.2508, "step": 2660},
    {"epoch": 2.2655918540517606, "grad_norm": 0.3016647299373059, "learning_rate": 3.327844949318824e-05, "loss": 1.2451, "step": 2670},
    {"epoch": 2.274077216801018, "grad_norm": 0.27371321581702696, "learning_rate": 3.3138503415908176e-05, "loss": 1.2467, "step": 2680},
    {"epoch": 2.2825625795502757, "grad_norm": 0.28374547760120017, "learning_rate": 3.299827149572376e-05, "loss": 1.2452, "step": 2690},
    {"epoch": 2.2910479422995333, "grad_norm": 0.2805999278165284, "learning_rate": 3.285775865790166e-05, "loss": 1.2595, "step": 2700
1932 |
+
},
|
1933 |
+
{
|
1934 |
+
"epoch": 2.299533305048791,
|
1935 |
+
"grad_norm": 0.2758019804125597,
|
1936 |
+
"learning_rate": 3.271696983757496e-05,
|
1937 |
+
"loss": 1.2583,
|
1938 |
+
"step": 2710
|
1939 |
+
},
|
1940 |
+
{
|
1941 |
+
"epoch": 2.3080186677980485,
|
1942 |
+
"grad_norm": 0.27211127699988974,
|
1943 |
+
"learning_rate": 3.2575909979569906e-05,
|
1944 |
+
"loss": 1.2255,
|
1945 |
+
"step": 2720
|
1946 |
+
},
|
1947 |
+
{
|
1948 |
+
"epoch": 2.316504030547306,
|
1949 |
+
"grad_norm": 0.2741831859110416,
|
1950 |
+
"learning_rate": 3.243458403823223e-05,
|
1951 |
+
"loss": 1.2335,
|
1952 |
+
"step": 2730
|
1953 |
+
},
|
1954 |
+
{
|
1955 |
+
"epoch": 2.3249893932965633,
|
1956 |
+
"grad_norm": 0.287074507507,
|
1957 |
+
"learning_rate": 3.2292996977253075e-05,
|
1958 |
+
"loss": 1.2555,
|
1959 |
+
"step": 2740
|
1960 |
+
},
|
1961 |
+
{
|
1962 |
+
"epoch": 2.333474756045821,
|
1963 |
+
"grad_norm": 0.2760197579958247,
|
1964 |
+
"learning_rate": 3.215115376949474e-05,
|
1965 |
+
"loss": 1.2574,
|
1966 |
+
"step": 2750
|
1967 |
+
},
|
1968 |
+
{
|
1969 |
+
"epoch": 2.3419601187950785,
|
1970 |
+
"grad_norm": 0.29917391348714156,
|
1971 |
+
"learning_rate": 3.200905939681599e-05,
|
1972 |
+
"loss": 1.2232,
|
1973 |
+
"step": 2760
|
1974 |
+
},
|
1975 |
+
{
|
1976 |
+
"epoch": 2.350445481544336,
|
1977 |
+
"grad_norm": 0.2863180346672473,
|
1978 |
+
"learning_rate": 3.1866718849897044e-05,
|
1979 |
+
"loss": 1.2341,
|
1980 |
+
"step": 2770
|
1981 |
+
},
|
1982 |
+
{
|
1983 |
+
"epoch": 2.3589308442935937,
|
1984 |
+
"grad_norm": 0.2760526831444543,
|
1985 |
+
"learning_rate": 3.172413712806435e-05,
|
1986 |
+
"loss": 1.253,
|
1987 |
+
"step": 2780
|
1988 |
+
},
|
1989 |
+
{
|
1990 |
+
"epoch": 2.3674162070428513,
|
1991 |
+
"grad_norm": 0.29286413736773825,
|
1992 |
+
"learning_rate": 3.158131923911498e-05,
|
1993 |
+
"loss": 1.2617,
|
1994 |
+
"step": 2790
|
1995 |
+
},
|
1996 |
+
{
|
1997 |
+
"epoch": 2.3759015697921084,
|
1998 |
+
"grad_norm": 0.27643034174892955,
|
1999 |
+
"learning_rate": 3.143827019914072e-05,
|
2000 |
+
"loss": 1.2152,
|
2001 |
+
"step": 2800
|
2002 |
+
},
|
2003 |
+
{
|
2004 |
+
"epoch": 2.384386932541366,
|
2005 |
+
"grad_norm": 0.2939949433037669,
|
2006 |
+
"learning_rate": 3.12949950323519e-05,
|
2007 |
+
"loss": 1.2354,
|
2008 |
+
"step": 2810
|
2009 |
+
},
|
2010 |
+
{
|
2011 |
+
"epoch": 2.3928722952906236,
|
2012 |
+
"grad_norm": 0.2864245267570891,
|
2013 |
+
"learning_rate": 3.115149877090097e-05,
|
2014 |
+
"loss": 1.2447,
|
2015 |
+
"step": 2820
|
2016 |
+
},
|
2017 |
+
{
|
2018 |
+
"epoch": 2.401357658039881,
|
2019 |
+
"grad_norm": 0.2952829920235313,
|
2020 |
+
"learning_rate": 3.1007786454705724e-05,
|
2021 |
+
"loss": 1.2462,
|
2022 |
+
"step": 2830
|
2023 |
+
},
|
2024 |
+
{
|
2025 |
+
"epoch": 2.409843020789139,
|
2026 |
+
"grad_norm": 0.3032080033620836,
|
2027 |
+
"learning_rate": 3.0863863131272265e-05,
|
2028 |
+
"loss": 1.2317,
|
2029 |
+
"step": 2840
|
2030 |
+
},
|
2031 |
+
{
|
2032 |
+
"epoch": 2.4183283835383964,
|
2033 |
+
"grad_norm": 0.2678380639415362,
|
2034 |
+
"learning_rate": 3.07197338555178e-05,
|
2035 |
+
"loss": 1.2466,
|
2036 |
+
"step": 2850
|
2037 |
+
},
|
2038 |
+
{
|
2039 |
+
"epoch": 2.426813746287654,
|
2040 |
+
"grad_norm": 0.3000338098809928,
|
2041 |
+
"learning_rate": 3.0575403689593016e-05,
|
2042 |
+
"loss": 1.2469,
|
2043 |
+
"step": 2860
|
2044 |
+
},
|
2045 |
+
{
|
2046 |
+
"epoch": 2.435299109036911,
|
2047 |
+
"grad_norm": 0.2885428511714088,
|
2048 |
+
"learning_rate": 3.043087770270435e-05,
|
2049 |
+
"loss": 1.241,
|
2050 |
+
"step": 2870
|
2051 |
+
},
|
2052 |
+
{
|
2053 |
+
"epoch": 2.4437844717861688,
|
2054 |
+
"grad_norm": 0.2902606566366597,
|
2055 |
+
"learning_rate": 3.0286160970935906e-05,
|
2056 |
+
"loss": 1.2498,
|
2057 |
+
"step": 2880
|
2058 |
+
},
|
2059 |
+
{
|
2060 |
+
"epoch": 2.4522698345354264,
|
2061 |
+
"grad_norm": 0.2930924599960876,
|
2062 |
+
"learning_rate": 3.0141258577071184e-05,
|
2063 |
+
"loss": 1.2508,
|
2064 |
+
"step": 2890
|
2065 |
+
},
|
2066 |
+
{
|
2067 |
+
"epoch": 2.460755197284684,
|
2068 |
+
"grad_norm": 0.28761403953538467,
|
2069 |
+
"learning_rate": 2.9996175610414572e-05,
|
2070 |
+
"loss": 1.2379,
|
2071 |
+
"step": 2900
|
2072 |
+
},
|
2073 |
+
{
|
2074 |
+
"epoch": 2.4692405600339415,
|
2075 |
+
"grad_norm": 0.28888693356528744,
|
2076 |
+
"learning_rate": 2.9850917166612586e-05,
|
2077 |
+
"loss": 1.2383,
|
2078 |
+
"step": 2910
|
2079 |
+
},
|
2080 |
+
{
|
2081 |
+
"epoch": 2.477725922783199,
|
2082 |
+
"grad_norm": 0.29714323219094924,
|
2083 |
+
"learning_rate": 2.9705488347474896e-05,
|
2084 |
+
"loss": 1.2221,
|
2085 |
+
"step": 2920
|
2086 |
+
},
|
2087 |
+
{
|
2088 |
+
"epoch": 2.4862112855324563,
|
2089 |
+
"grad_norm": 0.3024332099011336,
|
2090 |
+
"learning_rate": 2.9559894260795144e-05,
|
2091 |
+
"loss": 1.2417,
|
2092 |
+
"step": 2930
|
2093 |
+
},
|
2094 |
+
{
|
2095 |
+
"epoch": 2.494696648281714,
|
2096 |
+
"grad_norm": 0.2900123354730048,
|
2097 |
+
"learning_rate": 2.9414140020171554e-05,
|
2098 |
+
"loss": 1.2543,
|
2099 |
+
"step": 2940
|
2100 |
+
},
|
2101 |
+
{
|
2102 |
+
"epoch": 2.5031820110309715,
|
2103 |
+
"grad_norm": 0.30122390943433014,
|
2104 |
+
"learning_rate": 2.926823074482733e-05,
|
2105 |
+
"loss": 1.2542,
|
2106 |
+
"step": 2950
|
2107 |
+
},
|
2108 |
+
{
|
2109 |
+
"epoch": 2.511667373780229,
|
2110 |
+
"grad_norm": 0.2860208265471049,
|
2111 |
+
"learning_rate": 2.912217155943083e-05,
|
2112 |
+
"loss": 1.2335,
|
2113 |
+
"step": 2960
|
2114 |
+
},
|
2115 |
+
{
|
2116 |
+
"epoch": 2.5201527365294867,
|
2117 |
+
"grad_norm": 0.28980498979259595,
|
2118 |
+
"learning_rate": 2.897596759391561e-05,
|
2119 |
+
"loss": 1.2458,
|
2120 |
+
"step": 2970
|
2121 |
+
},
|
2122 |
+
{
|
2123 |
+
"epoch": 2.5286380992787443,
|
2124 |
+
"grad_norm": 0.30074882444504475,
|
2125 |
+
"learning_rate": 2.8829623983300242e-05,
|
2126 |
+
"loss": 1.2498,
|
2127 |
+
"step": 2980
|
2128 |
+
},
|
2129 |
+
{
|
2130 |
+
"epoch": 2.537123462028002,
|
2131 |
+
"grad_norm": 0.2929721105596463,
|
2132 |
+
"learning_rate": 2.868314586750794e-05,
|
2133 |
+
"loss": 1.2686,
|
2134 |
+
"step": 2990
|
2135 |
+
},
|
2136 |
+
{
|
2137 |
+
"epoch": 2.5456088247772595,
|
2138 |
+
"grad_norm": 0.291755235343187,
|
2139 |
+
"learning_rate": 2.853653839118605e-05,
|
2140 |
+
"loss": 1.2456,
|
2141 |
+
"step": 3000
|
2142 |
+
},
|
2143 |
+
{
|
2144 |
+
"epoch": 2.5456088247772595,
|
2145 |
+
"eval_loss": 1.4051239490509033,
|
2146 |
+
"eval_runtime": 52.7875,
|
2147 |
+
"eval_samples_per_second": 7.218,
|
2148 |
+
"eval_steps_per_second": 0.909,
|
2149 |
+
"step": 3000
|
2150 |
+
},
|
2151 |
+
{
|
2152 |
+
"epoch": 2.5540941875265166,
|
2153 |
+
"grad_norm": 0.3056527705148328,
|
2154 |
+
"learning_rate": 2.8389806703525383e-05,
|
2155 |
+
"loss": 1.2321,
|
2156 |
+
"step": 3010
|
2157 |
+
},
|
2158 |
+
{
|
2159 |
+
"epoch": 2.562579550275774,
|
2160 |
+
"grad_norm": 0.29756401069688737,
|
2161 |
+
"learning_rate": 2.8242955958079303e-05,
|
2162 |
+
"loss": 1.2341,
|
2163 |
+
"step": 3020
|
2164 |
+
},
|
2165 |
+
{
|
2166 |
+
"epoch": 2.571064913025032,
|
2167 |
+
"grad_norm": 0.3077048874608071,
|
2168 |
+
"learning_rate": 2.809599131258276e-05,
|
2169 |
+
"loss": 1.2475,
|
2170 |
+
"step": 3030
|
2171 |
+
},
|
2172 |
+
{
|
2173 |
+
"epoch": 2.5795502757742894,
|
2174 |
+
"grad_norm": 0.31006088313098146,
|
2175 |
+
"learning_rate": 2.7948917928771158e-05,
|
2176 |
+
"loss": 1.2381,
|
2177 |
+
"step": 3040
|
2178 |
+
},
|
2179 |
+
{
|
2180 |
+
"epoch": 2.588035638523547,
|
2181 |
+
"grad_norm": 0.3086227102652305,
|
2182 |
+
"learning_rate": 2.7801740972199014e-05,
|
2183 |
+
"loss": 1.2386,
|
2184 |
+
"step": 3050
|
2185 |
+
},
|
2186 |
+
{
|
2187 |
+
"epoch": 2.596521001272804,
|
2188 |
+
"grad_norm": 0.2909420805400902,
|
2189 |
+
"learning_rate": 2.7654465612058573e-05,
|
2190 |
+
"loss": 1.2071,
|
2191 |
+
"step": 3060
|
2192 |
+
},
|
2193 |
+
{
|
2194 |
+
"epoch": 2.6050063640220618,
|
2195 |
+
"grad_norm": 0.30310956499188235,
|
2196 |
+
"learning_rate": 2.7507097020998246e-05,
|
2197 |
+
"loss": 1.2206,
|
2198 |
+
"step": 3070
|
2199 |
+
},
|
2200 |
+
{
|
2201 |
+
"epoch": 2.6134917267713194,
|
2202 |
+
"grad_norm": 0.2873915382033808,
|
2203 |
+
"learning_rate": 2.7359640374940904e-05,
|
2204 |
+
"loss": 1.2346,
|
2205 |
+
"step": 3080
|
2206 |
+
},
|
2207 |
+
{
|
2208 |
+
"epoch": 2.621977089520577,
|
2209 |
+
"grad_norm": 0.29404028686651285,
|
2210 |
+
"learning_rate": 2.7212100852902133e-05,
|
2211 |
+
"loss": 1.2209,
|
2212 |
+
"step": 3090
|
2213 |
+
},
|
2214 |
+
{
|
2215 |
+
"epoch": 2.6304624522698346,
|
2216 |
+
"grad_norm": 0.2967558623710032,
|
2217 |
+
"learning_rate": 2.7064483636808313e-05,
|
2218 |
+
"loss": 1.2471,
|
2219 |
+
"step": 3100
|
2220 |
+
},
|
2221 |
+
{
|
2222 |
+
"epoch": 2.638947815019092,
|
2223 |
+
"grad_norm": 0.28348844201193973,
|
2224 |
+
"learning_rate": 2.6916793911314593e-05,
|
2225 |
+
"loss": 1.2271,
|
2226 |
+
"step": 3110
|
2227 |
+
},
|
2228 |
+
{
|
2229 |
+
"epoch": 2.6474331777683497,
|
2230 |
+
"grad_norm": 0.30908939180701456,
|
2231 |
+
"learning_rate": 2.6769036863622842e-05,
|
2232 |
+
"loss": 1.2348,
|
2233 |
+
"step": 3120
|
2234 |
+
},
|
2235 |
+
{
|
2236 |
+
"epoch": 2.6559185405176073,
|
2237 |
+
"grad_norm": 0.296064586506253,
|
2238 |
+
"learning_rate": 2.6621217683299437e-05,
|
2239 |
+
"loss": 1.2118,
|
2240 |
+
"step": 3130
|
2241 |
+
},
|
2242 |
+
{
|
2243 |
+
"epoch": 2.6644039032668645,
|
2244 |
+
"grad_norm": 0.29223118946191284,
|
2245 |
+
"learning_rate": 2.647334156209299e-05,
|
2246 |
+
"loss": 1.2368,
|
2247 |
+
"step": 3140
|
2248 |
+
},
|
2249 |
+
{
|
2250 |
+
"epoch": 2.672889266016122,
|
2251 |
+
"grad_norm": 0.2974562276968823,
|
2252 |
+
"learning_rate": 2.6325413693752004e-05,
|
2253 |
+
"loss": 1.2392,
|
2254 |
+
"step": 3150
|
2255 |
+
},
|
2256 |
+
{
|
2257 |
+
"epoch": 2.6813746287653797,
|
2258 |
+
"grad_norm": 0.30862646184519243,
|
2259 |
+
"learning_rate": 2.6177439273842463e-05,
|
2260 |
+
"loss": 1.244,
|
2261 |
+
"step": 3160
|
2262 |
+
},
|
2263 |
+
{
|
2264 |
+
"epoch": 2.6898599915146373,
|
2265 |
+
"grad_norm": 0.2958164221091078,
|
2266 |
+
"learning_rate": 2.602942349956536e-05,
|
2267 |
+
"loss": 1.2377,
|
2268 |
+
"step": 3170
|
2269 |
+
},
|
2270 |
+
{
|
2271 |
+
"epoch": 2.698345354263895,
|
2272 |
+
"grad_norm": 0.2941370782364945,
|
2273 |
+
"learning_rate": 2.5881371569574125e-05,
|
2274 |
+
"loss": 1.2296,
|
2275 |
+
"step": 3180
|
2276 |
+
},
|
2277 |
+
{
|
2278 |
+
"epoch": 2.7068307170131525,
|
2279 |
+
"grad_norm": 0.2949825785995608,
|
2280 |
+
"learning_rate": 2.5733288683792084e-05,
|
2281 |
+
"loss": 1.2292,
|
2282 |
+
"step": 3190
|
2283 |
+
},
|
2284 |
+
{
|
2285 |
+
"epoch": 2.7153160797624096,
|
2286 |
+
"grad_norm": 0.3020705479686205,
|
2287 |
+
"learning_rate": 2.558518004322979e-05,
|
2288 |
+
"loss": 1.2371,
|
2289 |
+
"step": 3200
|
2290 |
+
},
|
2291 |
+
{
|
2292 |
+
"epoch": 2.7238014425116672,
|
2293 |
+
"grad_norm": 0.3134012668403533,
|
2294 |
+
"learning_rate": 2.5437050849802356e-05,
|
2295 |
+
"loss": 1.2257,
|
2296 |
+
"step": 3210
|
2297 |
+
},
|
2298 |
+
{
|
2299 |
+
"epoch": 2.732286805260925,
|
2300 |
+
"grad_norm": 0.320012175903156,
|
2301 |
+
"learning_rate": 2.528890630614677e-05,
|
2302 |
+
"loss": 1.215,
|
2303 |
+
"step": 3220
|
2304 |
+
},
|
2305 |
+
{
|
2306 |
+
"epoch": 2.7407721680101824,
|
2307 |
+
"grad_norm": 0.2936053513063533,
|
2308 |
+
"learning_rate": 2.514075161543915e-05,
|
2309 |
+
"loss": 1.2364,
|
2310 |
+
"step": 3230
|
2311 |
+
},
|
2312 |
+
{
|
2313 |
+
"epoch": 2.74925753075944,
|
2314 |
+
"grad_norm": 0.30515854484741317,
|
2315 |
+
"learning_rate": 2.499259198121201e-05,
|
2316 |
+
"loss": 1.2117,
|
2317 |
+
"step": 3240
|
2318 |
+
},
|
2319 |
+
{
|
2320 |
+
"epoch": 2.7577428935086976,
|
2321 |
+
"grad_norm": 0.29763910785937486,
|
2322 |
+
"learning_rate": 2.484443260717147e-05,
|
2323 |
+
"loss": 1.2583,
|
2324 |
+
"step": 3250
|
2325 |
+
},
|
2326 |
+
{
|
2327 |
+
"epoch": 2.766228256257955,
|
2328 |
+
"grad_norm": 0.2975895109826329,
|
2329 |
+
"learning_rate": 2.4696278697014538e-05,
|
2330 |
+
"loss": 1.2153,
|
2331 |
+
"step": 3260
|
2332 |
+
},
|
2333 |
+
{
|
2334 |
+
"epoch": 2.774713619007213,
|
2335 |
+
"grad_norm": 0.2778934357454163,
|
2336 |
+
"learning_rate": 2.4548135454246306e-05,
|
2337 |
+
"loss": 1.2291,
|
2338 |
+
"step": 3270
|
2339 |
+
},
|
2340 |
+
{
|
2341 |
+
"epoch": 2.78319898175647,
|
2342 |
+
"grad_norm": 0.32172302391314234,
|
2343 |
+
"learning_rate": 2.4400008081997196e-05,
|
2344 |
+
"loss": 1.214,
|
2345 |
+
"step": 3280
|
2346 |
+
},
|
2347 |
+
{
|
2348 |
+
"epoch": 2.7916843445057276,
|
2349 |
+
"grad_norm": 0.3008060855751439,
|
2350 |
+
"learning_rate": 2.425190178284024e-05,
|
2351 |
+
"loss": 1.234,
|
2352 |
+
"step": 3290
|
2353 |
+
},
|
2354 |
+
{
|
2355 |
+
"epoch": 2.800169707254985,
|
2356 |
+
"grad_norm": 0.3093107834664431,
|
2357 |
+
"learning_rate": 2.4103821758608307e-05,
|
2358 |
+
"loss": 1.2492,
|
2359 |
+
"step": 3300
|
2360 |
+
},
|
2361 |
+
{
|
2362 |
+
"epoch": 2.8086550700042427,
|
2363 |
+
"grad_norm": 0.30554454329766617,
|
2364 |
+
"learning_rate": 2.3955773210211465e-05,
|
2365 |
+
"loss": 1.2401,
|
2366 |
+
"step": 3310
|
2367 |
+
},
|
2368 |
+
{
|
2369 |
+
"epoch": 2.8171404327535003,
|
2370 |
+
"grad_norm": 0.2997812898388053,
|
2371 |
+
"learning_rate": 2.380776133745425e-05,
|
2372 |
+
"loss": 1.2089,
|
2373 |
+
"step": 3320
|
2374 |
+
},
|
2375 |
+
{
|
2376 |
+
"epoch": 2.8256257955027575,
|
2377 |
+
"grad_norm": 0.2962123179493644,
|
2378 |
+
"learning_rate": 2.3659791338853066e-05,
|
2379 |
+
"loss": 1.2324,
|
2380 |
+
"step": 3330
|
2381 |
+
},
|
2382 |
+
{
|
2383 |
+
"epoch": 2.834111158252015,
|
2384 |
+
"grad_norm": 0.31328021490753843,
|
2385 |
+
"learning_rate": 2.3511868411453623e-05,
|
2386 |
+
"loss": 1.214,
|
2387 |
+
"step": 3340
|
2388 |
+
},
|
2389 |
+
{
|
2390 |
+
"epoch": 2.8425965210012727,
|
2391 |
+
"grad_norm": 0.30414013736166967,
|
2392 |
+
"learning_rate": 2.3363997750648357e-05,
|
2393 |
+
"loss": 1.2142,
|
2394 |
+
"step": 3350
|
2395 |
+
},
|
2396 |
+
{
|
2397 |
+
"epoch": 2.8510818837505303,
|
2398 |
+
"grad_norm": 0.3079683108015467,
|
2399 |
+
"learning_rate": 2.3216184549994006e-05,
|
2400 |
+
"loss": 1.2137,
|
2401 |
+
"step": 3360
|
2402 |
+
},
|
2403 |
+
{
|
2404 |
+
"epoch": 2.859567246499788,
|
2405 |
+
"grad_norm": 0.31165537422962203,
|
2406 |
+
"learning_rate": 2.3068434001029173e-05,
|
2407 |
+
"loss": 1.1915,
|
2408 |
+
"step": 3370
|
2409 |
+
},
|
2410 |
+
{
|
2411 |
+
"epoch": 2.8680526092490455,
|
2412 |
+
"grad_norm": 0.3190451157121095,
|
2413 |
+
"learning_rate": 2.2920751293091948e-05,
|
2414 |
+
"loss": 1.2193,
|
2415 |
+
"step": 3380
|
2416 |
+
},
|
2417 |
+
{
|
2418 |
+
"epoch": 2.876537971998303,
|
2419 |
+
"grad_norm": 0.30247955079343214,
|
2420 |
+
"learning_rate": 2.277314161313774e-05,
|
2421 |
+
"loss": 1.2253,
|
2422 |
+
"step": 3390
|
2423 |
+
},
|
2424 |
+
{
|
2425 |
+
"epoch": 2.8850233347475607,
|
2426 |
+
"grad_norm": 0.2936629891547958,
|
2427 |
+
"learning_rate": 2.262561014555703e-05,
|
2428 |
+
"loss": 1.2136,
|
2429 |
+
"step": 3400
|
2430 |
+
},
|
2431 |
+
{
|
2432 |
+
"epoch": 2.893508697496818,
|
2433 |
+
"grad_norm": 0.3001872886250926,
|
2434 |
+
"learning_rate": 2.2478162071993298e-05,
|
2435 |
+
"loss": 1.2061,
|
2436 |
+
"step": 3410
|
2437 |
+
},
|
2438 |
+
{
|
2439 |
+
"epoch": 2.9019940602460754,
|
2440 |
+
"grad_norm": 0.3111993397471538,
|
2441 |
+
"learning_rate": 2.233080257116103e-05,
|
2442 |
+
"loss": 1.2193,
|
2443 |
+
"step": 3420
|
2444 |
+
},
|
2445 |
+
{
|
2446 |
+
"epoch": 2.910479422995333,
|
2447 |
+
"grad_norm": 0.3119972602479391,
|
2448 |
+
"learning_rate": 2.2183536818663856e-05,
|
2449 |
+
"loss": 1.2125,
|
2450 |
+
"step": 3430
|
2451 |
+
},
|
2452 |
+
{
|
2453 |
+
"epoch": 2.9189647857445906,
|
2454 |
+
"grad_norm": 0.30787460159489605,
|
2455 |
+
"learning_rate": 2.2036369986812713e-05,
|
2456 |
+
"loss": 1.215,
|
2457 |
+
"step": 3440
|
2458 |
+
},
|
2459 |
+
{
|
2460 |
+
"epoch": 2.927450148493848,
|
2461 |
+
"grad_norm": 0.3175794611301324,
|
2462 |
+
"learning_rate": 2.1889307244444252e-05,
|
2463 |
+
"loss": 1.2202,
|
2464 |
+
"step": 3450
|
2465 |
+
},
|
2466 |
+
{
|
2467 |
+
"epoch": 2.9359355112431054,
|
2468 |
+
"grad_norm": 0.2956361362699715,
|
2469 |
+
"learning_rate": 2.1742353756739247e-05,
|
2470 |
+
"loss": 1.2125,
|
2471 |
+
"step": 3460
|
2472 |
+
},
|
2473 |
+
{
|
2474 |
+
"epoch": 2.944420873992363,
|
2475 |
+
"grad_norm": 0.33942331815482624,
|
2476 |
+
"learning_rate": 2.1595514685041205e-05,
|
2477 |
+
"loss": 1.2173,
|
2478 |
+
"step": 3470
|
2479 |
+
},
|
2480 |
+
{
|
2481 |
+
"epoch": 2.9529062367416206,
|
2482 |
+
"grad_norm": 0.332180923100718,
|
2483 |
+
"learning_rate": 2.144879518667507e-05,
|
2484 |
+
"loss": 1.2266,
|
2485 |
+
"step": 3480
|
2486 |
+
},
|
2487 |
+
{
|
2488 |
+
"epoch": 2.961391599490878,
|
2489 |
+
"grad_norm": 0.3157083099981729,
|
2490 |
+
"learning_rate": 2.1302200414766123e-05,
|
2491 |
+
"loss": 1.2154,
|
2492 |
+
"step": 3490
|
2493 |
+
},
|
2494 |
+
{
|
2495 |
+
"epoch": 2.9698769622401358,
|
2496 |
+
"grad_norm": 0.3149298205272042,
|
2497 |
+
"learning_rate": 2.1155735518058914e-05,
|
2498 |
+
"loss": 1.2232,
|
2499 |
+
"step": 3500
|
2500 |
+
},
|
2501 |
+
{
|
2502 |
+
"epoch": 2.9783623249893934,
|
2503 |
+
"grad_norm": 0.3132779789709915,
|
2504 |
+
"learning_rate": 2.100940564073653e-05,
|
2505 |
+
"loss": 1.2299,
|
2506 |
+
"step": 3510
|
2507 |
+
},
|
2508 |
+
{
|
2509 |
+
"epoch": 2.986847687738651,
|
2510 |
+
"grad_norm": 0.29387121758746726,
|
2511 |
+
"learning_rate": 2.086321592223984e-05,
|
2512 |
+
"loss": 1.2219,
|
2513 |
+
"step": 3520
|
2514 |
+
},
|
2515 |
+
{
|
2516 |
+
"epoch": 2.9953330504879085,
|
2517 |
+
"grad_norm": 0.2954282506485496,
|
2518 |
+
"learning_rate": 2.0717171497087014e-05,
|
2519 |
+
"loss": 1.2321,
|
2520 |
+
"step": 3530
|
2521 |
+
},
|
2522 |
+
{
|
2523 |
+
"epoch": 3.0038184132371657,
|
2524 |
+
"grad_norm": 0.2991107469221935,
|
2525 |
+
"learning_rate": 2.057127749469321e-05,
|
2526 |
+
"loss": 1.2387,
|
2527 |
+
"step": 3540
|
2528 |
+
},
|
2529 |
+
{
|
2530 |
+
"epoch": 3.0123037759864233,
|
2531 |
+
"grad_norm": 0.3135732189536929,
|
2532 |
+
"learning_rate": 2.042553903919036e-05,
|
2533 |
+
"loss": 1.1058,
|
2534 |
+
"step": 3550
|
2535 |
+
},
|
2536 |
+
{
|
2537 |
+
"epoch": 3.020789138735681,
|
2538 |
+
"grad_norm": 0.3179199807851635,
|
2539 |
+
"learning_rate": 2.0279961249247274e-05,
|
2540 |
+
"loss": 1.0677,
|
2541 |
+
"step": 3560
|
2542 |
+
},
|
2543 |
+
{
|
2544 |
+
"epoch": 3.0292745014849385,
|
2545 |
+
"grad_norm": 0.3217398418422315,
|
2546 |
+
"learning_rate": 2.0134549237889765e-05,
|
2547 |
+
"loss": 1.0978,
|
2548 |
+
"step": 3570
|
2549 |
+
},
|
2550 |
+
{
|
2551 |
+
"epoch": 3.037759864234196,
|
2552 |
+
"grad_norm": 0.32343352041544976,
|
2553 |
+
"learning_rate": 1.9989308112321164e-05,
|
2554 |
+
"loss": 1.0791,
|
2555 |
+
"step": 3580
|
2556 |
+
},
|
2557 |
+
{
|
2558 |
+
"epoch": 3.0462452269834537,
|
2559 |
+
"grad_norm": 0.32321267638520695,
|
2560 |
+
"learning_rate": 1.9844242973742886e-05,
|
2561 |
+
"loss": 1.0991,
|
2562 |
+
"step": 3590
|
2563 |
+
},
|
2564 |
+
{
|
2565 |
+
"epoch": 3.0547305897327113,
|
2566 |
+
"grad_norm": 0.321242777420917,
|
2567 |
+
"learning_rate": 1.9699358917175297e-05,
|
2568 |
+
"loss": 1.104,
|
2569 |
+
"step": 3600
|
2570 |
+
},
|
2571 |
+
{
|
2572 |
+
"epoch": 3.0547305897327113,
|
2573 |
+
"eval_loss": 1.3918192386627197,
|
2574 |
+
"eval_runtime": 52.2671,
|
2575 |
+
"eval_samples_per_second": 7.289,
|
2576 |
+
"eval_steps_per_second": 0.918,
|
2577 |
+
"step": 3600
|
2578 |
+
},
|
2579 |
+
{
|
2580 |
+
"epoch": 3.0632159524819684,
|
2581 |
+
"grad_norm": 0.3313441880623986,
|
2582 |
+
"learning_rate": 1.9554661031278712e-05,
|
2583 |
+
"loss": 1.081,
|
2584 |
+
"step": 3610
|
2585 |
+
},
|
2586 |
+
{
|
2587 |
+
"epoch": 3.071701315231226,
|
2588 |
+
"grad_norm": 0.3866269219140372,
|
2589 |
+
"learning_rate": 1.9410154398174742e-05,
|
2590 |
+
"loss": 1.0826,
|
2591 |
+
"step": 3620
|
2592 |
+
},
|
2593 |
+
{
|
2594 |
+
"epoch": 3.0801866779804836,
|
2595 |
+
"grad_norm": 0.32719602353062216,
|
2596 |
+
"learning_rate": 1.9265844093267728e-05,
|
2597 |
+
"loss": 1.0934,
|
2598 |
+
"step": 3630
|
2599 |
+
},
|
2600 |
+
{
|
2601 |
+
"epoch": 3.088672040729741,
|
2602 |
+
"grad_norm": 0.3301678171750988,
|
2603 |
+
"learning_rate": 1.9121735185066537e-05,
|
2604 |
+
"loss": 1.1047,
|
2605 |
+
"step": 3640
|
2606 |
+
},
|
2607 |
+
{
|
2608 |
+
"epoch": 3.097157403478999,
|
2609 |
+
"grad_norm": 0.35644796539197005,
|
2610 |
+
"learning_rate": 1.8977832735006522e-05,
|
2611 |
+
"loss": 1.0994,
|
2612 |
+
"step": 3650
|
2613 |
+
},
|
2614 |
+
{
|
2615 |
+
"epoch": 3.1056427662282564,
|
2616 |
+
"grad_norm": 0.33733028940251475,
|
2617 |
+
"learning_rate": 1.8834141797271742e-05,
|
2618 |
+
"loss": 1.0972,
|
2619 |
+
"step": 3660
|
2620 |
+
},
|
2621 |
+
{
|
2622 |
+
"epoch": 3.114128128977514,
|
2623 |
+
"grad_norm": 0.32548774712269085,
|
2624 |
+
"learning_rate": 1.8690667418617462e-05,
|
2625 |
+
"loss": 1.1046,
|
2626 |
+
"step": 3670
|
2627 |
+
},
|
2628 |
+
{
|
2629 |
+
"epoch": 3.122613491726771,
|
2630 |
+
"grad_norm": 0.3117109384467469,
|
2631 |
+
"learning_rate": 1.854741463819291e-05,
|
2632 |
+
"loss": 1.0791,
|
2633 |
+
"step": 3680
|
2634 |
+
},
|
2635 |
+
{
|
2636 |
+
"epoch": 3.1310988544760288,
|
2637 |
+
"grad_norm": 0.3240987091142989,
|
2638 |
+
"learning_rate": 1.8404388487364242e-05,
|
2639 |
+
"loss": 1.0824,
|
2640 |
+
"step": 3690
|
2641 |
+
},
|
2642 |
+
{
|
2643 |
+
"epoch": 3.1395842172252864,
|
2644 |
+
"grad_norm": 0.3346401099182515,
|
2645 |
+
"learning_rate": 1.8261593989537895e-05,
|
2646 |
+
"loss": 1.0753,
|
2647 |
+
"step": 3700
|
2648 |
+
},
|
2649 |
+
{
|
2650 |
+
"epoch": 3.148069579974544,
|
2651 |
+
"grad_norm": 0.3270030472928521,
|
2652 |
+
"learning_rate": 1.81190361599841e-05,
|
2653 |
+
"loss": 1.0934,
|
2654 |
+
"step": 3710
|
2655 |
+
},
|
2656 |
+
{
|
2657 |
+
"epoch": 3.1565549427238015,
|
2658 |
+
"grad_norm": 0.34129438803355183,
|
2659 |
+
"learning_rate": 1.797672000566077e-05,
|
2660 |
+
"loss": 1.0766,
|
2661 |
+
"step": 3720
|
2662 |
+
},
|
2663 |
+
{
|
2664 |
+
"epoch": 3.165040305473059,
|
2665 |
+
"grad_norm": 0.37057591586243926,
|
2666 |
+
"learning_rate": 1.783465052503762e-05,
|
2667 |
+
"loss": 1.1049,
|
2668 |
+
"step": 3730
|
2669 |
+
},
|
2670 |
+
{
|
2671 |
+
"epoch": 3.1735256682223163,
|
2672 |
+
"grad_norm": 0.3331237337555744,
|
2673 |
+
"learning_rate": 1.769283270792065e-05,
|
2674 |
+
"loss": 1.0876,
|
2675 |
+
"step": 3740
|
2676 |
+
},
|
2677 |
+
{
|
2678 |
+
"epoch": 3.182011030971574,
|
2679 |
+
"grad_norm": 0.31904083541369294,
|
2680 |
+
"learning_rate": 1.7551271535276792e-05,
|
2681 |
+
"loss": 1.1206,
|
2682 |
+
"step": 3750
|
2683 |
+
},
|
2684 |
+
{
|
2685 |
+
"epoch": 3.1904963937208315,
|
2686 |
+
"grad_norm": 0.35953020953263576,
|
2687 |
+
"learning_rate": 1.74099719790591e-05,
|
2688 |
+
"loss": 1.0736,
|
2689 |
+
"step": 3760
|
2690 |
+
},
|
2691 |
+
{
|
2692 |
+
"epoch": 3.198981756470089,
|
2693 |
+
"grad_norm": 0.33595544857573634,
|
2694 |
+
"learning_rate": 1.7268939002032035e-05,
|
2695 |
+
"loss": 1.0969,
|
2696 |
+
"step": 3770
|
2697 |
+
},
|
2698 |
+
{
|
2699 |
+
"epoch": 3.2074671192193467,
|
2700 |
+
"grad_norm": 0.34180259956288195,
|
2701 |
+
"learning_rate": 1.7128177557597185e-05,
|
2702 |
+
"loss": 1.0972,
|
2703 |
+
"step": 3780
|
2704 |
+
},
|
2705 |
+
{
|
2706 |
+
"epoch": 3.2159524819686043,
|
2707 |
+
"grad_norm": 0.3268247890892541,
|
2708 |
+
"learning_rate": 1.6987692589619304e-05,
|
2709 |
+
"loss": 1.0737,
|
2710 |
+
"step": 3790
|
2711 |
+
},
|
2712 |
+
{
|
2713 |
+
"epoch": 3.224437844717862,
|
2714 |
+
"grad_norm": 0.3207792660130559,
|
2715 |
+
"learning_rate": 1.6847489032252627e-05,
|
2716 |
+
"loss": 1.0797,
|
2717 |
+
"step": 3800
|
2718 |
+
},
|
2719 |
+
{
|
2720 |
+
"epoch": 3.232923207467119,
|
2721 |
+
"grad_norm": 0.3467270107463577,
|
2722 |
+
"learning_rate": 1.6707571809767644e-05,
|
2723 |
+
"loss": 1.1024,
|
2724 |
+
"step": 3810
|
2725 |
+
},
|
2726 |
+
{
|
2727 |
+
"epoch": 3.2414085702163766,
|
2728 |
+
"grad_norm": 0.35579083917156773,
|
2729 |
+
"learning_rate": 1.656794583637807e-05,
|
2730 |
+
"loss": 1.1026,
|
2731 |
+
"step": 3820
|
2732 |
+
},
|
2733 |
+
{
|
2734 |
+
"epoch": 3.2498939329656342,
|
2735 |
+
"grad_norm": 0.3339161504484877,
|
2736 |
+
"learning_rate": 1.6428616016068304e-05,
|
2737 |
+
"loss": 1.0866,
|
2738 |
+
"step": 3830
|
2739 |
+
},
|
2740 |
+
{
|
2741 |
+
"epoch": 3.258379295714892,
|
2742 |
+
"grad_norm": 0.34920082505086103,
|
2743 |
+
"learning_rate": 1.628958724242117e-05,
|
2744 |
+
"loss": 1.0789,
|
2745 |
+
"step": 3840
|
2746 |
+
},
|
2747 |
+
{
|
2748 |
+
"epoch": 3.2668646584641494,
|
2749 |
+
"grad_norm": 0.34847114740056345,
|
2750 |
+
"learning_rate": 1.615086439844604e-05,
|
2751 |
+
"loss": 1.0849,
|
2752 |
+
"step": 3850
|
2753 |
+
},
|
2754 |
+
{
|
2755 |
+
"epoch": 3.275350021213407,
|
2756 |
+
"grad_norm": 0.3164317420955498,
|
2757 |
+
"learning_rate": 1.601245235640733e-05,
|
2758 |
+
"loss": 1.0984,
|
2759 |
+
"step": 3860
|
2760 |
+
},
|
2761 |
+
{
|
2762 |
+
"epoch": 3.283835383962664,
|
2763 |
+
"grad_norm": 0.32036510223283066,
|
2764 |
+
"learning_rate": 1.5874355977653392e-05,
|
2765 |
+
"loss": 1.1122,
|
2766 |
+
"step": 3870
|
2767 |
+
},
|
2768 |
+
{
|
2769 |
+
"epoch": 3.2923207467119218,
|
2770 |
+
"grad_norm": 0.3618051784441363,
|
2771 |
+
"learning_rate": 1.5736580112445738e-05,
|
2772 |
+
"loss": 1.0942,
|
2773 |
+
"step": 3880
|
2774 |
+
},
|
2775 |
+
{
|
2776 |
+
"epoch": 3.3008061094611794,
|
2777 |
+
"grad_norm": 0.3376776049321294,
|
2778 |
+
"learning_rate": 1.559912959978872e-05,
|
2779 |
+
"loss": 1.0898,
|
2780 |
+
"step": 3890
|
2781 |
+
},
|
2782 |
+
{
|
2783 |
+
"epoch": 3.309291472210437,
|
2784 |
+
"grad_norm": 0.3402374603685398,
|
2785 |
+
"learning_rate": 1.546200926725958e-05,
|
2786 |
+
"loss": 1.1061,
|
2787 |
+
"step": 3900
|
2788 |
+
},
|
2789 |
+
{
|
2790 |
+
"epoch": 3.3177768349596946,
|
2791 |
+
"grad_norm": 0.32072793842909514,
|
2792 |
+
"learning_rate": 1.5325223930838838e-05,
|
2793 |
+
"loss": 1.0995,
|
2794 |
+
"step": 3910
|
2795 |
+
},
|
2796 |
+
{
|
2797 |
+
"epoch": 3.326262197708952,
|
2798 |
+
"grad_norm": 0.336633795191635,
|
2799 |
+
"learning_rate": 1.518877839474122e-05,
|
2800 |
+
"loss": 1.0971,
|
2801 |
+
"step": 3920
|
2802 |
+
},
|
2803 |
+
{
|
2804 |
+
"epoch": 3.3347475604582097,
|
2805 |
+
"grad_norm": 0.36554031780441654,
|
2806 |
+
"learning_rate": 1.5052677451246877e-05,
|
2807 |
+
"loss": 1.0867,
|
2808 |
+
"step": 3930
|
2809 |
+
},
|
2810 |
+
{
|
2811 |
+
"epoch": 3.3432329232074673,
|
2812 |
+
"grad_norm": 0.3569408014521597,
|
2813 |
+
"learning_rate": 1.491692588053305e-05,
|
2814 |
+
"loss": 1.0992,
|
2815 |
+
"step": 3940
|
2816 |
+
},
|
2817 |
+
{
|
2818 |
+
"epoch": 3.3517182859567245,
|
2819 |
+
"grad_norm": 0.33844743386504,
|
2820 |
+
"learning_rate": 1.4781528450506232e-05,
|
2821 |
+
"loss": 1.103,
|
2822 |
+
"step": 3950
|
2823 |
+
},
|
2824 |
+
{
|
2825 |
+
"epoch": 3.360203648705982,
|
2826 |
+
"grad_norm": 0.31581742738052115,
|
2827 |
+
"learning_rate": 1.4646489916634687e-05,
|
2828 |
+
"loss": 1.0843,
|
2829 |
+
"step": 3960
|
2830 |
+
},
|
2831 |
+
{
|
2832 |
+
"epoch": 3.3686890114552397,
|
2833 |
+
"grad_norm": 0.3882340052077705,
|
2834 |
+
"learning_rate": 1.4511815021781411e-05,
|
2835 |
+
"loss": 1.1146,
|
2836 |
+
"step": 3970
|
2837 |
+
},
|
2838 |
+
{
|
2839 |
+
"epoch": 3.3771743742044973,
|
2840 |
+
"grad_norm": 0.3309843437274728,
|
2841 |
+
"learning_rate": 1.4377508496037567e-05,
|
2842 |
+
"loss": 1.0751,
|
2843 |
+
"step": 3980
|
2844 |
+
},
|
2845 |
+
{
|
2846 |
+
"epoch": 3.385659736953755,
|
2847 |
+
"grad_norm": 0.34050270685357675,
|
2848 |
+
"learning_rate": 1.4243575056556355e-05,
|
2849 |
+
"loss": 1.0995,
|
2850 |
+
"step": 3990
|
2851 |
+
},
|
2852 |
+
{
|
2853 |
+
"epoch": 3.3941450997030125,
|
2854 |
+
"grad_norm": 0.3459308872000444,
|
2855 |
+
"learning_rate": 1.4110019407387315e-05,
|
2856 |
+
"loss": 1.0817,
|
2857 |
+
"step": 4000
|
2858 |
+
},
|
2859 |
+
{
|
2860 |
+
"epoch": 3.4026304624522696,
|
2861 |
+
"grad_norm": 0.337212090487825,
|
2862 |
+
"learning_rate": 1.3976846239311128e-05,
|
2863 |
+
"loss": 1.1086,
|
2864 |
+
"step": 4010
|
2865 |
+
},
|
2866 |
+
{
|
2867 |
+
"epoch": 3.4111158252015272,
|
2868 |
+
"grad_norm": 0.3351805063766743,
|
2869 |
+
"learning_rate": 1.384406022967489e-05,
|
2870 |
+
"loss": 1.0919,
|
2871 |
+
"step": 4020
|
2872 |
+
},
|
2873 |
+
{
|
2874 |
+
"epoch": 3.419601187950785,
|
2875 |
+
"grad_norm": 0.3398996417599571,
|
2876 |
+
"learning_rate": 1.3711666042227772e-05,
|
2877 |
+
"loss": 1.0808,
|
2878 |
+
"step": 4030
|
2879 |
+
},
|
2880 |
+
{
|
2881 |
+
"epoch": 3.4280865507000424,
|
2882 |
+
"grad_norm": 0.3585847037493432,
|
2883 |
+
"learning_rate": 1.357966832695725e-05,
|
2884 |
+
"loss": 1.1169,
|
2885 |
+
"step": 4040
|
2886 |
+
},
|
2887 |
+
{
|
2888 |
+
"epoch": 3.4365719134493,
|
2889 |
+
"grad_norm": 0.3229408856119979,
|
2890 |
+
"learning_rate": 1.3448071719925826e-05,
|
2891 |
+
"loss": 1.1041,
|
2892 |
+
"step": 4050
|
2893 |
+
},
|
2894 |
+
{
|
2895 |
+
"epoch": 3.4450572761985576,
|
2896 |
+
"grad_norm": 0.34474446572567957,
|
2897 |
+
"learning_rate": 1.331688084310812e-05,
|
2898 |
+
"loss": 1.0772,
|
2899 |
+
"step": 4060
|
2900 |
+
},
|
2901 |
+
{
|
2902 |
+
"epoch": 3.453542638947815,
|
2903 |
+
"grad_norm": 0.3189660506507834,
|
2904 |
+
"learning_rate": 1.3186100304228594e-05,
|
2905 |
+
"loss": 1.1082,
|
2906 |
+
"step": 4070
|
2907 |
+
},
|
2908 |
+
{
|
2909 |
+
"epoch": 3.4620280016970724,
|
2910 |
+
"grad_norm": 0.3609045764903581,
|
2911 |
+
"learning_rate": 1.3055734696599686e-05,
|
2912 |
+
"loss": 1.0815,
|
2913 |
+
"step": 4080
|
2914 |
+
},
|
2915 |
+
{
|
2916 |
+
"epoch": 3.47051336444633,
|
2917 |
+
"grad_norm": 0.35237020522576973,
|
2918 |
+
"learning_rate": 1.292578859896053e-05,
|
2919 |
+
"loss": 1.1176,
|
2920 |
+
"step": 4090
|
2921 |
+
},
|
2922 |
+
{
|
2923 |
+
"epoch": 3.4789987271955876,
|
2924 |
+
"grad_norm": 0.3189367280387807,
|
2925 |
+
"learning_rate": 1.2796266575316069e-05,
|
2926 |
+
"loss": 1.0826,
|
2927 |
+
"step": 4100
|
2928 |
+
},
|
2929 |
+
{
|
2930 |
+
"epoch": 3.487484089944845,
|
2931 |
+
"grad_norm": 0.34730662545931273,
|
2932 |
+
"learning_rate": 1.2667173174776823e-05,
|
2933 |
+
"loss": 1.0908,
|
2934 |
+
"step": 4110
|
2935 |
+
},
|
2936 |
+
{
|
2937 |
+
"epoch": 3.4959694526941028,
|
2938 |
+
"grad_norm": 0.34138804654797594,
|
2939 |
+
"learning_rate": 1.2538512931399072e-05,
|
2940 |
+
"loss": 1.0769,
|
2941 |
+
"step": 4120
|
2942 |
+
},
|
2943 |
+
{
|
2944 |
+
"epoch": 3.5044548154433603,
|
2945 |
+
"grad_norm": 0.33424304735568794,
|
2946 |
+
"learning_rate": 1.2410290364025623e-05,
|
2947 |
+
"loss": 1.0795,
|
2948 |
+
"step": 4130
|
2949 |
+
},
|
2950 |
+
{
|
2951 |
+
"epoch": 3.5129401781926175,
|
2952 |
+
"grad_norm": 0.3313504435273678,
|
2953 |
+
"learning_rate": 1.2282509976127098e-05,
|
2954 |
+
"loss": 1.0878,
|
2955 |
+
"step": 4140
|
2956 |
+
},
|
2957 |
+
{
|
2958 |
+
"epoch": 3.521425540941875,
|
2959 |
+
"grad_norm": 0.3233457240542579,
|
2960 |
+
"learning_rate": 1.215517625564376e-05,
|
2961 |
+
"loss": 1.0892,
|
2962 |
+
"step": 4150
|
2963 |
+
},
|
2964 |
+
{
|
2965 |
+
"epoch": 3.5299109036911327,
|
2966 |
+
"grad_norm": 0.36342717886744974,
|
2967 |
+
"learning_rate": 1.2028293674827909e-05,
|
2968 |
+
"loss": 1.1121,
|
2969 |
+
"step": 4160
|
2970 |
+
},
|
2971 |
+
{
|
2972 |
+
"epoch": 3.5383962664403903,
|
2973 |
+
"grad_norm": 0.3384633320066937,
|
2974 |
+
"learning_rate": 1.1901866690086747e-05,
|
2975 |
+
"loss": 1.0873,
|
2976 |
+
"step": 4170
|
2977 |
+
},
|
2978 |
+
{
|
2979 |
+
"epoch": 3.546881629189648,
|
2980 |
+
"grad_norm": 0.3478508212091021,
|
2981 |
+
"learning_rate": 1.1775899741825947e-05,
|
2982 |
+
"loss": 1.1074,
|
2983 |
+
"step": 4180
|
2984 |
+
},
|
2985 |
+
{
|
2986 |
+
"epoch": 3.5553669919389055,
|
2987 |
+
"grad_norm": 0.33728300112135334,
|
2988 |
+
"learning_rate": 1.1650397254293583e-05,
|
2989 |
+
"loss": 1.0962,
|
2990 |
+
"step": 4190
|
2991 |
+
},
|
2992 |
+
{
|
2993 |
+
"epoch": 3.563852354688163,
|
2994 |
+
"grad_norm": 0.3237234384231759,
|
2995 |
+
"learning_rate": 1.1525363635424863e-05,
|
2996 |
+
"loss": 1.0918,
|
2997 |
+
"step": 4200
|
2998 |
+
},
|
2999 |
+
{
|
3000 |
+
"epoch": 3.563852354688163,
|
3001 |
+
"eval_loss": 1.3741682767868042,
|
3002 |
+
"eval_runtime": 52.4151,
|
3003 |
+
"eval_samples_per_second": 7.269,
|
3004 |
+
"eval_steps_per_second": 0.916,
|
3005 |
+
"step": 4200
|
3006 |
+
},
|
3007 |
+
{
|
3008 |
+
"epoch": 3.5723377174374207,
|
3009 |
+
"grad_norm": 0.35478171271989195,
|
3010 |
+
"learning_rate": 1.1400803276687208e-05,
|
3011 |
+
"loss": 1.0845,
|
3012 |
+
"step": 4210
|
3013 |
+
},
|
3014 |
+
{
|
3015 |
+
"epoch": 3.580823080186678,
|
3016 |
+
"grad_norm": 0.37888228352789066,
|
3017 |
+
"learning_rate": 1.1276720552926096e-05,
|
3018 |
+
"loss": 1.0622,
|
3019 |
+
"step": 4220
|
3020 |
+
},
|
3021 |
+
{
|
3022 |
+
"epoch": 3.5893084429359354,
|
3023 |
+
"grad_norm": 0.3279788866194937,
|
3024 |
+
"learning_rate": 1.1153119822211338e-05,
|
3025 |
+
"loss": 1.1021,
|
3026 |
+
"step": 4230
|
3027 |
+
},
|
3028 |
+
{
|
3029 |
+
"epoch": 3.597793805685193,
|
3030 |
+
"grad_norm": 0.3240797704044235,
|
3031 |
+
"learning_rate": 1.103000542568406e-05,
|
3032 |
+
"loss": 1.0931,
|
3033 |
+
"step": 4240
|
3034 |
+
},
|
3035 |
+
{
|
3036 |
+
"epoch": 3.6062791684344506,
|
3037 |
+
"grad_norm": 0.3326855671061254,
|
3038 |
+
"learning_rate": 1.0907381687404206e-05,
|
3039 |
+
"loss": 1.0856,
|
3040 |
+
"step": 4250
|
3041 |
+
},
|
3042 |
+
{
|
3043 |
+
"epoch": 3.614764531183708,
|
3044 |
+
"grad_norm": 0.3667803858944268,
|
3045 |
+
"learning_rate": 1.0785252914198676e-05,
|
3046 |
+
"loss": 1.0987,
|
3047 |
+
"step": 4260
|
3048 |
+
},
|
3049 |
+
{
|
3050 |
+
"epoch": 3.6232498939329654,
|
3051 |
+
"grad_norm": 0.36473787559668763,
|
3052 |
+
"learning_rate": 1.0663623395510087e-05,
|
3053 |
+
"loss": 1.0817,
|
3054 |
+
"step": 4270
|
3055 |
+
},
|
3056 |
+
{
|
3057 |
+
"epoch": 3.631735256682223,
|
3058 |
+
"grad_norm": 0.3409471312744712,
|
3059 |
+
"learning_rate": 1.0542497403246055e-05,
|
3060 |
+
"loss": 1.0817,
|
3061 |
+
"step": 4280
|
3062 |
+
},
|
3063 |
+
{
|
3064 |
+
"epoch": 3.6402206194314806,
|
3065 |
+
"grad_norm": 0.3432913496959211,
|
3066 |
+
"learning_rate": 1.0421879191629227e-05,
|
3067 |
+
"loss": 1.1028,
|
3068 |
+
"step": 4290
|
3069 |
+
},
|
3070 |
+
{
|
3071 |
+
"epoch": 3.648705982180738,
|
3072 |
+
"grad_norm": 0.3432540649401779,
|
3073 |
+
"learning_rate": 1.0301772997047809e-05,
|
3074 |
+
"loss": 1.1015,
|
3075 |
+
"step": 4300
|
3076 |
+
},
|
3077 |
+
{
|
3078 |
+
"epoch": 3.6571913449299958,
|
3079 |
+
"grad_norm": 0.33346746301829316,
|
3080 |
+
"learning_rate": 1.0182183037906799e-05,
|
3081 |
+
"loss": 1.0731,
|
3082 |
+
"step": 4310
|
3083 |
+
},
|
3084 |
+
{
|
3085 |
+
"epoch": 3.6656767076792534,
|
3086 |
+
"grad_norm": 0.3413502676678206,
|
3087 |
+
"learning_rate": 1.0063113514479809e-05,
|
3088 |
+
"loss": 1.0712,
|
3089 |
+
"step": 4320
|
3090 |
+
},
|
3091 |
+
{
|
3092 |
+
"epoch": 3.674162070428511,
|
3093 |
+
"grad_norm": 0.3285225243235751,
|
3094 |
+
"learning_rate": 9.94456860876159e-06,
|
3095 |
+
"loss": 1.0848,
|
3096 |
+
"step": 4330
|
3097 |
+
},
|
3098 |
+
{
|
3099 |
+
"epoch": 3.6826474331777685,
|
3100 |
+
"grad_norm": 0.3388803496844761,
|
3101 |
+
"learning_rate": 9.826552484321087e-06,
|
3102 |
+
"loss": 1.0819,
|
3103 |
+
"step": 4340
|
3104 |
+
},
|
3105 |
+
{
|
3106 |
+
"epoch": 3.691132795927026,
|
3107 |
+
"grad_norm": 0.33054778123683576,
|
3108 |
+
"learning_rate": 9.709069286155231e-06,
|
3109 |
+
"loss": 1.0865,
|
3110 |
+
"step": 4350
|
3111 |
+
},
|
3112 |
+
{
|
3113 |
+
"epoch": 3.6996181586762833,
|
3114 |
+
"grad_norm": 0.33512423378595196,
|
3115 |
+
"learning_rate": 9.592123140543388e-06,
|
3116 |
+
"loss": 1.0799,
|
3117 |
+
"step": 4360
|
3118 |
+
},
|
3119 |
+
{
|
3120 |
+
"epoch": 3.708103521425541,
|
3121 |
+
"grad_norm": 0.33282736103466287,
|
3122 |
+
"learning_rate": 9.475718154902382e-06,
|
3123 |
+
"loss": 1.089,
|
3124 |
+
"step": 4370
|
3125 |
+
},
|
3126 |
+
{
|
3127 |
+
"epoch": 3.7165888841747985,
|
3128 |
+
"grad_norm": 0.33746488116219714,
|
3129 |
+
"learning_rate": 9.359858417642266e-06,
|
3130 |
+
"loss": 1.1047,
|
3131 |
+
"step": 4380
|
3132 |
+
},
|
3133 |
+
{
|
3134 |
+
"epoch": 3.725074246924056,
|
3135 |
+
"grad_norm": 0.35251902658665213,
|
3136 |
+
"learning_rate": 9.244547998022709e-06,
|
3137 |
+
"loss": 1.0897,
|
3138 |
+
"step": 4390
|
3139 |
+
},
|
3140 |
+
{
|
3141 |
+
"epoch": 3.7335596096733137,
|
3142 |
+
"grad_norm": 0.32061079556576844,
|
3143 |
+
"learning_rate": 9.12979094601011e-06,
|
3144 |
+
"loss": 1.0873,
|
3145 |
+
"step": 4400
|
3146 |
+
},
|
3147 |
+
{
|
3148 |
+
"epoch": 3.742044972422571,
|
3149 |
+
"grad_norm": 0.3339779862802376,
|
3150 |
+
"learning_rate": 9.0155912921353e-06,
|
3151 |
+
"loss": 1.0976,
|
3152 |
+
"step": 4410
|
3153 |
+
},
|
3154 |
+
{
|
3155 |
+
"epoch": 3.7505303351718284,
|
3156 |
+
"grad_norm": 0.33137105564248603,
|
3157 |
+
"learning_rate": 8.901953047352032e-06,
|
3158 |
+
"loss": 1.0815,
|
3159 |
+
"step": 4420
|
3160 |
+
},
|
3161 |
+
{
|
3162 |
+
"epoch": 3.759015697921086,
|
3163 |
+
"grad_norm": 0.3421991932531284,
|
3164 |
+
"learning_rate": 8.788880202896072e-06,
|
3165 |
+
"loss": 1.0962,
|
3166 |
+
"step": 4430
|
3167 |
+
},
|
3168 |
+
{
|
3169 |
+
"epoch": 3.7675010606703436,
|
3170 |
+
"grad_norm": 0.34520719229693314,
|
3171 |
+
"learning_rate": 8.676376730145031e-06,
|
3172 |
+
"loss": 1.0862,
|
3173 |
+
"step": 4440
|
3174 |
+
},
|
3175 |
+
{
|
3176 |
+
"epoch": 3.7759864234196012,
|
3177 |
+
"grad_norm": 0.3472744469623473,
|
3178 |
+
"learning_rate": 8.564446580478877e-06,
|
3179 |
+
"loss": 1.0906,
|
3180 |
+
"step": 4450
|
3181 |
+
},
|
3182 |
+
{
|
3183 |
+
"epoch": 3.784471786168859,
|
3184 |
+
"grad_norm": 0.3247372551943734,
|
3185 |
+
"learning_rate": 8.453093685141156e-06,
|
3186 |
+
"loss": 1.0892,
|
3187 |
+
"step": 4460
|
3188 |
+
},
|
3189 |
+
{
|
3190 |
+
"epoch": 3.7929571489181164,
|
3191 |
+
"grad_norm": 0.32380813984188905,
|
3192 |
+
"learning_rate": 8.342321955100935e-06,
|
3193 |
+
"loss": 1.1084,
|
3194 |
+
"step": 4470
|
3195 |
+
},
|
3196 |
+
{
|
3197 |
+
"epoch": 3.801442511667374,
|
3198 |
+
"grad_norm": 0.3379339847312466,
|
3199 |
+
"learning_rate": 8.232135280915398e-06,
|
3200 |
+
"loss": 1.0751,
|
3201 |
+
"step": 4480
|
3202 |
+
},
|
3203 |
+
{
|
3204 |
+
"epoch": 3.809927874416631,
|
3205 |
+
"grad_norm": 0.34446270018278696,
|
3206 |
+
"learning_rate": 8.122537532593264e-06,
|
3207 |
+
"loss": 1.0934,
|
3208 |
+
"step": 4490
|
3209 |
+
},
|
3210 |
+
{
|
3211 |
+
"epoch": 3.8184132371658888,
|
3212 |
+
"grad_norm": 0.34800470636637415,
|
3213 |
+
"learning_rate": 8.013532559458761e-06,
|
3214 |
+
"loss": 1.0896,
|
3215 |
+
"step": 4500
|
3216 |
+
}
|
3217 |
+
],
|
3218 |
+
"logging_steps": 10,
|
3219 |
+
"max_steps": 5890,
|
3220 |
+
"num_input_tokens_seen": 0,
|
3221 |
+
"num_train_epochs": 5,
|
3222 |
+
"save_steps": 500,
|
3223 |
+
"stateful_callbacks": {
|
3224 |
+
"TrainerControl": {
|
3225 |
+
"args": {
|
3226 |
+
"should_epoch_stop": false,
|
3227 |
+
"should_evaluate": false,
|
3228 |
+
"should_log": false,
|
3229 |
+
"should_save": true,
|
3230 |
+
"should_training_stop": false
|
3231 |
+
},
|
3232 |
+
"attributes": {}
|
3233 |
+
}
|
3234 |
+
},
|
3235 |
+
"total_flos": 3782908946939904.0,
|
3236 |
+
"train_batch_size": 2,
|
3237 |
+
"trial_name": null,
|
3238 |
+
"trial_params": null
|
3239 |
+
}
|
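As a quick sanity check on the training log above, the records can be read back programmatically once a checkpoint directory from this upload is downloaded. The snippet below is a minimal sketch rather than part of this commit: it assumes the standard Hugging Face Trainer layout in which the entries shown here live under the "log_history" key of trainer_state.json, and it uses checkpoint-4500/trainer_state.json only as an example path (any of the checkpoint-*/trainer_state.json files added in this upload would work).

import json

# Load one of the trainer state files added in this commit (the path is an example).
with open("checkpoint-4500/trainer_state.json") as f:
    state = json.load(f)

# Training records carry "loss"; evaluation records carry "eval_loss" instead.
train_logs = [r for r in state["log_history"] if "loss" in r]
eval_logs = [r for r in state["log_history"] if "eval_loss" in r]

# Training loss is logged every 10 steps (logging_steps = 10 in the trailer above).
for r in train_logs[-3:]:
    print(f"step {r['step']:>5}  loss {r['loss']:.4f}  lr {r['learning_rate']:.2e}")

# Evaluation runs every 600 steps in this trace; report the best one seen so far.
best = min(eval_logs, key=lambda r: r["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")

Read this way, the eval_loss values shown above fall from about 1.436 at step 2400 to about 1.374 at step 4200, so the later checkpoints in this upload track a lower validation loss.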
checkpoint-4500/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
checkpoint-4500/zero_to_fp32.py
ADDED
@@ -0,0 +1,674 @@
1 |
+
#!/usr/bin/env python
|
2 |
+
|
3 |
+
# Copyright (c) Microsoft Corporation.
|
4 |
+
# SPDX-License-Identifier: Apache-2.0
|
5 |
+
|
6 |
+
# DeepSpeed Team
|
7 |
+
|
8 |
+
# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
|
9 |
+
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
|
10 |
+
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
|
11 |
+
# application.
|
12 |
+
#
|
13 |
+
# example:
|
14 |
+
# python zero_to_fp32.py . output_dir/
|
15 |
+
# or
|
16 |
+
# python zero_to_fp32.py . output_dir/ --safe_serialization
|
17 |
+
|
18 |
+
import argparse
|
19 |
+
import torch
|
20 |
+
import glob
|
21 |
+
import math
|
22 |
+
import os
|
23 |
+
import re
|
24 |
+
import json
|
25 |
+
from tqdm import tqdm
|
26 |
+
from collections import OrderedDict
|
27 |
+
from dataclasses import dataclass
|
28 |
+
|
29 |
+
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
|
30 |
+
# DeepSpeed data structures it has to be available in the current python environment.
|
31 |
+
from deepspeed.utils import logger
|
32 |
+
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
|
33 |
+
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
|
34 |
+
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
|
35 |
+
|
36 |
+
|
37 |
+
@dataclass
|
38 |
+
class zero_model_state:
|
39 |
+
buffers: dict()
|
40 |
+
param_shapes: dict()
|
41 |
+
shared_params: list
|
42 |
+
ds_version: int
|
43 |
+
frozen_param_shapes: dict()
|
44 |
+
frozen_param_fragments: dict()
|
45 |
+
|
46 |
+
|
47 |
+
debug = 0
|
48 |
+
|
49 |
+
# load to cpu
|
50 |
+
device = torch.device('cpu')
|
51 |
+
|
52 |
+
|
53 |
+
def atoi(text):
|
54 |
+
return int(text) if text.isdigit() else text
|
55 |
+
|
56 |
+
|
57 |
+
def natural_keys(text):
|
58 |
+
'''
|
59 |
+
alist.sort(key=natural_keys) sorts in human order
|
60 |
+
http://nedbatchelder.com/blog/200712/human_sorting.html
|
61 |
+
(See Toothy's implementation in the comments)
|
62 |
+
'''
|
63 |
+
return [atoi(c) for c in re.split(r'(\d+)', text)]
|
64 |
+
|
65 |
+
|
66 |
+
def get_model_state_file(checkpoint_dir, zero_stage):
|
67 |
+
if not os.path.isdir(checkpoint_dir):
|
68 |
+
raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
|
69 |
+
|
70 |
+
# there should be only one file
|
71 |
+
if zero_stage <= 2:
|
72 |
+
file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
|
73 |
+
elif zero_stage == 3:
|
74 |
+
file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
|
75 |
+
|
76 |
+
if not os.path.exists(file):
|
77 |
+
raise FileNotFoundError(f"can't find model states file at '{file}'")
|
78 |
+
|
79 |
+
return file
|
80 |
+
|
81 |
+
|
82 |
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
|
83 |
+
# XXX: need to test that this simple glob rule works for multi-node setup too
|
84 |
+
ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
|
85 |
+
|
86 |
+
if len(ckpt_files) == 0:
|
87 |
+
raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
|
88 |
+
|
89 |
+
return ckpt_files
|
90 |
+
|
91 |
+
|
92 |
+
def get_optim_files(checkpoint_dir):
|
93 |
+
return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
|
94 |
+
|
95 |
+
|
96 |
+
def get_model_state_files(checkpoint_dir):
|
97 |
+
return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
|
98 |
+
|
99 |
+
|
100 |
+
def parse_model_states(files):
|
101 |
+
zero_model_states = []
|
102 |
+
for file in files:
|
103 |
+
state_dict = torch.load(file, map_location=device)
|
104 |
+
|
105 |
+
if BUFFER_NAMES not in state_dict:
|
106 |
+
raise ValueError(f"{file} is not a model state checkpoint")
|
107 |
+
buffer_names = state_dict[BUFFER_NAMES]
|
108 |
+
if debug:
|
109 |
+
print("Found buffers:", buffer_names)
|
110 |
+
|
111 |
+
# recover just the buffers while restoring them to fp32 if they were saved in fp16
|
112 |
+
buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
|
113 |
+
param_shapes = state_dict[PARAM_SHAPES]
|
114 |
+
|
115 |
+
# collect parameters that are included in param_shapes
|
116 |
+
param_names = []
|
117 |
+
for s in param_shapes:
|
118 |
+
for name in s.keys():
|
119 |
+
param_names.append(name)
|
120 |
+
|
121 |
+
# update with frozen parameters
|
122 |
+
frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
|
123 |
+
if frozen_param_shapes is not None:
|
124 |
+
if debug:
|
125 |
+
print(f"Found frozen_param_shapes: {frozen_param_shapes}")
|
126 |
+
param_names += list(frozen_param_shapes.keys())
|
127 |
+
|
128 |
+
# handle shared params
|
129 |
+
shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
|
130 |
+
|
131 |
+
ds_version = state_dict.get(DS_VERSION, None)
|
132 |
+
|
133 |
+
frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
|
134 |
+
|
135 |
+
z_model_state = zero_model_state(buffers=buffers,
|
136 |
+
param_shapes=param_shapes,
|
137 |
+
shared_params=shared_params,
|
138 |
+
ds_version=ds_version,
|
139 |
+
frozen_param_shapes=frozen_param_shapes,
|
140 |
+
frozen_param_fragments=frozen_param_fragments)
|
141 |
+
zero_model_states.append(z_model_state)
|
142 |
+
|
143 |
+
return zero_model_states
|
144 |
+
|
145 |
+
|
146 |
+
def parse_optim_states(files, ds_checkpoint_dir):
|
147 |
+
total_files = len(files)
|
148 |
+
state_dicts = []
|
149 |
+
for f in files:
|
150 |
+
state_dict = torch.load(f, map_location=device)
|
151 |
+
# immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
|
152 |
+
# and also handle the case where it was already removed by another helper script
|
153 |
+
state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
|
154 |
+
state_dicts.append(state_dict)
|
155 |
+
|
156 |
+
if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
|
157 |
+
raise ValueError(f"{files[0]} is not a zero checkpoint")
|
158 |
+
zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
|
159 |
+
world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
|
160 |
+
|
161 |
+
# For ZeRO-2 each param group can have different partition_count as data parallelism for expert
|
162 |
+
# parameters can be different from data parallelism for non-expert parameters. So we can just
|
163 |
+
# use the max of the partition_count to get the dp world_size.
|
164 |
+
|
165 |
+
if type(world_size) is list:
|
166 |
+
world_size = max(world_size)
|
167 |
+
|
168 |
+
if world_size != total_files:
|
169 |
+
raise ValueError(
|
170 |
+
f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
|
171 |
+
"Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
|
172 |
+
)
|
173 |
+
|
174 |
+
    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        avail_numel = fp32_flat_groups[0].numel() * world_size
        print(f"Trainable params: Have {avail_numel} numels to process.")
        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    # Dependency pre-check
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        state_dict_split = split_torch_state_dict_into_shards(state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard, output_path)

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory to the pytorch fp32 state_dict output files"
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
        "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
        "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
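
For reference, a minimal sketch of driving the conversion above from Python rather than the command line, using the ``convert_zero_checkpoint_to_fp32_state_dict`` function defined in this script; the checkpoint and output paths are hypothetical placeholders ::

    # Usage sketch, assuming zero_to_fp32.py is importable (it is saved inside the
    # checkpoint folder) and that "checkpoint-5000" contains the 'latest' tag file.
    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    convert_zero_checkpoint_to_fp32_state_dict(
        "checkpoint-5000",         # folder holding 'latest' and global_step5000/
        "checkpoint-5000-fp32",    # placeholder output directory (must already exist)
        max_shard_size="5GB",      # split output into shards no larger than this
        safe_serialization=True,   # write .safetensors instead of pickle-based .bin
    )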
checkpoint-5000/README.md
ADDED
@@ -0,0 +1,202 @@
---
base_model: Qwen/Qwen2.5-72B
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]

### Framework versions

- PEFT 0.12.0
checkpoint-5000/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Qwen/Qwen2.5-72B",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
  "lora_dropout": 0.0,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 128,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "q_proj",
    "k_proj",
    "up_proj",
    "gate_proj",
    "o_proj",
    "v_proj",
    "down_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": true
}
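
The config above describes a rank-128 rsLoRA adapter over all attention and MLP projections of Qwen/Qwen2.5-72B. As a rough, untested sketch of how such an adapter is typically consumed with the standard peft API (the local adapter path is a placeholder, and loading the 72B base requires substantial memory) ::

    # Loading sketch, assuming transformers and peft are installed.
    from transformers import AutoModelForCausalLM
    from peft import PeftModel

    base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-72B", device_map="auto")
    model = PeftModel.from_pretrained(base, "checkpoint-5000")  # applies the LoRA weights
    model = model.merge_and_unload()  # optional: fold the adapter into the base weights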
checkpoint-5000/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
checkpoint-5000/latest
ADDED
@@ -0,0 +1 @@
global_step5000
checkpoint-5000/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
checkpoint-5000/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
checkpoint-5000/tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
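
The ``chat_template`` above is the stock Qwen2.5 ChatML template. A small sketch of rendering a conversation with it via the standard ``apply_chat_template`` API (the checkpoint path is a placeholder) ::

    # Template sketch: renders messages into the ChatML format defined above.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("checkpoint-5000")  # placeholder path
    messages = [{"role": "user", "content": "Hello!"}]
    text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # Without an explicit system message the template inserts the default
    # "You are a helpful assistant." system turn, then opens an assistant turn.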
checkpoint-5000/trainer_state.json
ADDED
@@ -0,0 +1,3597 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.2426813746287655,
  "eval_steps": 600,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00848536274925753,
      "grad_norm": 0.4898678891363344,
      "learning_rate": 8.488964346349746e-07,
      "loss": 1.8056,
      "step": 10
    },
    {
      "epoch": 0.01697072549851506,
      "grad_norm": 0.3537473179717183,
      "learning_rate": 1.6977928692699491e-06,
      "loss": 1.7621,
      "step": 20
    },
    {
      "epoch": 0.025456088247772592,
      "grad_norm": 0.28215953004159977,
      "learning_rate": 2.546689303904924e-06,
      "loss": 1.7571,
      "step": 30
    },
    {
      "epoch": 0.03394145099703012,
      "grad_norm": 0.27446565146764923,
      "learning_rate": 3.3955857385398982e-06,
      "loss": 1.7136,
      "step": 40
    },
    {
      "epoch": 0.04242681374628765,
      "grad_norm": 0.17051549768176558,
      "learning_rate": 4.244482173174873e-06,
      "loss": 1.6767,
      "step": 50
    },
    {
      "epoch": 0.050912176495545185,
      "grad_norm": 0.17763882467320422,
      "learning_rate": 5.093378607809848e-06,
      "loss": 1.6371,
      "step": 60
    },
    {
      "epoch": 0.05939753924480271,
      "grad_norm": 0.14311462596290048,
      "learning_rate": 5.942275042444822e-06,
      "loss": 1.6324,
      "step": 70
    },
    {
      "epoch": 0.06788290199406025,
      "grad_norm": 0.1659540846071645,
      "learning_rate": 6.7911714770797965e-06,
      "loss": 1.6062,
      "step": 80
    },
    {
      "epoch": 0.07636826474331777,
      "grad_norm": 0.20064072815620043,
      "learning_rate": 7.640067911714771e-06,
      "loss": 1.5832,
      "step": 90
    },
    {
      "epoch": 0.0848536274925753,
      "grad_norm": 0.2179045681711979,
      "learning_rate": 8.488964346349745e-06,
      "loss": 1.5898,
      "step": 100
    },
    {
      "epoch": 0.09333899024183284,
      "grad_norm": 0.23866012053128668,
      "learning_rate": 9.337860780984721e-06,
      "loss": 1.5924,
      "step": 110
    },
    {
      "epoch": 0.10182435299109037,
      "grad_norm": 0.18578051776430282,
      "learning_rate": 1.0186757215619695e-05,
      "loss": 1.5877,
      "step": 120
    },
    {
      "epoch": 0.1103097157403479,
      "grad_norm": 0.2216509707409362,
      "learning_rate": 1.103565365025467e-05,
      "loss": 1.5947,
      "step": 130
    },
    {
      "epoch": 0.11879507848960542,
      "grad_norm": 0.20427142255694086,
      "learning_rate": 1.1884550084889643e-05,
      "loss": 1.5841,
      "step": 140
    },
    {
      "epoch": 0.12728044123886295,
      "grad_norm": 0.1765851415675038,
      "learning_rate": 1.2733446519524619e-05,
      "loss": 1.5878,
      "step": 150
    },
    {
      "epoch": 0.1357658039881205,
      "grad_norm": 0.1769355117060811,
      "learning_rate": 1.3582342954159593e-05,
      "loss": 1.5795,
      "step": 160
    },
    {
      "epoch": 0.14425116673737803,
      "grad_norm": 0.1617675663096666,
      "learning_rate": 1.4431239388794569e-05,
      "loss": 1.5549,
      "step": 170
    },
    {
      "epoch": 0.15273652948663555,
      "grad_norm": 0.17302259072151574,
      "learning_rate": 1.5280135823429543e-05,
      "loss": 1.5808,
      "step": 180
    },
    {
      "epoch": 0.1612218922358931,
      "grad_norm": 0.16876039012432806,
      "learning_rate": 1.6129032258064517e-05,
      "loss": 1.5676,
      "step": 190
    },
    {
      "epoch": 0.1697072549851506,
      "grad_norm": 0.19627360154037596,
      "learning_rate": 1.697792869269949e-05,
      "loss": 1.5598,
      "step": 200
    },
    {
      "epoch": 0.17819261773440814,
      "grad_norm": 0.16078510362361015,
      "learning_rate": 1.7826825127334465e-05,
      "loss": 1.5667,
      "step": 210
    },
    {
      "epoch": 0.18667798048366568,
      "grad_norm": 0.16044786518959703,
      "learning_rate": 1.8675721561969442e-05,
      "loss": 1.5815,
      "step": 220
    },
    {
      "epoch": 0.1951633432329232,
      "grad_norm": 0.15656958873834717,
      "learning_rate": 1.9524617996604416e-05,
      "loss": 1.5576,
      "step": 230
    },
    {
      "epoch": 0.20364870598218074,
      "grad_norm": 0.1687290471357602,
      "learning_rate": 2.037351443123939e-05,
      "loss": 1.5453,
      "step": 240
    },
    {
      "epoch": 0.21213406873143828,
      "grad_norm": 0.1519017348276184,
      "learning_rate": 2.1222410865874364e-05,
      "loss": 1.5554,
      "step": 250
    },
    {
      "epoch": 0.2206194314806958,
|
188 |
+
"grad_norm": 0.15761892005160086,
|
189 |
+
"learning_rate": 2.207130730050934e-05,
|
190 |
+
"loss": 1.5494,
|
191 |
+
"step": 260
|
192 |
+
},
|
193 |
+
{
|
194 |
+
"epoch": 0.22910479422995333,
|
195 |
+
"grad_norm": 0.16857088482977495,
|
196 |
+
"learning_rate": 2.2920203735144312e-05,
|
197 |
+
"loss": 1.5794,
|
198 |
+
"step": 270
|
199 |
+
},
|
200 |
+
{
|
201 |
+
"epoch": 0.23759015697921085,
|
202 |
+
"grad_norm": 0.1678705209913503,
|
203 |
+
"learning_rate": 2.3769100169779286e-05,
|
204 |
+
"loss": 1.5373,
|
205 |
+
"step": 280
|
206 |
+
},
|
207 |
+
{
|
208 |
+
"epoch": 0.2460755197284684,
|
209 |
+
"grad_norm": 0.14812649566587394,
|
210 |
+
"learning_rate": 2.461799660441426e-05,
|
211 |
+
"loss": 1.5504,
|
212 |
+
"step": 290
|
213 |
+
},
|
214 |
+
{
|
215 |
+
"epoch": 0.2545608824777259,
|
216 |
+
"grad_norm": 0.17651916734325857,
|
217 |
+
"learning_rate": 2.5466893039049238e-05,
|
218 |
+
"loss": 1.5607,
|
219 |
+
"step": 300
|
220 |
+
},
|
221 |
+
{
|
222 |
+
"epoch": 0.26304624522698344,
|
223 |
+
"grad_norm": 0.14883055338507856,
|
224 |
+
"learning_rate": 2.6315789473684212e-05,
|
225 |
+
"loss": 1.5311,
|
226 |
+
"step": 310
|
227 |
+
},
|
228 |
+
{
|
229 |
+
"epoch": 0.271531607976241,
|
230 |
+
"grad_norm": 0.15787522753231265,
|
231 |
+
"learning_rate": 2.7164685908319186e-05,
|
232 |
+
"loss": 1.5656,
|
233 |
+
"step": 320
|
234 |
+
},
|
235 |
+
{
|
236 |
+
"epoch": 0.2800169707254985,
|
237 |
+
"grad_norm": 0.1625232940237689,
|
238 |
+
"learning_rate": 2.801358234295416e-05,
|
239 |
+
"loss": 1.5686,
|
240 |
+
"step": 330
|
241 |
+
},
|
242 |
+
{
|
243 |
+
"epoch": 0.28850233347475607,
|
244 |
+
"grad_norm": 0.18505951289343867,
|
245 |
+
"learning_rate": 2.8862478777589137e-05,
|
246 |
+
"loss": 1.5474,
|
247 |
+
"step": 340
|
248 |
+
},
|
249 |
+
{
|
250 |
+
"epoch": 0.29698769622401355,
|
251 |
+
"grad_norm": 0.13785772316349984,
|
252 |
+
"learning_rate": 2.9711375212224108e-05,
|
253 |
+
"loss": 1.5696,
|
254 |
+
"step": 350
|
255 |
+
},
|
256 |
+
{
|
257 |
+
"epoch": 0.3054730589732711,
|
258 |
+
"grad_norm": 0.13531274658248552,
|
259 |
+
"learning_rate": 3.0560271646859086e-05,
|
260 |
+
"loss": 1.5551,
|
261 |
+
"step": 360
|
262 |
+
},
|
263 |
+
{
|
264 |
+
"epoch": 0.31395842172252864,
|
265 |
+
"grad_norm": 0.1366381415368909,
|
266 |
+
"learning_rate": 3.140916808149406e-05,
|
267 |
+
"loss": 1.524,
|
268 |
+
"step": 370
|
269 |
+
},
|
270 |
+
{
|
271 |
+
"epoch": 0.3224437844717862,
|
272 |
+
"grad_norm": 0.14587220569353926,
|
273 |
+
"learning_rate": 3.2258064516129034e-05,
|
274 |
+
"loss": 1.5515,
|
275 |
+
"step": 380
|
276 |
+
},
|
277 |
+
{
|
278 |
+
"epoch": 0.3309291472210437,
|
279 |
+
"grad_norm": 0.13336349383744864,
|
280 |
+
"learning_rate": 3.310696095076401e-05,
|
281 |
+
"loss": 1.5457,
|
282 |
+
"step": 390
|
283 |
+
},
|
284 |
+
{
|
285 |
+
"epoch": 0.3394145099703012,
|
286 |
+
"grad_norm": 0.1772016947970983,
|
287 |
+
"learning_rate": 3.395585738539898e-05,
|
288 |
+
"loss": 1.5582,
|
289 |
+
"step": 400
|
290 |
+
},
|
291 |
+
{
|
292 |
+
"epoch": 0.34789987271955874,
|
293 |
+
"grad_norm": 0.13819420575084573,
|
294 |
+
"learning_rate": 3.4804753820033956e-05,
|
295 |
+
"loss": 1.5326,
|
296 |
+
"step": 410
|
297 |
+
},
|
298 |
+
{
|
299 |
+
"epoch": 0.3563852354688163,
|
300 |
+
"grad_norm": 0.12729862167862188,
|
301 |
+
"learning_rate": 3.565365025466893e-05,
|
302 |
+
"loss": 1.5387,
|
303 |
+
"step": 420
|
304 |
+
},
|
305 |
+
{
|
306 |
+
"epoch": 0.3648705982180738,
|
307 |
+
"grad_norm": 0.11777082851399363,
|
308 |
+
"learning_rate": 3.6502546689303904e-05,
|
309 |
+
"loss": 1.5587,
|
310 |
+
"step": 430
|
311 |
+
},
|
312 |
+
{
|
313 |
+
"epoch": 0.37335596096733137,
|
314 |
+
"grad_norm": 0.15372268131323022,
|
315 |
+
"learning_rate": 3.7351443123938885e-05,
|
316 |
+
"loss": 1.5362,
|
317 |
+
"step": 440
|
318 |
+
},
|
319 |
+
{
|
320 |
+
"epoch": 0.3818413237165889,
|
321 |
+
"grad_norm": 0.12616185572252248,
|
322 |
+
"learning_rate": 3.820033955857386e-05,
|
323 |
+
"loss": 1.5548,
|
324 |
+
"step": 450
|
325 |
+
},
|
326 |
+
{
|
327 |
+
"epoch": 0.3903266864658464,
|
328 |
+
"grad_norm": 0.1311200786303391,
|
329 |
+
"learning_rate": 3.904923599320883e-05,
|
330 |
+
"loss": 1.5409,
|
331 |
+
"step": 460
|
332 |
+
},
|
333 |
+
{
|
334 |
+
"epoch": 0.39881204921510394,
|
335 |
+
"grad_norm": 0.1707919112561785,
|
336 |
+
"learning_rate": 3.989813242784381e-05,
|
337 |
+
"loss": 1.5509,
|
338 |
+
"step": 470
|
339 |
+
},
|
340 |
+
{
|
341 |
+
"epoch": 0.4072974119643615,
|
342 |
+
"grad_norm": 0.14660149264284913,
|
343 |
+
"learning_rate": 4.074702886247878e-05,
|
344 |
+
"loss": 1.5433,
|
345 |
+
"step": 480
|
346 |
+
},
|
347 |
+
{
|
348 |
+
"epoch": 0.415782774713619,
|
349 |
+
"grad_norm": 0.12478895483834351,
|
350 |
+
"learning_rate": 4.1595925297113755e-05,
|
351 |
+
"loss": 1.5382,
|
352 |
+
"step": 490
|
353 |
+
},
|
354 |
+
{
|
355 |
+
"epoch": 0.42426813746287656,
|
356 |
+
"grad_norm": 0.12327957445795817,
|
357 |
+
"learning_rate": 4.244482173174873e-05,
|
358 |
+
"loss": 1.5515,
|
359 |
+
"step": 500
|
360 |
+
},
|
361 |
+
{
|
362 |
+
"epoch": 0.43275350021213405,
|
363 |
+
"grad_norm": 0.12922777738650987,
|
364 |
+
"learning_rate": 4.32937181663837e-05,
|
365 |
+
"loss": 1.5688,
|
366 |
+
"step": 510
|
367 |
+
},
|
368 |
+
{
|
369 |
+
"epoch": 0.4412388629613916,
|
370 |
+
"grad_norm": 0.12486802189783415,
|
371 |
+
"learning_rate": 4.414261460101868e-05,
|
372 |
+
"loss": 1.5452,
|
373 |
+
"step": 520
|
374 |
+
},
|
375 |
+
{
|
376 |
+
"epoch": 0.44972422571064913,
|
377 |
+
"grad_norm": 0.1360610874577123,
|
378 |
+
"learning_rate": 4.499151103565366e-05,
|
379 |
+
"loss": 1.5493,
|
380 |
+
"step": 530
|
381 |
+
},
|
382 |
+
{
|
383 |
+
"epoch": 0.45820958845990667,
|
384 |
+
"grad_norm": 0.1884897685356775,
|
385 |
+
"learning_rate": 4.5840407470288625e-05,
|
386 |
+
"loss": 1.5511,
|
387 |
+
"step": 540
|
388 |
+
},
|
389 |
+
{
|
390 |
+
"epoch": 0.4666949512091642,
|
391 |
+
"grad_norm": 0.12446302384809525,
|
392 |
+
"learning_rate": 4.6689303904923606e-05,
|
393 |
+
"loss": 1.5458,
|
394 |
+
"step": 550
|
395 |
+
},
|
396 |
+
{
|
397 |
+
"epoch": 0.4751803139584217,
|
398 |
+
"grad_norm": 0.13169591804768588,
|
399 |
+
"learning_rate": 4.753820033955857e-05,
|
400 |
+
"loss": 1.5569,
|
401 |
+
"step": 560
|
402 |
+
},
|
403 |
+
{
|
404 |
+
"epoch": 0.48366567670767924,
|
405 |
+
"grad_norm": 0.1343809247449631,
|
406 |
+
"learning_rate": 4.8387096774193554e-05,
|
407 |
+
"loss": 1.5408,
|
408 |
+
"step": 570
|
409 |
+
},
|
410 |
+
{
|
411 |
+
"epoch": 0.4921510394569368,
|
412 |
+
"grad_norm": 0.14024589853602,
|
413 |
+
"learning_rate": 4.923599320882852e-05,
|
414 |
+
"loss": 1.5487,
|
415 |
+
"step": 580
|
416 |
+
},
|
417 |
+
{
|
418 |
+
"epoch": 0.5006364022061943,
|
419 |
+
"grad_norm": 0.16240429253875313,
|
420 |
+
"learning_rate": 4.999999560970061e-05,
|
421 |
+
"loss": 1.5488,
|
422 |
+
"step": 590
|
423 |
+
},
|
424 |
+
{
|
425 |
+
"epoch": 0.5091217649554518,
|
426 |
+
"grad_norm": 0.12575424857894482,
|
427 |
+
"learning_rate": 4.999946877563971e-05,
|
428 |
+
"loss": 1.532,
|
429 |
+
"step": 600
|
430 |
+
},
|
431 |
+
{
|
432 |
+
"epoch": 0.5091217649554518,
|
433 |
+
"eval_loss": 1.519254446029663,
|
434 |
+
"eval_runtime": 53.3242,
|
435 |
+
"eval_samples_per_second": 7.145,
|
436 |
+
"eval_steps_per_second": 0.9,
|
437 |
+
"step": 600
|
438 |
+
},
|
439 |
+
{
|
440 |
+
"epoch": 0.5176071277047094,
|
441 |
+
"grad_norm": 0.18688482756329736,
|
442 |
+
"learning_rate": 4.999806390290309e-05,
|
443 |
+
"loss": 1.5544,
|
444 |
+
"step": 610
|
445 |
+
},
|
446 |
+
{
|
447 |
+
"epoch": 0.5260924904539669,
|
448 |
+
"grad_norm": 0.12425469431830571,
|
449 |
+
"learning_rate": 4.999578104083307e-05,
|
450 |
+
"loss": 1.5443,
|
451 |
+
"step": 620
|
452 |
+
},
|
453 |
+
{
|
454 |
+
"epoch": 0.5345778532032245,
|
455 |
+
"grad_norm": 0.1299027485420099,
|
456 |
+
"learning_rate": 4.999262026960902e-05,
|
457 |
+
"loss": 1.5569,
|
458 |
+
"step": 630
|
459 |
+
},
|
460 |
+
{
|
461 |
+
"epoch": 0.543063215952482,
|
462 |
+
"grad_norm": 0.11441754852508934,
|
463 |
+
"learning_rate": 4.998858170024449e-05,
|
464 |
+
"loss": 1.5316,
|
465 |
+
"step": 640
|
466 |
+
},
|
467 |
+
{
|
468 |
+
"epoch": 0.5515485787017395,
|
469 |
+
"grad_norm": 0.14888547248976478,
|
470 |
+
"learning_rate": 4.998366547458326e-05,
|
471 |
+
"loss": 1.5177,
|
472 |
+
"step": 650
|
473 |
+
},
|
474 |
+
{
|
475 |
+
"epoch": 0.560033941450997,
|
476 |
+
"grad_norm": 0.14859292774768867,
|
477 |
+
"learning_rate": 4.997787176529449e-05,
|
478 |
+
"loss": 1.5394,
|
479 |
+
"step": 660
|
480 |
+
},
|
481 |
+
{
|
482 |
+
"epoch": 0.5685193042002545,
|
483 |
+
"grad_norm": 0.12499154376539734,
|
484 |
+
"learning_rate": 4.997120077586651e-05,
|
485 |
+
"loss": 1.5554,
|
486 |
+
"step": 670
|
487 |
+
},
|
488 |
+
{
|
489 |
+
"epoch": 0.5770046669495121,
|
490 |
+
"grad_norm": 0.1218974898058821,
|
491 |
+
"learning_rate": 4.9963652740599774e-05,
|
492 |
+
"loss": 1.5335,
|
493 |
+
"step": 680
|
494 |
+
},
|
495 |
+
{
|
496 |
+
"epoch": 0.5854900296987696,
|
497 |
+
"grad_norm": 0.1273110498715124,
|
498 |
+
"learning_rate": 4.995522792459859e-05,
|
499 |
+
"loss": 1.5349,
|
500 |
+
"step": 690
|
501 |
+
},
|
502 |
+
{
|
503 |
+
"epoch": 0.5939753924480271,
|
504 |
+
"grad_norm": 0.12115412881719101,
|
505 |
+
"learning_rate": 4.994592662376183e-05,
|
506 |
+
"loss": 1.5419,
|
507 |
+
"step": 700
|
508 |
+
},
|
509 |
+
{
|
510 |
+
"epoch": 0.6024607551972847,
|
511 |
+
"grad_norm": 0.14855096330233286,
|
512 |
+
"learning_rate": 4.99357491647725e-05,
|
513 |
+
"loss": 1.513,
|
514 |
+
"step": 710
|
515 |
+
},
|
516 |
+
{
|
517 |
+
"epoch": 0.6109461179465422,
|
518 |
+
"grad_norm": 0.11407988659327956,
|
519 |
+
"learning_rate": 4.992469590508628e-05,
|
520 |
+
"loss": 1.5243,
|
521 |
+
"step": 720
|
522 |
+
},
|
523 |
+
{
|
524 |
+
"epoch": 0.6194314806957998,
|
525 |
+
"grad_norm": 0.1197712643781127,
|
526 |
+
"learning_rate": 4.9912767232919035e-05,
|
527 |
+
"loss": 1.5177,
|
528 |
+
"step": 730
|
529 |
+
},
|
530 |
+
{
|
531 |
+
"epoch": 0.6279168434450573,
|
532 |
+
"grad_norm": 0.12400515877262065,
|
533 |
+
"learning_rate": 4.9899963567233074e-05,
|
534 |
+
"loss": 1.5619,
|
535 |
+
"step": 740
|
536 |
+
},
|
537 |
+
{
|
538 |
+
"epoch": 0.6364022061943148,
|
539 |
+
"grad_norm": 0.12250385257708406,
|
540 |
+
"learning_rate": 4.988628535772249e-05,
|
541 |
+
"loss": 1.539,
|
542 |
+
"step": 750
|
543 |
+
},
|
544 |
+
{
|
545 |
+
"epoch": 0.6448875689435724,
|
546 |
+
"grad_norm": 0.1262441090496857,
|
547 |
+
"learning_rate": 4.987173308479738e-05,
|
548 |
+
"loss": 1.5195,
|
549 |
+
"step": 760
|
550 |
+
},
|
551 |
+
{
|
552 |
+
"epoch": 0.6533729316928298,
|
553 |
+
"grad_norm": 0.12459694416473029,
|
554 |
+
"learning_rate": 4.985630725956694e-05,
|
555 |
+
"loss": 1.5462,
|
556 |
+
"step": 770
|
557 |
+
},
|
558 |
+
{
|
559 |
+
"epoch": 0.6618582944420874,
|
560 |
+
"grad_norm": 0.12985189006106762,
|
561 |
+
"learning_rate": 4.9840008423821527e-05,
|
562 |
+
"loss": 1.5113,
|
563 |
+
"step": 780
|
564 |
+
},
|
565 |
+
{
|
566 |
+
"epoch": 0.6703436571913449,
|
567 |
+
"grad_norm": 0.12689306141471304,
|
568 |
+
"learning_rate": 4.9822837150013636e-05,
|
569 |
+
"loss": 1.5201,
|
570 |
+
"step": 790
|
571 |
+
},
|
572 |
+
{
|
573 |
+
"epoch": 0.6788290199406024,
|
574 |
+
"grad_norm": 0.15393156370587963,
|
575 |
+
"learning_rate": 4.980479404123778e-05,
|
576 |
+
"loss": 1.5121,
|
577 |
+
"step": 800
|
578 |
+
},
|
579 |
+
{
|
580 |
+
"epoch": 0.68731438268986,
|
581 |
+
"grad_norm": 0.13213701895207608,
|
582 |
+
"learning_rate": 4.978587973120931e-05,
|
583 |
+
"loss": 1.5307,
|
584 |
+
"step": 810
|
585 |
+
},
|
586 |
+
{
|
587 |
+
"epoch": 0.6957997454391175,
|
588 |
+
"grad_norm": 0.11561354931316294,
|
589 |
+
"learning_rate": 4.9766094884242184e-05,
|
590 |
+
"loss": 1.5316,
|
591 |
+
"step": 820
|
592 |
+
},
|
593 |
+
{
|
594 |
+
"epoch": 0.7042851081883751,
|
595 |
+
"grad_norm": 0.12414772399330044,
|
596 |
+
"learning_rate": 4.974544019522559e-05,
|
597 |
+
"loss": 1.5148,
|
598 |
+
"step": 830
|
599 |
+
},
|
600 |
+
{
|
601 |
+
"epoch": 0.7127704709376326,
|
602 |
+
"grad_norm": 0.1171652849153521,
|
603 |
+
"learning_rate": 4.972391638959959e-05,
|
604 |
+
"loss": 1.5096,
|
605 |
+
"step": 840
|
606 |
+
},
|
607 |
+
{
|
608 |
+
"epoch": 0.7212558336868902,
|
609 |
+
"grad_norm": 0.12868937349582316,
|
610 |
+
"learning_rate": 4.9701524223329585e-05,
|
611 |
+
"loss": 1.5282,
|
612 |
+
"step": 850
|
613 |
+
},
|
614 |
+
{
|
615 |
+
"epoch": 0.7297411964361477,
|
616 |
+
"grad_norm": 0.1200015077117309,
|
617 |
+
"learning_rate": 4.967826448287981e-05,
|
618 |
+
"loss": 1.5512,
|
619 |
+
"step": 860
|
620 |
+
},
|
621 |
+
{
|
622 |
+
"epoch": 0.7382265591854051,
|
623 |
+
"grad_norm": 0.12340885660045105,
|
624 |
+
"learning_rate": 4.96541379851857e-05,
|
625 |
+
"loss": 1.5394,
|
626 |
+
"step": 870
|
627 |
+
},
|
628 |
+
{
|
629 |
+
"epoch": 0.7467119219346627,
|
630 |
+
"grad_norm": 0.12976937691467555,
|
631 |
+
"learning_rate": 4.962914557762517e-05,
|
632 |
+
"loss": 1.51,
|
633 |
+
"step": 880
|
634 |
+
},
|
635 |
+
{
|
636 |
+
"epoch": 0.7551972846839202,
|
637 |
+
"grad_norm": 0.11912878476038466,
|
638 |
+
"learning_rate": 4.9603288137988905e-05,
|
639 |
+
"loss": 1.5294,
|
640 |
+
"step": 890
|
641 |
+
},
|
642 |
+
{
|
643 |
+
"epoch": 0.7636826474331778,
|
644 |
+
"grad_norm": 0.1299625480337927,
|
645 |
+
"learning_rate": 4.957656657444947e-05,
|
646 |
+
"loss": 1.507,
|
647 |
+
"step": 900
|
648 |
+
},
|
649 |
+
{
|
650 |
+
"epoch": 0.7721680101824353,
|
651 |
+
"grad_norm": 0.12380144459698468,
|
652 |
+
"learning_rate": 4.954898182552946e-05,
|
653 |
+
"loss": 1.5376,
|
654 |
+
"step": 910
|
655 |
+
},
|
656 |
+
{
|
657 |
+
"epoch": 0.7806533729316928,
|
658 |
+
"grad_norm": 0.13139339643682763,
|
659 |
+
"learning_rate": 4.9520534860068535e-05,
|
660 |
+
"loss": 1.5291,
|
661 |
+
"step": 920
|
662 |
+
},
|
663 |
+
{
|
664 |
+
"epoch": 0.7891387356809504,
|
665 |
+
"grad_norm": 0.13088956203983898,
|
666 |
+
"learning_rate": 4.949122667718935e-05,
|
667 |
+
"loss": 1.5239,
|
668 |
+
"step": 930
|
669 |
+
},
|
670 |
+
{
|
671 |
+
"epoch": 0.7976240984302079,
|
672 |
+
"grad_norm": 0.12586052988453703,
|
673 |
+
"learning_rate": 4.94610583062625e-05,
|
674 |
+
"loss": 1.5525,
|
675 |
+
"step": 940
|
676 |
+
},
|
677 |
+
{
|
678 |
+
"epoch": 0.8061094611794655,
|
679 |
+
"grad_norm": 0.12020996031652877,
|
680 |
+
"learning_rate": 4.943003080687035e-05,
|
681 |
+
"loss": 1.5525,
|
682 |
+
"step": 950
|
683 |
+
},
|
684 |
+
{
|
685 |
+
"epoch": 0.814594823928723,
|
686 |
+
"grad_norm": 0.12866375954060869,
|
687 |
+
"learning_rate": 4.9398145268769856e-05,
|
688 |
+
"loss": 1.5266,
|
689 |
+
"step": 960
|
690 |
+
},
|
691 |
+
{
|
692 |
+
"epoch": 0.8230801866779804,
|
693 |
+
"grad_norm": 0.13166136756817035,
|
694 |
+
"learning_rate": 4.936540281185423e-05,
|
695 |
+
"loss": 1.5041,
|
696 |
+
"step": 970
|
697 |
+
},
|
698 |
+
{
|
699 |
+
"epoch": 0.831565549427238,
|
700 |
+
"grad_norm": 0.12481946698483787,
|
701 |
+
"learning_rate": 4.933180458611364e-05,
|
702 |
+
"loss": 1.5271,
|
703 |
+
"step": 980
|
704 |
+
},
|
705 |
+
{
|
706 |
+
"epoch": 0.8400509121764955,
|
707 |
+
"grad_norm": 0.12264463761209114,
|
708 |
+
"learning_rate": 4.9297351771594844e-05,
|
709 |
+
"loss": 1.5354,
|
710 |
+
"step": 990
|
711 |
+
},
|
712 |
+
{
|
713 |
+
"epoch": 0.8485362749257531,
|
714 |
+
"grad_norm": 0.11985452856537594,
|
715 |
+
"learning_rate": 4.926204557835968e-05,
|
716 |
+
"loss": 1.5167,
|
717 |
+
"step": 1000
|
718 |
+
},
|
719 |
+
{
|
720 |
+
"epoch": 0.8570216376750106,
|
721 |
+
"grad_norm": 0.13125396521190327,
|
722 |
+
"learning_rate": 4.9225887246442634e-05,
|
723 |
+
"loss": 1.5282,
|
724 |
+
"step": 1010
|
725 |
+
},
|
726 |
+
{
|
727 |
+
"epoch": 0.8655070004242681,
|
728 |
+
"grad_norm": 0.12730192328072554,
|
729 |
+
"learning_rate": 4.918887804580725e-05,
|
730 |
+
"loss": 1.5089,
|
731 |
+
"step": 1020
|
732 |
+
},
|
733 |
+
{
|
734 |
+
"epoch": 0.8739923631735257,
|
735 |
+
"grad_norm": 0.12724644219344786,
|
736 |
+
"learning_rate": 4.915101927630153e-05,
|
737 |
+
"loss": 1.4964,
|
738 |
+
"step": 1030
|
739 |
+
},
|
740 |
+
{
|
741 |
+
"epoch": 0.8824777259227832,
|
742 |
+
"grad_norm": 0.13578611501833232,
|
743 |
+
"learning_rate": 4.911231226761227e-05,
|
744 |
+
"loss": 1.5189,
|
745 |
+
"step": 1040
|
746 |
+
},
|
747 |
+
{
|
748 |
+
"epoch": 0.8909630886720408,
|
749 |
+
"grad_norm": 0.13577513964986457,
|
750 |
+
"learning_rate": 4.90727583792184e-05,
|
751 |
+
"loss": 1.5149,
|
752 |
+
"step": 1050
|
753 |
+
},
|
754 |
+
{
|
755 |
+
"epoch": 0.8994484514212983,
|
756 |
+
"grad_norm": 0.1269735011676505,
|
757 |
+
"learning_rate": 4.903235900034317e-05,
|
758 |
+
"loss": 1.5066,
|
759 |
+
"step": 1060
|
760 |
+
},
|
761 |
+
{
|
762 |
+
"epoch": 0.9079338141705557,
|
763 |
+
"grad_norm": 0.13250058214235566,
|
764 |
+
"learning_rate": 4.899111554990543e-05,
|
765 |
+
"loss": 1.5129,
|
766 |
+
"step": 1070
|
767 |
+
},
|
768 |
+
{
|
769 |
+
"epoch": 0.9164191769198133,
|
770 |
+
"grad_norm": 0.13130735246433495,
|
771 |
+
"learning_rate": 4.894902947646975e-05,
|
772 |
+
"loss": 1.5156,
|
773 |
+
"step": 1080
|
774 |
+
},
|
775 |
+
{
|
776 |
+
"epoch": 0.9249045396690708,
|
777 |
+
"grad_norm": 0.1273580180253049,
|
778 |
+
"learning_rate": 4.890610225819553e-05,
|
779 |
+
"loss": 1.5324,
|
780 |
+
"step": 1090
|
781 |
+
},
|
782 |
+
{
|
783 |
+
"epoch": 0.9333899024183284,
|
784 |
+
"grad_norm": 0.13155314243939242,
|
785 |
+
"learning_rate": 4.8862335402785136e-05,
|
786 |
+
"loss": 1.5106,
|
787 |
+
"step": 1100
|
788 |
+
},
|
789 |
+
{
|
790 |
+
"epoch": 0.9418752651675859,
|
791 |
+
"grad_norm": 0.13564895211984299,
|
792 |
+
"learning_rate": 4.88177304474309e-05,
|
793 |
+
"loss": 1.5067,
|
794 |
+
"step": 1110
|
795 |
+
},
|
796 |
+
{
|
797 |
+
"epoch": 0.9503606279168434,
|
798 |
+
"grad_norm": 0.12774735587114736,
|
799 |
+
"learning_rate": 4.877228895876115e-05,
|
800 |
+
"loss": 1.5182,
|
801 |
+
"step": 1120
|
802 |
+
},
|
803 |
+
{
|
804 |
+
"epoch": 0.958845990666101,
|
805 |
+
"grad_norm": 0.1307997709537685,
|
806 |
+
"learning_rate": 4.872601253278517e-05,
|
807 |
+
"loss": 1.4969,
|
808 |
+
"step": 1130
|
809 |
+
},
|
810 |
+
{
|
811 |
+
"epoch": 0.9673313534153585,
|
812 |
+
"grad_norm": 0.1304794845040634,
|
813 |
+
"learning_rate": 4.867890279483717e-05,
|
814 |
+
"loss": 1.5264,
|
815 |
+
"step": 1140
|
816 |
+
},
|
817 |
+
{
|
818 |
+
"epoch": 0.9758167161646161,
|
819 |
+
"grad_norm": 0.13666141796489684,
|
820 |
+
"learning_rate": 4.8630961399519206e-05,
|
821 |
+
"loss": 1.5467,
|
822 |
+
"step": 1150
|
823 |
+
},
|
824 |
+
{
|
825 |
+
"epoch": 0.9843020789138736,
|
826 |
+
"grad_norm": 0.1370278303190263,
|
827 |
+
"learning_rate": 4.8582190030643e-05,
|
828 |
+
"loss": 1.5127,
|
829 |
+
"step": 1160
|
830 |
+
},
|
831 |
+
{
|
832 |
+
"epoch": 0.9927874416631312,
|
833 |
+
"grad_norm": 0.1390936629299565,
|
834 |
+
"learning_rate": 4.8532590401170894e-05,
|
835 |
+
"loss": 1.5058,
|
836 |
+
"step": 1170
|
837 |
+
},
|
838 |
+
{
|
839 |
+
"epoch": 1.0012728044123886,
|
840 |
+
"grad_norm": 0.12934475548108287,
|
841 |
+
"learning_rate": 4.848216425315561e-05,
|
842 |
+
"loss": 1.5202,
|
843 |
+
"step": 1180
|
844 |
+
},
|
845 |
+
{
|
846 |
+
"epoch": 1.0097581671616462,
|
847 |
+
"grad_norm": 0.13898591683370803,
|
848 |
+
"learning_rate": 4.843091335767913e-05,
|
849 |
+
"loss": 1.4563,
|
850 |
+
"step": 1190
|
851 |
+
},
|
852 |
+
{
|
853 |
+
"epoch": 1.0182435299109036,
|
854 |
+
"grad_norm": 0.17488231535826249,
|
855 |
+
"learning_rate": 4.837883951479043e-05,
|
856 |
+
"loss": 1.4402,
|
857 |
+
"step": 1200
|
858 |
+
},
|
859 |
+
{
|
860 |
+
"epoch": 1.0182435299109036,
|
861 |
+
"eval_loss": 1.4955657720565796,
|
862 |
+
"eval_runtime": 52.424,
|
863 |
+
"eval_samples_per_second": 7.268,
|
864 |
+
"eval_steps_per_second": 0.916,
|
865 |
+
"step": 1200
|
866 |
+
},
|
867 |
+
{
|
868 |
+
"epoch": 1.0267288926601612,
|
869 |
+
"grad_norm": 0.1536036344095855,
|
870 |
+
"learning_rate": 4.832594455344229e-05,
|
871 |
+
"loss": 1.4848,
|
872 |
+
"step": 1210
|
873 |
+
},
|
874 |
+
{
|
875 |
+
"epoch": 1.0352142554094188,
|
876 |
+
"grad_norm": 0.15762414421336599,
|
877 |
+
"learning_rate": 4.827223033142706e-05,
|
878 |
+
"loss": 1.4567,
|
879 |
+
"step": 1220
|
880 |
+
},
|
881 |
+
{
|
882 |
+
"epoch": 1.0436996181586762,
|
883 |
+
"grad_norm": 0.15058229398130366,
|
884 |
+
"learning_rate": 4.8217698735311414e-05,
|
885 |
+
"loss": 1.4672,
|
886 |
+
"step": 1230
|
887 |
+
},
|
888 |
+
{
|
889 |
+
"epoch": 1.0521849809079338,
|
890 |
+
"grad_norm": 0.16010992835678386,
|
891 |
+
"learning_rate": 4.8162351680370044e-05,
|
892 |
+
"loss": 1.4458,
|
893 |
+
"step": 1240
|
894 |
+
},
|
895 |
+
{
|
896 |
+
"epoch": 1.0606703436571914,
|
897 |
+
"grad_norm": 0.16758816000341356,
|
898 |
+
"learning_rate": 4.810619111051847e-05,
|
899 |
+
"loss": 1.4842,
|
900 |
+
"step": 1250
|
901 |
+
},
|
902 |
+
{
|
903 |
+
"epoch": 1.069155706406449,
|
904 |
+
"grad_norm": 0.16559260972674986,
|
905 |
+
"learning_rate": 4.8049218998244696e-05,
|
906 |
+
"loss": 1.4556,
|
907 |
+
"step": 1260
|
908 |
+
},
|
909 |
+
{
|
910 |
+
"epoch": 1.0776410691557063,
|
911 |
+
"grad_norm": 0.17237632034416966,
|
912 |
+
"learning_rate": 4.7991437344539966e-05,
|
913 |
+
"loss": 1.4813,
|
914 |
+
"step": 1270
|
915 |
+
},
|
916 |
+
{
|
917 |
+
"epoch": 1.086126431904964,
|
918 |
+
"grad_norm": 0.17112756741722487,
|
919 |
+
"learning_rate": 4.793284817882845e-05,
|
920 |
+
"loss": 1.4535,
|
921 |
+
"step": 1280
|
922 |
+
},
|
923 |
+
{
|
924 |
+
"epoch": 1.0946117946542215,
|
925 |
+
"grad_norm": 0.16828572707718548,
|
926 |
+
"learning_rate": 4.787345355889604e-05,
|
927 |
+
"loss": 1.4344,
|
928 |
+
"step": 1290
|
929 |
+
},
|
930 |
+
{
|
931 |
+
"epoch": 1.103097157403479,
|
932 |
+
"grad_norm": 0.15709986047041227,
|
933 |
+
"learning_rate": 4.7813255570817985e-05,
|
934 |
+
"loss": 1.4744,
|
935 |
+
"step": 1300
|
936 |
+
},
|
937 |
+
{
|
938 |
+
"epoch": 1.1115825201527365,
|
939 |
+
"grad_norm": 0.16651547128146313,
|
940 |
+
"learning_rate": 4.775225632888568e-05,
|
941 |
+
"loss": 1.4561,
|
942 |
+
"step": 1310
|
943 |
+
},
|
944 |
+
{
|
945 |
+
"epoch": 1.120067882901994,
|
946 |
+
"grad_norm": 0.16750176017515714,
|
947 |
+
"learning_rate": 4.76904579755324e-05,
|
948 |
+
"loss": 1.4616,
|
949 |
+
"step": 1320
|
950 |
+
},
|
951 |
+
{
|
952 |
+
"epoch": 1.1285532456512515,
|
953 |
+
"grad_norm": 0.1608016567554825,
|
954 |
+
"learning_rate": 4.7627862681258037e-05,
|
955 |
+
"loss": 1.4593,
|
956 |
+
"step": 1330
|
957 |
+
},
|
958 |
+
{
|
959 |
+
"epoch": 1.137038608400509,
|
960 |
+
"grad_norm": 0.21390766919038295,
|
961 |
+
"learning_rate": 4.756447264455287e-05,
|
962 |
+
"loss": 1.4484,
|
963 |
+
"step": 1340
|
964 |
+
},
|
965 |
+
{
|
966 |
+
"epoch": 1.1455239711497667,
|
967 |
+
"grad_norm": 0.16826883293172662,
|
968 |
+
"learning_rate": 4.750029009182038e-05,
|
969 |
+
"loss": 1.4703,
|
970 |
+
"step": 1350
|
971 |
+
},
|
972 |
+
{
|
973 |
+
"epoch": 1.1540093338990243,
|
974 |
+
"grad_norm": 0.17431508867079595,
|
975 |
+
"learning_rate": 4.7435317277299e-05,
|
976 |
+
"loss": 1.4701,
|
977 |
+
"step": 1360
|
978 |
+
},
|
979 |
+
{
|
980 |
+
"epoch": 1.1624946966482816,
|
981 |
+
"grad_norm": 0.15973851467570443,
|
982 |
+
"learning_rate": 4.736955648298299e-05,
|
983 |
+
"loss": 1.4503,
|
984 |
+
"step": 1370
|
985 |
+
},
|
986 |
+
{
|
987 |
+
"epoch": 1.1709800593975392,
|
988 |
+
"grad_norm": 0.1887713767970947,
|
989 |
+
"learning_rate": 4.730301001854225e-05,
|
990 |
+
"loss": 1.4624,
|
991 |
+
"step": 1380
|
992 |
+
},
|
993 |
+
{
|
994 |
+
"epoch": 1.1794654221467968,
|
995 |
+
"grad_norm": 0.16898695344997974,
|
996 |
+
"learning_rate": 4.7235680221241216e-05,
|
997 |
+
"loss": 1.4452,
|
998 |
+
"step": 1390
|
999 |
+
},
|
1000 |
+
{
|
1001 |
+
"epoch": 1.1879507848960542,
|
1002 |
+
"grad_norm": 0.20014553287073528,
|
1003 |
+
"learning_rate": 4.716756945585681e-05,
|
1004 |
+
"loss": 1.4717,
|
1005 |
+
"step": 1400
|
1006 |
+
},
|
1007 |
+
{
|
1008 |
+
"epoch": 1.1964361476453118,
|
1009 |
+
"grad_norm": 0.17137954325200072,
|
1010 |
+
"learning_rate": 4.709868011459528e-05,
|
1011 |
+
"loss": 1.4403,
|
1012 |
+
"step": 1410
|
1013 |
+
},
|
1014 |
+
{
|
1015 |
+
"epoch": 1.2049215103945694,
|
1016 |
+
"grad_norm": 0.17801721751888322,
|
1017 |
+
"learning_rate": 4.7029014617008294e-05,
|
1018 |
+
"loss": 1.4339,
|
1019 |
+
"step": 1420
|
1020 |
+
},
|
1021 |
+
{
|
1022 |
+
"epoch": 1.213406873143827,
|
1023 |
+
"grad_norm": 0.17139613676642362,
|
1024 |
+
"learning_rate": 4.695857540990789e-05,
|
1025 |
+
"loss": 1.4573,
|
1026 |
+
"step": 1430
|
1027 |
+
},
|
1028 |
+
{
|
1029 |
+
"epoch": 1.2218922358930844,
|
1030 |
+
"grad_norm": 0.16971403514498054,
|
1031 |
+
"learning_rate": 4.688736496728058e-05,
|
1032 |
+
"loss": 1.4282,
|
1033 |
+
"step": 1440
|
1034 |
+
},
|
1035 |
+
{
|
1036 |
+
"epoch": 1.230377598642342,
|
1037 |
+
"grad_norm": 0.17200272420880428,
|
1038 |
+
"learning_rate": 4.681538579020038e-05,
|
1039 |
+
"loss": 1.4434,
|
1040 |
+
"step": 1450
|
1041 |
+
},
|
1042 |
+
{
|
1043 |
+
"epoch": 1.2388629613915996,
|
1044 |
+
"grad_norm": 0.17208160407432616,
|
1045 |
+
"learning_rate": 4.6742640406741106e-05,
|
1046 |
+
"loss": 1.45,
|
1047 |
+
"step": 1460
|
1048 |
+
},
|
1049 |
+
{
|
1050 |
+
"epoch": 1.247348324140857,
|
1051 |
+
"grad_norm": 0.1939626212901777,
|
1052 |
+
"learning_rate": 4.666913137188743e-05,
|
1053 |
+
"loss": 1.4608,
|
1054 |
+
"step": 1470
|
1055 |
+
},
|
1056 |
+
{
|
1057 |
+
"epoch": 1.2558336868901145,
|
1058 |
+
"grad_norm": 0.17291794493304186,
|
1059 |
+
"learning_rate": 4.6594861267445236e-05,
|
1060 |
+
"loss": 1.4671,
|
1061 |
+
"step": 1480
|
1062 |
+
},
|
1063 |
+
{
|
1064 |
+
"epoch": 1.2643190496393721,
|
1065 |
+
"grad_norm": 0.18219792041638924,
|
1066 |
+
"learning_rate": 4.651983270195093e-05,
|
1067 |
+
"loss": 1.4262,
|
1068 |
+
"step": 1490
|
1069 |
+
},
|
1070 |
+
{
|
1071 |
+
"epoch": 1.2728044123886297,
|
1072 |
+
"grad_norm": 0.18086437830489926,
|
1073 |
+
"learning_rate": 4.644404831057979e-05,
|
1074 |
+
"loss": 1.4455,
|
1075 |
+
"step": 1500
|
1076 |
+
},
|
1077 |
+
{
|
1078 |
+
"epoch": 1.281289775137887,
|
1079 |
+
"grad_norm": 0.17417619624549402,
|
1080 |
+
"learning_rate": 4.636751075505344e-05,
|
1081 |
+
"loss": 1.4873,
|
1082 |
+
"step": 1510
|
1083 |
+
},
|
1084 |
+
{
|
1085 |
+
"epoch": 1.2897751378871447,
|
1086 |
+
"grad_norm": 0.18354282411845188,
|
1087 |
+
"learning_rate": 4.629022272354637e-05,
|
1088 |
+
"loss": 1.4525,
|
1089 |
+
"step": 1520
|
1090 |
+
},
|
1091 |
+
{
|
1092 |
+
"epoch": 1.298260500636402,
|
1093 |
+
"grad_norm": 0.17985617345325455,
|
1094 |
+
"learning_rate": 4.621218693059149e-05,
|
1095 |
+
"loss": 1.4303,
|
1096 |
+
"step": 1530
|
1097 |
+
},
|
1098 |
+
{
|
1099 |
+
"epoch": 1.3067458633856597,
|
1100 |
+
"grad_norm": 0.1809708317849863,
|
1101 |
+
"learning_rate": 4.6133406116984795e-05,
|
1102 |
+
"loss": 1.4631,
|
1103 |
+
"step": 1540
|
1104 |
+
},
|
1105 |
+
{
|
1106 |
+
"epoch": 1.3152312261349173,
|
1107 |
+
"grad_norm": 0.17487374671212322,
|
1108 |
+
"learning_rate": 4.6053883049689145e-05,
|
1109 |
+
"loss": 1.4482,
|
1110 |
+
"step": 1550
|
1111 |
+
},
|
1112 |
+
{
|
1113 |
+
"epoch": 1.3237165888841749,
|
1114 |
+
"grad_norm": 0.19912807671077193,
|
1115 |
+
"learning_rate": 4.5973620521737036e-05,
|
1116 |
+
"loss": 1.4497,
|
1117 |
+
"step": 1560
|
1118 |
+
},
|
1119 |
+
{
|
1120 |
+
"epoch": 1.3322019516334322,
|
1121 |
+
"grad_norm": 0.17853627546912074,
|
1122 |
+
"learning_rate": 4.5892621352132514e-05,
|
1123 |
+
"loss": 1.4456,
|
1124 |
+
"step": 1570
|
1125 |
+
},
|
1126 |
+
{
|
1127 |
+
"epoch": 1.3406873143826898,
|
1128 |
+
"grad_norm": 0.18252596927754394,
|
1129 |
+
"learning_rate": 4.581088838575218e-05,
|
1130 |
+
"loss": 1.4328,
|
1131 |
+
"step": 1580
|
1132 |
+
},
|
1133 |
+
{
|
1134 |
+
"epoch": 1.3491726771319474,
|
1135 |
+
"grad_norm": 0.17604951053556211,
|
1136 |
+
"learning_rate": 4.572842449324525e-05,
|
1137 |
+
"loss": 1.4442,
|
1138 |
+
"step": 1590
|
1139 |
+
},
|
1140 |
+
{
|
1141 |
+
"epoch": 1.3576580398812048,
|
1142 |
+
"grad_norm": 0.18358942463311748,
|
1143 |
+
"learning_rate": 4.564523257093275e-05,
|
1144 |
+
"loss": 1.4338,
|
1145 |
+
"step": 1600
|
1146 |
+
},
|
1147 |
+
{
|
1148 |
+
"epoch": 1.3661434026304624,
|
1149 |
+
"grad_norm": 0.20508703236267142,
|
1150 |
+
"learning_rate": 4.5561315540705774e-05,
|
1151 |
+
"loss": 1.4445,
|
1152 |
+
"step": 1610
|
1153 |
+
},
|
1154 |
+
{
|
1155 |
+
"epoch": 1.37462876537972,
|
1156 |
+
"grad_norm": 0.18486352550747187,
|
1157 |
+
"learning_rate": 4.547667634992288e-05,
|
1158 |
+
"loss": 1.4261,
|
1159 |
+
"step": 1620
|
1160 |
+
},
|
1161 |
+
{
|
1162 |
+
"epoch": 1.3831141281289776,
|
1163 |
+
"grad_norm": 0.17492766465456316,
|
1164 |
+
"learning_rate": 4.539131797130656e-05,
|
1165 |
+
"loss": 1.4258,
|
1166 |
+
"step": 1630
|
1167 |
+
},
|
1168 |
+
{
|
1169 |
+
"epoch": 1.391599490878235,
|
1170 |
+
"grad_norm": 0.19692876587833674,
|
1171 |
+
"learning_rate": 4.530524340283881e-05,
|
1172 |
+
"loss": 1.4349,
|
1173 |
+
"step": 1640
|
1174 |
+
},
|
1175 |
+
{
|
1176 |
+
"epoch": 1.4000848536274926,
|
1177 |
+
"grad_norm": 0.19155373430892478,
|
1178 |
+
"learning_rate": 4.521845566765589e-05,
|
1179 |
+
"loss": 1.4536,
|
1180 |
+
"step": 1650
|
1181 |
+
},
|
1182 |
+
{
|
1183 |
+
"epoch": 1.4085702163767502,
|
1184 |
+
"grad_norm": 0.18544325977459192,
|
1185 |
+
"learning_rate": 4.513095781394208e-05,
|
1186 |
+
"loss": 1.4363,
|
1187 |
+
"step": 1660
|
1188 |
+
},
|
1189 |
+
{
|
1190 |
+
"epoch": 1.4170555791260075,
|
1191 |
+
"grad_norm": 0.177828004720666,
|
1192 |
+
"learning_rate": 4.504275291482267e-05,
|
1193 |
+
"loss": 1.4595,
|
1194 |
+
"step": 1670
|
1195 |
+
},
|
1196 |
+
{
|
1197 |
+
"epoch": 1.4255409418752651,
|
1198 |
+
"grad_norm": 0.17855432230356816,
|
1199 |
+
"learning_rate": 4.495384406825601e-05,
|
1200 |
+
"loss": 1.4211,
|
1201 |
+
"step": 1680
|
1202 |
+
},
|
1203 |
+
{
|
1204 |
+
"epoch": 1.4340263046245227,
|
1205 |
+
"grad_norm": 0.20232492538380317,
|
1206 |
+
"learning_rate": 4.486423439692469e-05,
|
1207 |
+
"loss": 1.4189,
|
1208 |
+
"step": 1690
|
1209 |
+
},
|
1210 |
+
{
|
1211 |
+
"epoch": 1.4425116673737803,
|
1212 |
+
"grad_norm": 0.1975109303350431,
|
1213 |
+
"learning_rate": 4.477392704812585e-05,
|
1214 |
+
"loss": 1.4565,
|
1215 |
+
"step": 1700
|
1216 |
+
},
|
1217 |
+
{
|
1218 |
+
"epoch": 1.4509970301230377,
|
1219 |
+
"grad_norm": 0.19619010830399825,
|
1220 |
+
"learning_rate": 4.468292519366071e-05,
|
1221 |
+
"loss": 1.4382,
|
1222 |
+
"step": 1710
|
1223 |
+
},
|
1224 |
+
{
|
1225 |
+
"epoch": 1.4594823928722953,
|
1226 |
+
"grad_norm": 0.18168826428246143,
|
1227 |
+
"learning_rate": 4.459123202972308e-05,
|
1228 |
+
"loss": 1.4471,
|
1229 |
+
"step": 1720
|
1230 |
+
},
|
1231 |
+
{
|
1232 |
+
"epoch": 1.4679677556215527,
|
1233 |
+
"grad_norm": 0.1923264062362399,
|
1234 |
+
"learning_rate": 4.449885077678717e-05,
|
1235 |
+
"loss": 1.4153,
|
1236 |
+
"step": 1730
|
1237 |
+
},
|
1238 |
+
{
|
1239 |
+
"epoch": 1.4764531183708103,
|
1240 |
+
"grad_norm": 0.1907937313040222,
|
1241 |
+
"learning_rate": 4.440578467949445e-05,
|
1242 |
+
"loss": 1.4432,
|
1243 |
+
"step": 1740
|
1244 |
+
},
|
1245 |
+
{
|
1246 |
+
"epoch": 1.4849384811200679,
|
1247 |
+
"grad_norm": 0.19107457667767244,
|
1248 |
+
"learning_rate": 4.431203700653968e-05,
|
1249 |
+
"loss": 1.4285,
|
1250 |
+
"step": 1750
|
1251 |
+
},
|
1252 |
+
{
|
1253 |
+
"epoch": 1.4934238438693255,
|
1254 |
+
"grad_norm": 0.19847350429107552,
|
1255 |
+
"learning_rate": 4.421761105055613e-05,
|
1256 |
+
"loss": 1.4383,
|
1257 |
+
"step": 1760
|
1258 |
+
},
|
1259 |
+
{
|
1260 |
+
"epoch": 1.501909206618583,
|
1261 |
+
"grad_norm": 0.18536475556610216,
|
1262 |
+
"learning_rate": 4.4122510127999937e-05,
|
1263 |
+
"loss": 1.42,
|
1264 |
+
"step": 1770
|
1265 |
+
},
|
1266 |
+
{
|
1267 |
+
"epoch": 1.5103945693678404,
|
1268 |
+
"grad_norm": 0.18481023473586697,
|
1269 |
+
"learning_rate": 4.4026737579033584e-05,
|
1270 |
+
"loss": 1.4384,
|
1271 |
+
"step": 1780
|
1272 |
+
},
|
1273 |
+
{
|
1274 |
+
"epoch": 1.518879932117098,
|
1275 |
+
"grad_norm": 0.20863867505874642,
|
1276 |
+
"learning_rate": 4.393029676740864e-05,
|
1277 |
+
"loss": 1.4543,
|
1278 |
+
"step": 1790
|
1279 |
+
},
|
1280 |
+
{
|
1281 |
+
"epoch": 1.5273652948663554,
|
1282 |
+
"grad_norm": 0.1816036870853105,
|
1283 |
+
"learning_rate": 4.3833191080347575e-05,
|
1284 |
+
"loss": 1.434,
|
1285 |
+
"step": 1800
|
1286 |
+
},
|
1287 |
+
{
|
1288 |
+
"epoch": 1.5273652948663554,
|
1289 |
+
"eval_loss": 1.4622184038162231,
|
1290 |
+
"eval_runtime": 52.4041,
|
1291 |
+
"eval_samples_per_second": 7.27,
|
1292 |
+
"eval_steps_per_second": 0.916,
|
1293 |
+
"step": 1800
|
1294 |
+
},
|
1295 |
+
{
|
1296 |
+
"epoch": 1.535850657615613,
|
1297 |
+
"grad_norm": 0.19378252368958881,
|
1298 |
+
"learning_rate": 4.3735423928424815e-05,
|
1299 |
+
"loss": 1.4275,
|
1300 |
+
"step": 1810
|
1301 |
+
},
|
1302 |
+
{
|
1303 |
+
"epoch": 1.5443360203648706,
|
1304 |
+
"grad_norm": 0.20453331251433848,
|
1305 |
+
"learning_rate": 4.363699874544697e-05,
|
1306 |
+
"loss": 1.4203,
|
1307 |
+
"step": 1820
|
1308 |
+
},
|
1309 |
+
{
|
1310 |
+
"epoch": 1.5528213831141282,
|
1311 |
+
"grad_norm": 0.26684319417219377,
|
1312 |
+
"learning_rate": 4.3537918988332156e-05,
|
1313 |
+
"loss": 1.4372,
|
1314 |
+
"step": 1830
|
1315 |
+
},
|
1316 |
+
{
|
1317 |
+
"epoch": 1.5613067458633858,
|
1318 |
+
"grad_norm": 0.25745160303419773,
|
1319 |
+
"learning_rate": 4.343818813698868e-05,
|
1320 |
+
"loss": 1.4082,
|
1321 |
+
"step": 1840
|
1322 |
+
},
|
1323 |
+
{
|
1324 |
+
"epoch": 1.5697921086126432,
|
1325 |
+
"grad_norm": 0.19969727996700776,
|
1326 |
+
"learning_rate": 4.3337809694192765e-05,
|
1327 |
+
"loss": 1.4314,
|
1328 |
+
"step": 1850
|
1329 |
+
},
|
1330 |
+
{
|
1331 |
+
"epoch": 1.5782774713619008,
|
1332 |
+
"grad_norm": 0.20117210832277968,
|
1333 |
+
"learning_rate": 4.3236787185465525e-05,
|
1334 |
+
"loss": 1.4293,
|
1335 |
+
"step": 1860
|
1336 |
+
},
|
1337 |
+
{
|
1338 |
+
"epoch": 1.5867628341111581,
|
1339 |
+
"grad_norm": 0.20173003641028897,
|
1340 |
+
"learning_rate": 4.313512415894913e-05,
|
1341 |
+
"loss": 1.4406,
|
1342 |
+
"step": 1870
|
1343 |
+
},
|
1344 |
+
{
|
1345 |
+
"epoch": 1.5952481968604157,
|
1346 |
+
"grad_norm": 0.20304770794371527,
|
1347 |
+
"learning_rate": 4.303282418528224e-05,
|
1348 |
+
"loss": 1.4286,
|
1349 |
+
"step": 1880
|
1350 |
+
},
|
1351 |
+
{
|
1352 |
+
"epoch": 1.6037335596096733,
|
1353 |
+
"grad_norm": 0.19126658907738198,
|
1354 |
+
"learning_rate": 4.292989085747452e-05,
|
1355 |
+
"loss": 1.4184,
|
1356 |
+
"step": 1890
|
1357 |
+
},
|
1358 |
+
{
|
1359 |
+
"epoch": 1.612218922358931,
|
1360 |
+
"grad_norm": 0.20069554966453027,
|
1361 |
+
"learning_rate": 4.282632779078051e-05,
|
1362 |
+
"loss": 1.4133,
|
1363 |
+
"step": 1900
|
1364 |
+
},
|
1365 |
+
{
|
1366 |
+
"epoch": 1.6207042851081885,
|
1367 |
+
"grad_norm": 0.1952881519566686,
|
1368 |
+
"learning_rate": 4.2722138622572624e-05,
|
1369 |
+
"loss": 1.4432,
|
1370 |
+
"step": 1910
|
1371 |
+
},
|
1372 |
+
{
|
1373 |
+
"epoch": 1.629189647857446,
|
1374 |
+
"grad_norm": 0.19763704668680288,
|
1375 |
+
"learning_rate": 4.261732701221339e-05,
|
1376 |
+
"loss": 1.3921,
|
1377 |
+
"step": 1920
|
1378 |
+
},
|
1379 |
+
{
|
1380 |
+
"epoch": 1.6376750106067033,
|
1381 |
+
"grad_norm": 0.19821464294464497,
|
1382 |
+
"learning_rate": 4.2511896640926925e-05,
|
1383 |
+
"loss": 1.4454,
|
1384 |
+
"step": 1930
|
1385 |
+
},
|
1386 |
+
{
|
1387 |
+
"epoch": 1.6461603733559609,
|
1388 |
+
"grad_norm": 0.20456545626297834,
|
1389 |
+
"learning_rate": 4.240585121166966e-05,
|
1390 |
+
"loss": 1.4147,
|
1391 |
+
"step": 1940
|
1392 |
+
},
|
1393 |
+
{
|
1394 |
+
"epoch": 1.6546457361052185,
|
1395 |
+
"grad_norm": 0.2119092529186395,
|
1396 |
+
"learning_rate": 4.229919444900027e-05,
|
1397 |
+
"loss": 1.3969,
|
1398 |
+
"step": 1950
|
1399 |
+
},
|
1400 |
+
{
|
1401 |
+
"epoch": 1.663131098854476,
|
1402 |
+
"grad_norm": 0.20330157582122357,
|
1403 |
+
"learning_rate": 4.2191930098948865e-05,
|
1404 |
+
"loss": 1.426,
|
1405 |
+
"step": 1960
|
1406 |
+
},
|
1407 |
+
{
|
1408 |
+
"epoch": 1.6716164616037337,
|
1409 |
+
"grad_norm": 0.21761164739298738,
|
1410 |
+
"learning_rate": 4.2084061928885406e-05,
|
1411 |
+
"loss": 1.4246,
|
1412 |
+
"step": 1970
|
1413 |
+
},
|
1414 |
+
{
|
1415 |
+
"epoch": 1.680101824352991,
|
1416 |
+
"grad_norm": 0.19331588142071401,
|
1417 |
+
"learning_rate": 4.197559372738741e-05,
|
1418 |
+
"loss": 1.4305,
|
1419 |
+
"step": 1980
|
1420 |
+
},
|
1421 |
+
{
|
1422 |
+
"epoch": 1.6885871871022486,
|
1423 |
+
"grad_norm": 0.20188460724329996,
|
1424 |
+
"learning_rate": 4.186652930410685e-05,
|
1425 |
+
"loss": 1.4153,
|
1426 |
+
"step": 1990
|
1427 |
+
},
|
1428 |
+
{
|
1429 |
+
"epoch": 1.697072549851506,
|
1430 |
+
"grad_norm": 0.20988950033571588,
|
1431 |
+
"learning_rate": 4.1756872489636425e-05,
|
1432 |
+
"loss": 1.3894,
|
1433 |
+
"step": 2000
|
1434 |
+
},
|
1435 |
+
{
|
1436 |
+
"epoch": 1.7055579126007636,
|
1437 |
+
"grad_norm": 0.1966475893123187,
|
1438 |
+
"learning_rate": 4.1646627135374916e-05,
|
1439 |
+
"loss": 1.3962,
|
1440 |
+
"step": 2010
|
1441 |
+
},
|
1442 |
+
{
|
1443 |
+
"epoch": 1.7140432753500212,
|
1444 |
+
"grad_norm": 0.20785207367991768,
|
1445 |
+
"learning_rate": 4.1535797113392004e-05,
|
1446 |
+
"loss": 1.4037,
|
1447 |
+
"step": 2020
|
1448 |
+
},
|
1449 |
+
{
|
1450 |
+
"epoch": 1.7225286380992788,
|
1451 |
+
"grad_norm": 0.2029940281663133,
|
1452 |
+
"learning_rate": 4.1424386316292224e-05,
|
1453 |
+
"loss": 1.4011,
|
1454 |
+
"step": 2030
|
1455 |
+
},
|
1456 |
+
{
|
1457 |
+
"epoch": 1.7310140008485364,
|
1458 |
+
"grad_norm": 0.2247844551379277,
|
1459 |
+
"learning_rate": 4.131239865707829e-05,
|
1460 |
+
"loss": 1.4084,
|
1461 |
+
"step": 2040
|
1462 |
+
},
|
1463 |
+
{
|
1464 |
+
"epoch": 1.7394993635977938,
|
1465 |
+
"grad_norm": 0.20900441746105022,
|
1466 |
+
"learning_rate": 4.11998380690136e-05,
|
1467 |
+
"loss": 1.4235,
|
1468 |
+
"step": 2050
|
1469 |
+
},
|
1470 |
+
{
|
1471 |
+
"epoch": 1.7479847263470514,
|
1472 |
+
"grad_norm": 0.20362408546889926,
|
1473 |
+
"learning_rate": 4.108670850548416e-05,
|
1474 |
+
"loss": 1.4204,
|
1475 |
+
"step": 2060
|
1476 |
+
},
|
1477 |
+
{
|
1478 |
+
"epoch": 1.7564700890963088,
|
1479 |
+
"grad_norm": 0.22281567946240438,
|
1480 |
+
"learning_rate": 4.097301393985968e-05,
|
1481 |
+
"loss": 1.4023,
|
1482 |
+
"step": 2070
|
1483 |
+
},
|
1484 |
+
{
|
1485 |
+
"epoch": 1.7649554518455663,
|
1486 |
+
"grad_norm": 0.20867113178797225,
|
1487 |
+
"learning_rate": 4.085875836535404e-05,
|
1488 |
+
"loss": 1.3895,
|
1489 |
+
"step": 2080
|
1490 |
+
},
|
1491 |
+
{
|
1492 |
+
"epoch": 1.773440814594824,
|
1493 |
+
"grad_norm": 0.22113231886160947,
|
1494 |
+
"learning_rate": 4.0743945794885063e-05,
|
1495 |
+
"loss": 1.3963,
|
1496 |
+
"step": 2090
|
1497 |
+
},
|
1498 |
+
{
|
1499 |
+
"epoch": 1.7819261773440815,
|
1500 |
+
"grad_norm": 0.22334563577844263,
|
1501 |
+
"learning_rate": 4.062858026093351e-05,
|
1502 |
+
"loss": 1.3988,
|
1503 |
+
"step": 2100
|
1504 |
+
},
|
1505 |
+
{
|
1506 |
+
"epoch": 1.7904115400933391,
|
1507 |
+
"grad_norm": 0.23218581668265403,
|
1508 |
+
"learning_rate": 4.051266581540152e-05,
|
1509 |
+
"loss": 1.4068,
|
1510 |
+
"step": 2110
|
1511 |
+
},
|
1512 |
+
{
|
1513 |
+
"epoch": 1.7988969028425965,
|
1514 |
+
"grad_norm": 0.20295589384571033,
|
1515 |
+
"learning_rate": 4.0396206529470234e-05,
|
1516 |
+
"loss": 1.3883,
|
1517 |
+
"step": 2120
|
1518 |
+
},
|
1519 |
+
{
|
1520 |
+
"epoch": 1.8073822655918539,
|
1521 |
+
"grad_norm": 0.22861611442392848,
|
1522 |
+
"learning_rate": 4.027920649345687e-05,
|
1523 |
+
"loss": 1.4043,
|
1524 |
+
"step": 2130
|
1525 |
+
},
|
1526 |
+
{
|
1527 |
+
"epoch": 1.8158676283411115,
|
1528 |
+
"grad_norm": 0.2083012771089638,
|
1529 |
+
"learning_rate": 4.0161669816671e-05,
|
1530 |
+
"loss": 1.398,
|
1531 |
+
"step": 2140
|
1532 |
+
},
|
1533 |
+
{
|
1534 |
+
"epoch": 1.824352991090369,
|
1535 |
+
"grad_norm": 0.21936173231840464,
|
1536 |
+
"learning_rate": 4.004360062727028e-05,
|
1537 |
+
"loss": 1.4142,
|
1538 |
+
"step": 2150
|
1539 |
+
},
|
1540 |
+
{
|
1541 |
+
"epoch": 1.8328383538396267,
|
1542 |
+
"grad_norm": 0.21383435796328337,
|
1543 |
+
"learning_rate": 3.9925003072115406e-05,
|
1544 |
+
"loss": 1.4138,
|
1545 |
+
"step": 2160
|
1546 |
+
},
|
1547 |
+
{
|
1548 |
+
"epoch": 1.8413237165888843,
|
1549 |
+
"grad_norm": 0.23301608248270392,
|
1550 |
+
"learning_rate": 3.9805881316624506e-05,
|
1551 |
+
"loss": 1.4195,
|
1552 |
+
"step": 2170
|
1553 |
+
},
|
1554 |
+
{
|
1555 |
+
"epoch": 1.8498090793381419,
|
1556 |
+
"grad_norm": 0.22424766656883474,
|
1557 |
+
"learning_rate": 3.968623954462681e-05,
|
1558 |
+
"loss": 1.4011,
|
1559 |
+
"step": 2180
|
1560 |
+
},
|
1561 |
+
{
|
1562 |
+
"epoch": 1.8582944420873992,
|
1563 |
+
"grad_norm": 0.21286417342881453,
|
1564 |
+
"learning_rate": 3.9566081958215734e-05,
|
1565 |
+
"loss": 1.409,
|
1566 |
+
"step": 2190
|
1567 |
+
},
|
1568 |
+
{
|
1569 |
+
"epoch": 1.8667798048366566,
|
1570 |
+
"grad_norm": 0.21944800687444807,
|
1571 |
+
"learning_rate": 3.9445412777601284e-05,
|
1572 |
+
"loss": 1.3877,
|
1573 |
+
"step": 2200
|
1574 |
+
},
|
1575 |
+
{
|
1576 |
+
"epoch": 1.8752651675859142,
|
1577 |
+
"grad_norm": 0.23113173625974803,
|
1578 |
+
"learning_rate": 3.932423624096181e-05,
|
1579 |
+
"loss": 1.4089,
|
1580 |
+
"step": 2210
|
1581 |
+
},
|
1582 |
+
{
|
1583 |
+
"epoch": 1.8837505303351718,
|
1584 |
+
"grad_norm": 0.2081941699587778,
|
1585 |
+
"learning_rate": 3.920255660429517e-05,
|
1586 |
+
"loss": 1.4024,
|
1587 |
+
"step": 2220
|
1588 |
+
},
|
1589 |
+
{
|
1590 |
+
"epoch": 1.8922358930844294,
|
1591 |
+
"grad_norm": 0.2188685806654701,
|
1592 |
+
"learning_rate": 3.908037814126927e-05,
|
1593 |
+
"loss": 1.3878,
|
1594 |
+
"step": 2230
|
1595 |
+
},
|
1596 |
+
{
|
1597 |
+
"epoch": 1.900721255833687,
|
1598 |
+
"grad_norm": 0.22761843244757962,
|
1599 |
+
"learning_rate": 3.895770514307193e-05,
|
1600 |
+
"loss": 1.4004,
|
1601 |
+
"step": 2240
|
1602 |
+
},
|
1603 |
+
{
|
1604 |
+
"epoch": 1.9092066185829444,
|
1605 |
+
"grad_norm": 0.23309183623120422,
|
1606 |
+
"learning_rate": 3.883454191826017e-05,
|
1607 |
+
"loss": 1.4188,
|
1608 |
+
"step": 2250
|
1609 |
+
},
|
1610 |
+
{
|
1611 |
+
"epoch": 1.917691981332202,
|
1612 |
+
"grad_norm": 0.20329785843911802,
|
1613 |
+
"learning_rate": 3.871089279260891e-05,
|
1614 |
+
"loss": 1.3893,
|
1615 |
+
"step": 2260
|
1616 |
+
},
|
1617 |
+
{
|
1618 |
+
"epoch": 1.9261773440814594,
|
1619 |
+
"grad_norm": 0.23470973193726366,
|
1620 |
+
"learning_rate": 3.8586762108958995e-05,
|
1621 |
+
"loss": 1.3974,
|
1622 |
+
"step": 2270
|
1623 |
+
},
|
1624 |
+
{
|
1625 |
+
"epoch": 1.934662706830717,
|
1626 |
+
"grad_norm": 0.22779136837044714,
|
1627 |
+
"learning_rate": 3.8462154227064725e-05,
|
1628 |
+
"loss": 1.4115,
|
1629 |
+
"step": 2280
|
1630 |
+
},
|
1631 |
+
{
|
1632 |
+
"epoch": 1.9431480695799745,
|
1633 |
+
"grad_norm": 0.22338952315651892,
|
1634 |
+
"learning_rate": 3.833707352344068e-05,
|
1635 |
+
"loss": 1.3873,
|
1636 |
+
"step": 2290
|
1637 |
+
},
|
1638 |
+
{
|
1639 |
+
"epoch": 1.9516334323292321,
|
1640 |
+
"grad_norm": 0.23069304025882129,
|
1641 |
+
"learning_rate": 3.821152439120801e-05,
|
1642 |
+
"loss": 1.3944,
|
1643 |
+
"step": 2300
|
1644 |
+
},
|
1645 |
+
{
|
1646 |
+
"epoch": 1.9601187950784897,
|
1647 |
+
"grad_norm": 0.23590596270163203,
|
1648 |
+
"learning_rate": 3.808551123994018e-05,
|
1649 |
+
"loss": 1.3857,
|
1650 |
+
"step": 2310
|
1651 |
+
},
|
1652 |
+
{
|
1653 |
+
"epoch": 1.9686041578277471,
|
1654 |
+
"grad_norm": 0.22545661808214923,
|
1655 |
+
"learning_rate": 3.795903849550805e-05,
|
1656 |
+
"loss": 1.3628,
|
1657 |
+
"step": 2320
|
1658 |
+
},
|
1659 |
+
{
|
1660 |
+
"epoch": 1.9770895205770047,
|
1661 |
+
"grad_norm": 0.2450769875954842,
|
1662 |
+
"learning_rate": 3.7832110599924455e-05,
|
1663 |
+
"loss": 1.4079,
|
1664 |
+
"step": 2330
|
1665 |
+
},
|
1666 |
+
{
|
1667 |
+
"epoch": 1.985574883326262,
|
1668 |
+
"grad_norm": 0.22931499326784313,
|
1669 |
+
"learning_rate": 3.7704732011188166e-05,
|
1670 |
+
"loss": 1.379,
|
1671 |
+
"step": 2340
|
1672 |
+
},
|
1673 |
+
{
|
1674 |
+
"epoch": 1.9940602460755197,
|
1675 |
+
"grad_norm": 0.22417244507397657,
|
1676 |
+
"learning_rate": 3.7576907203127346e-05,
|
1677 |
+
"loss": 1.4035,
|
1678 |
+
"step": 2350
|
1679 |
+
},
|
1680 |
+
{
|
1681 |
+
"epoch": 2.0025456088247773,
|
1682 |
+
"grad_norm": 0.24496197221575314,
|
1683 |
+
"learning_rate": 3.7448640665242406e-05,
|
1684 |
+
"loss": 1.442,
|
1685 |
+
"step": 2360
|
1686 |
+
},
|
1687 |
+
{
|
1688 |
+
"epoch": 2.011030971574035,
|
1689 |
+
"grad_norm": 0.2532740296990078,
|
1690 |
+
"learning_rate": 3.73199369025483e-05,
|
1691 |
+
"loss": 1.2672,
|
1692 |
+
"step": 2370
|
1693 |
+
},
|
1694 |
+
{
|
1695 |
+
"epoch": 2.0195163343232925,
|
1696 |
+
"grad_norm": 0.2890155987968593,
|
1697 |
+
"learning_rate": 3.7190800435416355e-05,
|
1698 |
+
"loss": 1.246,
|
1699 |
+
"step": 2380
|
1700 |
+
},
|
1701 |
+
{
|
1702 |
+
"epoch": 2.02800169707255,
|
1703 |
+
"grad_norm": 0.2541972565696406,
|
1704 |
+
"learning_rate": 3.706123579941545e-05,
|
1705 |
+
"loss": 1.2603,
|
1706 |
+
"step": 2390
|
1707 |
+
},
|
1708 |
+
{
|
1709 |
+
"epoch": 2.036487059821807,
|
1710 |
+
"grad_norm": 0.2530140862527023,
|
1711 |
+
"learning_rate": 3.693124754515272e-05,
|
1712 |
+
"loss": 1.2638,
|
1713 |
+
"step": 2400
|
1714 |
+
},
|
1715 |
+
{
|
1716 |
+
"epoch": 2.036487059821807,
|
1717 |
+
"eval_loss": 1.435962438583374,
|
1718 |
+
"eval_runtime": 52.582,
|
1719 |
+
"eval_samples_per_second": 7.246,
|
1720 |
+
"eval_steps_per_second": 0.913,
|
1721 |
+
"step": 2400
|
1722 |
+
},
|
1723 |
+
{
|
1724 |
+
"epoch": 2.044972422571065,
|
1725 |
+
"grad_norm": 0.25100458343337734,
|
1726 |
+
"learning_rate": 3.680084023811377e-05,
|
1727 |
+
"loss": 1.2711,
|
1728 |
+
"step": 2410
|
1729 |
+
},
|
1730 |
+
{
|
1731 |
+
"epoch": 2.0534577853203224,
|
1732 |
+
"grad_norm": 0.2695727673292618,
|
1733 |
+
"learning_rate": 3.66700184585023e-05,
|
1734 |
+
"loss": 1.2578,
|
1735 |
+
"step": 2420
|
1736 |
+
},
|
1737 |
+
{
|
1738 |
+
"epoch": 2.06194314806958,
|
1739 |
+
"grad_norm": 0.2605068415443213,
|
1740 |
+
"learning_rate": 3.6538786801079226e-05,
|
1741 |
+
"loss": 1.2506,
|
1742 |
+
"step": 2430
|
1743 |
+
},
|
1744 |
+
{
|
1745 |
+
"epoch": 2.0704285108188376,
|
1746 |
+
"grad_norm": 0.27415607207865045,
|
1747 |
+
"learning_rate": 3.64071498750013e-05,
|
1748 |
+
"loss": 1.2852,
|
1749 |
+
"step": 2440
|
1750 |
+
},
|
1751 |
+
{
|
1752 |
+
"epoch": 2.078913873568095,
|
1753 |
+
"grad_norm": 0.2688900338206285,
|
1754 |
+
"learning_rate": 3.627511230365928e-05,
|
1755 |
+
"loss": 1.2695,
|
1756 |
+
"step": 2450
|
1757 |
+
},
|
1758 |
+
{
|
1759 |
+
"epoch": 2.0873992363173524,
|
1760 |
+
"grad_norm": 0.2750825805336503,
|
1761 |
+
"learning_rate": 3.614267872451546e-05,
|
1762 |
+
"loss": 1.2643,
|
1763 |
+
"step": 2460
|
1764 |
+
},
|
1765 |
+
{
|
1766 |
+
"epoch": 2.09588459906661,
|
1767 |
+
"grad_norm": 0.2659269066581903,
|
1768 |
+
"learning_rate": 3.600985378894086e-05,
|
1769 |
+
"loss": 1.2868,
|
1770 |
+
"step": 2470
|
1771 |
+
},
|
1772 |
+
{
|
1773 |
+
"epoch": 2.1043699618158676,
|
1774 |
+
"grad_norm": 0.24411151291321526,
|
1775 |
+
"learning_rate": 3.587664216205183e-05,
|
1776 |
+
"loss": 1.2571,
|
1777 |
+
"step": 2480
|
1778 |
+
},
|
1779 |
+
{
|
1780 |
+
"epoch": 2.112855324565125,
|
1781 |
+
"grad_norm": 0.2574194755634052,
|
1782 |
+
"learning_rate": 3.574304852254621e-05,
|
1783 |
+
"loss": 1.2769,
|
1784 |
+
"step": 2490
|
1785 |
+
},
|
1786 |
+
{
|
1787 |
+
"epoch": 2.1213406873143827,
|
1788 |
+
"grad_norm": 0.2894545074998905,
|
1789 |
+
"learning_rate": 3.5609077562538997e-05,
|
1790 |
+
"loss": 1.2469,
|
1791 |
+
"step": 2500
|
1792 |
+
},
|
1793 |
+
{
|
1794 |
+
"epoch": 2.1298260500636403,
|
1795 |
+
"grad_norm": 0.2828176429904294,
|
1796 |
+
"learning_rate": 3.547473398739754e-05,
|
1797 |
+
"loss": 1.2527,
|
1798 |
+
"step": 2510
|
1799 |
+
},
|
1800 |
+
{
|
1801 |
+
"epoch": 2.138311412812898,
|
1802 |
+
"grad_norm": 0.25886029771650565,
|
1803 |
+
"learning_rate": 3.5340022515576294e-05,
|
1804 |
+
"loss": 1.2578,
|
1805 |
+
"step": 2520
|
1806 |
+
},
|
1807 |
+
{
|
1808 |
+
"epoch": 2.146796775562155,
|
1809 |
+
"grad_norm": 0.2783799371621383,
|
1810 |
+
"learning_rate": 3.52049478784511e-05,
|
1811 |
+
"loss": 1.2489,
|
1812 |
+
"step": 2530
|
1813 |
+
},
|
1814 |
+
{
|
1815 |
+
"epoch": 2.1552821383114127,
|
1816 |
+
"grad_norm": 0.2753116113218978,
|
1817 |
+
"learning_rate": 3.506951482015297e-05,
|
1818 |
+
"loss": 1.275,
|
1819 |
+
"step": 2540
|
1820 |
+
},
|
1821 |
+
{
|
1822 |
+
"epoch": 2.1637675010606703,
|
1823 |
+
"grad_norm": 0.28115792079727675,
|
1824 |
+
"learning_rate": 3.493372809740152e-05,
|
1825 |
+
"loss": 1.2554,
|
1826 |
+
"step": 2550
|
1827 |
+
},
|
1828 |
+
{
|
1829 |
+
"epoch": 2.172252863809928,
|
1830 |
+
"grad_norm": 0.27954425325951715,
|
1831 |
+
"learning_rate": 3.479759247933785e-05,
|
1832 |
+
"loss": 1.2618,
|
1833 |
+
"step": 2560
|
1834 |
+
},
|
1835 |
+
{
|
1836 |
+
"epoch": 2.1807382265591855,
|
1837 |
+
"grad_norm": 0.27555174232347995,
|
1838 |
+
"learning_rate": 3.466111274735707e-05,
|
1839 |
+
"loss": 1.2598,
|
1840 |
+
"step": 2570
|
1841 |
+
},
|
1842 |
+
{
|
1843 |
+
"epoch": 2.189223589308443,
|
1844 |
+
"grad_norm": 0.27280827991301104,
|
1845 |
+
"learning_rate": 3.452429369494037e-05,
|
1846 |
+
"loss": 1.262,
|
1847 |
+
"step": 2580
|
1848 |
+
},
|
1849 |
+
{
|
1850 |
+
"epoch": 2.1977089520577007,
|
1851 |
+
"grad_norm": 0.2749685805551003,
|
1852 |
+
"learning_rate": 3.438714012748664e-05,
|
1853 |
+
"loss": 1.2683,
|
1854 |
+
"step": 2590
|
1855 |
+
},
|
1856 |
+
{
|
1857 |
+
"epoch": 2.206194314806958,
|
1858 |
+
"grad_norm": 0.2780594302788235,
|
1859 |
+
"learning_rate": 3.424965686214371e-05,
|
1860 |
+
"loss": 1.2462,
|
1861 |
+
"step": 2600
|
1862 |
+
},
|
1863 |
+
{
|
1864 |
+
"epoch": 2.2146796775562154,
|
1865 |
+
"grad_norm": 0.2942257416636676,
|
1866 |
+
"learning_rate": 3.411184872763915e-05,
|
1867 |
+
"loss": 1.2581,
|
1868 |
+
"step": 2610
|
1869 |
+
},
|
1870 |
+
{
|
1871 |
+
"epoch": 2.223165040305473,
|
1872 |
+
"grad_norm": 0.27000377333423803,
|
1873 |
+
"learning_rate": 3.39737205641107e-05,
|
1874 |
+
"loss": 1.2412,
|
1875 |
+
"step": 2620
|
1876 |
+
},
|
1877 |
+
{
|
1878 |
+
"epoch": 2.2316504030547306,
|
1879 |
+
"grad_norm": 0.28187507810449336,
|
1880 |
+
"learning_rate": 3.383527722293622e-05,
|
1881 |
+
"loss": 1.2659,
|
1882 |
+
"step": 2630
|
1883 |
+
},
|
1884 |
+
{
|
1885 |
+
"epoch": 2.240135765803988,
|
1886 |
+
"grad_norm": 0.2736213940552268,
|
1887 |
+
"learning_rate": 3.369652356656336e-05,
|
1888 |
+
"loss": 1.2553,
|
1889 |
+
"step": 2640
|
1890 |
+
},
|
1891 |
+
{
|
1892 |
+
"epoch": 2.248621128553246,
|
1893 |
+
"grad_norm": 0.29698834543438446,
|
1894 |
+
"learning_rate": 3.355746446833873e-05,
|
1895 |
+
"loss": 1.2714,
|
1896 |
+
"step": 2650
|
1897 |
+
},
|
1898 |
+
{
|
1899 |
+
"epoch": 2.257106491302503,
|
1900 |
+
"grad_norm": 0.2875128112484735,
|
1901 |
+
"learning_rate": 3.3418104812336786e-05,
|
1902 |
+
"loss": 1.2508,
|
1903 |
+
"step": 2660
|
1904 |
+
},
|
1905 |
+
{
|
1906 |
+
"epoch": 2.2655918540517606,
|
1907 |
+
"grad_norm": 0.3016647299373059,
|
1908 |
+
"learning_rate": 3.327844949318824e-05,
|
1909 |
+
"loss": 1.2451,
|
1910 |
+
"step": 2670
|
1911 |
+
},
|
1912 |
+
{
|
1913 |
+
"epoch": 2.274077216801018,
|
1914 |
+
"grad_norm": 0.27371321581702696,
|
1915 |
+
"learning_rate": 3.3138503415908176e-05,
|
1916 |
+
"loss": 1.2467,
|
1917 |
+
"step": 2680
|
1918 |
+
},
|
1919 |
+
{
|
1920 |
+
"epoch": 2.2825625795502757,
|
1921 |
+
"grad_norm": 0.28374547760120017,
|
1922 |
+
"learning_rate": 3.299827149572376e-05,
|
1923 |
+
"loss": 1.2452,
|
1924 |
+
"step": 2690
|
1925 |
+
},
|
1926 |
+
{
|
1927 |
+
"epoch": 2.2910479422995333,
|
1928 |
+
"grad_norm": 0.2805999278165284,
|
1929 |
+
"learning_rate": 3.285775865790166e-05,
|
1930 |
+
"loss": 1.2595,
|
1931 |
+
"step": 2700
|
1932 |
+
},
|
1933 |
+
{
|
1934 |
+
"epoch": 2.299533305048791,
|
1935 |
+
"grad_norm": 0.2758019804125597,
|
1936 |
+
"learning_rate": 3.271696983757496e-05,
|
1937 |
+
"loss": 1.2583,
|
1938 |
+
"step": 2710
|
1939 |
+
},
|
1940 |
+
{
|
1941 |
+
"epoch": 2.3080186677980485,
|
1942 |
+
"grad_norm": 0.27211127699988974,
|
1943 |
+
"learning_rate": 3.2575909979569906e-05,
|
1944 |
+
"loss": 1.2255,
|
1945 |
+
"step": 2720
|
1946 |
+
},
|
1947 |
+
{
|
1948 |
+
"epoch": 2.316504030547306,
|
1949 |
+
"grad_norm": 0.2741831859110416,
|
1950 |
+
"learning_rate": 3.243458403823223e-05,
|
1951 |
+
"loss": 1.2335,
|
1952 |
+
"step": 2730
|
1953 |
+
},
|
1954 |
+
{
|
1955 |
+
"epoch": 2.3249893932965633,
|
1956 |
+
"grad_norm": 0.287074507507,
|
1957 |
+
"learning_rate": 3.2292996977253075e-05,
|
1958 |
+
"loss": 1.2555,
|
1959 |
+
"step": 2740
|
1960 |
+
},
|
1961 |
+
{
|
1962 |
+
"epoch": 2.333474756045821,
|
1963 |
+
"grad_norm": 0.2760197579958247,
|
1964 |
+
"learning_rate": 3.215115376949474e-05,
|
1965 |
+
"loss": 1.2574,
|
1966 |
+
"step": 2750
|
1967 |
+
},
|
1968 |
+
{
|
1969 |
+
"epoch": 2.3419601187950785,
|
1970 |
+
"grad_norm": 0.29917391348714156,
|
1971 |
+
"learning_rate": 3.200905939681599e-05,
|
1972 |
+
"loss": 1.2232,
|
1973 |
+
"step": 2760
|
1974 |
+
},
|
1975 |
+
{
|
1976 |
+
"epoch": 2.350445481544336,
|
1977 |
+
"grad_norm": 0.2863180346672473,
|
1978 |
+
"learning_rate": 3.1866718849897044e-05,
|
1979 |
+
"loss": 1.2341,
|
1980 |
+
"step": 2770
|
1981 |
+
},
|
1982 |
+
{
|
1983 |
+
"epoch": 2.3589308442935937,
|
1984 |
+
"grad_norm": 0.2760526831444543,
|
1985 |
+
"learning_rate": 3.172413712806435e-05,
|
1986 |
+
"loss": 1.253,
|
1987 |
+
"step": 2780
|
1988 |
+
},
|
1989 |
+
{
|
1990 |
+
"epoch": 2.3674162070428513,
|
1991 |
+
"grad_norm": 0.29286413736773825,
|
1992 |
+
"learning_rate": 3.158131923911498e-05,
|
1993 |
+
"loss": 1.2617,
|
1994 |
+
"step": 2790
|
1995 |
+
},
|
1996 |
+
{
|
1997 |
+
"epoch": 2.3759015697921084,
|
1998 |
+
"grad_norm": 0.27643034174892955,
|
1999 |
+
"learning_rate": 3.143827019914072e-05,
|
2000 |
+
"loss": 1.2152,
|
2001 |
+
"step": 2800
|
2002 |
+
},
|
2003 |
+
{
|
2004 |
+
"epoch": 2.384386932541366,
|
2005 |
+
"grad_norm": 0.2939949433037669,
|
2006 |
+
"learning_rate": 3.12949950323519e-05,
|
2007 |
+
"loss": 1.2354,
|
2008 |
+
"step": 2810
|
2009 |
+
},
|
2010 |
+
{
|
2011 |
+
"epoch": 2.3928722952906236,
|
2012 |
+
"grad_norm": 0.2864245267570891,
|
2013 |
+
"learning_rate": 3.115149877090097e-05,
|
2014 |
+
"loss": 1.2447,
|
2015 |
+
"step": 2820
|
2016 |
+
},
|
2017 |
+
{
|
2018 |
+
"epoch": 2.401357658039881,
|
2019 |
+
"grad_norm": 0.2952829920235313,
|
2020 |
+
"learning_rate": 3.1007786454705724e-05,
|
2021 |
+
"loss": 1.2462,
|
2022 |
+
"step": 2830
|
2023 |
+
},
|
2024 |
+
{
|
2025 |
+
"epoch": 2.409843020789139,
|
2026 |
+
"grad_norm": 0.3032080033620836,
|
2027 |
+
"learning_rate": 3.0863863131272265e-05,
|
2028 |
+
"loss": 1.2317,
|
2029 |
+
"step": 2840
|
2030 |
+
},
|
2031 |
+
{
|
2032 |
+
"epoch": 2.4183283835383964,
|
2033 |
+
"grad_norm": 0.2678380639415362,
|
2034 |
+
"learning_rate": 3.07197338555178e-05,
|
2035 |
+
"loss": 1.2466,
|
2036 |
+
"step": 2850
|
2037 |
+
},
|
2038 |
+
{
|
2039 |
+
"epoch": 2.426813746287654,
|
2040 |
+
"grad_norm": 0.3000338098809928,
|
2041 |
+
"learning_rate": 3.0575403689593016e-05,
|
2042 |
+
"loss": 1.2469,
|
2043 |
+
"step": 2860
|
2044 |
+
},
|
2045 |
+
{
|
2046 |
+
"epoch": 2.435299109036911,
|
2047 |
+
"grad_norm": 0.2885428511714088,
|
2048 |
+
"learning_rate": 3.043087770270435e-05,
|
2049 |
+
"loss": 1.241,
|
2050 |
+
"step": 2870
|
2051 |
+
},
|
2052 |
+
{
|
2053 |
+
"epoch": 2.4437844717861688,
|
2054 |
+
"grad_norm": 0.2902606566366597,
|
2055 |
+
"learning_rate": 3.0286160970935906e-05,
|
2056 |
+
"loss": 1.2498,
|
2057 |
+
"step": 2880
|
2058 |
+
},
|
2059 |
+
{
|
2060 |
+
"epoch": 2.4522698345354264,
|
2061 |
+
"grad_norm": 0.2930924599960876,
|
2062 |
+
"learning_rate": 3.0141258577071184e-05,
|
2063 |
+
"loss": 1.2508,
|
2064 |
+
"step": 2890
|
2065 |
+
},
|
2066 |
+
{
|
2067 |
+
"epoch": 2.460755197284684,
|
2068 |
+
"grad_norm": 0.28761403953538467,
|
2069 |
+
"learning_rate": 2.9996175610414572e-05,
|
2070 |
+
"loss": 1.2379,
|
2071 |
+
"step": 2900
|
2072 |
+
},
|
2073 |
+
{
|
2074 |
+
"epoch": 2.4692405600339415,
|
2075 |
+
"grad_norm": 0.28888693356528744,
|
2076 |
+
"learning_rate": 2.9850917166612586e-05,
|
2077 |
+
"loss": 1.2383,
|
2078 |
+
"step": 2910
|
2079 |
+
},
|
2080 |
+
{
|
2081 |
+
"epoch": 2.477725922783199,
|
2082 |
+
"grad_norm": 0.29714323219094924,
|
2083 |
+
"learning_rate": 2.9705488347474896e-05,
|
2084 |
+
"loss": 1.2221,
|
2085 |
+
"step": 2920
|
2086 |
+
},
|
2087 |
+
{
|
2088 |
+
"epoch": 2.4862112855324563,
|
2089 |
+
"grad_norm": 0.3024332099011336,
|
2090 |
+
"learning_rate": 2.9559894260795144e-05,
|
2091 |
+
"loss": 1.2417,
|
2092 |
+
"step": 2930
|
2093 |
+
},
|
2094 |
+
{
|
2095 |
+
"epoch": 2.494696648281714,
|
2096 |
+
"grad_norm": 0.2900123354730048,
|
2097 |
+
"learning_rate": 2.9414140020171554e-05,
|
2098 |
+
"loss": 1.2543,
|
2099 |
+
"step": 2940
|
2100 |
+
},
|
2101 |
+
{
|
2102 |
+
"epoch": 2.5031820110309715,
|
2103 |
+
"grad_norm": 0.30122390943433014,
|
2104 |
+
"learning_rate": 2.926823074482733e-05,
|
2105 |
+
"loss": 1.2542,
|
2106 |
+
"step": 2950
|
2107 |
+
},
|
2108 |
+
{
|
2109 |
+
"epoch": 2.511667373780229,
|
2110 |
+
"grad_norm": 0.2860208265471049,
|
2111 |
+
"learning_rate": 2.912217155943083e-05,
|
2112 |
+
"loss": 1.2335,
|
2113 |
+
"step": 2960
|
2114 |
+
},
|
2115 |
+
{
|
2116 |
+
"epoch": 2.5201527365294867,
|
2117 |
+
"grad_norm": 0.28980498979259595,
|
2118 |
+
"learning_rate": 2.897596759391561e-05,
|
2119 |
+
"loss": 1.2458,
|
2120 |
+
"step": 2970
|
2121 |
+
},
|
2122 |
+
{
|
2123 |
+
"epoch": 2.5286380992787443,
|
2124 |
+
"grad_norm": 0.30074882444504475,
|
2125 |
+
"learning_rate": 2.8829623983300242e-05,
|
2126 |
+
"loss": 1.2498,
|
2127 |
+
"step": 2980
|
2128 |
+
},
|
2129 |
+
{
|
2130 |
+
"epoch": 2.537123462028002,
|
2131 |
+
"grad_norm": 0.2929721105596463,
|
2132 |
+
"learning_rate": 2.868314586750794e-05,
|
2133 |
+
"loss": 1.2686,
|
2134 |
+
"step": 2990
|
2135 |
+
},
|
2136 |
+
{
|
2137 |
+
"epoch": 2.5456088247772595,
|
2138 |
+
"grad_norm": 0.291755235343187,
|
2139 |
+
"learning_rate": 2.853653839118605e-05,
|
2140 |
+
"loss": 1.2456,
|
2141 |
+
"step": 3000
|
2142 |
+
},
|
2143 |
+
{
|
2144 |
+
"epoch": 2.5456088247772595,
|
2145 |
+
"eval_loss": 1.4051239490509033,
|
2146 |
+
"eval_runtime": 52.7875,
|
2147 |
+
"eval_samples_per_second": 7.218,
|
2148 |
+
"eval_steps_per_second": 0.909,
|
2149 |
+
"step": 3000
|
2150 |
+
},
|
2151 |
+
{
|
2152 |
+
"epoch": 2.5540941875265166,
|
2153 |
+
"grad_norm": 0.3056527705148328,
|
2154 |
+
"learning_rate": 2.8389806703525383e-05,
|
2155 |
+
"loss": 1.2321,
|
2156 |
+
"step": 3010
|
2157 |
+
},
|
2158 |
+
{
|
2159 |
+
"epoch": 2.562579550275774,
|
2160 |
+
"grad_norm": 0.29756401069688737,
|
2161 |
+
"learning_rate": 2.8242955958079303e-05,
|
2162 |
+
"loss": 1.2341,
|
2163 |
+
"step": 3020
|
2164 |
+
},
|
2165 |
+
{
|
2166 |
+
"epoch": 2.571064913025032,
|
2167 |
+
"grad_norm": 0.3077048874608071,
|
2168 |
+
"learning_rate": 2.809599131258276e-05,
|
2169 |
+
"loss": 1.2475,
|
2170 |
+
"step": 3030
|
2171 |
+
},
|
2172 |
+
{
|
2173 |
+
"epoch": 2.5795502757742894,
|
2174 |
+
"grad_norm": 0.31006088313098146,
|
2175 |
+
"learning_rate": 2.7948917928771158e-05,
|
2176 |
+
"loss": 1.2381,
|
2177 |
+
"step": 3040
|
2178 |
+
},
|
2179 |
+
{
|
2180 |
+
"epoch": 2.588035638523547,
|
2181 |
+
"grad_norm": 0.3086227102652305,
|
2182 |
+
"learning_rate": 2.7801740972199014e-05,
|
2183 |
+
"loss": 1.2386,
|
2184 |
+
"step": 3050
|
2185 |
+
},
|
2186 |
+
{
|
2187 |
+
"epoch": 2.596521001272804,
|
2188 |
+
"grad_norm": 0.2909420805400902,
|
2189 |
+
"learning_rate": 2.7654465612058573e-05,
|
2190 |
+
"loss": 1.2071,
|
2191 |
+
"step": 3060
|
2192 |
+
},
|
2193 |
+
{
|
2194 |
+
"epoch": 2.6050063640220618,
|
2195 |
+
"grad_norm": 0.30310956499188235,
|
2196 |
+
"learning_rate": 2.7507097020998246e-05,
|
2197 |
+
"loss": 1.2206,
|
2198 |
+
"step": 3070
|
2199 |
+
},
|
2200 |
+
{
|
2201 |
+
"epoch": 2.6134917267713194,
|
2202 |
+
"grad_norm": 0.2873915382033808,
|
2203 |
+
"learning_rate": 2.7359640374940904e-05,
|
2204 |
+
"loss": 1.2346,
|
2205 |
+
"step": 3080
|
2206 |
+
},
|
2207 |
+
{
|
2208 |
+
"epoch": 2.621977089520577,
|
2209 |
+
"grad_norm": 0.29404028686651285,
|
2210 |
+
"learning_rate": 2.7212100852902133e-05,
|
2211 |
+
"loss": 1.2209,
|
2212 |
+
"step": 3090
|
2213 |
+
},
|
2214 |
+
{
|
2215 |
+
"epoch": 2.6304624522698346,
|
2216 |
+
"grad_norm": 0.2967558623710032,
|
2217 |
+
"learning_rate": 2.7064483636808313e-05,
|
2218 |
+
"loss": 1.2471,
|
2219 |
+
"step": 3100
|
2220 |
+
},
|
2221 |
+
{
|
2222 |
+
"epoch": 2.638947815019092,
|
2223 |
+
"grad_norm": 0.28348844201193973,
|
2224 |
+
"learning_rate": 2.6916793911314593e-05,
|
2225 |
+
"loss": 1.2271,
|
2226 |
+
"step": 3110
|
2227 |
+
},
|
2228 |
+
{
|
2229 |
+
"epoch": 2.6474331777683497,
|
2230 |
+
"grad_norm": 0.30908939180701456,
|
2231 |
+
"learning_rate": 2.6769036863622842e-05,
|
2232 |
+
"loss": 1.2348,
|
2233 |
+
"step": 3120
|
2234 |
+
},
|
2235 |
+
{
|
2236 |
+
"epoch": 2.6559185405176073,
|
2237 |
+
"grad_norm": 0.296064586506253,
|
2238 |
+
"learning_rate": 2.6621217683299437e-05,
|
2239 |
+
"loss": 1.2118,
|
2240 |
+
"step": 3130
|
2241 |
+
},
|
2242 |
+
{
|
2243 |
+
"epoch": 2.6644039032668645,
|
2244 |
+
"grad_norm": 0.29223118946191284,
|
2245 |
+
"learning_rate": 2.647334156209299e-05,
|
2246 |
+
"loss": 1.2368,
|
2247 |
+
"step": 3140
|
2248 |
+
},
|
2249 |
+
{
|
2250 |
+
"epoch": 2.672889266016122,
|
2251 |
+
"grad_norm": 0.2974562276968823,
|
2252 |
+
"learning_rate": 2.6325413693752004e-05,
|
2253 |
+
"loss": 1.2392,
|
2254 |
+
"step": 3150
|
2255 |
+
},
|
2256 |
+
{
|
2257 |
+
"epoch": 2.6813746287653797,
|
2258 |
+
"grad_norm": 0.30862646184519243,
|
2259 |
+
"learning_rate": 2.6177439273842463e-05,
|
2260 |
+
"loss": 1.244,
|
2261 |
+
"step": 3160
|
2262 |
+
},
|
2263 |
+
{
|
2264 |
+
"epoch": 2.6898599915146373,
|
2265 |
+
"grad_norm": 0.2958164221091078,
|
2266 |
+
"learning_rate": 2.602942349956536e-05,
|
2267 |
+
"loss": 1.2377,
|
2268 |
+
"step": 3170
|
2269 |
+
},
|
2270 |
+
{
|
2271 |
+
"epoch": 2.698345354263895,
|
2272 |
+
"grad_norm": 0.2941370782364945,
|
2273 |
+
"learning_rate": 2.5881371569574125e-05,
|
2274 |
+
"loss": 1.2296,
|
2275 |
+
"step": 3180
|
2276 |
+
},
|
2277 |
+
{
|
2278 |
+
"epoch": 2.7068307170131525,
|
2279 |
+
"grad_norm": 0.2949825785995608,
|
2280 |
+
"learning_rate": 2.5733288683792084e-05,
|
2281 |
+
"loss": 1.2292,
|
2282 |
+
"step": 3190
|
2283 |
+
},
|
2284 |
+
{
|
2285 |
+
"epoch": 2.7153160797624096,
|
2286 |
+
"grad_norm": 0.3020705479686205,
|
2287 |
+
"learning_rate": 2.558518004322979e-05,
|
2288 |
+
"loss": 1.2371,
|
2289 |
+
"step": 3200
|
2290 |
+
},
|
2291 |
+
{
|
2292 |
+
"epoch": 2.7238014425116672,
|
2293 |
+
"grad_norm": 0.3134012668403533,
|
2294 |
+
"learning_rate": 2.5437050849802356e-05,
|
2295 |
+
"loss": 1.2257,
|
2296 |
+
"step": 3210
|
2297 |
+
},
|
2298 |
+
{
|
2299 |
+
"epoch": 2.732286805260925,
|
2300 |
+
"grad_norm": 0.320012175903156,
|
2301 |
+
"learning_rate": 2.528890630614677e-05,
|
2302 |
+
"loss": 1.215,
|
2303 |
+
"step": 3220
|
2304 |
+
},
|
2305 |
+
{
|
2306 |
+
"epoch": 2.7407721680101824,
|
2307 |
+
"grad_norm": 0.2936053513063533,
|
2308 |
+
"learning_rate": 2.514075161543915e-05,
|
2309 |
+
"loss": 1.2364,
|
2310 |
+
"step": 3230
|
2311 |
+
},
|
2312 |
+
{
|
2313 |
+
"epoch": 2.74925753075944,
|
2314 |
+
"grad_norm": 0.30515854484741317,
|
2315 |
+
"learning_rate": 2.499259198121201e-05,
|
2316 |
+
"loss": 1.2117,
|
2317 |
+
"step": 3240
|
2318 |
+
},
|
2319 |
+
{
|
2320 |
+
"epoch": 2.7577428935086976,
|
2321 |
+
"grad_norm": 0.29763910785937486,
|
2322 |
+
"learning_rate": 2.484443260717147e-05,
|
2323 |
+
"loss": 1.2583,
|
2324 |
+
"step": 3250
|
2325 |
+
},
|
2326 |
+
{
|
2327 |
+
"epoch": 2.766228256257955,
|
2328 |
+
"grad_norm": 0.2975895109826329,
|
2329 |
+
"learning_rate": 2.4696278697014538e-05,
|
2330 |
+
"loss": 1.2153,
|
2331 |
+
"step": 3260
|
2332 |
+
},
|
2333 |
+
{
|
2334 |
+
"epoch": 2.774713619007213,
|
2335 |
+
"grad_norm": 0.2778934357454163,
|
2336 |
+
"learning_rate": 2.4548135454246306e-05,
|
2337 |
+
"loss": 1.2291,
|
2338 |
+
"step": 3270
|
2339 |
+
},
|
2340 |
+
{
|
2341 |
+
"epoch": 2.78319898175647,
|
2342 |
+
"grad_norm": 0.32172302391314234,
|
2343 |
+
"learning_rate": 2.4400008081997196e-05,
|
2344 |
+
"loss": 1.214,
|
2345 |
+
"step": 3280
|
2346 |
+
},
|
2347 |
+
{
|
2348 |
+
"epoch": 2.7916843445057276,
|
2349 |
+
"grad_norm": 0.3008060855751439,
|
2350 |
+
"learning_rate": 2.425190178284024e-05,
|
2351 |
+
"loss": 1.234,
|
2352 |
+
"step": 3290
|
2353 |
+
},
|
2354 |
+
{
|
2355 |
+
"epoch": 2.800169707254985,
|
2356 |
+
"grad_norm": 0.3093107834664431,
|
2357 |
+
"learning_rate": 2.4103821758608307e-05,
|
2358 |
+
"loss": 1.2492,
|
2359 |
+
"step": 3300
|
2360 |
+
},
|
2361 |
+
{
|
2362 |
+
"epoch": 2.8086550700042427,
|
2363 |
+
"grad_norm": 0.30554454329766617,
|
2364 |
+
"learning_rate": 2.3955773210211465e-05,
|
2365 |
+
"loss": 1.2401,
|
2366 |
+
"step": 3310
|
2367 |
+
},
|
2368 |
+
{
|
2369 |
+
"epoch": 2.8171404327535003,
|
2370 |
+
"grad_norm": 0.2997812898388053,
|
2371 |
+
"learning_rate": 2.380776133745425e-05,
|
2372 |
+
"loss": 1.2089,
|
2373 |
+
"step": 3320
|
2374 |
+
},
|
2375 |
+
{
|
2376 |
+
"epoch": 2.8256257955027575,
|
2377 |
+
"grad_norm": 0.2962123179493644,
|
2378 |
+
"learning_rate": 2.3659791338853066e-05,
|
2379 |
+
"loss": 1.2324,
|
2380 |
+
"step": 3330
|
2381 |
+
},
|
2382 |
+
{
|
2383 |
+
"epoch": 2.834111158252015,
|
2384 |
+
"grad_norm": 0.31328021490753843,
|
2385 |
+
"learning_rate": 2.3511868411453623e-05,
|
2386 |
+
"loss": 1.214,
|
2387 |
+
"step": 3340
|
2388 |
+
},
|
2389 |
+
{
|
2390 |
+
"epoch": 2.8425965210012727,
|
2391 |
+
"grad_norm": 0.30414013736166967,
|
2392 |
+
"learning_rate": 2.3363997750648357e-05,
|
2393 |
+
"loss": 1.2142,
|
2394 |
+
"step": 3350
|
2395 |
+
},
|
2396 |
+
{
|
2397 |
+
"epoch": 2.8510818837505303,
|
2398 |
+
"grad_norm": 0.3079683108015467,
|
2399 |
+
"learning_rate": 2.3216184549994006e-05,
|
2400 |
+
"loss": 1.2137,
|
2401 |
+
"step": 3360
|
2402 |
+
},
|
2403 |
+
{
|
2404 |
+
"epoch": 2.859567246499788,
|
2405 |
+
"grad_norm": 0.31165537422962203,
|
2406 |
+
"learning_rate": 2.3068434001029173e-05,
|
2407 |
+
"loss": 1.1915,
|
2408 |
+
"step": 3370
|
2409 |
+
},
|
2410 |
+
{
|
2411 |
+
"epoch": 2.8680526092490455,
|
2412 |
+
"grad_norm": 0.3190451157121095,
|
2413 |
+
"learning_rate": 2.2920751293091948e-05,
|
2414 |
+
"loss": 1.2193,
|
2415 |
+
"step": 3380
|
2416 |
+
},
|
2417 |
+
{
|
2418 |
+
"epoch": 2.876537971998303,
|
2419 |
+
"grad_norm": 0.30247955079343214,
|
2420 |
+
"learning_rate": 2.277314161313774e-05,
|
2421 |
+
"loss": 1.2253,
|
2422 |
+
"step": 3390
|
2423 |
+
},
|
2424 |
+
{
|
2425 |
+
"epoch": 2.8850233347475607,
|
2426 |
+
"grad_norm": 0.2936629891547958,
|
2427 |
+
"learning_rate": 2.262561014555703e-05,
|
2428 |
+
"loss": 1.2136,
|
2429 |
+
"step": 3400
|
2430 |
+
},
|
2431 |
+
{
|
2432 |
+
"epoch": 2.893508697496818,
|
2433 |
+
"grad_norm": 0.3001872886250926,
|
2434 |
+
"learning_rate": 2.2478162071993298e-05,
|
2435 |
+
"loss": 1.2061,
|
2436 |
+
"step": 3410
|
2437 |
+
},
|
2438 |
+
{
|
2439 |
+
"epoch": 2.9019940602460754,
|
2440 |
+
"grad_norm": 0.3111993397471538,
|
2441 |
+
"learning_rate": 2.233080257116103e-05,
|
2442 |
+
"loss": 1.2193,
|
2443 |
+
"step": 3420
|
2444 |
+
},
|
2445 |
+
{
|
2446 |
+
"epoch": 2.910479422995333,
|
2447 |
+
"grad_norm": 0.3119972602479391,
|
2448 |
+
"learning_rate": 2.2183536818663856e-05,
|
2449 |
+
"loss": 1.2125,
|
2450 |
+
"step": 3430
|
2451 |
+
},
|
2452 |
+
{
|
2453 |
+
"epoch": 2.9189647857445906,
|
2454 |
+
"grad_norm": 0.30787460159489605,
|
2455 |
+
"learning_rate": 2.2036369986812713e-05,
|
2456 |
+
"loss": 1.215,
|
2457 |
+
"step": 3440
|
2458 |
+
},
|
2459 |
+
{
|
2460 |
+
"epoch": 2.927450148493848,
|
2461 |
+
"grad_norm": 0.3175794611301324,
|
2462 |
+
"learning_rate": 2.1889307244444252e-05,
|
2463 |
+
"loss": 1.2202,
|
2464 |
+
"step": 3450
|
2465 |
+
},
|
2466 |
+
{
|
2467 |
+
"epoch": 2.9359355112431054,
|
2468 |
+
"grad_norm": 0.2956361362699715,
|
2469 |
+
"learning_rate": 2.1742353756739247e-05,
|
2470 |
+
"loss": 1.2125,
|
2471 |
+
"step": 3460
|
2472 |
+
},
|
2473 |
+
{
|
2474 |
+
"epoch": 2.944420873992363,
|
2475 |
+
"grad_norm": 0.33942331815482624,
|
2476 |
+
"learning_rate": 2.1595514685041205e-05,
|
2477 |
+
"loss": 1.2173,
|
2478 |
+
"step": 3470
|
2479 |
+
},
|
2480 |
+
{
|
2481 |
+
"epoch": 2.9529062367416206,
|
2482 |
+
"grad_norm": 0.332180923100718,
|
2483 |
+
"learning_rate": 2.144879518667507e-05,
|
2484 |
+
"loss": 1.2266,
|
2485 |
+
"step": 3480
|
2486 |
+
},
|
2487 |
+
{
|
2488 |
+
"epoch": 2.961391599490878,
|
2489 |
+
"grad_norm": 0.3157083099981729,
|
2490 |
+
"learning_rate": 2.1302200414766123e-05,
|
2491 |
+
"loss": 1.2154,
|
2492 |
+
"step": 3490
|
2493 |
+
},
|
2494 |
+
{
|
2495 |
+
"epoch": 2.9698769622401358,
|
2496 |
+
"grad_norm": 0.3149298205272042,
|
2497 |
+
"learning_rate": 2.1155735518058914e-05,
|
2498 |
+
"loss": 1.2232,
|
2499 |
+
"step": 3500
|
2500 |
+
},
|
2501 |
+
{
|
2502 |
+
"epoch": 2.9783623249893934,
|
2503 |
+
"grad_norm": 0.3132779789709915,
|
2504 |
+
"learning_rate": 2.100940564073653e-05,
|
2505 |
+
"loss": 1.2299,
|
2506 |
+
"step": 3510
|
2507 |
+
},
|
2508 |
+
{
|
2509 |
+
"epoch": 2.986847687738651,
|
2510 |
+
"grad_norm": 0.29387121758746726,
|
2511 |
+
"learning_rate": 2.086321592223984e-05,
|
2512 |
+
"loss": 1.2219,
|
2513 |
+
"step": 3520
|
2514 |
+
},
|
2515 |
+
{
|
2516 |
+
"epoch": 2.9953330504879085,
|
2517 |
+
"grad_norm": 0.2954282506485496,
|
2518 |
+
"learning_rate": 2.0717171497087014e-05,
|
2519 |
+
"loss": 1.2321,
|
2520 |
+
"step": 3530
|
2521 |
+
},
|
2522 |
+
{
|
2523 |
+
"epoch": 3.0038184132371657,
|
2524 |
+
"grad_norm": 0.2991107469221935,
|
2525 |
+
"learning_rate": 2.057127749469321e-05,
|
2526 |
+
"loss": 1.2387,
|
2527 |
+
"step": 3540
|
2528 |
+
},
|
2529 |
+
{
|
2530 |
+
"epoch": 3.0123037759864233,
|
2531 |
+
"grad_norm": 0.3135732189536929,
|
2532 |
+
"learning_rate": 2.042553903919036e-05,
|
2533 |
+
"loss": 1.1058,
|
2534 |
+
"step": 3550
|
2535 |
+
},
|
2536 |
+
{
|
2537 |
+
"epoch": 3.020789138735681,
|
2538 |
+
"grad_norm": 0.3179199807851635,
|
2539 |
+
"learning_rate": 2.0279961249247274e-05,
|
2540 |
+
"loss": 1.0677,
|
2541 |
+
"step": 3560
|
2542 |
+
},
|
2543 |
+
{
|
2544 |
+
"epoch": 3.0292745014849385,
|
2545 |
+
"grad_norm": 0.3217398418422315,
|
2546 |
+
"learning_rate": 2.0134549237889765e-05,
|
2547 |
+
"loss": 1.0978,
|
2548 |
+
"step": 3570
|
2549 |
+
},
|
2550 |
+
{
|
2551 |
+
"epoch": 3.037759864234196,
|
2552 |
+
"grad_norm": 0.32343352041544976,
|
2553 |
+
"learning_rate": 1.9989308112321164e-05,
|
2554 |
+
"loss": 1.0791,
|
2555 |
+
"step": 3580
|
2556 |
+
},
|
2557 |
+
{
|
2558 |
+
"epoch": 3.0462452269834537,
|
2559 |
+
"grad_norm": 0.32321267638520695,
|
2560 |
+
"learning_rate": 1.9844242973742886e-05,
|
2561 |
+
"loss": 1.0991,
|
2562 |
+
"step": 3590
|
2563 |
+
},
|
2564 |
+
{
|
2565 |
+
"epoch": 3.0547305897327113,
|
2566 |
+
"grad_norm": 0.321242777420917,
|
2567 |
+
"learning_rate": 1.9699358917175297e-05,
|
2568 |
+
"loss": 1.104,
|
2569 |
+
"step": 3600
|
2570 |
+
},
|
2571 |
+
{
|
2572 |
+
"epoch": 3.0547305897327113,
|
2573 |
+
"eval_loss": 1.3918192386627197,
|
2574 |
+
"eval_runtime": 52.2671,
|
2575 |
+
"eval_samples_per_second": 7.289,
|
2576 |
+
"eval_steps_per_second": 0.918,
|
2577 |
+
"step": 3600
|
2578 |
+
},
|
2579 |
+
{
|
2580 |
+
"epoch": 3.0632159524819684,
|
2581 |
+
"grad_norm": 0.3313441880623986,
|
2582 |
+
"learning_rate": 1.9554661031278712e-05,
|
2583 |
+
"loss": 1.081,
|
2584 |
+
"step": 3610
|
2585 |
+
},
|
2586 |
+
{
|
2587 |
+
"epoch": 3.071701315231226,
|
2588 |
+
"grad_norm": 0.3866269219140372,
|
2589 |
+
"learning_rate": 1.9410154398174742e-05,
|
2590 |
+
"loss": 1.0826,
|
2591 |
+
"step": 3620
|
2592 |
+
},
|
2593 |
+
{
|
2594 |
+
"epoch": 3.0801866779804836,
|
2595 |
+
"grad_norm": 0.32719602353062216,
|
2596 |
+
"learning_rate": 1.9265844093267728e-05,
|
2597 |
+
"loss": 1.0934,
|
2598 |
+
"step": 3630
|
2599 |
+
},
|
2600 |
+
{
|
2601 |
+
"epoch": 3.088672040729741,
|
2602 |
+
"grad_norm": 0.3301678171750988,
|
2603 |
+
"learning_rate": 1.9121735185066537e-05,
|
2604 |
+
"loss": 1.1047,
|
2605 |
+
"step": 3640
|
2606 |
+
},
|
2607 |
+
{
|
2608 |
+
"epoch": 3.097157403478999,
|
2609 |
+
"grad_norm": 0.35644796539197005,
|
2610 |
+
"learning_rate": 1.8977832735006522e-05,
|
2611 |
+
"loss": 1.0994,
|
2612 |
+
"step": 3650
|
2613 |
+
},
|
2614 |
+
{
|
2615 |
+
"epoch": 3.1056427662282564,
|
2616 |
+
"grad_norm": 0.33733028940251475,
|
2617 |
+
"learning_rate": 1.8834141797271742e-05,
|
2618 |
+
"loss": 1.0972,
|
2619 |
+
"step": 3660
|
2620 |
+
},
|
2621 |
+
{
|
2622 |
+
"epoch": 3.114128128977514,
|
2623 |
+
"grad_norm": 0.32548774712269085,
|
2624 |
+
"learning_rate": 1.8690667418617462e-05,
|
2625 |
+
"loss": 1.1046,
|
2626 |
+
"step": 3670
|
2627 |
+
},
|
2628 |
+
{
|
2629 |
+
"epoch": 3.122613491726771,
|
2630 |
+
"grad_norm": 0.3117109384467469,
|
2631 |
+
"learning_rate": 1.854741463819291e-05,
|
2632 |
+
"loss": 1.0791,
|
2633 |
+
"step": 3680
|
2634 |
+
},
|
2635 |
+
{
|
2636 |
+
"epoch": 3.1310988544760288,
|
2637 |
+
"grad_norm": 0.3240987091142989,
|
2638 |
+
"learning_rate": 1.8404388487364242e-05,
|
2639 |
+
"loss": 1.0824,
|
2640 |
+
"step": 3690
|
2641 |
+
},
|
2642 |
+
{
|
2643 |
+
"epoch": 3.1395842172252864,
|
2644 |
+
"grad_norm": 0.3346401099182515,
|
2645 |
+
"learning_rate": 1.8261593989537895e-05,
|
2646 |
+
"loss": 1.0753,
|
2647 |
+
"step": 3700
|
2648 |
+
},
|
2649 |
+
{
|
2650 |
+
"epoch": 3.148069579974544,
|
2651 |
+
"grad_norm": 0.3270030472928521,
|
2652 |
+
"learning_rate": 1.81190361599841e-05,
|
2653 |
+
"loss": 1.0934,
|
2654 |
+
"step": 3710
|
2655 |
+
},
|
2656 |
+
{
|
2657 |
+
"epoch": 3.1565549427238015,
|
2658 |
+
"grad_norm": 0.34129438803355183,
|
2659 |
+
"learning_rate": 1.797672000566077e-05,
|
2660 |
+
"loss": 1.0766,
|
2661 |
+
"step": 3720
|
2662 |
+
},
|
2663 |
+
{
|
2664 |
+
"epoch": 3.165040305473059,
|
2665 |
+
"grad_norm": 0.37057591586243926,
|
2666 |
+
"learning_rate": 1.783465052503762e-05,
|
2667 |
+
"loss": 1.1049,
|
2668 |
+
"step": 3730
|
2669 |
+
},
|
2670 |
+
{
|
2671 |
+
"epoch": 3.1735256682223163,
|
2672 |
+
"grad_norm": 0.3331237337555744,
|
2673 |
+
"learning_rate": 1.769283270792065e-05,
|
2674 |
+
"loss": 1.0876,
|
2675 |
+
"step": 3740
|
2676 |
+
},
|
2677 |
+
{
|
2678 |
+
"epoch": 3.182011030971574,
|
2679 |
+
"grad_norm": 0.31904083541369294,
|
2680 |
+
"learning_rate": 1.7551271535276792e-05,
|
2681 |
+
"loss": 1.1206,
|
2682 |
+
"step": 3750
|
2683 |
+
},
|
2684 |
+
{
|
2685 |
+
"epoch": 3.1904963937208315,
|
2686 |
+
"grad_norm": 0.35953020953263576,
|
2687 |
+
"learning_rate": 1.74099719790591e-05,
|
2688 |
+
"loss": 1.0736,
|
2689 |
+
"step": 3760
|
2690 |
+
},
|
2691 |
+
{
|
2692 |
+
"epoch": 3.198981756470089,
|
2693 |
+
"grad_norm": 0.33595544857573634,
|
2694 |
+
"learning_rate": 1.7268939002032035e-05,
|
2695 |
+
"loss": 1.0969,
|
2696 |
+
"step": 3770
|
2697 |
+
},
|
2698 |
+
{
|
2699 |
+
"epoch": 3.2074671192193467,
|
2700 |
+
"grad_norm": 0.34180259956288195,
|
2701 |
+
"learning_rate": 1.7128177557597185e-05,
|
2702 |
+
"loss": 1.0972,
|
2703 |
+
"step": 3780
|
2704 |
+
},
|
2705 |
+
{
|
2706 |
+
"epoch": 3.2159524819686043,
|
2707 |
+
"grad_norm": 0.3268247890892541,
|
2708 |
+
"learning_rate": 1.6987692589619304e-05,
|
2709 |
+
"loss": 1.0737,
|
2710 |
+
"step": 3790
|
2711 |
+
},
|
2712 |
+
{
|
2713 |
+
"epoch": 3.224437844717862,
|
2714 |
+
"grad_norm": 0.3207792660130559,
|
2715 |
+
"learning_rate": 1.6847489032252627e-05,
|
2716 |
+
"loss": 1.0797,
|
2717 |
+
"step": 3800
|
2718 |
+
},
|
2719 |
+
{
|
2720 |
+
"epoch": 3.232923207467119,
|
2721 |
+
"grad_norm": 0.3467270107463577,
|
2722 |
+
"learning_rate": 1.6707571809767644e-05,
|
2723 |
+
"loss": 1.1024,
|
2724 |
+
"step": 3810
|
2725 |
+
},
|
2726 |
+
{
|
2727 |
+
"epoch": 3.2414085702163766,
|
2728 |
+
"grad_norm": 0.35579083917156773,
|
2729 |
+
"learning_rate": 1.656794583637807e-05,
|
2730 |
+
"loss": 1.1026,
|
2731 |
+
"step": 3820
|
2732 |
+
},
|
2733 |
+
{
|
2734 |
+
"epoch": 3.2498939329656342,
|
2735 |
+
"grad_norm": 0.3339161504484877,
|
2736 |
+
"learning_rate": 1.6428616016068304e-05,
|
2737 |
+
"loss": 1.0866,
|
2738 |
+
"step": 3830
|
2739 |
+
},
|
2740 |
+
{
|
2741 |
+
"epoch": 3.258379295714892,
|
2742 |
+
"grad_norm": 0.34920082505086103,
|
2743 |
+
"learning_rate": 1.628958724242117e-05,
|
2744 |
+
"loss": 1.0789,
|
2745 |
+
"step": 3840
|
2746 |
+
},
|
2747 |
+
{
|
2748 |
+
"epoch": 3.2668646584641494,
|
2749 |
+
"grad_norm": 0.34847114740056345,
|
2750 |
+
"learning_rate": 1.615086439844604e-05,
|
2751 |
+
"loss": 1.0849,
|
2752 |
+
"step": 3850
|
2753 |
+
},
|
2754 |
+
{
|
2755 |
+
"epoch": 3.275350021213407,
|
2756 |
+
"grad_norm": 0.3164317420955498,
|
2757 |
+
"learning_rate": 1.601245235640733e-05,
|
2758 |
+
"loss": 1.0984,
|
2759 |
+
"step": 3860
|
2760 |
+
},
|
2761 |
+
{
|
2762 |
+
"epoch": 3.283835383962664,
|
2763 |
+
"grad_norm": 0.32036510223283066,
|
2764 |
+
"learning_rate": 1.5874355977653392e-05,
|
2765 |
+
"loss": 1.1122,
|
2766 |
+
"step": 3870
|
2767 |
+
},
|
2768 |
+
{
|
2769 |
+
"epoch": 3.2923207467119218,
|
2770 |
+
"grad_norm": 0.3618051784441363,
|
2771 |
+
"learning_rate": 1.5736580112445738e-05,
|
2772 |
+
"loss": 1.0942,
|
2773 |
+
"step": 3880
|
2774 |
+
},
|
2775 |
+
{
|
2776 |
+
"epoch": 3.3008061094611794,
|
2777 |
+
"grad_norm": 0.3376776049321294,
|
2778 |
+
"learning_rate": 1.559912959978872e-05,
|
2779 |
+
"loss": 1.0898,
|
2780 |
+
"step": 3890
|
2781 |
+
},
|
2782 |
+
{
|
2783 |
+
"epoch": 3.309291472210437,
|
2784 |
+
"grad_norm": 0.3402374603685398,
|
2785 |
+
"learning_rate": 1.546200926725958e-05,
|
2786 |
+
"loss": 1.1061,
|
2787 |
+
"step": 3900
|
2788 |
+
},
|
2789 |
+
{
|
2790 |
+
"epoch": 3.3177768349596946,
|
2791 |
+
"grad_norm": 0.32072793842909514,
|
2792 |
+
"learning_rate": 1.5325223930838838e-05,
|
2793 |
+
"loss": 1.0995,
|
2794 |
+
"step": 3910
|
2795 |
+
},
|
2796 |
+
{
|
2797 |
+
"epoch": 3.326262197708952,
|
2798 |
+
"grad_norm": 0.336633795191635,
|
2799 |
+
"learning_rate": 1.518877839474122e-05,
|
2800 |
+
"loss": 1.0971,
|
2801 |
+
"step": 3920
|
2802 |
+
},
|
2803 |
+
{
|
2804 |
+
"epoch": 3.3347475604582097,
|
2805 |
+
"grad_norm": 0.36554031780441654,
|
2806 |
+
"learning_rate": 1.5052677451246877e-05,
|
2807 |
+
"loss": 1.0867,
|
2808 |
+
"step": 3930
|
2809 |
+
},
|
2810 |
+
{
|
2811 |
+
"epoch": 3.3432329232074673,
|
2812 |
+
"grad_norm": 0.3569408014521597,
|
2813 |
+
"learning_rate": 1.491692588053305e-05,
|
2814 |
+
"loss": 1.0992,
|
2815 |
+
"step": 3940
|
2816 |
+
},
|
2817 |
+
{
|
2818 |
+
"epoch": 3.3517182859567245,
|
2819 |
+
"grad_norm": 0.33844743386504,
|
2820 |
+
"learning_rate": 1.4781528450506232e-05,
|
2821 |
+
"loss": 1.103,
|
2822 |
+
"step": 3950
|
2823 |
+
},
|
2824 |
+
{
|
2825 |
+
"epoch": 3.360203648705982,
|
2826 |
+
"grad_norm": 0.31581742738052115,
|
2827 |
+
"learning_rate": 1.4646489916634687e-05,
|
2828 |
+
"loss": 1.0843,
|
2829 |
+
"step": 3960
|
2830 |
+
},
|
2831 |
+
{
|
2832 |
+
"epoch": 3.3686890114552397,
|
2833 |
+
"grad_norm": 0.3882340052077705,
|
2834 |
+
"learning_rate": 1.4511815021781411e-05,
|
2835 |
+
"loss": 1.1146,
|
2836 |
+
"step": 3970
|
2837 |
+
},
|
2838 |
+
{
|
2839 |
+
"epoch": 3.3771743742044973,
|
2840 |
+
"grad_norm": 0.3309843437274728,
|
2841 |
+
"learning_rate": 1.4377508496037567e-05,
|
2842 |
+
"loss": 1.0751,
|
2843 |
+
"step": 3980
|
2844 |
+
},
|
2845 |
+
{
|
2846 |
+
"epoch": 3.385659736953755,
|
2847 |
+
"grad_norm": 0.34050270685357675,
|
2848 |
+
"learning_rate": 1.4243575056556355e-05,
|
2849 |
+
"loss": 1.0995,
|
2850 |
+
"step": 3990
|
2851 |
+
},
|
2852 |
+
{
|
2853 |
+
"epoch": 3.3941450997030125,
|
2854 |
+
"grad_norm": 0.3459308872000444,
|
2855 |
+
"learning_rate": 1.4110019407387315e-05,
|
2856 |
+
"loss": 1.0817,
|
2857 |
+
"step": 4000
|
2858 |
+
},
|
2859 |
+
{
|
2860 |
+
"epoch": 3.4026304624522696,
|
2861 |
+
"grad_norm": 0.337212090487825,
|
2862 |
+
"learning_rate": 1.3976846239311128e-05,
|
2863 |
+
"loss": 1.1086,
|
2864 |
+
"step": 4010
|
2865 |
+
},
|
2866 |
+
{
|
2867 |
+
"epoch": 3.4111158252015272,
|
2868 |
+
"grad_norm": 0.3351805063766743,
|
2869 |
+
"learning_rate": 1.384406022967489e-05,
|
2870 |
+
"loss": 1.0919,
|
2871 |
+
"step": 4020
|
2872 |
+
},
|
2873 |
+
{
|
2874 |
+
"epoch": 3.419601187950785,
|
2875 |
+
"grad_norm": 0.3398996417599571,
|
2876 |
+
"learning_rate": 1.3711666042227772e-05,
|
2877 |
+
"loss": 1.0808,
|
2878 |
+
"step": 4030
|
2879 |
+
},
|
2880 |
+
{
|
2881 |
+
"epoch": 3.4280865507000424,
|
2882 |
+
"grad_norm": 0.3585847037493432,
|
2883 |
+
"learning_rate": 1.357966832695725e-05,
|
2884 |
+
"loss": 1.1169,
|
2885 |
+
"step": 4040
|
2886 |
+
},
|
2887 |
+
{
|
2888 |
+
"epoch": 3.4365719134493,
|
2889 |
+
"grad_norm": 0.3229408856119979,
|
2890 |
+
"learning_rate": 1.3448071719925826e-05,
|
2891 |
+
"loss": 1.1041,
|
2892 |
+
"step": 4050
|
2893 |
+
},
|
2894 |
+
{
|
2895 |
+
"epoch": 3.4450572761985576,
|
2896 |
+
"grad_norm": 0.34474446572567957,
|
2897 |
+
"learning_rate": 1.331688084310812e-05,
|
2898 |
+
"loss": 1.0772,
|
2899 |
+
"step": 4060
|
2900 |
+
},
|
2901 |
+
{
|
2902 |
+
"epoch": 3.453542638947815,
|
2903 |
+
"grad_norm": 0.3189660506507834,
|
2904 |
+
"learning_rate": 1.3186100304228594e-05,
|
2905 |
+
"loss": 1.1082,
|
2906 |
+
"step": 4070
|
2907 |
+
},
|
2908 |
+
{
|
2909 |
+
"epoch": 3.4620280016970724,
|
2910 |
+
"grad_norm": 0.3609045764903581,
|
2911 |
+
"learning_rate": 1.3055734696599686e-05,
|
2912 |
+
"loss": 1.0815,
|
2913 |
+
"step": 4080
|
2914 |
+
},
|
2915 |
+
{
|
2916 |
+
"epoch": 3.47051336444633,
|
2917 |
+
"grad_norm": 0.35237020522576973,
|
2918 |
+
"learning_rate": 1.292578859896053e-05,
|
2919 |
+
"loss": 1.1176,
|
2920 |
+
"step": 4090
|
2921 |
+
},
|
2922 |
+
{
|
2923 |
+
"epoch": 3.4789987271955876,
|
2924 |
+
"grad_norm": 0.3189367280387807,
|
2925 |
+
"learning_rate": 1.2796266575316069e-05,
|
2926 |
+
"loss": 1.0826,
|
2927 |
+
"step": 4100
|
2928 |
+
},
|
2929 |
+
{
|
2930 |
+
"epoch": 3.487484089944845,
|
2931 |
+
"grad_norm": 0.34730662545931273,
|
2932 |
+
"learning_rate": 1.2667173174776823e-05,
|
2933 |
+
"loss": 1.0908,
|
2934 |
+
"step": 4110
|
2935 |
+
},
|
2936 |
+
{
|
2937 |
+
"epoch": 3.4959694526941028,
|
2938 |
+
"grad_norm": 0.34138804654797594,
|
2939 |
+
"learning_rate": 1.2538512931399072e-05,
|
2940 |
+
"loss": 1.0769,
|
2941 |
+
"step": 4120
|
2942 |
+
},
|
2943 |
+
{
|
2944 |
+
"epoch": 3.5044548154433603,
|
2945 |
+
"grad_norm": 0.33424304735568794,
|
2946 |
+
"learning_rate": 1.2410290364025623e-05,
|
2947 |
+
"loss": 1.0795,
|
2948 |
+
"step": 4130
|
2949 |
+
},
|
2950 |
+
{
|
2951 |
+
"epoch": 3.5129401781926175,
|
2952 |
+
"grad_norm": 0.3313504435273678,
|
2953 |
+
"learning_rate": 1.2282509976127098e-05,
|
2954 |
+
"loss": 1.0878,
|
2955 |
+
"step": 4140
|
2956 |
+
},
|
2957 |
+
{
|
2958 |
+
"epoch": 3.521425540941875,
|
2959 |
+
"grad_norm": 0.3233457240542579,
|
2960 |
+
"learning_rate": 1.215517625564376e-05,
|
2961 |
+
"loss": 1.0892,
|
2962 |
+
"step": 4150
|
2963 |
+
},
|
2964 |
+
{
|
2965 |
+
"epoch": 3.5299109036911327,
|
2966 |
+
"grad_norm": 0.36342717886744974,
|
2967 |
+
"learning_rate": 1.2028293674827909e-05,
|
2968 |
+
"loss": 1.1121,
|
2969 |
+
"step": 4160
|
2970 |
+
},
|
2971 |
+
{
|
2972 |
+
"epoch": 3.5383962664403903,
|
2973 |
+
"grad_norm": 0.3384633320066937,
|
2974 |
+
"learning_rate": 1.1901866690086747e-05,
|
2975 |
+
"loss": 1.0873,
|
2976 |
+
"step": 4170
|
2977 |
+
},
|
2978 |
+
{
|
2979 |
+
"epoch": 3.546881629189648,
|
2980 |
+
"grad_norm": 0.3478508212091021,
|
2981 |
+
"learning_rate": 1.1775899741825947e-05,
|
2982 |
+
"loss": 1.1074,
|
2983 |
+
"step": 4180
|
2984 |
+
},
|
2985 |
+
{
|
2986 |
+
"epoch": 3.5553669919389055,
|
2987 |
+
"grad_norm": 0.33728300112135334,
|
2988 |
+
"learning_rate": 1.1650397254293583e-05,
|
2989 |
+
"loss": 1.0962,
|
2990 |
+
"step": 4190
|
2991 |
+
},
|
2992 |
+
{
|
2993 |
+
"epoch": 3.563852354688163,
|
2994 |
+
"grad_norm": 0.3237234384231759,
|
2995 |
+
"learning_rate": 1.1525363635424863e-05,
|
2996 |
+
"loss": 1.0918,
|
2997 |
+
"step": 4200
|
2998 |
+
},
|
2999 |
+
{
|
3000 |
+
"epoch": 3.563852354688163,
|
3001 |
+
"eval_loss": 1.3741682767868042,
|
3002 |
+
"eval_runtime": 52.4151,
|
3003 |
+
"eval_samples_per_second": 7.269,
|
3004 |
+
"eval_steps_per_second": 0.916,
|
3005 |
+
"step": 4200
|
3006 |
+
},
|
3007 |
+
{
|
3008 |
+
"epoch": 3.5723377174374207,
|
3009 |
+
"grad_norm": 0.35478171271989195,
|
3010 |
+
"learning_rate": 1.1400803276687208e-05,
|
3011 |
+
"loss": 1.0845,
|
3012 |
+
"step": 4210
|
3013 |
+
},
|
3014 |
+
{
|
3015 |
+
"epoch": 3.580823080186678,
|
3016 |
+
"grad_norm": 0.37888228352789066,
|
3017 |
+
"learning_rate": 1.1276720552926096e-05,
|
3018 |
+
"loss": 1.0622,
|
3019 |
+
"step": 4220
|
3020 |
+
},
|
3021 |
+
{
|
3022 |
+
"epoch": 3.5893084429359354,
|
3023 |
+
"grad_norm": 0.3279788866194937,
|
3024 |
+
"learning_rate": 1.1153119822211338e-05,
|
3025 |
+
"loss": 1.1021,
|
3026 |
+
"step": 4230
|
3027 |
+
},
|
3028 |
+
{
|
3029 |
+
"epoch": 3.597793805685193,
|
3030 |
+
"grad_norm": 0.3240797704044235,
|
3031 |
+
"learning_rate": 1.103000542568406e-05,
|
3032 |
+
"loss": 1.0931,
|
3033 |
+
"step": 4240
|
3034 |
+
},
|
3035 |
+
{
|
3036 |
+
"epoch": 3.6062791684344506,
|
3037 |
+
"grad_norm": 0.3326855671061254,
|
3038 |
+
"learning_rate": 1.0907381687404206e-05,
|
3039 |
+
"loss": 1.0856,
|
3040 |
+
"step": 4250
|
3041 |
+
},
|
3042 |
+
{
|
3043 |
+
"epoch": 3.614764531183708,
|
3044 |
+
"grad_norm": 0.3667803858944268,
|
3045 |
+
"learning_rate": 1.0785252914198676e-05,
|
3046 |
+
"loss": 1.0987,
|
3047 |
+
"step": 4260
|
3048 |
+
},
|
3049 |
+
{
|
3050 |
+
"epoch": 3.6232498939329654,
|
3051 |
+
"grad_norm": 0.36473787559668763,
|
3052 |
+
"learning_rate": 1.0663623395510087e-05,
|
3053 |
+
"loss": 1.0817,
|
3054 |
+
"step": 4270
|
3055 |
+
},
|
3056 |
+
{
|
3057 |
+
"epoch": 3.631735256682223,
|
3058 |
+
"grad_norm": 0.3409471312744712,
|
3059 |
+
"learning_rate": 1.0542497403246055e-05,
|
3060 |
+
"loss": 1.0817,
|
3061 |
+
"step": 4280
|
3062 |
+
},
|
3063 |
+
{
|
3064 |
+
"epoch": 3.6402206194314806,
|
3065 |
+
"grad_norm": 0.3432913496959211,
|
3066 |
+
"learning_rate": 1.0421879191629227e-05,
|
3067 |
+
"loss": 1.1028,
|
3068 |
+
"step": 4290
|
3069 |
+
},
|
3070 |
+
{
|
3071 |
+
"epoch": 3.648705982180738,
|
3072 |
+
"grad_norm": 0.3432540649401779,
|
3073 |
+
"learning_rate": 1.0301772997047809e-05,
|
3074 |
+
"loss": 1.1015,
|
3075 |
+
"step": 4300
|
3076 |
+
},
|
3077 |
+
{
|
3078 |
+
"epoch": 3.6571913449299958,
|
3079 |
+
"grad_norm": 0.33346746301829316,
|
3080 |
+
"learning_rate": 1.0182183037906799e-05,
|
3081 |
+
"loss": 1.0731,
|
3082 |
+
"step": 4310
|
3083 |
+
},
|
3084 |
+
{
|
3085 |
+
"epoch": 3.6656767076792534,
|
3086 |
+
"grad_norm": 0.3413502676678206,
|
3087 |
+
"learning_rate": 1.0063113514479809e-05,
|
3088 |
+
"loss": 1.0712,
|
3089 |
+
"step": 4320
|
3090 |
+
},
|
3091 |
+
{
|
3092 |
+
"epoch": 3.674162070428511,
|
3093 |
+
"grad_norm": 0.3285225243235751,
|
3094 |
+
"learning_rate": 9.94456860876159e-06,
|
3095 |
+
"loss": 1.0848,
|
3096 |
+
"step": 4330
|
3097 |
+
},
|
3098 |
+
{
|
3099 |
+
"epoch": 3.6826474331777685,
|
3100 |
+
"grad_norm": 0.3388803496844761,
|
3101 |
+
"learning_rate": 9.826552484321087e-06,
|
3102 |
+
"loss": 1.0819,
|
3103 |
+
"step": 4340
|
3104 |
+
},
|
3105 |
+
{
|
3106 |
+
"epoch": 3.691132795927026,
|
3107 |
+
"grad_norm": 0.33054778123683576,
|
3108 |
+
"learning_rate": 9.709069286155231e-06,
|
3109 |
+
"loss": 1.0865,
|
3110 |
+
"step": 4350
|
3111 |
+
},
|
3112 |
+
{
|
3113 |
+
"epoch": 3.6996181586762833,
|
3114 |
+
"grad_norm": 0.33512423378595196,
|
3115 |
+
"learning_rate": 9.592123140543388e-06,
|
3116 |
+
"loss": 1.0799,
|
3117 |
+
"step": 4360
|
3118 |
+
},
|
3119 |
+
{
|
3120 |
+
"epoch": 3.708103521425541,
|
3121 |
+
"grad_norm": 0.33282736103466287,
|
3122 |
+
"learning_rate": 9.475718154902382e-06,
|
3123 |
+
"loss": 1.089,
|
3124 |
+
"step": 4370
|
3125 |
+
},
|
3126 |
+
{
|
3127 |
+
"epoch": 3.7165888841747985,
|
3128 |
+
"grad_norm": 0.33746488116219714,
|
3129 |
+
"learning_rate": 9.359858417642266e-06,
|
3130 |
+
"loss": 1.1047,
|
3131 |
+
"step": 4380
|
3132 |
+
},
|
3133 |
+
{
|
3134 |
+
"epoch": 3.725074246924056,
|
3135 |
+
"grad_norm": 0.35251902658665213,
|
3136 |
+
"learning_rate": 9.244547998022709e-06,
|
3137 |
+
"loss": 1.0897,
|
3138 |
+
"step": 4390
|
3139 |
+
},
|
3140 |
+
{
|
3141 |
+
"epoch": 3.7335596096733137,
|
3142 |
+
"grad_norm": 0.32061079556576844,
|
3143 |
+
"learning_rate": 9.12979094601011e-06,
|
3144 |
+
"loss": 1.0873,
|
3145 |
+
"step": 4400
|
3146 |
+
},
|
3147 |
+
{
|
3148 |
+
"epoch": 3.742044972422571,
|
3149 |
+
"grad_norm": 0.3339779862802376,
|
3150 |
+
"learning_rate": 9.0155912921353e-06,
|
3151 |
+
"loss": 1.0976,
|
3152 |
+
"step": 4410
|
3153 |
+
},
|
3154 |
+
{
|
3155 |
+
"epoch": 3.7505303351718284,
|
3156 |
+
"grad_norm": 0.33137105564248603,
|
3157 |
+
"learning_rate": 8.901953047352032e-06,
|
3158 |
+
"loss": 1.0815,
|
3159 |
+
"step": 4420
|
3160 |
+
},
|
3161 |
+
{
|
3162 |
+
"epoch": 3.759015697921086,
|
3163 |
+
"grad_norm": 0.3421991932531284,
|
3164 |
+
"learning_rate": 8.788880202896072e-06,
|
3165 |
+
"loss": 1.0962,
|
3166 |
+
"step": 4430
|
3167 |
+
},
|
3168 |
+
{
|
3169 |
+
"epoch": 3.7675010606703436,
|
3170 |
+
"grad_norm": 0.34520719229693314,
|
3171 |
+
"learning_rate": 8.676376730145031e-06,
|
3172 |
+
"loss": 1.0862,
|
3173 |
+
"step": 4440
|
3174 |
+
},
|
3175 |
+
{
|
3176 |
+
"epoch": 3.7759864234196012,
|
3177 |
+
"grad_norm": 0.3472744469623473,
|
3178 |
+
"learning_rate": 8.564446580478877e-06,
|
3179 |
+
"loss": 1.0906,
|
3180 |
+
"step": 4450
|
3181 |
+
},
|
3182 |
+
{
|
3183 |
+
"epoch": 3.784471786168859,
|
3184 |
+
"grad_norm": 0.3247372551943734,
|
3185 |
+
"learning_rate": 8.453093685141156e-06,
|
3186 |
+
"loss": 1.0892,
|
3187 |
+
"step": 4460
|
3188 |
+
},
|
3189 |
+
{
|
3190 |
+
"epoch": 3.7929571489181164,
|
3191 |
+
"grad_norm": 0.32380813984188905,
|
3192 |
+
"learning_rate": 8.342321955100935e-06,
|
3193 |
+
"loss": 1.1084,
|
3194 |
+
"step": 4470
|
3195 |
+
},
|
3196 |
+
{
|
3197 |
+
"epoch": 3.801442511667374,
|
3198 |
+
"grad_norm": 0.3379339847312466,
|
3199 |
+
"learning_rate": 8.232135280915398e-06,
|
3200 |
+
"loss": 1.0751,
|
3201 |
+
"step": 4480
|
3202 |
+
},
|
3203 |
+
{
|
3204 |
+
"epoch": 3.809927874416631,
|
3205 |
+
"grad_norm": 0.34446270018278696,
|
3206 |
+
"learning_rate": 8.122537532593264e-06,
|
3207 |
+
"loss": 1.0934,
|
3208 |
+
"step": 4490
|
3209 |
+
},
|
3210 |
+
{
|
3211 |
+
"epoch": 3.8184132371658888,
|
3212 |
+
"grad_norm": 0.34800470636637415,
|
3213 |
+
"learning_rate": 8.013532559458761e-06,
|
3214 |
+
"loss": 1.0896,
|
3215 |
+
"step": 4500
|
3216 |
+
},
|
3217 |
+
{
|
3218 |
+
"epoch": 3.8268985999151464,
|
3219 |
+
"grad_norm": 0.34720788545028275,
|
3220 |
+
"learning_rate": 7.90512419001656e-06,
|
3221 |
+
"loss": 1.0755,
|
3222 |
+
"step": 4510
|
3223 |
+
},
|
3224 |
+
{
|
3225 |
+
"epoch": 3.835383962664404,
|
3226 |
+
"grad_norm": 0.32618575043616216,
|
3227 |
+
"learning_rate": 7.797316231817198e-06,
|
3228 |
+
"loss": 1.0897,
|
3229 |
+
"step": 4520
|
3230 |
+
},
|
3231 |
+
{
|
3232 |
+
"epoch": 3.8438693254136616,
|
3233 |
+
"grad_norm": 0.3301952338609731,
|
3234 |
+
"learning_rate": 7.690112471323419e-06,
|
3235 |
+
"loss": 1.0737,
|
3236 |
+
"step": 4530
|
3237 |
+
},
|
3238 |
+
{
|
3239 |
+
"epoch": 3.8523546881629187,
|
3240 |
+
"grad_norm": 0.36691506455432976,
|
3241 |
+
"learning_rate": 7.583516673777141e-06,
|
3242 |
+
"loss": 1.0749,
|
3243 |
+
"step": 4540
|
3244 |
+
},
|
3245 |
+
{
|
3246 |
+
"epoch": 3.8608400509121763,
|
3247 |
+
"grad_norm": 0.32038136896218583,
|
3248 |
+
"learning_rate": 7.477532583067234e-06,
|
3249 |
+
"loss": 1.0945,
|
3250 |
+
"step": 4550
|
3251 |
+
},
|
3252 |
+
{
|
3253 |
+
"epoch": 3.869325413661434,
|
3254 |
+
"grad_norm": 0.3612002616743919,
|
3255 |
+
"learning_rate": 7.3721639215980216e-06,
|
3256 |
+
"loss": 1.0869,
|
3257 |
+
"step": 4560
|
3258 |
+
},
|
3259 |
+
{
|
3260 |
+
"epoch": 3.8778107764106915,
|
3261 |
+
"grad_norm": 0.32001031069324903,
|
3262 |
+
"learning_rate": 7.267414390158533e-06,
|
3263 |
+
"loss": 1.0587,
|
3264 |
+
"step": 4570
|
3265 |
+
},
|
3266 |
+
{
|
3267 |
+
"epoch": 3.886296139159949,
|
3268 |
+
"grad_norm": 0.3397263736848682,
|
3269 |
+
"learning_rate": 7.163287667792557e-06,
|
3270 |
+
"loss": 1.0815,
|
3271 |
+
"step": 4580
|
3272 |
+
},
|
3273 |
+
{
|
3274 |
+
"epoch": 3.8947815019092067,
|
3275 |
+
"grad_norm": 0.3376479038706788,
|
3276 |
+
"learning_rate": 7.059787411669361e-06,
|
3277 |
+
"loss": 1.0966,
|
3278 |
+
"step": 4590
|
3279 |
+
},
|
3280 |
+
{
|
3281 |
+
"epoch": 3.9032668646584643,
|
3282 |
+
"grad_norm": 0.3361477045154485,
|
3283 |
+
"learning_rate": 6.9569172569553195e-06,
|
3284 |
+
"loss": 1.095,
|
3285 |
+
"step": 4600
|
3286 |
+
},
|
3287 |
+
{
|
3288 |
+
"epoch": 3.911752227407722,
|
3289 |
+
"grad_norm": 0.3200411096233463,
|
3290 |
+
"learning_rate": 6.854680816686177e-06,
|
3291 |
+
"loss": 1.0547,
|
3292 |
+
"step": 4610
|
3293 |
+
},
|
3294 |
+
{
|
3295 |
+
"epoch": 3.9202375901569795,
|
3296 |
+
"grad_norm": 0.31629554430499285,
|
3297 |
+
"learning_rate": 6.7530816816401745e-06,
|
3298 |
+
"loss": 1.0722,
|
3299 |
+
"step": 4620
|
3300 |
+
},
|
3301 |
+
{
|
3302 |
+
"epoch": 3.9287229529062366,
|
3303 |
+
"grad_norm": 0.41055357604639015,
|
3304 |
+
"learning_rate": 6.6521234202119396e-06,
|
3305 |
+
"loss": 1.0598,
|
3306 |
+
"step": 4630
|
3307 |
+
},
|
3308 |
+
{
|
3309 |
+
"epoch": 3.9372083156554942,
|
3310 |
+
"grad_norm": 0.3066099811651359,
|
3311 |
+
"learning_rate": 6.551809578287138e-06,
|
3312 |
+
"loss": 1.0796,
|
3313 |
+
"step": 4640
|
3314 |
+
},
|
3315 |
+
{
|
3316 |
+
"epoch": 3.945693678404752,
|
3317 |
+
"grad_norm": 0.34352050628293235,
|
3318 |
+
"learning_rate": 6.452143679117964e-06,
|
3319 |
+
"loss": 1.0635,
|
3320 |
+
"step": 4650
|
3321 |
+
},
|
3322 |
+
{
|
3323 |
+
"epoch": 3.9541790411540094,
|
3324 |
+
"grad_norm": 0.33858419417369884,
|
3325 |
+
"learning_rate": 6.353129223199353e-06,
|
3326 |
+
"loss": 1.0939,
|
3327 |
+
"step": 4660
|
3328 |
+
},
|
3329 |
+
{
|
3330 |
+
"epoch": 3.962664403903267,
|
3331 |
+
"grad_norm": 0.3421642104660851,
|
3332 |
+
"learning_rate": 6.2547696881460835e-06,
|
3333 |
+
"loss": 1.0852,
|
3334 |
+
"step": 4670
|
3335 |
+
},
|
3336 |
+
{
|
3337 |
+
"epoch": 3.971149766652524,
|
3338 |
+
"grad_norm": 0.3146569348417928,
|
3339 |
+
"learning_rate": 6.157068528570592e-06,
|
3340 |
+
"loss": 1.0858,
|
3341 |
+
"step": 4680
|
3342 |
+
},
|
3343 |
+
{
|
3344 |
+
"epoch": 3.9796351294017818,
|
3345 |
+
"grad_norm": 0.33303947321847055,
|
3346 |
+
"learning_rate": 6.060029175961665e-06,
|
3347 |
+
"loss": 1.0894,
|
3348 |
+
"step": 4690
|
3349 |
+
},
|
3350 |
+
{
|
3351 |
+
"epoch": 3.9881204921510394,
|
3352 |
+
"grad_norm": 0.3246201166003683,
|
3353 |
+
"learning_rate": 5.963655038563904e-06,
|
3354 |
+
"loss": 1.0713,
|
3355 |
+
"step": 4700
|
3356 |
+
},
|
3357 |
+
{
|
3358 |
+
"epoch": 3.996605854900297,
|
3359 |
+
"grad_norm": 0.33668268336604734,
|
3360 |
+
"learning_rate": 5.867949501258047e-06,
|
3361 |
+
"loss": 1.0714,
|
3362 |
+
"step": 4710
|
3363 |
+
},
|
3364 |
+
{
|
3365 |
+
"epoch": 4.005091217649555,
|
3366 |
+
"grad_norm": 0.340739931299029,
|
3367 |
+
"learning_rate": 5.7729159254420405e-06,
|
3368 |
+
"loss": 1.1111,
|
3369 |
+
"step": 4720
|
3370 |
+
},
|
3371 |
+
{
|
3372 |
+
"epoch": 4.013576580398812,
|
3373 |
+
"grad_norm": 0.4123963053921562,
|
3374 |
+
"learning_rate": 5.678557648913007e-06,
|
3375 |
+
"loss": 1.0235,
|
3376 |
+
"step": 4730
|
3377 |
+
},
|
3378 |
+
{
|
3379 |
+
"epoch": 4.02206194314807,
|
3380 |
+
"grad_norm": 0.3503972385770886,
|
3381 |
+
"learning_rate": 5.584877985750036e-06,
|
3382 |
+
"loss": 1.0207,
|
3383 |
+
"step": 4740
|
3384 |
+
},
|
3385 |
+
{
|
3386 |
+
"epoch": 4.030547305897327,
|
3387 |
+
"grad_norm": 0.36492718284781545,
|
3388 |
+
"learning_rate": 5.491880226197707e-06,
|
3389 |
+
"loss": 1.0065,
|
3390 |
+
"step": 4750
|
3391 |
+
},
|
3392 |
+
{
|
3393 |
+
"epoch": 4.039032668646585,
|
3394 |
+
"grad_norm": 0.3558493920675881,
|
3395 |
+
"learning_rate": 5.399567636550634e-06,
|
3396 |
+
"loss": 1.012,
|
3397 |
+
"step": 4760
|
3398 |
+
},
|
3399 |
+
{
|
3400 |
+
"epoch": 4.0475180313958425,
|
3401 |
+
"grad_norm": 0.35265155864184533,
|
3402 |
+
"learning_rate": 5.307943459038656e-06,
|
3403 |
+
"loss": 1.0042,
|
3404 |
+
"step": 4770
|
3405 |
+
},
|
3406 |
+
{
|
3407 |
+
"epoch": 4.0560033941451,
|
3408 |
+
"grad_norm": 0.3628265724559059,
|
3409 |
+
"learning_rate": 5.217010911713022e-06,
|
3410 |
+
"loss": 1.0138,
|
3411 |
+
"step": 4780
|
3412 |
+
},
|
3413 |
+
{
|
3414 |
+
"epoch": 4.064488756894357,
|
3415 |
+
"grad_norm": 0.366016038213362,
|
3416 |
+
"learning_rate": 5.126773188333326e-06,
|
3417 |
+
"loss": 1.0055,
|
3418 |
+
"step": 4790
|
3419 |
+
},
|
3420 |
+
{
|
3421 |
+
"epoch": 4.072974119643614,
|
3422 |
+
"grad_norm": 0.3663550294337188,
|
3423 |
+
"learning_rate": 5.037233458255375e-06,
|
3424 |
+
"loss": 1.0143,
|
3425 |
+
"step": 4800
|
3426 |
+
},
|
3427 |
+
{
|
3428 |
+
"epoch": 4.072974119643614,
|
3429 |
+
"eval_loss": 1.3839582204818726,
|
3430 |
+
"eval_runtime": 52.5375,
|
3431 |
+
"eval_samples_per_second": 7.252,
|
3432 |
+
"eval_steps_per_second": 0.914,
|
3433 |
+
"step": 4800
|
3434 |
+
},
|
3435 |
+
{
|
3436 |
+
"epoch": 4.081459482392872,
|
3437 |
+
"grad_norm": 0.3734159050536221,
|
3438 |
+
"learning_rate": 4.9483948663198106e-06,
|
3439 |
+
"loss": 1.0256,
|
3440 |
+
"step": 4810
|
3441 |
+
},
|
3442 |
+
{
|
3443 |
+
"epoch": 4.08994484514213,
|
3444 |
+
"grad_norm": 0.3756787343497328,
|
3445 |
+
"learning_rate": 4.860260532741739e-06,
|
3446 |
+
"loss": 0.9939,
|
3447 |
+
"step": 4820
|
3448 |
+
},
|
3449 |
+
{
|
3450 |
+
"epoch": 4.098430207891387,
|
3451 |
+
"grad_norm": 0.3641062929794199,
|
3452 |
+
"learning_rate": 4.7728335530010684e-06,
|
3453 |
+
"loss": 1.0016,
|
3454 |
+
"step": 4830
|
3455 |
+
},
|
3456 |
+
{
|
3457 |
+
"epoch": 4.106915570640645,
|
3458 |
+
"grad_norm": 0.3720412035298521,
|
3459 |
+
"learning_rate": 4.686116997733822e-06,
|
3460 |
+
"loss": 1.0099,
|
3461 |
+
"step": 4840
|
3462 |
+
},
|
3463 |
+
{
|
3464 |
+
"epoch": 4.115400933389902,
|
3465 |
+
"grad_norm": 0.3494502732717846,
|
3466 |
+
"learning_rate": 4.600113912624305e-06,
|
3467 |
+
"loss": 0.9903,
|
3468 |
+
"step": 4850
|
3469 |
+
},
|
3470 |
+
{
|
3471 |
+
"epoch": 4.12388629613916,
|
3472 |
+
"grad_norm": 0.3847064809020709,
|
3473 |
+
"learning_rate": 4.514827318298087e-06,
|
3474 |
+
"loss": 1.0109,
|
3475 |
+
"step": 4860
|
3476 |
+
},
|
3477 |
+
{
|
3478 |
+
"epoch": 4.132371658888418,
|
3479 |
+
"grad_norm": 0.3724702679939693,
|
3480 |
+
"learning_rate": 4.430260210215972e-06,
|
3481 |
+
"loss": 1.0172,
|
3482 |
+
"step": 4870
|
3483 |
+
},
|
3484 |
+
{
|
3485 |
+
"epoch": 4.140857021637675,
|
3486 |
+
"grad_norm": 0.3854607242705416,
|
3487 |
+
"learning_rate": 4.346415558568712e-06,
|
3488 |
+
"loss": 1.014,
|
3489 |
+
"step": 4880
|
3490 |
+
},
|
3491 |
+
{
|
3492 |
+
"epoch": 4.149342384386933,
|
3493 |
+
"grad_norm": 0.4119725224707018,
|
3494 |
+
"learning_rate": 4.263296308172774e-06,
|
3495 |
+
"loss": 1.0037,
|
3496 |
+
"step": 4890
|
3497 |
+
},
|
3498 |
+
{
|
3499 |
+
"epoch": 4.15782774713619,
|
3500 |
+
"grad_norm": 0.38075029944179556,
|
3501 |
+
"learning_rate": 4.180905378366845e-06,
|
3502 |
+
"loss": 1.0004,
|
3503 |
+
"step": 4900
|
3504 |
+
},
|
3505 |
+
{
|
3506 |
+
"epoch": 4.166313109885448,
|
3507 |
+
"grad_norm": 0.3788138960181575,
|
3508 |
+
"learning_rate": 4.099245662909338e-06,
|
3509 |
+
"loss": 0.9996,
|
3510 |
+
"step": 4910
|
3511 |
+
},
|
3512 |
+
{
|
3513 |
+
"epoch": 4.174798472634705,
|
3514 |
+
"grad_norm": 0.5528304971596814,
|
3515 |
+
"learning_rate": 4.018320029876729e-06,
|
3516 |
+
"loss": 0.9861,
|
3517 |
+
"step": 4920
|
3518 |
+
},
|
3519 |
+
{
|
3520 |
+
"epoch": 4.183283835383962,
|
3521 |
+
"grad_norm": 0.38085596177818104,
|
3522 |
+
"learning_rate": 3.938131321562841e-06,
|
3523 |
+
"loss": 1.0123,
|
3524 |
+
"step": 4930
|
3525 |
+
},
|
3526 |
+
{
|
3527 |
+
"epoch": 4.19176919813322,
|
3528 |
+
"grad_norm": 0.4065521249170979,
|
3529 |
+
"learning_rate": 3.858682354379012e-06,
|
3530 |
+
"loss": 1.0124,
|
3531 |
+
"step": 4940
|
3532 |
+
},
|
3533 |
+
{
|
3534 |
+
"epoch": 4.2002545608824775,
|
3535 |
+
"grad_norm": 0.37820891516953525,
|
3536 |
+
"learning_rate": 3.7799759187551603e-06,
|
3537 |
+
"loss": 0.9985,
|
3538 |
+
"step": 4950
|
3539 |
+
},
|
3540 |
+
{
|
3541 |
+
"epoch": 4.208739923631735,
|
3542 |
+
"grad_norm": 0.38792497529901426,
|
3543 |
+
"learning_rate": 3.7020147790418263e-06,
|
3544 |
+
"loss": 1.0166,
|
3545 |
+
"step": 4960
|
3546 |
+
},
|
3547 |
+
{
|
3548 |
+
"epoch": 4.217225286380993,
|
3549 |
+
"grad_norm": 0.37981175823185026,
|
3550 |
+
"learning_rate": 3.624801673413011e-06,
|
3551 |
+
"loss": 1.0012,
|
3552 |
+
"step": 4970
|
3553 |
+
},
|
3554 |
+
{
|
3555 |
+
"epoch": 4.22571064913025,
|
3556 |
+
"grad_norm": 0.3752506606207182,
|
3557 |
+
"learning_rate": 3.5483393137700717e-06,
|
3558 |
+
"loss": 0.9926,
|
3559 |
+
"step": 4980
|
3560 |
+
},
|
3561 |
+
{
|
3562 |
+
"epoch": 4.234196011879508,
|
3563 |
+
"grad_norm": 0.36701501672766035,
|
3564 |
+
"learning_rate": 3.4726303856464306e-06,
|
3565 |
+
"loss": 0.9998,
|
3566 |
+
"step": 4990
|
3567 |
+
},
|
3568 |
+
{
|
3569 |
+
"epoch": 4.2426813746287655,
|
3570 |
+
"grad_norm": 0.37096066532390476,
|
3571 |
+
"learning_rate": 3.3976775481132678e-06,
|
3572 |
+
"loss": 1.004,
|
3573 |
+
"step": 5000
|
3574 |
+
}
  ],
  "logging_steps": 10,
  "max_steps": 5890,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4203278871035904.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
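The trainer_state.json diff above is the Trainer's running log for this run: one record every 10 optimizer steps (epoch, grad_norm, learning_rate, loss, step) plus an eval record every 600 steps, ending here at step 5000 of max_steps 5890. Below is a minimal sketch of pulling the train/eval loss curves back out of such a file; it assumes the standard Hugging Face Trainer layout in which these records sit under a top-level "log_history" key, and the checkpoint path is only an example.

```python
# Minimal sketch: extract loss curves from a checkpoint's trainer_state.json.
# Assumption: the records shown in the diff above live under a top-level
# "log_history" list (the usual Hugging Face Trainer layout); the path is
# just an example pointing at this repo's checkpoint-5000 folder.
import json

with open("checkpoint-5000/trainer_state.json") as f:
    state = json.load(f)

train_log = [e for e in state["log_history"] if "loss" in e]      # per-10-step records
eval_log = [e for e in state["log_history"] if "eval_loss" in e]  # per-600-step evals

train_curve = [(e["step"], e["loss"]) for e in train_log]
print(f"{len(train_curve)} training log points, last loss {train_curve[-1][1]}")

for e in eval_log:
    print(f'step {e["step"]:>5}: eval_loss={e["eval_loss"]:.4f} '
          f'({e["eval_samples_per_second"]:.3f} samples/s over {e["eval_runtime"]:.1f}s)')
```

As a sanity check on the eval records, runtime times throughput is consistent with a fixed eval set of roughly 381 samples (e.g. 52.7875 s x 7.218 samples/s at step 3000), assuming samples/s is computed as total eval samples divided by runtime.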
checkpoint-5000/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff