ai-agi committed
Commit
edd899f
1 Parent(s): 29987b9

Initial commit

config.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "architectures": [
+ "BertForMaskedLM"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "directionality": "bidi",
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "output_past": true,
+ "pad_token_id": 0,
+ "pooler_fc_size": 768,
+ "pooler_num_attention_heads": 12,
+ "pooler_num_fc_layers": 3,
+ "pooler_size_per_head": 128,
+ "pooler_type": "first_token_transform",
+ "type_vocab_size": 2,
+ "vocab_size": 29794
+ }
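The config describes a standard BERT-base masked-LM setup (12 layers, 12 heads, hidden size 768, vocab size 29794). A minimal sketch of reading it with the transformers library, assuming "." is a local clone of this repo:

```python
# A minimal sketch, assuming the transformers library and that "." is a
# local clone of this repo containing config.json and the weights.
from transformers import BertConfig, BertForMaskedLM

config = BertConfig.from_pretrained(".")
print(config.num_hidden_layers, config.hidden_size)  # 12 768

model = BertForMaskedLM.from_pretrained(".")  # loads the fine-tuned weights
```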
eval_results.txt ADDED
@@ -0,0 +1,2 @@
+ eval_loss = 0.697434716236745
+ perplexity = tensor(2.0086)
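The two numbers are consistent: for a masked-LM evaluation, perplexity is just the exponential of the evaluation loss.

```python
# Sanity check: perplexity = exp(eval_loss).
import math

print(math.exp(0.697434716236745))  # ~2.0086, matching the logged tensor(2.0086)
```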
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e23fcbbe5ac0d777fbf49b49bbf8b715986ec2dc0efd9c2310d326006bca16c
+ size 435714904
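This is a Git LFS pointer, not the ~436 MB weights file itself; after `git lfs pull` the real file can be read directly. A minimal sketch, assuming the safetensors library:

```python
# A minimal sketch, assuming the safetensors library and that the real
# model.safetensors (not the LFS pointer) has been pulled locally.
from safetensors.torch import load_file

state_dict = load_file("model.safetensors")
print(len(state_dict), "tensors")
```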
model_args.json ADDED
@@ -0,0 +1 @@
+ {"adam_epsilon": 1e-08, "best_model_dir": "outputs/best_model", "cache_dir": "cache_dir/", "custom_layer_parameters": [], "custom_parameter_groups": [], "train_custom_parameters_only": false, "config": {}, "do_lower_case": true, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": "utf-8", "eval_batch_size": 48, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 10000, "evaluate_during_training_verbose": true, "fp16": true, "fp16_opt_level": "O1", "gradient_accumulation_steps": 1, "learning_rate": 4e-05, "local_rank": -1, "logging_steps": 50, "manual_seed": null, "max_grad_norm": 1.0, "max_seq_length": 128, "multiprocessing_chunksize": 500, "n_gpu": 1, "no_cache": true, "no_save": false, "num_train_epochs": 4, "output_dir": "outputs/LM/FineTune", "overwrite_output_dir": true, "process_count": 2, "reprocess_input_data": false, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_steps": 10000, "save_optimizer_and_scheduler": true, "silent": false, "tensorboard_dir": null, "train_batch_size": 48, "use_cached_eval_features": false, "use_early_stopping": false, "use_multiprocessing": true, "wandb_kwargs": {}, "wandb_project": null, "warmup_ratio": 0.06, "warmup_steps": 1000, "weight_decay": 0, "block_size": 128, "config_name": null, "dataset_class": null, "dataset_type": "simple", "discriminator_config": {}, "discriminator_loss_weight": 50.0, "generator_config": {}, "max_steps": -1, "min_frequency": 2, "mlm": true, "mlm_probability": 0.15, "sliding_window": false, "special_tokens": ["<s>", "<pad>", "</s>", "<unk>", "<mask>"], "stride": 0.8, "tie_generator_and_discriminator_embeddings": true, "tokenizer_name": "/dados/projetos/sentimentos/outputs/LM/FineTune/checkpoint-30000/", "vocab_size": null, "clean_text": true, "handle_chinese_chars": false, "strip_accents": true}
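These are simpletransformers language-modeling arguments (MLM fine-tuning, sequence length 128, batch size 48, learning rate 4e-05, 4 epochs, fp16). A minimal sketch of how such a run might be launched; the base checkpoint and file paths below are placeholders, not taken from this repo:

```python
# A minimal sketch, assuming the simpletransformers library; only the
# hyperparameters mirror model_args.json, paths and checkpoint are placeholders.
from simpletransformers.language_modeling import LanguageModelingModel

model = LanguageModelingModel(
    "bert",
    "bert-base-multilingual-uncased",  # placeholder base model
    args={
        "mlm": True,
        "mlm_probability": 0.15,
        "max_seq_length": 128,
        "train_batch_size": 48,
        "learning_rate": 4e-05,
        "num_train_epochs": 4,
        "fp16": True,
        "evaluate_during_training": True,
    },
)
model.train_model("train.txt", eval_file="eval.txt")  # placeholder files
```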
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:048110cd7852611b49ab7b831ecbbfb4db8b6ec684b7f84b4f9e743f23c90ff7
+ size 438229514
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"special_tokens_map_file": null, "full_tokenizer_file": null}
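Together these two files define a standard uncased BERT tokenizer. A minimal sketch of loading it with the transformers library, again assuming "." is a local clone of this repo:

```python
# A minimal sketch, assuming the transformers library and a local clone
# of this repo with vocab.txt and the two JSON files above.
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(".")
print(tokenizer.mask_token)  # "[MASK]", as declared in special_tokens_map.json
```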
training_progress_scores.csv ADDED
@@ -0,0 +1,18 @@
+ global_step,perplexity,eval_loss,train_loss
+ 10000,tensor(2.1992),0.7880828963867849,0.8737165927886963
+ 20000,tensor(2.1610),0.7705507040390662,0.8072351813316345
+ 30000,tensor(2.1334),0.7577285939474578,0.7155318260192871
+ 34763,tensor(2.1201),0.7514673543713649,0.8827775716781616
+ 40000,tensor(2.1026),0.7431845677856856,0.7359579801559448
+ 50000,tensor(2.0772),0.731001776631241,0.6598162651062012
+ 60000,tensor(2.0668),0.726000594505081,0.7386130690574646
+ 69526,tensor(2.0506),0.7181160413628266,0.8968920707702637
+ 70000,tensor(2.0418),0.7138193785487026,0.706092894077301
+ 80000,tensor(2.0300),0.7080411803251254,0.7196658849716187
+ 90000,tensor(2.0187),0.7024395720692894,0.8007305264472961
+ 100000,tensor(2.0050),0.695622627103864,0.6531331539154053
+ 104289,tensor(2.0049),0.6956184461403945,0.8391630053520203
+ 110000,tensor(2.0087),0.6975072758021955,0.8131434917449951
+ 120000,tensor(1.9993),0.6927953025623087,0.7214128375053406
+ 130000,tensor(2.0056),0.6959200249105013,0.718044638633728
+ 139052,tensor(2.0068),0.6965351469204804,0.7522366046905518
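Eval loss falls steadily from 0.788 at step 10000 to roughly 0.696 around step 100000, then plateaus. Note the perplexity column was written as a stringified tensor; a minimal sketch, assuming pandas, of loading the log and cleaning that column:

```python
# A minimal sketch, assuming pandas: parse the log and strip the
# "tensor(...)" wrapper from the perplexity column.
import pandas as pd

df = pd.read_csv("training_progress_scores.csv")
df["perplexity"] = df["perplexity"].str.extract(r"tensor\((.+)\)", expand=False).astype(float)
print(df[["global_step", "eval_loss", "perplexity"]].tail())
```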
vocab.txt ADDED
The diff for this file is too large to render.