Uploading Hate Speech Detection
Browse files
- README.md +25 -25
- config.json +23 -15
- model.safetensors +2 -2
- tokenizer.json +0 -0
- tokenizer_config.json +6 -4
- training_args.bin +1 -1
- vocab.txt +0 -0
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 library_name: transformers
-license:
-base_model:
+license: mit
+base_model: neuralmind/bert-large-portuguese-cased
 tags:
 - generated_from_trainer
 metrics:
@@ -16,10 +16,10 @@ should probably proofread and complete it, then remove this comment. -->

 # hate_speech_detection_with_target-bert-large-portuguese-cased

-This model is a fine-tuned version of [
+This model is a fine-tuned version of [neuralmind/bert-large-portuguese-cased](https://huggingface.co/neuralmind/bert-large-portuguese-cased) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- Accuracy: 0.
+- Loss: 0.0610
+- Accuracy: 0.9892

 ## Model description

@@ -50,26 +50,26 @@ The following hyperparameters were used during training:

 | Training Loss | Epoch | Step | Validation Loss | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:--------:|
-|
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
+| 0.8021        | 1.0   | 93   | 0.1410          | 0.9594   |
+| 0.0974        | 2.0   | 186  | 0.0974          | 0.9770   |
+| 0.1401        | 3.0   | 279  | 0.0550          | 0.9838   |
+| 0.046         | 4.0   | 372  | 0.0618          | 0.9878   |
+| 0.0344        | 5.0   | 465  | 0.0469          | 0.9892   |
+| 0.2429        | 6.0   | 558  | 0.0854          | 0.9878   |
+| 0.0696        | 7.0   | 651  | 0.0451          | 0.9892   |
+| 0.0394        | 8.0   | 744  | 0.0460          | 0.9892   |
+| 0.0279        | 9.0   | 837  | 0.0469          | 0.9892   |
+| 0.0362        | 10.0  | 930  | 0.0779          | 0.9865   |
+| 0.0215        | 11.0  | 1023 | 0.0655          | 0.9878   |
+| 0.0193        | 12.0  | 1116 | 0.0587          | 0.9892   |
+| 0.0154        | 13.0  | 1209 | 0.0594          | 0.9892   |
+| 0.015         | 14.0  | 1302 | 0.0601          | 0.9905   |
+| 0.0156        | 15.0  | 1395 | 0.0604          | 0.9892   |
+| 0.0157        | 16.0  | 1488 | 0.0604          | 0.9892   |
+| 0.0145        | 17.0  | 1581 | 0.0607          | 0.9892   |
+| 0.0176        | 18.0  | 1674 | 0.0607          | 0.9892   |
+| 0.0193        | 19.0  | 1767 | 0.0609          | 0.9892   |
+| 0.0206        | 20.0  | 1860 | 0.0610          | 0.9892   |


 ### Framework versions
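For orientation, a minimal inference sketch against the updated model card; the diff does not show the Hub repo id, so a local checkout path stands in for it:

```python
# Minimal inference sketch (not part of this commit). The diff does not show
# the Hub repo id, so "./" assumes a local clone of this repository.
from transformers import pipeline

classifier = pipeline("text-classification", model="./")
print(classifier("Texto de exemplo em português."))
# -> one of the 16 "<target> hateful" / "<target> non-hateful" labels
#    defined in config.json, with a confidence score
```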
config.json CHANGED
@@ -1,13 +1,14 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
-  "activation": "gelu",
   "architectures": [
-    "
+    "BertForSequenceClassification"
   ],
-  "
-  "
-  "
-  "
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
   "id2label": {
     "0": "gay people non-hateful",
     "1": "gay people hateful",
@@ -27,6 +28,7 @@
     "15": "unknown hateful"
   },
   "initializer_range": 0.02,
+  "intermediate_size": 4096,
   "label2id": {
     "black people hateful": 9,
     "black people non-hateful": 8,
@@ -45,17 +47,23 @@
     "women hateful": 7,
     "women non-hateful": 6
   },
+  "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
-  "model_type": "
-  "
-  "
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
   "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
-  "qa_dropout": 0.1,
-  "seq_classif_dropout": 0.2,
-  "sinusoidal_pos_embds": false,
-  "tie_weights_": true,
   "torch_dtype": "float32",
   "transformers_version": "4.48.1",
-  "
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
 }
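The new config defines a 16-way single-label head: eight targets, each with a non-hateful/hateful pair (even id non-hateful, odd id hateful, per the pairs visible in this diff). A sketch for listing the full mapping from a local checkout, using only the standard library:

```python
# Sketch: list the 16-way label space defined in the new config.json.
# Assumes a local clone of this repository.
import json

with open("config.json") as f:
    cfg = json.load(f)

# id2label keys are strings in JSON, so sort them numerically.
for idx in sorted(cfg["id2label"], key=int):
    print(idx, cfg["id2label"][idx])  # e.g. "0 gay people non-hateful"
```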
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3b0420c000ef87d3e9328bceab297bce05ad420a8fb1682cac5af6e6aa5fcd12
+size 1337698272
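These three lines are a Git LFS pointer, not the weights themselves; the listed size (1,337,698,272 bytes) is consistent with a float32 BERT-large checkpoint. A sketch for verifying a downloaded file against the pointer, assuming it sits in the current directory:

```python
# Sketch: check a downloaded model.safetensors against the LFS pointer above.
import hashlib
import os

EXPECTED_OID = "3b0420c000ef87d3e9328bceab297bce05ad420a8fb1682cac5af6e6aa5fcd12"
EXPECTED_SIZE = 1337698272  # bytes, from the pointer file

path = "model.safetensors"  # assumes the file is in the current directory
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "hash mismatch"
print("model.safetensors matches the LFS pointer")
```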
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json CHANGED
@@ -41,16 +41,18 @@
       "special": true
     }
   },
-  "clean_up_tokenization_spaces":
+  "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
-  "
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
   "extra_special_tokens": {},
   "mask_token": "[MASK]",
-  "model_max_length":
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
   "strip_accents": null,
   "tokenize_chinese_chars": true,
-  "tokenizer_class": "
+  "tokenizer_class": "BertTokenizer",
   "unk_token": "[UNK]"
 }
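The `model_max_length` value above is the `transformers` sentinel `int(1e30)`, meaning no length limit was recorded in the tokenizer config; since `config.json` caps `max_position_embeddings` at 512, callers should truncate explicitly. A sketch, assuming a local clone of the repository:

```python
# Sketch: encode text with an explicit 512-token cap, since model_max_length
# here is the "unset" sentinel rather than a real limit.
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("./")  # assumes a local clone
enc = tokenizer(
    "Texto de exemplo em português.",
    truncation=True,
    max_length=512,  # matches max_position_embeddings in config.json
    return_tensors="pt",
)
print(enc["input_ids"].shape)
```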
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:841b22bc60db7f01f6cca60f4f27808415b6907d98773e38ed24fc9cca628477
 size 5432
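`training_args.bin` is the pickled `TrainingArguments` object saved by `Trainer`, not a tensor file. A sketch for inspecting it, assuming `transformers` is installed so the object can be unpickled:

```python
# Sketch: inspect the pickled TrainingArguments. weights_only=False is needed
# on recent PyTorch because this file is an arbitrary pickled object, and
# transformers must be importable for unpickling to succeed.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs)
print(args.learning_rate)
print(args.per_device_train_batch_size)
```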
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff.