Upload 7 files

- config.json +4 -5
- pytorch_model.bin +2 -2
- tokenizer_config.json +1 -1
- training_args.bin +3 -0
config.json
CHANGED

@@ -1,4 +1,5 @@
 {
+  "_name_or_path": "/media/denis/d9b5b2ab-3dc3-4627-9f99-1ed59b84b83e/models/ruBert-tiny2/",
   "architectures": [
     "BertForSequenceClassification"
   ],
@@ -15,8 +16,7 @@
     "2": "LABEL_2",
     "3": "LABEL_3",
     "4": "LABEL_4",
-    "5": "LABEL_5",
-    "6": "LABEL_6"
+    "5": "LABEL_5"
   },
   "initializer_range": 0.02,
   "intermediate_size": 600,
@@ -26,8 +26,7 @@
     "LABEL_2": 2,
     "LABEL_3": 3,
     "LABEL_4": 4,
-    "LABEL_5": 5,
-    "LABEL_6": 6
+    "LABEL_5": 5
   },
   "layer_norm_eps": 1e-12,
   "max_position_embeddings": 2048,
@@ -38,7 +37,7 @@
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.29.2",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 83828
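Net effect of this config change: the classification head shrinks from seven labels to six (LABEL_6 is gone), the source checkpoint path (ruBert-tiny2) is recorded, and the transformers version is pinned at 4.29.2. A minimal sanity-check sketch after pulling this commit, assuming the transformers library and a placeholder Hub repo id "user/repo" (the real id is not shown on this page):

# Minimal sketch: confirm the label space of the updated checkpoint.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained("user/repo")  # "user/repo" is a placeholder
assert config.num_labels == 6                     # LABEL_0 .. LABEL_5 after this commit
assert 6 not in config.id2label                   # LABEL_6 was removed

model = AutoModelForSequenceClassification.from_pretrained("user/repo")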
pytorch_model.bin
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size 
+oid sha256:4edd6a51e220e2051eb6ab49bd80a9f466222c7243cf091b567c77670efc2e08
+size 116817247
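The Git LFS pointer now resolves to concrete weights. A minimal, stdlib-only sketch for verifying a downloaded copy of pytorch_model.bin against the oid and size in the pointer above:

# Minimal sketch: check a local pytorch_model.bin against the LFS pointer.
import hashlib
import os

path = "pytorch_model.bin"                 # local copy of the weights file
assert os.path.getsize(path) == 116817247  # size from the pointer

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == "4edd6a51e220e2051eb6ab49bd80a9f466222c7243cf091b567c77670efc2e08"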
tokenizer_config.json
CHANGED

@@ -1,4 +1,5 @@
 {
+  "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
   "do_lower_case": false,
@@ -7,7 +8,6 @@
   "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "special_tokens_map_file": null,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
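The tokenizer config gains a clean_up_tokenization_spaces flag and drops the null special_tokens_map_file entry. A minimal loading sketch, again with the placeholder repo id "user/repo":

# Minimal sketch: the new flag is picked up when the tokenizer is loaded.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/repo")  # "user/repo" is a placeholder
print(tok.clean_up_tokenization_spaces)           # True after this commit
print(tok.do_lower_case)                          # False: the tokenizer is case-sensitive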
training_args.bin
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7260a631cb44b5882c0714bf14266665cf2cf2ff39cd53d865d82bb268b97a57
+size 3899
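training_args.bin is the pickled TrainingArguments object that the Trainer writes alongside the weights. A minimal inspection sketch, assuming a compatible transformers install on the path (the pickle references transformers classes; config.json above pins 4.29.2) and torch >= 1.13 for the weights_only keyword:

# Minimal sketch: peek at the hyperparameters stored in training_args.bin.
import torch

# Full unpickling is required here; newer torch versions default
# torch.load to weights_only=True, which would reject this file.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)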