kaneez13 committed (verified)
Commit 217fb82 · Parent(s): 6086f82

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,37 @@
+
+ ---
+ library_name: transformers
+ tags:
+ - autotrain
+ - text-classification
+ base_model: cardiffnlp/twitter-roberta-base-sentiment
+ widget:
+ - text: "I love AutoTrain"
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Text Classification
+
+ ## Validation Metrics
+ loss: 0.31597408652305603
+
+ f1_macro: 0.8943671439134233
+
+ f1_micro: 0.9047619047619048
+
+ f1_weighted: 0.9078467240899183
+
+ precision_macro: 0.8852813852813853
+
+ precision_micro: 0.9047619047619048
+
+ precision_weighted: 0.9225417439703153
+
+ recall_macro: 0.92046783625731
+
+ recall_micro: 0.9047619047619048
+
+ recall_weighted: 0.9047619047619048
+
+ accuracy: 0.9047619047619048
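
The card above only reports validation metrics; for quick inference the uploaded checkpoint can be loaded through the `text-classification` pipeline. A minimal sketch, assuming the Hub repo id is `kaneez13/sentiment-analysis-demo` (inferred from the `username` and `project_name` in training_params.json below, not stated in the card):

```python
# Minimal inference sketch. The repo id is an assumption inferred from the
# AutoTrain username/project_name; adjust it to the actual Hub location.
from transformers import pipeline

classifier = pipeline("text-classification", model="kaneez13/sentiment-analysis-demo")
print(classifier("I love AutoTrain"))
# e.g. [{'label': 'Positive', 'score': 0.98}]
```
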
checkpoint-42/config.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "_name_or_path": "cardiffnlp/twitter-roberta-base-sentiment",
+ "_num_labels": 3,
+ "architectures": [
+ "RobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "Negative",
+ "1": "Neutral",
+ "2": "Positive"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "Negative": 0,
+ "Neutral": 1,
+ "Positive": 2
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.48.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 50265
+ }
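
The `id2label` / `label2id` mapping in this config is what turns the classifier's three logits into the `Negative` / `Neutral` / `Positive` strings. A sketch of that mapping done by hand (repo id assumed as before; `max_length=128` follows the `max_seq_length` in training_params.json):

```python
# Sketch of how id2label resolves raw logits to a sentiment label.
# The repo id is an assumption; any checkpoint with this config behaves the same.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "kaneez13/sentiment-analysis-demo"  # assumed Hub repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("Great movie!", return_tensors="pt", truncation=True, max_length=128)
with torch.no_grad():
    logits = model(**inputs).logits          # shape (1, 3)
pred_id = logits.argmax(dim=-1).item()
print(model.config.id2label[pred_id])        # "Negative", "Neutral" or "Positive"
```
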
checkpoint-42/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87bc636ed400aec2d9be3d219357d3f0d16ff63c8e7d10a07195496bfc8c83e5
+ size 498615900
checkpoint-42/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5637f99c27281f7b5ab2617dd530b74cee175303460b673d6e33bad5a55dd825
+ size 997346042
checkpoint-42/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:690267bc7cbabc314531bd6477e0080e8b5f73f6523da79cf93ab0c9e4509dc3
+ size 13990
checkpoint-42/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9bab0fa6c02dd9a5168020a57d68f2bff2da8a2df935372c4c5d8d94657171b
+ size 1064
checkpoint-42/trainer_state.json ADDED
@@ -0,0 +1,148 @@
+ {
+ "best_metric": 0.31597408652305603,
+ "best_model_checkpoint": "sentiment-analysis-demo/checkpoint-42",
+ "epoch": 2.0,
+ "eval_steps": 500,
+ "global_step": 42,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.19047619047619047,
+ "grad_norm": 13.430932998657227,
+ "learning_rate": 1.7142857142857142e-05,
+ "loss": 0.7748,
+ "step": 4
+ },
+ {
+ "epoch": 0.38095238095238093,
+ "grad_norm": 22.47103500366211,
+ "learning_rate": 2.9464285714285714e-05,
+ "loss": 0.6984,
+ "step": 8
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 19.184471130371094,
+ "learning_rate": 2.732142857142857e-05,
+ "loss": 0.3973,
+ "step": 12
+ },
+ {
+ "epoch": 0.7619047619047619,
+ "grad_norm": 3.9649658203125,
+ "learning_rate": 2.517857142857143e-05,
+ "loss": 0.382,
+ "step": 16
+ },
+ {
+ "epoch": 0.9523809523809523,
+ "grad_norm": 4.749580383300781,
+ "learning_rate": 2.3035714285714288e-05,
+ "loss": 0.2585,
+ "step": 20
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.8809523809523809,
+ "eval_f1_macro": 0.8512741738548191,
+ "eval_f1_micro": 0.8809523809523809,
+ "eval_f1_weighted": 0.87605993596777,
+ "eval_loss": 0.3272962272167206,
+ "eval_precision_macro": 0.8736111111111112,
+ "eval_precision_micro": 0.8809523809523809,
+ "eval_precision_weighted": 0.8780753968253967,
+ "eval_recall_macro": 0.8399122807017544,
+ "eval_recall_micro": 0.8809523809523809,
+ "eval_recall_weighted": 0.8809523809523809,
+ "eval_runtime": 8.3555,
+ "eval_samples_per_second": 5.027,
+ "eval_steps_per_second": 1.316,
+ "step": 21
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 6.048248767852783,
+ "learning_rate": 2.089285714285714e-05,
+ "loss": 0.3733,
+ "step": 24
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.4461651146411896,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.1687,
+ "step": 28
+ },
+ {
+ "epoch": 1.5238095238095237,
+ "grad_norm": 16.08753776550293,
+ "learning_rate": 1.660714285714286e-05,
+ "loss": 0.2311,
+ "step": 32
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 5.2479658126831055,
+ "learning_rate": 1.4464285714285715e-05,
+ "loss": 0.1154,
+ "step": 36
+ },
+ {
+ "epoch": 1.9047619047619047,
+ "grad_norm": 12.738253593444824,
+ "learning_rate": 1.232142857142857e-05,
+ "loss": 0.1225,
+ "step": 40
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.9047619047619048,
+ "eval_f1_macro": 0.8943671439134233,
+ "eval_f1_micro": 0.9047619047619048,
+ "eval_f1_weighted": 0.9078467240899183,
+ "eval_loss": 0.31597408652305603,
+ "eval_precision_macro": 0.8852813852813853,
+ "eval_precision_micro": 0.9047619047619048,
+ "eval_precision_weighted": 0.9225417439703153,
+ "eval_recall_macro": 0.92046783625731,
+ "eval_recall_micro": 0.9047619047619048,
+ "eval_recall_weighted": 0.9047619047619048,
+ "eval_runtime": 8.4584,
+ "eval_samples_per_second": 4.965,
+ "eval_steps_per_second": 1.3,
+ "step": 42
+ }
+ ],
+ "logging_steps": 4,
+ "max_steps": 63,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 5,
+ "early_stopping_threshold": 0.01
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 22101527089152.0,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
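
The `stateful_callbacks` block above records an `EarlyStoppingCallback` with patience 5 and threshold 0.01, and a patience counter of 0 at step 42. A small self-contained sketch of that stopping rule, an approximation of the callback's behaviour for a "lower is better" metric rather than the transformers source, applied to the two eval losses in `log_history`:

```python
# Sketch of the early-stopping rule recorded in trainer_state.json: the counter
# resets whenever eval_loss improves on the best value by more than the
# threshold, and increments otherwise.
def early_stopping_state(losses, patience=5, threshold=0.01):
    best = float("inf")
    counter = 0
    for loss in losses:
        if best - loss > threshold:      # sufficient improvement: reset
            best = loss
            counter = 0
        else:
            counter += 1
        if counter >= patience:
            return counter, True         # training would stop here
    return counter, False

# Eval losses from log_history above (end of epoch 1 and epoch 2).
print(early_stopping_state([0.3272962272167206, 0.31597408652305603]))
# -> (0, False), matching "early_stopping_patience_counter": 0
```
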
checkpoint-42/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:137655415fb6eee8b81e536c747ab5a21bc72fbfc062d8f8532ab6cd252b8e81
+ size 5368
config.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "_name_or_path": "cardiffnlp/twitter-roberta-base-sentiment",
+ "_num_labels": 3,
+ "architectures": [
+ "RobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "Negative",
+ "1": "Neutral",
+ "2": "Positive"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "Negative": 0,
+ "Neutral": 1,
+ "Positive": 2
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.48.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 50265
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87bc636ed400aec2d9be3d219357d3f0d16ff63c8e7d10a07195496bfc8c83e5
+ size 498615900
runs/Nov07_13-18-26_r-kaneez13-sentiment-analysis-auto-train-demo-foc9h9a-93a597cs8/events.out.tfevents.1762521509.r-kaneez13-sentiment-analysis-auto-train-demo-foc9h9a-93a597cs8.71.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1bd86a4ebe0e7260eded4081d50fd03aa0390d5a8994ff4f40626b7fbf675de3
- size 5364
+ oid sha256:e89b22ac55ed7bf62055bdd1de9eaf379248d13bff776cda7334d1f3171831e9
+ size 11271
runs/Nov07_13-18-26_r-kaneez13-sentiment-analysis-auto-train-demo-foc9h9a-93a597cs8/events.out.tfevents.1762521966.r-kaneez13-sentiment-analysis-auto-train-demo-foc9h9a-93a597cs8.71.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39eb36656dec1ba243f237958c54c9d3bae8fbc2d401cee61f5952b893e487c6
+ size 906
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "50264": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "<s>",
+ "eos_token": "</s>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "mask_token": "<mask>",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
+ "tokenizer_class": "RobertaTokenizer",
+ "trim_offsets": true,
+ "unk_token": "<unk>"
+ }
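
The special tokens declared above (`<s>`, `</s>`, `<pad>`, `<mask>`, `<unk>`) are what the `RobertaTokenizer` wraps around each input sequence. A short sketch, again assuming the repo id inferred earlier:

```python
# Sketch: inspect how the RobertaTokenizer configured above frames an input.
# The repo id is an assumption, as in the earlier examples.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("kaneez13/sentiment-analysis-demo")
ids = tokenizer("I love AutoTrain")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
# e.g. ['<s>', 'I', 'Ġlove', 'ĠAuto', 'Train', '</s>']  (approximate token split)
```
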
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:137655415fb6eee8b81e536c747ab5a21bc72fbfc062d8f8532ab6cd252b8e81
+ size 5368
training_params.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "data_path": "sentiment-analysis-demo/autotrain-data",
+ "model": "cardiffnlp/twitter-roberta-base-sentiment",
+ "lr": 3e-05,
+ "epochs": 3,
+ "max_seq_length": 128,
+ "batch_size": 2,
+ "warmup_ratio": 0.1,
+ "gradient_accumulation": 4,
+ "optimizer": "adamw_torch",
+ "scheduler": "linear",
+ "weight_decay": 0.0,
+ "max_grad_norm": 1.0,
+ "seed": 42,
+ "train_split": "train",
+ "valid_split": "validation",
+ "text_column": "autotrain_text",
+ "target_column": "autotrain_label",
+ "logging_steps": -1,
+ "project_name": "sentiment-analysis-demo",
+ "auto_find_batch_size": false,
+ "mixed_precision": "fp16",
+ "save_total_limit": 1,
+ "push_to_hub": true,
+ "eval_strategy": "epoch",
+ "username": "kaneez13",
+ "log": "tensorboard",
+ "early_stopping_patience": 5,
+ "early_stopping_threshold": 0.01
+ }
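
For readers who do not use AutoTrain, these parameters map roughly onto a plain `TrainingArguments` object. A hand-written approximation under that assumption (not AutoTrain's internal translation), copying the values from the JSON above:

```python
# Rough translation of the AutoTrain parameters into transformers TrainingArguments.
# This is a hand-written sketch, not AutoTrain's actual configuration code.
import torch
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="sentiment-analysis-demo",
    learning_rate=3e-05,
    num_train_epochs=3,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,
    warmup_ratio=0.1,
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    optim="adamw_torch",
    lr_scheduler_type="linear",
    fp16=torch.cuda.is_available(),   # "mixed_precision": "fp16" needs a GPU at runtime
    eval_strategy="epoch",
    save_total_limit=1,
    push_to_hub=True,                 # requires a Hub login when training for real
    report_to="tensorboard",
)
```
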
vocab.json ADDED
The diff for this file is too large to render. See raw diff