muneebnadeem1870 committed
Commit 3832544 · verified · 1 Parent(s): ba020e3

Upload 7 files

checkpoint-3569/config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "architectures": [
+     "SwinForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "depths": [
+     2,
+     2,
+     18,
+     2
+   ],
+   "drop_path_rate": 0.1,
+   "embed_dim": 128,
+   "encoder_stride": 32,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "Real",
+     "1": "Fake"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "Fake": 1,
+     "Real": 0
+   },
+   "layer_norm_eps": 1e-05,
+   "mlp_ratio": 4.0,
+   "model_type": "swin",
+   "num_channels": 3,
+   "num_heads": [
+     4,
+     8,
+     16,
+     32
+   ],
+   "num_layers": 4,
+   "out_features": [
+     "stage4"
+   ],
+   "out_indices": [
+     4
+   ],
+   "patch_size": 4,
+   "path_norm": true,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3",
+   "use_absolute_embeddings": false,
+   "window_size": 7
+ }
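
The config above describes a Swin-Base style encoder (embed_dim 128, depths 2/2/18/2, heads 4/8/16/32) fine-tuned as a binary Real/Fake image classifier. A minimal loading sketch, assuming the repository has been cloned locally and the checkpoint directory also contains the model weights (no weights file is part of this particular commit):

    from transformers import SwinForImageClassification

    # Load config + weights from the checkpoint directory; this requires a
    # model.safetensors / pytorch_model.bin to sit next to config.json.
    model = SwinForImageClassification.from_pretrained("./checkpoint-3569")
    print(model.config.id2label)  # expected mapping: 0 -> "Real", 1 -> "Fake"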
checkpoint-3569/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:483f21159984ebe78a6b8baea5749d9723ee161cc76b69101654cd22a3885360
+ size 694317645
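
optimizer.pt (like the .pth/.bin files further down) is stored through Git LFS, so the diff only shows the three-line pointer: the spec version, the sha256 oid of the real blob, and its size in bytes. A small verification sketch, with the oid and size copied from the pointer above and an assumed local path for the fetched binary (e.g. after `git lfs pull`):

    import hashlib

    # oid / size copied from the LFS pointer in this commit
    EXPECTED_OID = "483f21159984ebe78a6b8baea5749d9723ee161cc76b69101654cd22a3885360"
    EXPECTED_SIZE = 694317645

    sha, nbytes = hashlib.sha256(), 0
    with open("checkpoint-3569/optimizer.pt", "rb") as f:  # assumed: real binary, not the pointer
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            nbytes += len(chunk)

    print("size ok:", nbytes == EXPECTED_SIZE)
    print("sha256 ok:", sha.hexdigest() == EXPECTED_OID)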
checkpoint-3569/preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
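
This preprocessor resizes inputs to 224x224, rescales pixel values by 1/255, and normalizes with the standard ImageNet mean/std. A usage sketch, assuming a local clone and "example.jpg" standing in for any input image:

    from PIL import Image
    from transformers import ViTImageProcessor

    # Reads preprocessor_config.json from the checkpoint directory
    processor = ViTImageProcessor.from_pretrained("./checkpoint-3569")
    inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
    print(inputs["pixel_values"].shape)  # a 1 x 3 x 224 x 224 tensor, ready for the model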
checkpoint-3569/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cf6287704d996e156caa2e95ecd5898e5a208e9f6c9d5a5744832c18ede6ebb
+ size 14244
checkpoint-3569/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5ef3e45b6f7c952ac8b11d94300293ca843c2514e7621959f1b5d8aee032926
+ size 1064
checkpoint-3569/trainer_state.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 3569,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9848819187222528,
+       "eval_f1": 0.9848815385551507,
+       "eval_loss": 0.04867897927761078,
+       "eval_precision": 0.9849274340676982,
+       "eval_recall": 0.9848819187222528,
+       "eval_runtime": 839.8961,
+       "eval_samples_per_second": 90.647,
+       "eval_steps_per_second": 5.666,
+       "step": 3569
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 7138,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 8.947140798276882e+18,
+   "train_batch_size": 32,
+   "trial_name": null,
+   "trial_params": null
+ }
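
trainer_state.json records that this checkpoint sits at the end of epoch 1 of 2 (step 3569 of 7138), with roughly 98.5% eval accuracy and F1. A small sketch for pulling those numbers out programmatically (the path assumes a local clone):

    import json

    with open("checkpoint-3569/trainer_state.json") as f:
        state = json.load(f)

    latest = state["log_history"][-1]  # most recent logged entry
    print(f"step {state['global_step']}/{state['max_steps']}, epoch {state['epoch']}")
    print({k: v for k, v in latest.items() if k.startswith("eval_")})

Training can also be resumed from this directory by passing resume_from_checkpoint="checkpoint-3569" to Trainer.train(), provided the original model, datasets, and TrainingArguments are reconstructed.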
checkpoint-3569/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64b8626f5e0841a11e42111fd6bd51cc5fb2d0bcd488198afb90a0b37de3e8aa
+ size 5304
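
training_args.bin is the pickled transformers.TrainingArguments object the Trainer was launched with. A sketch for inspecting it (weights_only=False is needed on recent PyTorch releases because the file is a full pickle, so only load it from a source you trust):

    import torch

    args = torch.load("checkpoint-3569/training_args.bin", weights_only=False)
    print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)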