Upload 13 files

- added_tokens.json +3 -0
- config.json +38 -0
- generation_config.json +6 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- special_tokens_map.json +12 -0
- tokenizer.json +0 -0
- tokenizer_config.json +29 -0
- trainer_state.json +77 -0
- training_args.bin +3 -0
- vocab.json +0 -0

added_tokens.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "[PAD]": 50257
+}
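
The single entry maps a new [PAD] token to id 50257, one past the base GPT-2 vocabulary of tiiuae/falcon-rw-1b (ids 0-50256); config.json below reports the matching vocab_size of 50258. A plausible sketch of how such a file gets written during fine-tuning, assuming "./checkpoint" as the output directory:

    from transformers import AutoTokenizer

    # Register [PAD] as a new special token; it is appended after
    # <|endoftext|> (id 50256) and therefore receives id 50257.
    tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")
    tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    assert tokenizer.pad_token_id == 50257

    # save_pretrained() writes added_tokens.json next to the other
    # tokenizer files; the model itself would also need
    # model.resize_token_embeddings(len(tokenizer)) -> 50258 rows.
    tokenizer.save_pretrained("./checkpoint")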

config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "_name_or_path": "tiiuae/falcon-rw-1b",
+  "alibi": true,
+  "apply_residual_connection_post_layernorm": false,
+  "architectures": [
+    "FalconForCausalLMSimulMT"
+  ],
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "tiiuae/falcon-rw-1b--configuration_falcon.FalconConfig",
+    "AutoModel": "tiiuae/falcon-rw-1b--modeling_falcon.FalconModel",
+    "AutoModelForCausalLM": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForCausalLM",
+    "AutoModelForQuestionAnswering": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForQuestionAnswering",
+    "AutoModelForSequenceClassification": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForSequenceClassification",
+    "AutoModelForTokenClassification": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForTokenClassification"
+  },
+  "bias": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_dropout": 0.0,
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 2048,
+  "model_type": "falcon",
+  "multi_query": false,
+  "new_decoder_architecture": false,
+  "num_attention_heads": 32,
+  "num_hidden_layers": 24,
+  "num_kv_heads": 32,
+  "parallel_attn": false,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.34.1",
+  "use_cache": true,
+  "vocab_size": 50258
+}
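
The config names a custom class, FalconForCausalLMSimulMT, while auto_map resolves the standard Auto classes to the modeling code shipped in the tiiuae/falcon-rw-1b repo, so loading through the Auto API needs trust_remote_code=True (transformers 4.34 also ships native Falcon support, so this is belt-and-braces). A minimal loading sketch, assuming the same local "./checkpoint" path:

    import torch
    from transformers import AutoModelForCausalLM

    # AutoModelForCausalLM follows auto_map to FalconForCausalLM; using
    # the custom FalconForCausalLMSimulMT class would need its own code.
    model = AutoModelForCausalLM.from_pretrained(
        "./checkpoint",
        torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
        trust_remote_code=True,
    )
    print(model.config.vocab_size)  # 50258 (50257 GPT-2 tokens + [PAD])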

generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.34.1"
+}

merges.txt
ADDED
The diff for this file is too large to render. See raw diff.

pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f310e217d956ab7329d5907f7cdfcca601ad3c9031564e230c89243395c6f44
+size 2623160918
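
These three lines are a Git LFS pointer, not the weights themselves: the actual ~2.6 GB file is stored by content hash and fetched separately. A download can be verified against the pointer's oid; a minimal sketch, assuming the file sits in the working directory:

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        # Stream in 1 MiB chunks so multi-GB weight files fit in memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    expected = "9f310e217d956ab7329d5907f7cdfcca601ad3c9031564e230c89243395c6f44"
    assert sha256_of("pytorch_model.bin") == expected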

rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e5a5422bffb8ad489dd18316b774678880c86d55fb9075bc7a76cdb487d4cec
+size 14244

scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6275c47d6f812f87a9b4ad75251c347b858c02c1ccaa5f559dbf5ff60dc7cbb3
+size 1064

special_tokens_map.json
ADDED
@@ -0,0 +1,12 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": "<|endoftext|>"
+}

tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.

tokenizer_config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50257": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "pad_token": "[PAD]",
+  "tokenizer_class": "GPT2Tokenizer",
+  "trust_remote_code": true,
+  "unk_token": "<|endoftext|>"
+}
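
Loading the tokenizer from the checkpoint picks up all of these files together: vocab.json/merges.txt for the BPE model, this config, and the [PAD] entry from added_tokens.json. Note that model_max_length is 1024 even though the model's max_position_embeddings is 2048, so the tokenizer truncates earlier than the model strictly requires. A quick check, assuming the same local path as above:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("./checkpoint")
    print(tokenizer.pad_token, tokenizer.pad_token_id)  # [PAD] 50257
    print(tokenizer.eos_token, tokenizer.eos_token_id)  # <|endoftext|> 50256

    # Padding now works, which the base GPT-2-style tokenizer
    # (which has no pad token) would have refused.
    batch = tokenizer(["hello", "a longer sentence"], padding=True)
    print(batch["input_ids"])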

trainer_state.json
ADDED
@@ -0,0 +1,77 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.0,
+  "eval_steps": 10000,
+  "global_step": 7276,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.27,
+      "learning_rate": 9.359487165438073e-05,
+      "loss": 1.3028,
+      "step": 1000
+    },
+    {
+      "epoch": 0.55,
+      "learning_rate": 6.618156843109719e-05,
+      "loss": 0.9004,
+      "step": 2000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 5.403702434442518e-05,
+      "loss": 0.7984,
+      "step": 3000
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 1.0141562223434448,
+      "eval_runtime": 3.363,
+      "eval_samples_per_second": 264.642,
+      "eval_steps_per_second": 33.303,
+      "step": 3638
+    },
+    {
+      "epoch": 1.1,
+      "learning_rate": 4.679743582719036e-05,
+      "loss": 0.6662,
+      "step": 4000
+    },
+    {
+      "epoch": 1.37,
+      "learning_rate": 4.1856899072912704e-05,
+      "loss": 0.4999,
+      "step": 5000
+    },
+    {
+      "epoch": 1.65,
+      "learning_rate": 3.820994634908561e-05,
+      "loss": 0.4938,
+      "step": 6000
+    },
+    {
+      "epoch": 1.92,
+      "learning_rate": 3.537553634121427e-05,
+      "loss": 0.4857,
+      "step": 7000
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 1.0224509239196777,
+      "eval_runtime": 3.3432,
+      "eval_samples_per_second": 266.209,
+      "eval_steps_per_second": 33.5,
+      "step": 7276
+    }
+  ],
+  "logging_steps": 1000,
+  "max_steps": 7276,
+  "num_train_epochs": 2,
+  "save_steps": 500,
+  "total_flos": 2.5033888640414515e+17,
+  "trial_name": null,
+  "trial_params": null
+}
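
trainer_state.json is plain JSON, so the training curve can be inspected without transformers installed. Training loss falls from 1.3028 to 0.4857 over the 7276 steps, while eval_loss rises slightly between epochs (1.0142 to 1.0225), a hint of mild overfitting in epoch 2. A small sketch, assuming the file is local:

    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    # Entries with "loss" are training logs (every 1000 steps);
    # entries with "eval_loss" are the end-of-epoch evaluations.
    for e in state["log_history"]:
        if "loss" in e:
            print(f"step {e['step']:>5}  loss {e['loss']:.4f}  "
                  f"lr {e['learning_rate']:.2e}")
        elif "eval_loss" in e:
            print(f"epoch {e['epoch']}  eval_loss {e['eval_loss']:.4f}")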

training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ecef41dcd49770cd396f2b37ca10a5876c0c98da94c15fffa3b3f3aae7ca086
+size 4600
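
training_args.bin is a pickled TrainingArguments object rather than a tensor file, so it can be inspected with torch.load; on PyTorch 2.6+ this needs an explicit weights_only=False since the default changed. A sketch, assuming a local copy:

    import torch

    # Pickled TrainingArguments, not a state dict, hence weights_only=False.
    args = torch.load("training_args.bin", weights_only=False)
    print(args.num_train_epochs, args.learning_rate,
          args.per_device_train_batch_size)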

vocab.json
ADDED
The diff for this file is too large to render. See raw diff.