Abdo96 and yhshin committed
Commit d13efc1 · 0 Parent(s)

Duplicate from yhshin/latex-ocr


Co-authored-by: Young Ho Shin <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Latex Ocr
+ emoji: 👀
+ colorFrom: red
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 2.9.4
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: yhshin/latex-ocr
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,66 @@
+ import gradio as gr
+ import requests
+ from PIL import Image
+ from tokenizers import Tokenizer
+ from transformers import TrOCRProcessor, VisionEncoderDecoderModel
+
+ # Download the word-level LaTeX tokenizer from the model repo.
+ url = 'https://huggingface.co/yhshin/latex-ocr/raw/main/tokenizer-wordlevel.json'
+ r = requests.get(url)
+ with open('tokenizer-wordlevel.json', 'wb') as f:
+     f.write(r.content)
+
+ processor = TrOCRProcessor.from_pretrained("microsoft/trocr-small-printed")
+ model = VisionEncoderDecoderModel.from_pretrained("yhshin/latex-ocr")
+ tokenizer = Tokenizer.from_file("tokenizer-wordlevel.json")
+
+ def process_image(image):
+     # prepare the image for the encoder
+     pixel_values = processor(image, return_tensors="pt").pixel_values
+
+     # generate LaTeX token ids
+     generated_ids = model.generate(pixel_values)
+
+     # decode the ids back into LaTeX tokens
+     generated_text = tokenizer.decode_batch(generated_ids.tolist(), skip_special_tokens=True)[0]
+
+     # strip the spaces between tokens
+     generated_text = generated_text.replace(" ", "")
+
+     return generated_text
+
+ title = "Convert image to LaTeX source code"
+
+ with open('article.md', mode='r') as file:
+     article = file.read()
+
+ description = """
+ This is a demo of a machine learning model trained to reconstruct the LaTeX source code of an equation from an image.
+ To use it, simply upload an image or use one of the example images below and click 'submit'.
+ Results will show up in a few seconds.
+
+ Try rendering the generated LaTeX [here](https://quicklatex.com/) to compare with the original.
+ (The model is not perfect yet, so you may need to edit the resulting LaTeX a bit to get it to render a good match.)
+ """
+
+ examples = [
+     ["examples/1d32874f02.png"],
+     ["examples/1e466b180d.png"],
+     ["examples/2d3503f427.png"],
+     ["examples/2f9d3c4e43.png"],
+     ["examples/51c5cc2ff5.png"],
+     ["examples/545a492388.png"],
+     ["examples/6a51a30502.png"],
+     ["examples/6bf6832adb.png"],
+     ["examples/7afdeff0e6.png"],
+     ["examples/b8f1e64b1f.png"],
+ ]
+
+ iface = gr.Interface(fn=process_image,
+                      inputs=[gr.inputs.Image(type="pil")],
+                      outputs=gr.outputs.Textbox(),
+                      title=title,
+                      description=description,
+                      article=article,
+                      examples=examples)
+ iface.launch()
article.md ADDED
@@ -0,0 +1,74 @@
+ ## What's the point of this?
+
+ LaTeX is the de facto standard markup language for typesetting pretty equations in academic papers.
+ It is extremely feature-rich and flexible, but also very verbose.
+ This makes it great for typesetting complex equations, but not very convenient for quick note-taking on the fly.
+
+ For example, here's a short equation from [this page](https://en.wikipedia.org/wiki/Quantum_electrodynamics) on Wikipedia about Quantum Electrodynamics
+ and the corresponding LaTeX code:
+
+ ![Example](https://wikimedia.org/api/rest_v1/media/math/render/svg/6faab1adbb88a567a52e55b2012e836a011a0675)
+
+ ```
+ {\displaystyle {\mathcal {L}}={\bar {\psi }}(i\gamma ^{\mu }D_{\mu }-m)\psi -{\frac {1}{4}}F_{\mu \nu }F^{\mu \nu },}
+ ```
+
+ This demo is a first step toward solving this problem.
+ Eventually, you'll be able to take a quick partial screenshot from a paper,
+ and a program built with this model will generate the corresponding LaTeX source code
+ so that you can paste it straight into your personal notes.
+ No more endless googling of obscure LaTeX syntax!
+
+ ## How does it work?
+
+ Because this problem involves looking at an image and generating valid LaTeX code,
+ the model needs to combine Computer Vision (CV) and Natural Language Processing (NLP).
+ There are other projects that aim to solve the same problem with some very interesting models.
+ These generally involve some kind of "encoder" that looks at the image and extracts/encodes the information about the equation,
+ and a "decoder" that takes that information and translates it into what is hopefully both valid and accurate LaTeX code.
+ The "encode" part can be done using classic CNN architectures commonly used for CV tasks, or newer vision transformer architectures.
+ The "decode" part can be done with LSTMs or transformer decoders, using an attention mechanism to make sure the decoder understands long-range dependencies, e.g. remembering to close a bracket that was opened many tokens earlier.
+
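+ This encoder/decoder pairing is exactly what the `VisionEncoderDecoderModel` class in Hugging Face `transformers` provides, and it is what this demo is built on. Here is a minimal sketch of how the two halves fit together (illustrative only, not the app code):
+
+ ```
+ from transformers import VisionEncoderDecoderModel
+
+ # trocr-small-stage1 pairs a DeiT vision transformer (the "encode" part)
+ # with a TrOCR transformer decoder (the "decode" part).
+ model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-small-stage1")
+ print(model.config.encoder.model_type)  # "deit"
+ print(model.config.decoder.model_type)  # "trocr"
+ ```
+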
+ I chose to tackle this problem with transfer learning, using an existing OCR model and fine-tuning it for this task.
+ The biggest reason for this is computing constraints:
+ GPU hours are expensive, so I wanted training to be reasonably fast, on the order of a couple of hours.
+ There are some other benefits to this approach,
+ e.g. the architecture has already been proven robust.
+ I chose [TrOCR](https://arxiv.org/abs/2109.10282), a model trained at Microsoft for text recognition tasks, which uses a transformer architecture for both the encoder and the decoder.
+
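+ Fine-tuning itself follows the standard supervised recipe: show the model an equation image together with the tokenized LaTeX as labels, and minimize the cross-entropy loss on the predicted tokens. Here is a simplified sketch of a single training step (the actual training code is in the Kaggle notebook linked below; the optimizer settings are assumed):
+
+ ```
+ import torch
+ from transformers import TrOCRProcessor, VisionEncoderDecoderModel
+
+ processor = TrOCRProcessor.from_pretrained("microsoft/trocr-small-stage1")
+ model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-small-stage1")
+ optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)  # assumed hyperparameter
+
+ def train_step(image, label_ids):
+     # Passing labels makes the model compute the cross-entropy loss
+     # between its predictions and the reference LaTeX token ids.
+     pixel_values = processor(image, return_tensors="pt").pixel_values
+     loss = model(pixel_values=pixel_values, labels=label_ids).loss
+     loss.backward()
+     optimizer.step()
+     optimizer.zero_grad()
+     return loss.item()
+ ```
+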
+ For the data, I used the `im2latex-100k` dataset, which includes a total of roughly 100k formulas and images.
+ Some preprocessing steps were done by Harvard NLP for the [`im2markup` project](https://github.com/harvardnlp/im2markup).
+ To limit the scope of the project and simplify the task, I limited the training data to equations containing 100 LaTeX tokens or fewer.
+ This covers most single-line equations, including fractions, subscripts, symbols, etc., but does not cover large multi-line equations, some of which can have up to 500 LaTeX tokens.
+ GPU training was done on a Kaggle GPU Kernel in roughly 3 hours.
+ You can find the full training code on my Kaggle profile [here](https://www.kaggle.com/code/younghoshin/finetuning-trocr/notebook).
+
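+ Since the `im2markup` preprocessing leaves each formula as a whitespace-separated sequence of tokens, the length filter is essentially a one-liner (a sketch; the names are made up):
+
+ ```
+ MAX_TOKENS = 100
+
+ def keep_short_formulas(formulas, max_tokens=MAX_TOKENS):
+     # Formulas arrive pre-tokenized, e.g. r"\frac { 1 } { 2 }",
+     # so counting tokens is just splitting on whitespace.
+     return [f for f in formulas if len(f.split()) <= max_tokens]
+ ```
+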
+ ## What's next?
+
+ There are multiple improvements that I'm hoping to make to this project.
+
+ ### More robust prediction
+
+ If you've tried the examples above (randomly sampled from the test set), you've probably noticed that the model's predictions aren't quite perfect: it occasionally misses, duplicates, or confuses tokens.
+ More training on the existing dataset could help with this.
+
+ ### More data
+
+ There's a lot of LaTeX data available on the internet besides `im2latex-100k`, e.g. on arXiv and Wikipedia.
+ It's just waiting to be scraped and used for this project.
+ That means many hours of scraping, cleaning, and processing, but having a more diverse set of input images could improve model accuracy significantly.
+
+ ### Faster and smaller model
+
+ The model currently takes a few seconds to process a single image.
+ I would love to improve performance so that it can run in one second or less, maybe even on mobile devices.
+ This might be impossible with TrOCR, which is a fairly large model designed for use on GPUs.
+
+ <p style='text-align: center'>Made by Young Ho Shin</p>
+ <p style='text-align: center'>
+ <a href="mailto: [email protected]">Email</a> |
+ <a href='https://www.github.com/yhshin11'>Github</a> |
+ <a href='https://www.linkedin.com/in/young-ho-shin/'>LinkedIn</a>
+ </p>
examples/1d32874f02.png ADDED
examples/1e466b180d.png ADDED
examples/2d3503f427.png ADDED
examples/2f9d3c4e43.png ADDED
examples/51c5cc2ff5.png ADDED
examples/545a492388.png ADDED
examples/6a51a30502.png ADDED
examples/6bf6832adb.png ADDED
examples/7afdeff0e6.png ADDED
examples/b8f1e64b1f.png ADDED
model/config.json ADDED
@@ -0,0 +1,168 @@
+ {
+   "_name_or_path": "microsoft/trocr-small-stage1",
+   "architectures": [
+     "VisionEncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "",
+     "activation_dropout": 0.0,
+     "activation_function": "relu",
+     "add_cross_attention": true,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": 0.0,
+     "cross_attention_hidden_size": 384,
+     "d_model": 256,
+     "decoder_attention_heads": 8,
+     "decoder_ffn_dim": 1024,
+     "decoder_layerdrop": 0.0,
+     "decoder_layers": 6,
+     "decoder_start_token_id": 2,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.1,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "init_std": 0.02,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layernorm_embedding": true,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "trocr",
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "scale_embedding": true,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": false,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.16.2",
+     "use_bfloat16": false,
+     "use_cache": false,
+     "use_learned_position_embeddings": true,
+     "vocab_size": 64044
+   },
+   "decoder_start_token_id": 1,
+   "early_stopping": true,
+   "encoder": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_probs_dropout_prob": 0.0,
+     "bad_words_ids": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.0,
+     "hidden_size": 384,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 384,
+     "initializer_range": 0.02,
+     "intermediate_size": 1536,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "deit",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 6,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 16,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "qkv_bias": true,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.16.2",
+     "use_bfloat16": false
+   },
+   "eos_token_id": 2,
+   "is_encoder_decoder": true,
+   "length_penalty": 2.0,
+   "max_length": 100,
+   "model_type": "vision-encoder-decoder",
+   "no_repeat_ngram_size": 3,
+   "num_beams": 4,
+   "pad_token_id": 3,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": null,
+   "vocab_size": 200
+ }
model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4a4c0a9d1da23b3f66f4e6af213f9d17945aa1fb9376f2ad59c03ec74995ba7
+ size 246530221
model/tokenizer-wordlevel.json ADDED
@@ -0,0 +1,352 @@
+ {
+   "version": "1.0",
+   "truncation": {
+     "direction": "Right",
+     "max_length": 100,
+     "strategy": "LongestFirst",
+     "stride": 0
+   },
+   "padding": {
+     "strategy": {
+       "Fixed": 100
+     },
+     "direction": "Right",
+     "pad_to_multiple_of": null,
+     "pad_id": 0,
+     "pad_type_id": 0,
+     "pad_token": "[PAD]"
+   },
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "[UNK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "[CLS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "[SEP]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "[PAD]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 4,
+       "content": "[MASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "[CLS]": {
+         "id": "[CLS]",
+         "ids": [
+           1
+         ],
+         "tokens": [
+           "[CLS]"
+         ]
+       },
+       "[SEP]": {
+         "id": "[SEP]",
+         "ids": [
+           2
+         ],
+         "tokens": [
+           "[SEP]"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "[UNK]": 0,
+       "[CLS]": 1,
+       "[SEP]": 2,
+       "[PAD]": 3,
+       "[MASK]": 4,
+       "}": 5,
+       "{": 6,
+       "\\": 7,
+       "_": 8,
+       "^": 9,
+       "(": 10,
+       ")": 11,
+       "2": 12,
+       "1": 13,
+       "-": 14,
+       "=": 15,
+       ",": 16,
+       "+": 17,
+       "frac": 18,
+       "i": 19,
+       "0": 20,
+       "x": 21,
+       "n": 22,
+       ".": 23,
+       "d": 24,
+       "\\,": 25,
+       "a": 26,
+       "mu": 27,
+       "left": 28,
+       "right": 29,
+       "e": 30,
+       "k": 31,
+       "c": 32,
+       "m": 33,
+       "r": 34,
+       "p": 35,
+       "3": 36,
+       "alpha": 37,
+       "t": 38,
+       "partial": 39,
+       "~": 40,
+       "l": 41,
+       "A": 42,
+       "s": 43,
+       "&": 44,
+       "4": 45,
+       "j": 46,
+       "\\;": 47,
+       "g": 48,
+       "prime": 49,
+       "]": 50,
+       "[": 51,
+       "nu": 52,
+       "z": 53,
+       "pi": 54,
+       "|": 55,
+       "b": 56,
+       "phi": 57,
+       "\\\\": 58,
+       "mathrm": 59,
+       "q": 60,
+       "operatorname": 61,
+       "cal": 62,
+       "N": 63,
+       "delta": 64,
+       "f": 65,
+       "lambda": 66,
+       "beta": 67,
+       "bar": 68,
+       "T": 69,
+       "int": 70,
+       "array": 71,
+       "R": 72,
+       "S": 73,
+       "D": 74,
+       "L": 75,
+       "M": 76,
+       "B": 77,
+       "y": 78,
+       "sigma": 79,
+       "F": 80,
+       "theta": 81,
+       "/": 82,
+       "gamma": 83,
+       "h": 84,
+       "hat": 85,
+       "psi": 86,
+       "sqrt": 87,
+       "sum": 88,
+       "u": 89,
+       "H": 90,
+       "o": 91,
+       "rho": 92,
+       "tilde": 93,
+       "tau": 94,
+       "C": 95,
+       "P": 96,
+       "G": 97,
+       "V": 98,
+       "I": 99,
+       "X": 100,
+       "omega": 101,
+       "epsilon": 102,
+       "E": 103,
+       "J": 104,
+       "bf": 105,
+       "eta": 106,
+       "v": 107,
+       "xi": 108,
+       "Q": 109,
+       "Phi": 110,
+       "quad": 111,
+       "*": 112,
+       "5": 113,
+       "\\{": 114,
+       "vec": 115,
+       "begin": 116,
+       "end": 117,
+       "Gamma": 118,
+       "K": 119,
+       "infty": 120,
+       "\\}": 121,
+       "6": 122,
+       "U": 123,
+       "rangle": 124,
+       "dot": 125,
+       "W": 126,
+       "pm": 127,
+       "Lambda": 128,
+       "Z": 129,
+       "varphi": 130,
+       "Delta": 131,
+       "w": 132,
+       "chi": 133,
+       ";": 134,
+       "8": 135,
+       "\\!": 136,
+       "Omega": 137,
+       "kappa": 138,
+       "qquad": 139,
+       "cdot": 140,
+       "Psi": 141,
+       "equiv": 142,
+       "langle": 143,
+       "overline": 144,
+       ">": 145,
+       "<": 146,
+       "dagger": 147,
+       "zeta": 148,
+       "varepsilon": 149,
+       "cdots": 150,
+       "rightarrow": 151,
+       "O": 152,
+       "nabla": 153,
+       "Y": 154,
+       "ldots": 155,
+       ":": 156,
+       "Sigma": 157,
+       "ell": 158,
+       "7": 159,
+       "mathcal": 160,
+       "\\:": 161,
+       "!": 162,
+       "otimes": 163,
+       "prod": 164,
+       "wedge": 165,
+       "9": 166,
+       "hspace": 167,
+       "Pi": 168,
+       "hbar": 169,
+       "sim": 170,
+       "vert": 171,
+       "in": 172,
+       "Big": 173,
+       "widetilde": 174,
+       "displaystyle": 175,
+       "times": 176,
+       "Theta": 177,
+       "underline": 178,
+       "mid": 179,
+       "to": 180,
+       "dots": 181,
+       "mathbf": 182,
+       "ast": 183,
+       "leq": 184,
+       "approx": 185,
+       "star": 186,
+       "stackrel": 187,
+       "perp": 188,
+       "widehat": 189,
+       "big": 190,
+       "vartheta": 191,
+       "'": 192,
+       "Bigr": 193,
+       "geq": 194,
+       "mp": 195,
+       "Bigl": 196,
+       "dag": 197,
+       "neq": 198,
+       "simeq": 199
+     },
+     "unk_token": "[UNK]"
+   }
+ }
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch==1.9.0
+ transformers==4.18.0
+ sentencepiece==0.1.96
+
tokenizer-wordlevel.json ADDED
@@ -0,0 +1,352 @@
+ {
+   "version": "1.0",
+   "truncation": {
+     "direction": "Right",
+     "max_length": 100,
+     "strategy": "LongestFirst",
+     "stride": 0
+   },
+   "padding": {
+     "strategy": {
+       "Fixed": 100
+     },
+     "direction": "Right",
+     "pad_to_multiple_of": null,
+     "pad_id": 0,
+     "pad_type_id": 0,
+     "pad_token": "[PAD]"
+   },
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "[UNK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "[CLS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "[SEP]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "[PAD]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 4,
+       "content": "[MASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "Whitespace"
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "[CLS]": {
+         "id": "[CLS]",
+         "ids": [
+           1
+         ],
+         "tokens": [
+           "[CLS]"
+         ]
+       },
+       "[SEP]": {
+         "id": "[SEP]",
+         "ids": [
+           2
+         ],
+         "tokens": [
+           "[SEP]"
+         ]
+       }
+     }
+   },
+   "decoder": null,
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "[UNK]": 0,
+       "[CLS]": 1,
+       "[SEP]": 2,
+       "[PAD]": 3,
+       "[MASK]": 4,
+       "}": 5,
+       "{": 6,
+       "\\": 7,
+       "_": 8,
+       "^": 9,
+       "(": 10,
+       ")": 11,
+       "2": 12,
+       "1": 13,
+       "-": 14,
+       "=": 15,
+       ",": 16,
+       "+": 17,
+       "frac": 18,
+       "i": 19,
+       "0": 20,
+       "x": 21,
+       "n": 22,
+       ".": 23,
+       "d": 24,
+       "\\,": 25,
+       "a": 26,
+       "mu": 27,
+       "left": 28,
+       "right": 29,
+       "e": 30,
+       "k": 31,
+       "c": 32,
+       "m": 33,
+       "r": 34,
+       "p": 35,
+       "3": 36,
+       "alpha": 37,
+       "t": 38,
+       "partial": 39,
+       "~": 40,
+       "l": 41,
+       "A": 42,
+       "s": 43,
+       "&": 44,
+       "4": 45,
+       "j": 46,
+       "\\;": 47,
+       "g": 48,
+       "prime": 49,
+       "]": 50,
+       "[": 51,
+       "nu": 52,
+       "z": 53,
+       "pi": 54,
+       "|": 55,
+       "b": 56,
+       "phi": 57,
+       "\\\\": 58,
+       "mathrm": 59,
+       "q": 60,
+       "operatorname": 61,
+       "cal": 62,
+       "N": 63,
+       "delta": 64,
+       "f": 65,
+       "lambda": 66,
+       "beta": 67,
+       "bar": 68,
+       "T": 69,
+       "int": 70,
+       "array": 71,
+       "R": 72,
+       "S": 73,
+       "D": 74,
+       "L": 75,
+       "M": 76,
+       "B": 77,
+       "y": 78,
+       "sigma": 79,
+       "F": 80,
+       "theta": 81,
+       "/": 82,
+       "gamma": 83,
+       "h": 84,
+       "hat": 85,
+       "psi": 86,
+       "sqrt": 87,
+       "sum": 88,
+       "u": 89,
+       "H": 90,
+       "o": 91,
+       "rho": 92,
+       "tilde": 93,
+       "tau": 94,
+       "C": 95,
+       "P": 96,
+       "G": 97,
+       "V": 98,
+       "I": 99,
+       "X": 100,
+       "omega": 101,
+       "epsilon": 102,
+       "E": 103,
+       "J": 104,
+       "bf": 105,
+       "eta": 106,
+       "v": 107,
+       "xi": 108,
+       "Q": 109,
+       "Phi": 110,
+       "quad": 111,
+       "*": 112,
+       "5": 113,
+       "\\{": 114,
+       "vec": 115,
+       "begin": 116,
+       "end": 117,
+       "Gamma": 118,
+       "K": 119,
+       "infty": 120,
+       "\\}": 121,
+       "6": 122,
+       "U": 123,
+       "rangle": 124,
+       "dot": 125,
+       "W": 126,
+       "pm": 127,
+       "Lambda": 128,
+       "Z": 129,
+       "varphi": 130,
+       "Delta": 131,
+       "w": 132,
+       "chi": 133,
+       ";": 134,
+       "8": 135,
+       "\\!": 136,
+       "Omega": 137,
+       "kappa": 138,
+       "qquad": 139,
+       "cdot": 140,
+       "Psi": 141,
+       "equiv": 142,
+       "langle": 143,
+       "overline": 144,
+       ">": 145,
+       "<": 146,
+       "dagger": 147,
+       "zeta": 148,
+       "varepsilon": 149,
+       "cdots": 150,
+       "rightarrow": 151,
+       "O": 152,
+       "nabla": 153,
+       "Y": 154,
+       "ldots": 155,
+       ":": 156,
+       "Sigma": 157,
+       "ell": 158,
+       "7": 159,
+       "mathcal": 160,
+       "\\:": 161,
+       "!": 162,
+       "otimes": 163,
+       "prod": 164,
+       "wedge": 165,
+       "9": 166,
+       "hspace": 167,
+       "Pi": 168,
+       "hbar": 169,
+       "sim": 170,
+       "vert": 171,
+       "in": 172,
+       "Big": 173,
+       "widetilde": 174,
+       "displaystyle": 175,
+       "times": 176,
+       "Theta": 177,
+       "underline": 178,
+       "mid": 179,
+       "to": 180,
+       "dots": 181,
+       "mathbf": 182,
+       "ast": 183,
+       "leq": 184,
+       "approx": 185,
+       "star": 186,
+       "stackrel": 187,
+       "perp": 188,
+       "widehat": 189,
+       "big": 190,
+       "vartheta": 191,
+       "'": 192,
+       "Bigr": 193,
+       "geq": 194,
+       "mp": 195,
+       "Bigl": 196,
+       "dag": 197,
+       "neq": 198,
+       "simeq": 199
+     },
+     "unk_token": "[UNK]"
+   }
+ }
version-history.md ADDED
@@ -0,0 +1,6 @@
+ | Version | # epochs | max # tokens | vocab size | notebook and training log | Comments |
+ |---------|----------|--------------|------------|---------------------------|----------|
+ | v4      | 10       | 100          | 200        | [link](https://www.kaggle.com/code/younghoshin/finetuning-trocr/notebook?scriptVersionId=94172330) | |
+ |         |          |              |            |                           |          |
+ |         |          |              |            |                           |          |
+ |         |          |              |            |                           |          |