Commit bb96823
Parent(s): b714afc
initial commit

- README.md +66 -0
- added_tokens.json +1 -0
- blaze-koneski.jpg +0 -0
- config.json +32 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,66 @@
---
language:
- mk
thumbnail: https://huggingface.co/macedonizer/mk-roberta-base/blaze-koneski.jpg
license: apache-2.0
datasets:
- wiki-mk
- time-mk-news-2010-2015
---

# mk-gpt2

Test the model's full generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large

A model pretrained on the Macedonian language using a causal language modeling (CLM) objective. The underlying GPT-2 architecture was introduced in
[this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf)
and first released at [this page](https://openai.com/blog/better-language-models/).

+
## Model description
|
| 18 |
+
mk-gpt2 is a transformers model pretrained on a very large corpus of Macedonian data in a self-supervised fashion. This
|
| 19 |
+
means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots
|
| 20 |
+
of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely,
|
| 21 |
+
it was trained to guess the next word in sentences.
|
| 22 |
+
More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence,
|
| 23 |
+
shifted one token (word or piece of word) to the right. The model uses internally a mask-mechanism to make sure the
|
| 24 |
+
predictions for the token `i` only uses the inputs from `1` to `i` but not the future tokens.
|
| 25 |
+
This way, the model learns an inner representation of the Macedonian language that can then be used to extract features
|
| 26 |
+
useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a
|
| 27 |
+
prompt.
|
| 28 |
+
|
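To make the shifted-label setup concrete, here is a minimal illustrative sketch (not part of the original card; the token IDs below are invented for demonstration, not taken from the mk-gpt2 vocabulary):

```python
# Illustrative only: how causal-LM inputs and labels line up.
import torch

token_ids = torch.tensor([12, 87, 5, 930, 4])  # a hypothetical encoded sentence

inputs = token_ids[:-1]   # what the model sees
labels = token_ids[1:]    # the same sequence shifted one position to the right

# The causal attention mask guarantees that the prediction at position i
# only looks at inputs[0..i], never at later tokens.
for i in range(len(inputs)):
    print(f"context={inputs[: i + 1].tolist()} -> target={labels[i].item()}")
```
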
### How to use

Here is how to use this model to generate text from a prompt in PyTorch:

+
import random
|
| 33 |
+
from transformers import AutoTokenizer, AutoModelWithLMHead
|
| 34 |
+
|
| 35 |
+
tokenizer = AutoTokenizer.from_pretrained('macedonizer/mk-gpt2') \
|
| 36 |
+
model = AutoModelWithLMHead.from_pretrained('macedonizer/mk-gpt2')
|
| 37 |
+
|
| 38 |
+
input_text = 'Скопје е '
|
| 39 |
+
|
| 40 |
+
if len(input_text) == 0: \
|
| 41 |
+
encoded_input = tokenizer(input_text, return_tensors="pt") \
|
| 42 |
+
output = model.generate( \
|
| 43 |
+
bos_token_id=random.randint(1, 50000), \
|
| 44 |
+
do_sample=True, \
|
| 45 |
+
top_k=50, \
|
| 46 |
+
max_length=1024, \
|
| 47 |
+
top_p=0.95, \
|
| 48 |
+
num_return_sequences=1, \
|
| 49 |
+
) \
|
| 50 |
+
else: \
|
| 51 |
+
encoded_input = tokenizer(input_text, return_tensors="pt") \
|
| 52 |
+
output = model.generate( \
|
| 53 |
+
**encoded_input, \
|
| 54 |
+
bos_token_id=random.randint(1, 50000), \
|
| 55 |
+
do_sample=True, \
|
| 56 |
+
top_k=50, \
|
| 57 |
+
max_length=1024, \
|
| 58 |
+
top_p=0.95, \
|
| 59 |
+
num_return_sequences=1, \
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
decoded_output = [] \
|
| 63 |
+
for sample in output: \
|
| 64 |
+
decoded_output.append(tokenizer.decode(sample, skip_special_tokens=True))
|
| 65 |
+
|
| 66 |
+
print(decoded_output)
|
added_tokens.json
ADDED
@@ -0,0 +1 @@
{"<|endoftext|>": 52000, "<|beginoftext|>": 52001, "<PAD>": 52002}
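These three entries extend the base BPE vocabulary at IDs 52000-52002, consistent with the `vocab_size` of 52003 declared in config.json below. A quick check (illustrative sketch, assuming the hub tokenizer loads the added tokens exactly as listed here):

```python
# Illustrative check of the added-token IDs.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("macedonizer/mk-gpt2")
for token in ["<|endoftext|>", "<|beginoftext|>", "<PAD>"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
# Expected from added_tokens.json: 52000, 52001, 52002.
```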
blaze-koneski.jpg
ADDED
config.json
ADDED
@@ -0,0 +1,32 @@
{
  "_name_or_path": "../../models/mk-gpt2",
  "activation_function": "gelu_new",
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "attn_pdrop": 0.1,
  "bos_token_id": 0,
  "embd_pdrop": 0.1,
  "eos_token_id": 2,
  "gradient_checkpointing": false,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 1024,
  "n_embd": 768,
  "n_head": 12,
  "n_inner": null,
  "n_layer": 12,
  "n_positions": 1024,
  "resid_pdrop": 0.1,
  "scale_attn_weights": true,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "torch_dtype": "float32",
  "transformers_version": "4.9.1",
  "use_cache": true,
  "vocab_size": 52003
}
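The config describes a GPT-2 small-sized architecture: 12 layers, 12 attention heads, 768-dimensional embeddings, a 1024-token context window, and a 52003-entry vocabulary. These hyperparameters can be inspected without downloading the checkpoint (a sketch, assuming the hub config is reachable; not part of the original commit):

```python
# Illustrative sketch: read the architecture hyperparameters from the hub config only.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("macedonizer/mk-gpt2")
print(config.model_type, config.n_layer, config.n_head, config.n_embd, config.vocab_size)
# Per config.json: gpt2, 12, 12, 768, 52003
```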
merges.txt
ADDED
The diff for this file is too large to render.
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3a81e38ee1b4a5a90f3e229bff5c81483e78269919e8b4f1e1d96847e370667
size 515767529
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<|beginoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<unk>", "pad_token": "<PAD>", "mask_token": "<mask>"}
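Note that this file declares `<|beginoftext|>` as `bos_token` and `<PAD>` as `pad_token`, while tokenizer_config.json below maps `bos_token`, `eos_token`, and `unk_token` all to `<|endoftext|>`. A quick way to see which values the instantiated tokenizer actually reports (illustrative sketch, assuming the hub files load as published):

```python
# Illustrative check: print the special tokens the loaded tokenizer ends up with.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("macedonizer/mk-gpt2")
print(tokenizer.special_tokens_map)
```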
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": null, "name_or_path": "../../models/mk-gpt2", "tokenizer_file": "../../models/mk-gpt2\\tokenizer.json", "tokenizer_class": "GPT2Tokenizer"}
vocab.json
ADDED
The diff for this file is too large to render.