| from char_tokenizer import CharTokenizer | |
| import gradio as gr | |
| from transformers import GPT2LMHeadModel | |
# Load the fine-tuned GPT-2 weights and the matching character-level
# tokenizer from the local "saved_model" directory (produced by training).
model = GPT2LMHeadModel.from_pretrained("saved_model")
tokenizer = CharTokenizer.load("saved_model/tokenizer.json")
def generation(prompt, length):
    """Generate a verse continuation of *prompt*, conditioned on *length*.

    The chosen line length is prepended to the prompt as a control token
    before tokenization — presumably matching the format the model was
    trained on (TODO confirm against the training script).
    """
    encoded = tokenizer(prompt=str(length) + prompt)
    generated_ids = model.generate(
        encoded['input_ids'],
        do_sample=True,   # nucleus sampling rather than greedy decoding
        top_p=0.95,
        max_length=100,
    )
    decoded = tokenizer.decode(generated_ids)
    # Drop the leading length digit plus the echoed prompt characters,
    # returning only the newly generated text.
    return decoded[len(prompt) + 1:]
# Build and launch the web demo.
# NOTE: the legacy `gr.inputs.*` namespace was deprecated in Gradio 3.x and
# removed in 4.x; the component classes now live directly on the `gr` module.
input_prompt = gr.Textbox(label="Prompt")
input_length = gr.Dropdown(choices=[5, 6, 7], label="Verse length")
gr.Interface(
    fn=generation,
    inputs=[input_prompt, input_length],
    outputs="text",
).launch()