import torch
import gradio as gr
from transformers import MBartForConditionalGeneration, MBart50Tokenizer

# Load the fine-tuned MBart checkpoint and its tokenizer once at startup.
model = MBartForConditionalGeneration.from_pretrained("ywc1/mbart-finetuned-ja-en-para")
tokenizer = MBart50Tokenizer.from_pretrained("ywc1/mbart-finetuned-ja-en-para")
# NOTE(review): MBart-50 normally requires tokenizer.src_lang = "ja_XX" and
# forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"] in generate();
# presumably the fine-tuned checkpoint's saved config already handles this — confirm.


def translate(text: str) -> str:
    """Translate a block of Japanese text into English.

    Args:
        text: Japanese source text (may span several sentences).

    Returns:
        The English translation, or a prompt string when the input is
        empty/whitespace-only.
    """
    if not text.strip():
        return "Please enter a block of text."
    # truncation=True guards against inputs longer than the model's maximum
    # sequence length, which would otherwise error at generation time.
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            num_beams=2,
            # Allow paragraph-length outputs; the generation default can be
            # far shorter and would silently truncate the translation.
            max_length=1024,
            early_stopping=True,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)


# Gradio interface
iface = gr.Interface(
    fn=translate,
    inputs=gr.Textbox(lines=7, placeholder="Enter Japanese text"),
    outputs="text",
    title="Japanese → English Academic Translator",
    description=(
        "This is a fine-tuned MBart Model that serves to translate paragraphs "
        "of Japanese academic text into English. \n You may find the "
        "sentence-level translation tool ywc1/ja-en-trans also helpful."
    ),
    flagging_mode="manual",
    flagging_options=["Inaccurate", "Fluency", "Interesting", "Other"],
)

iface.launch()