import torch
import gradio as gr
from transformers import MarianMTModel, MarianTokenizer

# Load the fine-tuned Japanese-to-English MarianMT model and its tokenizer.
model_name = "ywc1/marian-finetuned-ja-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)


def translate(text):
    """Translate Japanese text to English with the fine-tuned MarianMT model."""
    if not text.strip():
        return "Please enter some text."
    inputs = tokenizer(text, return_tensors="pt", padding=True)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            num_beams=2,
            # max_length=1024,
            early_stopping=True,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)


# Gradio interface
iface = gr.Interface(
    fn=translate,
    inputs=gr.Textbox(lines=7, placeholder="Enter Japanese text (no length limit)"),
    outputs="text",
    title="Japanese → English Academic Translator",
    description=(
        "Translation powered by a fine-tuned MarianMT model. "
        "This tool works best for sentence-by-sentence translation."
    ),
    flagging_mode="manual",
    flagging_options=["Inaccurate", "Fluency", "Interesting", "Other"],
)

iface.launch()
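# A minimal sketch of calling translate() directly for a quick sanity check,
# e.g. in a REPL before launching the UI. The sample sentence and its English
# rendering below are illustrative; actual output depends on the fine-tuned
# weights:
#
#     >>> translate("これは日本語から英語への翻訳のテストです。")
#     'This is a test of translation from Japanese to English.'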