import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, PeftConfig

# Model configuration - Gemma-3n-E4B fine-tuned
MODEL_ID = "Laserhun/gemma-3n-E4B-luau-finetuned"
BASE_MODEL_ID = "google/gemma-3n-E4B"

print("Loading Gemma-3n-E4B fine-tuned model...")
try:
    # Probe the repo for a PEFT adapter config; raises if none is present
    peft_config = PeftConfig.from_pretrained(MODEL_ID)

    # Load base model
    base_model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL_ID,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True,
        ignore_mismatched_sizes=True
    )

    # Load PEFT adapters on top of the base model
    model = PeftModel.from_pretrained(base_model, MODEL_ID)
    print("Loaded Gemma-3n-E4B as PEFT model")
except Exception:
    # Fall back to loading the repo as a full (merged) model
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True
    )
    print("Loaded as regular model")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
def generate_luau_code(prompt, max_length=512, temperature=0.7, top_p=0.95):
    """Generate Luau code using the Gemma-3n-E4B model."""
    # Format with Gemma's chat-turn template
    formatted_prompt = f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model\n"

    # Tokenize, then move tensors to the model's device
    inputs = tokenizer(formatted_prompt, return_tensors="pt", truncation=True, max_length=512)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    # Generate
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_length,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id
        )
    # Decode only the newly generated tokens. Decoding the full sequence with
    # skip_special_tokens=True strips the <start_of_turn> markers, so splitting
    # on them (or slicing by the prompt's character length) would misalign.
    prompt_length = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)
    return response.strip()
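
# Illustrative usage (the prompt below is an assumption, not from the Space):
# generate_luau_code("Write a function that tweens a part's transparency", max_length=256)
# returns only the model's reply, with the chat-template markers already stripped.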
# Create Gradio interface
iface = gr.Interface(
    fn=generate_luau_code,
    inputs=[
        gr.Textbox(
            lines=4,
            placeholder="Describe the Luau code you want to generate...",
            label="Enter your Luau code request"
        ),
        gr.Slider(minimum=100, maximum=1000, value=512, step=50, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top P")
    ],
    # gr.Code does not list "lua" among its supported languages, and an
    # unsupported value raises at startup, so fall back to plain text
    outputs=gr.Code(language=None, label="Generated Luau Code"),
    title="🎮 Gemma-3n-E4B Luau Code Generator",
    description="Generate Roblox Luau code with Gemma-3n-E4B (8B parameters, ~4B effective at runtime) fine-tuned on a Luau corpus.",
    examples=[
        ["Create a smooth part movement function with easing", 512, 0.7, 0.95],
        ["Write a door script with click interaction and smooth animation", 512, 0.7, 0.95],
        ["Generate a complete inventory system with add, remove, and display functions", 700, 0.7, 0.95],
        ["Create a spawning system for objects at random positions", 400, 0.7, 0.95],
        ["Write a leaderboard system that saves player scores", 600, 0.7, 0.95]
    ],
    theme=gr.themes.Soft()
)
if __name__ == "__main__":
    iface.launch()
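
# Dependencies implied by the imports above: gradio, torch, transformers, peft.
# The Space's requirements.txt is not part of this file, so exact pinned
# versions are unknown.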