# app.py
import os

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Model configuration
MODEL_NAME = "kaiiddo/A3ON"
TOKEN = os.environ.get("HF_TOKEN")  # access token supplied via HF Secrets (env var name assumed)
# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME,
    token=TOKEN,
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=TOKEN,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
)
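
# Optional (an assumption, not in the original Space): move the model to GPU when one
# is available; float16 weights generate far faster on CUDA than on CPU.
if torch.cuda.is_available():
    model = model.to("cuda")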


def generate_text(prompt, max_new_tokens=200, temperature=0.9, top_p=0.9):
    """Generate text with the A3ON model using nucleus sampling."""
    # Tokenize on the model's device; passing the attention mask avoids a generate() warning.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=int(max_new_tokens),  # Gradio sliders return floats
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
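
# Quick smoke test (hypothetical, not part of the original Space): uncomment to confirm
# the model loads and samples before wiring up the Gradio UI.
# print(generate_text("Once upon a time", max_new_tokens=20))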
# Gradio interface
with gr.Blocks(title="A3ON Text Generator") as demo:
gr.Markdown("# A3ON Text Generator")
gr.Markdown("Generate text using the A3ON model. Adjust parameters for creative outputs.")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Input Prompt",
                placeholder="Enter your prompt here...",
                lines=5,
            )
            max_tokens = gr.Slider(
                50, 500, value=200, label="Max New Tokens"
            )
            temp = gr.Slider(
                0.1, 2.0, value=0.9, label="Temperature"
            )
            top_p = gr.Slider(
                0.1, 1.0, value=0.9, label="Top-P (Nucleus Sampling)"
            )
            generate_btn = gr.Button("Generate")
        with gr.Column():
            output = gr.Textbox(
                label="Generated Text",
                lines=10,
                interactive=False,
            )

    generate_btn.click(
        generate_text,
        inputs=[prompt, max_tokens, temp, top_p],
        outputs=output,
    )

    gr.Examples(
        examples=[
            ["Once upon a time in a galaxy far far away"],
            ["The secret to happiness is"],
            ["In the year 2050, artificial intelligence"],
        ],
        inputs=[prompt],
    )

if __name__ == "__main__":
    demo.launch()
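
# Note (assumption, not in the original Space): swapping in `demo.queue().launch()`
# queues concurrent generation requests instead of running them all at once,
# which helps on shared CPU hardware.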