from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("unsloth/Llama-3.2-1B-Instruct")
model = AutoModelForCausalLM.from_pretrained("unsloth/Llama-3.2-1B-Instruct").to("cpu")  # Use CPU: the free HF Spaces tier only provides CPU

@app.get("/")
def home():
    return {"message": "FastAPI running with Llama-3.2-1B-Instruct"}

class GenerateRequest(BaseModel):
    prompt: str

@app.post("/generate")
def generate_text(request: GenerateRequest):  # take the prompt from a JSON body rather than a query string
    inputs = tokenizer(request.prompt, return_tensors="pt").to("cpu")  # keep tensors on CPU
    with torch.no_grad():  # inference only; skip gradient tracking to save memory
        output = model.generate(**inputs, max_new_tokens=200, pad_token_id=tokenizer.eos_token_id)  # max_new_tokens caps generated tokens only; EOS doubles as pad token since Llama has none
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return {"generated_text": generated_text}