from unsloth import FastLanguageModel  # unsloth recommends importing it before transformers/peft
import torch
import gradio as gr
from peft import PeftModel

# Prefer CUDA when available (unsloth primarily targets CUDA GPUs, so the
# CPU fallback here is best-effort)
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the base model
base_model_name = "unsloth/Llama-3.2-3B-Instruct"
base_model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=base_model_name,
    max_seq_length=2048,
    dtype=None,  # Auto-detect data type (bfloat16/float16 where supported)
    load_in_4bit=False,  # 4-bit (bitsandbytes) quantization needs a CUDA GPU
)
base_model.to(device)

# Apply LoRA adapters

lora_model_name = "oskaralf/lora_model"  # Replace with your LoRA model path
model = PeftModel.from_pretrained(base_model, lora_model_name)
model.to(device)
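
# Note: recent unsloth versions can often load a saved LoRA repo in one step,
#   model, tokenizer = FastLanguageModel.from_pretrained(model_name=lora_model_name, ...)
# resolving the base model from the adapter config; the explicit PeftModel route
# above is kept since that behavior is version-dependent.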

# Prepare for inference (switches unsloth to its faster generation path)
FastLanguageModel.for_inference(model)

# Gradio interface

def chatbot(input_text):
    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    # Pass the attention mask along with the input ids to avoid generation warnings
    outputs = model.generate(**inputs, max_new_tokens=64)
    # Decode only the newly generated tokens; outputs[0] also echoes the prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
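
# The base model is an Instruct variant, so wrapping the user message in the
# tokenizer's chat template usually gives better answers than raw text.
# A minimal sketch, assuming the tokenizer ships the Llama 3.2 chat template
# (chatbot_chat is an illustrative helper, not part of the original script):
def chatbot_chat(input_text):
    messages = [{"role": "user", "content": input_text}]
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,  # append the assistant header so the model replies
        return_tensors="pt",
    ).to(device)
    outputs = model.generate(input_ids=input_ids, max_new_tokens=64)
    return tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)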

iface = gr.Interface(fn=chatbot, inputs="text", outputs="text", title="Chatbot")
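# launch() serves the UI locally (default http://127.0.0.1:7860); share=True
# would create a temporary public link when running on a remote machine.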
iface.launch()