Llama 3.2 Regression Head for Price Prediction

Model Description

This model improves upon the approach taught in Ed Donner's LLM Engineering Udemy course.

Test MAE: $38.82

License

MIT License (consistent with slm-pricer)

Model Architecture

Base Model: ed-donner/price-2025-11-28_18.47.07 (Revision: b19c8bfea3b6ff62237fbb0a8da9779fc12cefbd)
Regression Head: 2 hidden layers (1024, 128 units) with Dropout(0.367)

Usage

import torch
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
from huggingface_hub import hf_hub_download

# 1. Configuration
# Hub identifiers for the base model and the fine-tuned PEFT adapter.
llama_base_model_name = "meta-llama/Llama-3.2-3B"
llama_fine_tuned_model_name = "ed-donner/price-2025-11-28_18.47.07"
# Pin the adapter to an exact commit so results are reproducible.
llama_fine_tuned_model_revision = "b19c8bfea3b6ff62237fbb0a8da9779fc12cefbd"

# 2. Load Base Model & Adapter
tokenizer = AutoTokenizer.from_pretrained(llama_base_model_name)
# fp16 weights, sharded across available devices by accelerate ("auto").
# NOTE(review): meta-llama repos are gated on the Hub — access approval required; confirm.
llama_base_model = AutoModelForCausalLM.from_pretrained(llama_base_model_name, device_map="auto", dtype=torch.float16)
# Apply the LoRA/PEFT adapter at the pinned revision, then merge it into the
# base weights so the result is a plain transformers model for inference.
llama_fine_tuned_model = PeftModel.from_pretrained(llama_base_model, llama_fine_tuned_model_name, revision=llama_fine_tuned_model_revision).merge_and_unload()
llama_fine_tuned_model.eval()  # inference mode: disables dropout etc.

# 3. Download Regression Head
# Fetch the regression-head checkpoint from the Hub (cached locally after the first call).
model_path = hf_hub_download(repo_id="antonawinkler/llama-pricer-regression-head", filename="model.pth")
# SECURITY NOTE(review): torch.load unpickles arbitrary objects — only load checkpoints
# from trusted repos; consider weights_only=True if this checkpoint supports it.
checkpoint = torch.load(model_path, map_location="cpu")
model_config = checkpoint["model_config"]          # head hyperparameters: input_dim, hidden dims, dropout (see step 4)
embedding_config = checkpoint["embedding_config"]  # embedding settings, e.g. n_layers used in step 5

# 4. Define Regression Head
class PriceRegressor(torch.nn.Module):
    def __init__(self, input_dim, hidden_dim1, hidden_dim2, dropout):
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(input_dim, hidden_dim1),
            torch.nn.BatchNorm1d(hidden_dim1),
            torch.nn.ReLU(),
            torch.nn.Dropout(dropout),
            torch.nn.Linear(hidden_dim1, hidden_dim2),
            torch.nn.BatchNorm1d(hidden_dim2),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim2, 1),
        )

    def forward(self, x):
        return self.net(x).squeeze(-1)

# Rebuild the head from the saved hyperparameters, then restore its weights.
regression_head = PriceRegressor(
    model_config["input_dim"],
    model_config["hidden_dim1"],
    model_config["hidden_dim2"],
    model_config["dropout"],
)
regression_head.load_state_dict(checkpoint["model_state_dict"])
# Module.to / Module.eval are in-place: co-locate the head with the LLM, inference mode.
regression_head.to(llama_fine_tuned_model.device)
regression_head.eval()

# 5. Prediction Helper
def get_single_embedding(model, tokenizer, text, n_layers):
    """Embed ``text`` as the last-token hidden states of the final ``n_layers`` layers.

    Returns a float32 tensor of shape (1, n_layers * hidden_size), built by
    concatenating the last-token vector of each selected layer.
    """
    encoded = tokenizer(text, return_tensors="pt").to(model.device)

    with torch.no_grad():
        outputs = model(**encoded, output_hidden_states=True)

    # Last-token vector from each of the last n_layers hidden states.
    last_token_states = [hidden[:, -1, :] for hidden in outputs.hidden_states[-n_layers:]]
    return torch.cat(last_token_states, dim=-1).float()

# 6. Predict
description = "Apple AirPods Pro (2nd Generation) with MagSafe Charging Case"
# Build the input embedding from the fine-tuned LLM's hidden states (step 5),
# using the same layer count the head was trained with.
embedding = get_single_embedding(
    llama_fine_tuned_model, 
    tokenizer, 
    description, 
    n_layers=embedding_config["n_layers"]
)

with torch.no_grad():
    # Variable name and the exp() below indicate the head predicts log(price);
    # exponentiate to recover dollars.
    log_price = regression_head(embedding)
    price = np.exp(log_price.item())

print(f"Predicted Price: ${price:.2f}")
Downloads last month
4
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support

Model tree for antonawinkler/llama-pricer-regression-head

Finetuned
(1)
this model

Dataset used to train antonawinkler/llama-pricer-regression-head