Matsuo Lab LLM Course Final Assignment: Sample Code DPOtemplate_20241207.ipynb
Installation
!pip install -U ipywidgets
!pip install transformers==4.46.3
!pip install -U bitsandbytes
!pip install -U accelerate
!pip install -U datasets
!pip install -U peft==0.13.2
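A quick sanity check that the pinned versions were actually installed (a minimal sketch; the expected values are the versions pinned above):

# Print library versions to confirm the pinned installs took effect.
import transformers, peft

print(transformers.__version__)  # expected: 4.46.3
print(peft.__version__)          # expected: 0.13.2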
Inference
# Load the required libraries.
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
)
from peft import PeftModel
import torch
from tqdm import tqdm
import json
# Base model and the trained LoRA adapters (specify their Hugging Face IDs).
model_id = "llm-jp/llm-jp-3-13b"
adapter_id = "rutiru/llm-jp-3-13b-it_lora"
adapter_dpo_id = "rutiru/llm-jp-3-13b-dpo"
# Set your Hugging Face token.
HF_TOKEN = ""
# QLoRA config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
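Note that bnb_4bit_compute_dtype=torch.bfloat16 assumes a GPU with bfloat16 support (Ampere or newer). A sketch for falling back to float16 on older GPUs:

# Fall back to float16 if the GPU lacks bfloat16 support (optional sketch).
compute_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=compute_dtype,
)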
# Load model
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    token=HF_TOKEN,
)
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, token=HF_TOKEN)
# Apply the LoRA adapter to the base model.
model = PeftModel.from_pretrained(model, adapter_id, token=HF_TOKEN)

# Apply the DPO adapter on top of the LoRA model.
model = PeftModel.from_pretrained(model, adapter_dpo_id, token=HF_TOKEN)
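Since the model is used only for generation, it can be switched to evaluation mode (a minimal, optional step):

# Inference only: disable dropout and other training-time behavior.
model.eval()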
# Load the dataset.
# In the omnicampus environment, drag and drop the task jsonl file into the left pane before running.
datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        # A record may span multiple lines; a closing brace marks its end.
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
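If every record in the jsonl file fits on a single line (the usual case for jsonl), the accumulation loop above can be replaced by a simpler sketch:

# Simpler variant, assuming exactly one JSON object per line.
datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    for line in f:
        line = line.strip()
        if line:
            datasets.append(json.loads(line))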
# Run inference with the llm-jp model.
results = []
for data in tqdm(datasets):
    input = data["input"]

    # Prompt format used during training (指示 = instruction, 回答 = answer).
    prompt = f"""### 指示
{input}
### 回答
"""

    tokenized_input = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
    attention_mask = torch.ones_like(tokenized_input)

    with torch.no_grad():
        outputs = model.generate(
            tokenized_input,
            attention_mask=attention_mask,
            max_new_tokens=100,
            do_sample=False,
            repetition_penalty=1.2,
            pad_token_id=tokenizer.eos_token_id,
        )[0]

    # Decode only the newly generated tokens, dropping the prompt.
    output = tokenizer.decode(outputs[tokenized_input.size(1):], skip_special_tokens=True)

    results.append({"task_id": data["task_id"], "input": input, "output": output})
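Finally, the collected results are typically written out as jsonl for submission; a minimal sketch (the output filename is an assumption):

# Write one JSON object per line; the filename here is only an example.
with open("./llm-jp-3-13b-dpo_output.jsonl", "w", encoding="utf-8") as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)
        f.write("\n")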