Spaces:
Paused
Paused
Commit
·
4642613
1
Parent(s):
4ba4563
Add GPU capability
Browse files
app.py
CHANGED
|
@@ -9,7 +9,8 @@ model = t.AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-7b-hf",load
|
|
| 9 |
# Explicitly set the padding token id to 0 — presumably because the Llama-2
# tokenizer ships without a dedicated pad token (TODO confirm; 0 is <unk> for Llama).
tokenizer.pad_token_id = 0
|
| 10 |
|
| 11 |
# LoRA adapter configuration for causal-LM fine-tuning: rank-8 updates on the
# attention query/value projections, alpha=16 scaling, 0.5% dropout, no bias terms.
config = peft.LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"], lora_dropout=0.005, bias="none", task_type="CAUSAL_LM")
|
| 12 |
-
|
|
|
|
| 13 |
|
| 14 |
# Load the fine-tuned LoRA adapter weights into the PEFT-wrapped model.
# Fix: dropped the extraneous f-string prefix — the literal has no placeholders
# (ruff F541); the string bytes are unchanged.
# NOTE(review): torch.load without map_location will fail on a CPU-only host if
# the checkpoint was saved from GPU — consider map_location="cpu"; confirm.
peft.set_peft_model_state_dict(model, torch.load(".weights/adapter_model.bin"))
|
| 15 |
|
|
|
|
| 9 |
# Explicitly set the padding token id to 0 — presumably because the Llama-2
# tokenizer ships without a dedicated pad token (TODO confirm; 0 is <unk> for Llama).
tokenizer.pad_token_id = 0
|
| 10 |
|
| 11 |
# LoRA adapter configuration for causal-LM fine-tuning: rank-8 updates on the
# attention query/value projections, alpha=16 scaling, 0.5% dropout, no bias terms.
config = peft.LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.005,
    bias="none",
    task_type="CAUSAL_LM",
)
|
| 12 |
+
# Pick the compute device: prefer the GPU when CUDA is available, fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
|
| 13 |
+
# Wrap the base model with the LoRA adapter described by `config`, then move
# the wrapped model onto the selected device.
peft_model = peft.get_peft_model(model, config)
model = peft_model.to(device)
|
| 14 |
|
| 15 |
# Load the fine-tuned LoRA adapter weights into the PEFT-wrapped model.
# Fix: dropped the extraneous f-string prefix — the literal has no placeholders
# (ruff F541); the string bytes are unchanged.
# NOTE(review): torch.load without map_location will fail on a CPU-only host if
# the checkpoint was saved from GPU — consider map_location="cpu"; confirm.
peft.set_peft_model_state_dict(model, torch.load(".weights/adapter_model.bin"))
|
| 16 |
|