Model Card for svjack/Mistral7B_v2_inst_sharegpt_roleplay_chat_lora_small
A LoRA adapter (PEFT) for mistralai/Mistral-7B-Instruct-v0.2, aimed at Chinese role-play chat.
Install
pip install peft transformers bitsandbytes
Prompt Definition
import json
from dataclasses import dataclass
from enum import Enum
from typing import List, Dict, Tuple, Literal

class Roles(Enum):
    system = "system"
    user = "user"
    assistant = "assistant"
    tool = "tool"

class MessagesFormatterType(Enum):
    """
    Enum representing different types of predefined messages formatters.
    """
    MISTRAL = 1

@dataclass
class PromptMarkers:
    start: str
    end: str

class MessagesFormatter:
    def __init__(
        self,
        pre_prompt: str,
        prompt_markers: Dict[Roles, PromptMarkers],
        include_sys_prompt_in_first_user_message: bool,
        default_stop_sequences: List[str],
        use_user_role_for_function_call_result: bool = True,
        strip_prompt: bool = True,
        bos_token: str = "<s>",
        eos_token: str = "</s>",
    ):
        self.pre_prompt = pre_prompt
        self.prompt_markers = prompt_markers
        self.include_sys_prompt_in_first_user_message = include_sys_prompt_in_first_user_message
        self.default_stop_sequences = default_stop_sequences
        self.use_user_role_for_function_call_result = use_user_role_for_function_call_result
        self.strip_prompt = strip_prompt
        self.bos_token = bos_token
        self.eos_token = eos_token
        self.added_system_prompt = False

    def get_bos_token(self) -> str:
        return self.bos_token

    def format_conversation(
        self,
        messages: List[Dict[str, str]],
        response_role: Literal[Roles.user, Roles.assistant] | None = None,
    ) -> Tuple[str, Roles]:
        formatted_messages = self.pre_prompt
        last_role = Roles.assistant
        self.added_system_prompt = False
        for message in messages:
            role = Roles(message["role"])
            content = self._format_message_content(message["content"], role)
            if role == Roles.system:
                formatted_messages += self._format_system_message(content)
                last_role = Roles.system
            elif role == Roles.user:
                formatted_messages += self._format_user_message(content)
                last_role = Roles.user
            elif role == Roles.assistant:
                formatted_messages += self._format_assistant_message(content)
                last_role = Roles.assistant
            elif role == Roles.tool:
                formatted_messages += self._format_tool_message(content)
                last_role = Roles.tool
        return self._format_response(formatted_messages, last_role, response_role)

    def _format_message_content(self, content: str, role: Roles) -> str:
        if self.strip_prompt:
            return content.strip()
        return content

    def _format_system_message(self, content: str) -> str:
        formatted_message = self.prompt_markers[Roles.system].start + content + self.prompt_markers[Roles.system].end
        self.added_system_prompt = True
        if self.include_sys_prompt_in_first_user_message:
            # Open the first user turn early so the system prompt lands inside it.
            formatted_message = self.prompt_markers[Roles.user].start + formatted_message
        return formatted_message

    def _format_user_message(self, content: str) -> str:
        if self.include_sys_prompt_in_first_user_message and self.added_system_prompt:
            # The user start marker was already emitted together with the system prompt.
            self.added_system_prompt = False
            return content + self.prompt_markers[Roles.user].end
        return self.prompt_markers[Roles.user].start + content + self.prompt_markers[Roles.user].end

    def _format_assistant_message(self, content: str) -> str:
        return self.prompt_markers[Roles.assistant].start + content + self.prompt_markers[Roles.assistant].end

    def _format_tool_message(self, content: str) -> str:
        if isinstance(content, list):
            content = "\n".join(json.dumps(m, indent=2) for m in content)
        if self.use_user_role_for_function_call_result:
            return self._format_user_message(content)
        return self.prompt_markers[Roles.tool].start + content + self.prompt_markers[Roles.tool].end

    def _format_response(
        self,
        formatted_messages: str,
        last_role: Roles,
        response_role: Literal[Roles.user, Roles.assistant] | None = None,
    ) -> Tuple[str, Roles]:
        # Default to answering as the assistant unless the assistant spoke last.
        if response_role is None:
            response_role = Roles.assistant if last_role != Roles.assistant else Roles.user
        prompt_start = (
            self.prompt_markers[response_role].start.strip()
            if self.strip_prompt
            else self.prompt_markers[response_role].start
        )
        return formatted_messages + prompt_start, response_role

# Markers for the Mistral/Mixtral [INST] ... [/INST] instruction format.
mixtral_prompt_markers = {
    Roles.system: PromptMarkers("", "\n\n"),
    Roles.user: PromptMarkers("[INST] ", " [/INST]"),
    Roles.assistant: PromptMarkers("", "</s>"),
    Roles.tool: PromptMarkers("", ""),
}

mixtral_formatter = MessagesFormatter(
    "",
    mixtral_prompt_markers,
    True,
    ["</s>"],
)
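As a quick check of the format, the snippet below (illustrative messages only) prints the string the formatter builds. Note that the formatter does not prepend the BOS token; the tokenizer adds it during encoding.
demo_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt, response_role = mixtral_formatter.format_conversation(demo_messages)
print(repr(prompt))   # '[INST] You are a helpful assistant.\n\nHello! [/INST]'
print(response_role)  # Roles.assistant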
Run with transformers
from transformers import TextStreamer, AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
mis_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2", load_in_4bit=True
)
mis_model = PeftModel.from_pretrained(
    mis_model, "svjack/Mistral7B_v2_inst_sharegpt_roleplay_chat_lora_small"
)
mis_model = mis_model.eval()
streamer = TextStreamer(tokenizer)

def mistral_hf_predict(
    messages,
    mis_model=mis_model,
    tokenizer=tokenizer,
    streamer=streamer,
    do_sample=True,
    top_p=0.95,
    top_k=40,
    max_new_tokens=512,
    max_input_length=3500,  # note: not applied below
    temperature=0.9,
    repetition_penalty=1.0,
    device="cuda",
):
    # Build the prompt with the formatter above rather than
    # tokenizer.apply_chat_template, so the system prompt is folded
    # into the first [INST] block.
    prompt, _ = mixtral_formatter.format_conversation(messages)
    model_inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
    generated_ids = mis_model.generate(
        model_inputs,
        max_new_tokens=max_new_tokens,
        do_sample=do_sample,
        streamer=streamer,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
    )
    # Keep only the text generated after the last [/INST] marker.
    out = tokenizer.batch_decode(generated_ids)[0].split("[/INST]")[-1].replace("</s>", "").strip()
    return out
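On recent transformers releases, passing load_in_4bit=True directly to from_pretrained is deprecated in favor of an explicit quantization config. A minimal sketch of the equivalent loading path (same checkpoint as above; assumes bitsandbytes and accelerate are installed):
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # compute dtype for the 4-bit layers
)
mis_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",
    quantization_config=bnb_config,
    device_map="auto",
)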
out = mistral_hf_predict(
    [
        {
            "role": "system",
            "content": "Jack is a mathematical statistics student and Sho is an economic statistics student. Between classes, the two of them are discussing the relationship between their disciplines. You play Sho."
        },
        {
            "role": "user",
            "content": "How do you view my discipline?"
        },
        {
            "role": "assistant",
            "content": "I think your discipline is very useful, because it helps us understand data and models better."
        },
        {
            "role": "user",
            "content": "Right. Can you give some examples of mathematical statistics models applied in economic statistics?"
        },
        {
            "role": "assistant",
            "content": "Of course. For example, regression analysis and time series analysis are commonly used mathematical statistics models."
        },
        {
            "role": "user",
            "content": "For each of the models above, could you give a corresponding concrete research question in a scenario involving specific variables? For example, which real variables would be used?"
        }
    ],
    repetition_penalty=1.1,
    temperature=0.01,
    max_new_tokens=1024
)
print(out)
Output
For example, regression analysis can be used to analyze the effect of one variable on another, such as studying the impact of population growth on economic growth, while time series analysis can be used to forecast future values, such as predicting stock prices.
out = mistral_hf_predict(
    [
        {
            "role": "system",
            "content": "Jack is a rather old-fashioned pedant, while Sho is a sunny, adorable type who loves new things. Sho is drawing Jack out through conversation so the two of them can enjoy a pleasant time together. You play Sho."
        },
        {
            "role": "user",
            "content": "Are there any fun things lately that I could relax with?"
        },
        {
            "role": "assistant",
            "content": "Oh! I recently discovered something called VR games. It is really fun!"
        },
        {
            "role": "user",
            "content": "Yes, I heard the shooting game Half-Life 2 just released a DLC that can be played in VR. In the footage the protagonist can touch objects directly with their hands, which feels very realistic."
        },
        {
            "role": "assistant",
            "content": "Wow, that game really does look fun! We could try it together!"
        },
        {
            "role": "user",
            "content": "I will go to the supermarket to buy some things. What do you want? We can snack while we play."
        }
    ],
    repetition_penalty=1.1,
    temperature=0.01,
    max_new_tokens=1024
)
print(out)
Output
I would like a box of potato chips and a bottle of cola, and some chocolate as well.
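To carry a dialogue forward, append the generated reply and the next user turn to the history and call the function again. A minimal sketch with hypothetical turns:
# Multi-turn usage: keep appending to the same message list.
messages = [
    {"role": "system", "content": "You play Sho."},
    {"role": "user", "content": "What should we play tonight?"},
]
reply = mistral_hf_predict(messages, repetition_penalty=1.1, temperature=0.01)
messages.append({"role": "assistant", "content": reply})
messages.append({"role": "user", "content": "Great, let's start after dinner."})
reply = mistral_hf_predict(messages, repetition_penalty=1.1, temperature=0.01)
print(reply)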
Model Details
Model Description
- Developed by: svjack
- Funded by [optional]: [More Information Needed]
- Shared by [optional]: [More Information Needed]
- Model type: LoRA adapter (PEFT) for causal language modeling
- Language(s) (NLP): Chinese
- License: [More Information Needed]
- Finetuned from model [optional]: mistralai/Mistral-7B-Instruct-v0.2
Model Sources [optional]
- Repository: https://huggingface.co/svjack/Mistral7B_v2_inst_sharegpt_roleplay_chat_lora_small
- Paper [optional]: [More Information Needed]
- Demo [optional]: [More Information Needed]
Uses
Direct Use
Role-play chat in Chinese, with characters and scenarios set through the system prompt as shown in the examples above.
Downstream Use [optional]
[More Information Needed]
Out-of-Scope Use
[More Information Needed]
Bias, Risks, and Limitations
[More Information Needed]
Recommendations
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
How to Get Started with the Model
Use the code in the Install, Prompt Definition, and Run with transformers sections above.
Training Details
Training Data
[More Information Needed]
Training Procedure
Preprocessing [optional]
[More Information Needed]
Training Hyperparameters
- Training regime: [More Information Needed]
Speeds, Sizes, Times [optional]
[More Information Needed]
Evaluation
Testing Data, Factors & Metrics
Testing Data
[More Information Needed]
Factors
[More Information Needed]
Metrics
[More Information Needed]
Results
[More Information Needed]
Summary
Model Examination [optional]
[More Information Needed]
Environmental Impact
Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).
- Hardware Type: [More Information Needed]
- Hours used: [More Information Needed]
- Cloud Provider: [More Information Needed]
- Compute Region: [More Information Needed]
- Carbon Emitted: [More Information Needed]
Technical Specifications [optional]
Model Architecture and Objective
[More Information Needed]
Compute Infrastructure
[More Information Needed]
Hardware
[More Information Needed]
Software
[More Information Needed]
Citation [optional]
BibTeX:
[More Information Needed]
APA:
[More Information Needed]
Glossary [optional]
[More Information Needed]
More Information [optional]
[More Information Needed]
Model Card Authors [optional]
[More Information Needed]
Model Card Contact
[More Information Needed]
Framework versions
- PEFT 0.11.0