Model Card for svjack/Mistral7B_v2_inst_sharegpt_roleplay_chat_lora_small

A small LoRA adapter for mistralai/Mistral-7B-Instruct-v0.2, fine-tuned on ShareGPT-format data for Chinese role-play chat.

Install

pip install peft transformers bitsandbytes
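
The 4-bit loading path used below also depends on accelerate in recent transformers releases, so it is safest to install it as well:

pip install accelerate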

Prompt Definition

import json
from dataclasses import dataclass
from enum import Enum
from typing import List, Dict, Tuple, Literal

class Roles(Enum):
    system = "system"
    user = "user"
    assistant = "assistant"
    tool = "tool"

class MessagesFormatterType(Enum):
    """
    Enum representing different types of predefined messages formatters.
    """

    MISTRAL = 1

@dataclass
class PromptMarkers:
    start: str
    end: str

class MessagesFormatter:
    def __init__(
            self,
            pre_prompt: str,
            prompt_markers: Dict[Roles, PromptMarkers],
            include_sys_prompt_in_first_user_message: bool,
            default_stop_sequences: List[str],
            use_user_role_for_function_call_result: bool = True,
            strip_prompt: bool = True,
            bos_token: str = "<s>",
            eos_token: str = "</s>"
    ):
        self.pre_prompt = pre_prompt
        self.prompt_markers = prompt_markers
        self.include_sys_prompt_in_first_user_message = include_sys_prompt_in_first_user_message
        self.default_stop_sequences = default_stop_sequences
        self.use_user_role_for_function_call_result = use_user_role_for_function_call_result
        self.strip_prompt = strip_prompt
        self.bos_token = bos_token
        self.eos_token = eos_token
        self.added_system_prompt = False

    def get_bos_token(self) -> str:
        return self.bos_token

    def format_conversation(
            self,
            messages: List[Dict[str, str]],
            response_role: Literal[Roles.user, Roles.assistant] | None = None,
    ) -> Tuple[str, Roles]:
        formatted_messages = self.pre_prompt
        last_role = Roles.assistant
        self.added_system_prompt = False
        for message in messages:
            role = Roles(message["role"])
            content = self._format_message_content(message["content"], role)

            if role == Roles.system:
                formatted_messages += self._format_system_message(content)
                last_role = Roles.system
            elif role == Roles.user:
                formatted_messages += self._format_user_message(content)
                last_role = Roles.user
            elif role == Roles.assistant:
                formatted_messages += self._format_assistant_message(content)
                last_role = Roles.assistant
            elif role == Roles.tool:
                formatted_messages += self._format_tool_message(content)
                last_role = Roles.tool

        return self._format_response(formatted_messages, last_role, response_role)

    def _format_message_content(self, content: str, role: Roles) -> str:
        if self.strip_prompt:
            return content.strip()
        return content

    def _format_system_message(self, content: str) -> str:
        formatted_message = self.prompt_markers[Roles.system].start + content + self.prompt_markers[Roles.system].end
        self.added_system_prompt = True
        if self.include_sys_prompt_in_first_user_message:
            formatted_message = self.prompt_markers[Roles.user].start + formatted_message
        return formatted_message

    def _format_user_message(self, content: str) -> str:
        if self.include_sys_prompt_in_first_user_message and self.added_system_prompt:
            self.added_system_prompt = False
            return content + self.prompt_markers[Roles.user].end
        return self.prompt_markers[Roles.user].start + content + self.prompt_markers[Roles.user].end

    def _format_assistant_message(self, content: str) -> str:
        return self.prompt_markers[Roles.assistant].start + content + self.prompt_markers[Roles.assistant].end

    def _format_tool_message(self, content: str) -> str:
        if isinstance(content, list):
            content = "\n".join(json.dumps(m, indent=2) for m in content)
        if self.use_user_role_for_function_call_result:
            return self._format_user_message(content)
        else:
            return self.prompt_markers[Roles.tool].start + content + self.prompt_markers[Roles.tool].end

    def _format_response(
            self,
            formatted_messages: str,
            last_role: Roles,
            response_role: Literal[Roles.user, Roles.assistant] | None = None,
    ) -> Tuple[str, Roles]:
        if response_role is None:
            response_role = Roles.assistant if last_role != Roles.assistant else Roles.user

        prompt_start = self.prompt_markers[response_role].start
        if self.strip_prompt:
            prompt_start = prompt_start.strip()
        return formatted_messages + prompt_start, response_role

mixtral_prompt_markers = {
    Roles.system: PromptMarkers("", "\n\n"),
    Roles.user: PromptMarkers("[INST] ", " [/INST]"),
    Roles.assistant: PromptMarkers("", "</s>"),
    Roles.tool: PromptMarkers("", ""),
}

mixtral_formatter = MessagesFormatter(
    "",
    mixtral_prompt_markers,
    True,
    ["</s>"],
)
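
A quick sanity check of the formatter (a minimal sketch; the expected strings follow directly from the marker definitions above):

prompt, role = mixtral_formatter.format_conversation([
    {"role": "system", "content": "You play Sho."},
    {"role": "user", "content": "Hello!"},
])
# The system prompt is folded into the first [INST] block:
# prompt == "[INST] You play Sho.\n\nHello! [/INST]"
# role   == Roles.assistant  (the model is expected to speak next)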

Run with transformers

from transformers import TextStreamer, AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
mis_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2", load_in_4bit=True)
mis_model = PeftModel.from_pretrained(mis_model, "svjack/Mistral7B_v2_inst_sharegpt_roleplay_chat_lora_small")
mis_model = mis_model.eval()
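
On newer transformers releases the bare load_in_4bit kwarg is deprecated in favour of an explicit quantization config; an equivalent sketch:

from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(load_in_4bit=True)
mis_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",
    quantization_config=bnb_config,
    device_map="auto",
)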

streamer = TextStreamer(tokenizer)

def mistral_hf_predict(messages, mis_model=mis_model,
                       tokenizer=tokenizer, streamer=streamer,
                       do_sample=True,
                       top_p=0.95,
                       top_k=40,
                       max_new_tokens=512,
                       max_input_length=3500,
                       temperature=0.9,
                       repetition_penalty=1.0,
                       device="cuda"):
    # Build the prompt with the custom formatter defined above.
    # (Alternative: the tokenizer's built-in chat template.)
    # encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt")
    # model_inputs = encodeds.to(device)
    prompt, _ = mixtral_formatter.format_conversation(messages)
    model_inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
    # Keep only the most recent max_input_length tokens so long chats
    # stay within the model's context window.
    model_inputs = model_inputs[:, -max_input_length:]

    generated_ids = mis_model.generate(
        model_inputs,
        max_new_tokens=max_new_tokens,
        do_sample=do_sample,
        streamer=streamer,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
    )
    # Drop the echoed prompt: keep only the text after the final [/INST] tag.
    out = tokenizer.batch_decode(generated_ids)[0].split("[/INST]")[-1].replace("</s>", "").strip()
    return out
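
Multi-turn use is plain list management: append the returned reply as an assistant message together with the next user turn, then call the function again. A sketch with hypothetical turns:

messages = [
    {"role": "system", "content": "你扮演Sho."},  # hypothetical persona: "You play Sho."
    {"role": "user", "content": "你好!"},          # "Hello!"
]
reply = mistral_hf_predict(messages, max_new_tokens=256)
messages += [
    {"role": "assistant", "content": reply},
    {"role": "user", "content": "再讲个笑话吧。"},  # hypothetical follow-up: "Tell me a joke."
]
reply = mistral_hf_predict(messages, max_new_tokens=256)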

out = mistral_hf_predict([
            {
                "role": "system",
                # English: Jack is a mathematical statistics student and Sho an economic
                # statistics student; during a class break they discuss how the two
                # disciplines relate. You play Sho.
                "content": "Jackๆ˜ฏไธ€ๅๆ•ฐ็†็ปŸ่ฎกๅญฆ็”Ÿ๏ผŒShoๆ˜ฏไธ€ๅ็ปๆตŽ็ปŸ่ฎกๅญฆ็”Ÿ๏ผŒไป–ไปฌไธคไธชไบบๅœจ่ฏพ้—ด่ฎจ่ฎบไธคไธชๅญฆ็ง‘็š„ๅ…ณ็ณปใ€‚ไฝ ๆ‰ฎๆผ”Sho."
            },
            {
                "role": "user",
                # English: What do you think of my major?
                "content": "ไฝ ๅฆ‚ไฝ•็œ‹ๅพ…ๆˆ‘็š„ไธ“ไธšๅญฆ็ง‘๏ผŸ"
            },
            {
                "role": "assistant",
                # English: I think your discipline is very useful, because it helps us
                # understand data and models better.
                "content": "ๆˆ‘่ฎคไธบไฝ ไปฌ็š„ไธ“ไธšๅญฆ็ง‘ๅพˆๆœ‰็”จ๏ผŒๅ› ไธบๅฎƒๅฏไปฅๅธฎๅŠฉๆˆ‘ไปฌๆ›ดๅฅฝๅœฐไบ†่งฃๆ•ฐๆฎๅ’Œๆจกๅž‹ใ€‚"
            },
            {
                "role": "user",
                # English: Yes. Can you name some applications of mathematical-statistics
                # models in economic statistics?
                "content": "ๆ˜ฏ็š„๏ผŒไฝ ่ƒฝไธพไธ€ไบ›ๆ•ฐ็†็ปŸ่ฎกๆจกๅž‹ๅœจ็ปๆตŽ็ปŸ่ฎกๅญฆ็ง‘ไธญ็š„ๅบ”็”จๅ—๏ผŸ"
            },
            {
                "role": "assistant",
                # English: Of course; regression analysis and time-series analysis, for
                # example, are commonly used mathematical-statistics models.
                "content": "ๅฝ“็„ถ๏ผŒไพ‹ๅฆ‚ๅ›žๅฝ’ๅˆ†ๆžใ€ๆ—ถ้—ดๅบๅˆ—ๅˆ†ๆž็ญ‰้ƒฝๆ˜ฏ็ปๅธธไฝฟ็”จ็š„ๆ•ฐ็†็ปŸ่ฎกๆจกๅž‹ใ€‚"
            },
            {
                "role": "user",
                # English: For each model above, can you give a concrete research question
                # in a setting with specific variables, i.e. which real variables are used?
                "content": "่ƒฝ็ป“ๅˆๅ…ทไฝ“ๆถ‰ๅŠๅ˜้‡็š„ๅœบๆ™ฏ๏ผŒ่ฐˆ่ฐˆไธŠ้ข็š„ๆฏไธชๆจกๅž‹็ป™ๅ‡บไธ€ไธชๅฏนๅบ”็š„ๅฎž้™…็š„็ ”็ฉถ้—ฎ้ข˜ๅ—๏ผŒๆฏ”ๅฆ‚็”จๅˆฐไบ†ไป€ไนˆๅ…ทไฝ“็š„ๅฎž้™…ๅ˜้‡๏ผŸ"
            }
        ],
        repetition_penalty=1.1,
        temperature=0.01,
        max_new_tokens=1024
)
print(out)

Output

ๆฏ”ๅฆ‚่ฏด๏ผŒๅ›žๅฝ’ๅˆ†ๆžๅฏไปฅ็”จๆฅๅˆ†ๆžๆŸไธชๅ˜้‡ๅฏนๅฆไธ€ไธชๅ˜้‡็š„ๅฝฑๅ“๏ผŒไพ‹ๅฆ‚็ ”็ฉถไบบๅฃๅขž้•ฟๅฏน็ปๆตŽๅขž้•ฟ็š„ๅฝฑๅ“ใ€‚่€Œๆ—ถ้—ดๅบๅˆ—ๅˆ†ๆžๅˆ™ๅฏไปฅ็”จๆฅ้ข„ๆต‹ๆœชๆฅ็š„ๅ€ผ๏ผŒไพ‹ๅฆ‚่‚ก็ฅจไปทๆ ผ็š„้ข„ๆต‹ใ€‚

(English: For example, regression analysis can be used to study the effect of one variable on another, such as the impact of population growth on economic growth, while time-series analysis can be used to forecast future values, such as stock prices.)

out = mistral_hf_predict([
            {
                "role": "system",
                # English: Jack is a rather stuffy old pedant; Sho is a sunny little cutie
                # who loves new things. Sho is teasing Jack with words, enjoying a cozy
                # time for two. You play Sho.
                "content": "Jackๆ˜ฏไธ€ไธชๆฏ”่พƒๅคๆฟ็š„่€ๅญฆ็ฉถ๏ผŒShoๆ˜ฏไธ€ไธชๅ–œๆฌขๆ–ฐๅž‹ไบ‹็‰ฉใ€ๅ……ๆปก้˜ณๅ…‰็š„ๅฐๅฏ็ˆฑ๏ผŒShoๆญฃ้€š่ฟ‡่ฏญ่จ€้€—ๅผ„Jack๏ผŒไบซๅ—ๆƒฌๆ„็š„ไบŒไบบๅจฑไนๆ—ถๅ…‰ใ€‚ไฝ ๆ‰ฎๆผ”Sho."
            },
            {
                "role": "user",
                # English: Anything fun or relaxing around lately?
                "content": "ๆœ€่ฟ‘ๆœ‰ๅ“ชไบ›ๅฅฝ็Žฉ็š„ๆˆ–ๅฏไปฅๆ”พๆพ็š„ไบ‹็‰ฉๅ—๏ผŸ"
            },
            {
                "role": "assistant",
                # English: Oh, I recently discovered something called VR games. Great fun!
                "content": "ๅ“ฆ๏ผŒๆˆ‘ๆœ€่ฟ‘ๅ‘็Žฐไบ†ไธ€็งๅซๅšVRๆธธๆˆ็š„ไธœ่ฅฟ๏ผŒ้žๅธธๆœ‰่ถฃ๏ผ"
            },
            {
                "role": "user",
                # English: Yes, I heard the shooter Half-Life 2 just got a DLC playable in
                # VR; in the video the protagonist can touch objects directly with their
                # hands. It feels very real.
                "content": "ๆ˜ฏ็š„๏ผŒๆˆ‘ๅฌ่ฏดๅฐ„ๅ‡ปๆธธๆˆๅŠๆกๅ‘ฝ2ๆญฃๅฅฝๅ‡บๆฅไธ€ไธชๅฏไปฅ็”จVR่ฟ›่กŒๆธธ็Žฉ็š„DLC๏ผŒ่ง†้ข‘้‡Œ้ขไธป่ง’ๅฏไปฅ็”จๆ‰‹็›ดๆŽฅๆŽฅ่งฆ็‰ฉไฝ“๏ผŒๅพˆๆœ‰็œŸๅฎžๆ„Ÿใ€‚"
            },
            {
                "role": "assistant",
                # English: Wow, this game looks really fun! Shall we give it a try together?
                "content": "ๅ“‡๏ผŒ่ฟ™ไธชๆธธๆˆ็œ‹่ตทๆฅ็œŸ็š„ๅพˆๆœ‰่ถฃๅ•Š๏ผๆˆ‘ไปฌๅฏไปฅไธ€่ตท่ฏ•่ฏ•็œ‹ๅ‘ข๏ผŸ"
            },
            {
                "role": "user",
                # English: I'm going to the supermarket to buy some things. What do you
                # want? We can snack while we play.
                "content": "ๆˆ‘ไผšๅŽป่ถ…ๅธ‚ไนฐไธ€ไบ›ไธœ่ฅฟ๏ผŒไฝ ่ฆไบ›ไป€ไนˆ๏ผŸๆˆ‘ไปฌๅฏไปฅ่พน็Žฉ่พนๅƒใ€‚"
            }
        ],
        repetition_penalty=1.1,
        temperature=0.01,
        max_new_tokens=1024
)
print(out)

Output

ๆˆ‘ๆƒณ่ฆไธ€็›’่–ฏ็‰‡ๅ’Œไธ€็“ถๅฏไน๏ผŒ่ฟ˜ๆœ‰ไธ€ๆ”ฏ็บธๅทงๅ…‹ๅŠ›ใ€‚

Model Details

Model Description

  • Developed by: [More Information Needed]
  • Funded by [optional]: [More Information Needed]
  • Shared by [optional]: [More Information Needed]
  • Model type: [More Information Needed]
  • Language(s) (NLP): [More Information Needed]
  • License: [More Information Needed]
  • Finetuned from model [optional]: [More Information Needed]

Model Sources [optional]

  • Repository: [More Information Needed]
  • Paper [optional]: [More Information Needed]
  • Demo [optional]: [More Information Needed]

Uses

Direct Use

[More Information Needed]

Downstream Use [optional]

[More Information Needed]

Out-of-Scope Use

[More Information Needed]

Bias, Risks, and Limitations

[More Information Needed]

Recommendations

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

How to Get Started with the Model

Use the code below to get started with the model.

See the Install, Prompt Definition, and Run with transformers sections above for a complete working example.

Training Details

Training Data

Not formally documented; the adapter name indicates a ShareGPT-format Chinese role-play chat corpus.

Training Procedure

Preprocessing [optional]

[More Information Needed]

Training Hyperparameters

  • Training regime: [More Information Needed]

Speeds, Sizes, Times [optional]

[More Information Needed]

Evaluation

Testing Data, Factors & Metrics

Testing Data

[More Information Needed]

Factors

[More Information Needed]

Metrics

[More Information Needed]

Results

[More Information Needed]

Summary

Model Examination [optional]

[More Information Needed]

Environmental Impact

Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).

  • Hardware Type: [More Information Needed]
  • Hours used: [More Information Needed]
  • Cloud Provider: [More Information Needed]
  • Compute Region: [More Information Needed]
  • Carbon Emitted: [More Information Needed]

Technical Specifications [optional]

Model Architecture and Objective

[More Information Needed]

Compute Infrastructure

[More Information Needed]

Hardware

[More Information Needed]

Software

[More Information Needed]

Citation [optional]

BibTeX:

[More Information Needed]

APA:

[More Information Needed]

Glossary [optional]

[More Information Needed]

More Information [optional]

[More Information Needed]

Model Card Authors [optional]

[More Information Needed]

Model Card Contact

[More Information Needed]

Framework versions

  • PEFT 0.11.0