patrickvonplaten commited on
Commit
a9da202
·
verified ·
0 Parent(s):

Code release

Browse files
Files changed (5) hide show
  1. .gitattributes +36 -0
  2. README.md +125 -0
  3. consolidated.safetensors +3 -0
  4. params.json +22 -0
  5. tekken.json +3 -0
.gitattributes ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tekken.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ ---
4
+
5
+ # Pixtral-12B-0910
6
+
7
+ > [!WARNING]
8
+ > We still need to validate official evaluations with the below usage example.
9
+
10
+
11
+ ...TODO
12
+
13
+ ## Usage
14
+
15
+ We recommend using Pixtral with the [vLLM library](https://github.com/vllm-project/vllm).
16
+
17
+ **Important**: Make sure you have installed vLLM from source - more specifically make sure you have installed [this commit (TODO)]( ).
18
+ Also make sure you have `mistral_common >= 1.4.0` installed:
19
+
20
+ ```
21
+ pip install --upgrade mistral_common
22
+ ```
23
+
24
+ **_Simple Example_**
25
+
26
+ ```py
27
+ from vllm import LLM
28
+ from vllm.sampling_params import SamplingParams
29
+
30
+ model_name = "mistralai/Pixtral-12B-2409"
31
+
32
+ sampling_params = SamplingParams(max_tokens=8192)
33
+
34
+ llm = LLM(model=model_name, tokenizer_mode="mistral")
35
+
36
+ prompt = "Describe this image in one sentence."
37
+ image_url = "https://picsum.photos/id/237/200/300"
38
+
39
+ messages = [
40
+ {
41
+ "role": "user",
42
+ "content": [{"type": "text", "text": prompt}, {"type": "image_url", "image_url": {"url": image_url}}]
43
+ },
44
+ ]
45
+
46
+ outputs = llm.chat(messages, sampling_params=sampling_params)
47
+
48
+ print(outputs[0].outputs[0].text)
49
+ ```
50
+
51
+ **_Advanced Example_**
52
+
53
+ You can also pass multiple images per message and/or pass multi-turn conversations
54
+
55
+ ```py
56
+ from vllm import LLM
57
+ from vllm.sampling_params import SamplingParams
58
+
59
+ model_name = "mistralai/Pixtral-12B-2409"
60
+ max_img_per_msg = 5
61
+ max_tokens_per_img = 4096
62
+
63
+ sampling_params = SamplingParams(max_tokens=8192, temperature=0.7)
64
+ llm = LLM(model=model_name, tokenizer_mode="mistral", limit_mm_per_prompt={"image": max_img_per_msg}, max_num_batched_tokens=max_img_per_msg * max_tokens_per_img)
65
+
66
+ prompt = "Describe the following image."
67
+
68
+ url_1 = "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
69
+ url_2 = "https://picsum.photos/seed/picsum/200/300"
70
+ url_3 = "https://picsum.photos/id/32/512/512"
71
+
72
+ messages = [
73
+ {
74
+ "role": "user",
75
+ "content": [{"type": "text", "text": prompt}, {"type": "image_url", "image_url": {"url": url_1}}, {"type": "image_url", "image_url": {"url": url_2}}],
76
+ },
77
+ {
78
+ "role": "assistant",
79
+ "content": "The images shows nature.",
80
+ },
81
+ {
82
+ "role": "user",
83
+ "content": "More details please and answer only in French!."
84
+ },
85
+ {
86
+ "role": "user",
87
+ "content": [{"type": "image_url", "image_url": {"url": url_3}}],
88
+ }
89
+ ]
90
+
91
+ outputs = llm.chat(messages=messages, sampling_params=sampling_params)
92
+ print(outputs[0].outputs[0].text)
93
+ ```
94
+
95
+ **_Server_**
96
+
97
+ You can also use pixtral in a server/client setting.
98
+
99
+ 1. Spin up a server:
100
+
101
+ ```
102
+ vllm serve mistralai/Pixtral-12B-2409 --tokenizer_mode mistral --limit_mm_per_prompt 'image=4' --max_num_batched_tokens 16384
103
+ ```
104
+
105
+ 2. And ping the client:
106
+
107
+ ```
108
+ curl --location 'http://<your-node-url>:8000/v1/chat/completions' \
109
+ --header 'Content-Type: application/json' \
110
+ --header 'Authorization: Bearer token' \
111
+ --data '{
112
+ "model": "mistralai/Pixtral-12B-2409",
113
+ "messages": [
114
+ {
115
+ "role": "user",
116
+ "content": [
117
+ {"type" : "text", "text": "Describe this image in detail please."},
118
+ {"type": "image_url", "image_url": {"url": "https://s3.amazonaws.com/cms.ipressroom.com/338/files/201808/5b894ee1a138352221103195_A680%7Ejogging-edit/A680%7Ejogging-edit_hero.jpg"}},
119
+ {"type" : "text", "text": "and this one as well. Answer in French."},
120
+ {"type": "image_url", "image_url": {"url": "https://www.wolframcloud.com/obj/resourcesystem/images/a0e/a0ee3983-46c6-4c92-b85d-059044639928/6af8cfb971db031b.png"}}
121
+ ]
122
+ }
123
+ ]
124
+ }'
125
+ ```
consolidated.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4816a81eef8c05103d7b6df82bf27d3d59f6066e8f1e4245568aaa39326cf4b4
3
+ size 25365548952
params.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dim": 5120,
3
+ "n_layers": 40,
4
+ "head_dim": 128,
5
+ "hidden_dim": 14336,
6
+ "n_heads": 32,
7
+ "n_kv_heads": 8,
8
+ "rope_theta": 1000000000.0,
9
+ "norm_eps": 1e-05,
10
+ "vocab_size": 131072,
11
+ "vision_encoder": {
12
+ "hidden_size": 1024,
13
+ "num_channels": 3,
14
+ "image_size": 1024,
15
+ "patch_size": 16,
16
+ "rope_theta": 10000.0,
17
+ "intermediate_size": 4096,
18
+ "num_hidden_layers": 24,
19
+ "num_attention_heads": 16,
20
+ "image_token_id": 10
21
+ }
22
+ }
tekken.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:386b1f98fba69b38c3de512a4eb602dc69a95dae0e54e6ce048ea3e29a2627a8
3
+ size 19280967