Commit ed1888e (verified, 0 parents) committed by Mungert

Super-squash history to reclaim storage
.gitattributes ADDED
@@ -0,0 +1,81 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
f16/xLAM-2-32b-fc-r-f16-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
f16/xLAM-2-32b-fc-r-f16-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-f16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-bf16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-f16_q6_k.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-bf16_q6_k.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-f16_q4_k.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-bf16_q4_k.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q2_k_l.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q3_k_l.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q4_k_l.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q5_k_l.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q6_k_l.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q2_k_m.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q2_k_s.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q3_k_m.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q3_k_s.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q4_k_s.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q5_k_s.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q6_k_m.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q4_0.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q4_1.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q4_0_l.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q4_1_l.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q5_0.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q5_1.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q5_0_l.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-q5_1_l.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq1_s.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq1_m.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq2_xs.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq2_xxs.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq2_s.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq2_m.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq3_xs.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq3_xxs.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq3_s.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq3_m.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq4_xs.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r-iq4_nl.gguf filter=lfs diff=lfs merge=lfs -text
xLAM-2-32b-fc-r.imatrix filter=lfs diff=lfs merge=lfs -text
bf16/xLAM-2-32b-fc-r-bf16-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
bf16/xLAM-2-32b-fc-r-bf16-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,408 @@
---
license: cc-by-nc-4.0
datasets:
- Salesforce/APIGen-MT-5k
- Salesforce/xlam-function-calling-60k
language:
- en
pipeline_tag: text-generation
tags:
- function-calling
- LLM Agent
- tool-use
- llama
- qwen
- pytorch
- LLaMA-factory
library_name: transformers
---

# <span style="color: #7FFF7F;">xLAM-2-32b-fc-r GGUF Models</span>

## <span style="color: #7F7FFF;">Model Generation Details</span>

This model was generated using [llama.cpp](https://github.com/ggerganov/llama.cpp) at commit [`238005c2`](https://github.com/ggerganov/llama.cpp/commit/238005c2dc67426cf678baa2d54c881701693288).
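
To sanity-check one of the quantized files, a minimal llama.cpp run looks like the sketch below. The quant filename is one of the files in this repo; the context size, token count, and prompt are placeholders to adjust:

```bash
# Minimal sketch: run a short prompt against a quantized file with llama.cpp.
# Recent llama.cpp builds ship the CLI as llama-cli; older builds name it `main`.
./llama-cli -m xLAM-2-32b-fc-r-q4_k_m.gguf \
  -c 4096 \
  -n 256 \
  -p "What's the weather like in London?"
```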

---

## <span style="color: #7FFF7F;">Quantization Beyond the IMatrix</span>

I've been experimenting with a new quantization approach that selectively elevates the precision of key layers beyond what the default IMatrix configuration provides.

In my testing, standard IMatrix quantization underperforms at lower bit depths, especially with Mixture of Experts (MoE) models. To address this, I'm using the `--tensor-type` option in `llama.cpp` to manually "bump" important layers to higher precision. You can see the implementation here:
👉 [Layer bumping with llama.cpp](https://github.com/Mungert69/GGUFModelBuilder/blob/main/model-converter/tensor_list_builder.py)

While this does increase model file size, it significantly improves precision for a given quantization level.
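
As a rough illustration, a layer-bumped quantization run looks like the hypothetical sketch below. The tensor-name patterns and target types are assumptions to tune per model, and `--tensor-type` requires a recent llama.cpp build:

```bash
# Hypothetical sketch: quantize to q4_k_m overall, but force selected tensor
# groups to higher precision, guided by the imatrix shipped in this repo.
# The f16 input filename is a placeholder for your local merged f16 GGUF.
./llama-quantize --imatrix xLAM-2-32b-fc-r.imatrix \
  --tensor-type attn_v=q6_k \
  --tensor-type ffn_down=q6_k \
  xLAM-2-32b-fc-r-f16.gguf xLAM-2-32b-fc-r-q4_k_m-bumped.gguf q4_k_m
```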

### **I'd love your feedback: have you tried this? How does it perform for you?**

---

<a href="https://readyforquantum.com/huggingface_gguf_selection_guide.html" style="color: #7FFF7F;">
Click here to get info on choosing the right GGUF model format
</a>

---

<!--Begin Original Model Card-->

<p align="center">
<img width="500px" alt="xLAM" src="https://huggingface.co/datasets/jianguozhang/logos/resolve/main/xlam-no-background.png">
</p>

<p align="center">
<a href="https://arxiv.org/abs/2504.03601">[Paper]</a> |
<a href="https://apigen-mt.github.io/">[Homepage]</a> |
<a href="https://huggingface.co/datasets/Salesforce/APIGen-MT-5k">[Dataset]</a> |
<a href="https://github.com/SalesforceAIResearch/xLAM">[Github]</a>
</p>
<hr>

# Welcome to the xLAM-2 Model Family!

[Large Action Models (LAMs)](https://blog.salesforceairesearch.com/large-action-models/) are advanced language models designed to enhance decision-making by translating user intentions into executable actions. As the **brains of AI agents**, LAMs autonomously plan and execute tasks to achieve specific goals, making them invaluable for automating workflows across diverse domains.
**This model release is for research purposes only.**

The new **xLAM-2** series, built on our most advanced data synthesis, processing, and training pipelines, marks a significant leap in **multi-turn conversation** and **tool usage**. Trained with our novel APIGen-MT framework, which generates high-quality training data through simulated agent-human interactions, our models achieve state-of-the-art performance on the [**BFCL**](https://gorilla.cs.berkeley.edu/leaderboard.html) and **τ-bench** benchmarks, outperforming frontier models like GPT-4o and Claude 3.5. Notably, even our smaller models demonstrate superior capabilities in multi-turn scenarios while maintaining exceptional consistency across trials.

We've also refined the **chat template** and **vLLM integration**, making it easier to build advanced AI agents. Compared to previous xLAM models, xLAM-2 offers superior performance and seamless deployment across applications.

<p align="center">
<img width="100%" alt="Model Performance Overview" src="https://github.com/apigen-mt/apigen-mt.github.io/blob/main/img/model_board.png?raw=true">
<br>
<small><i>Comparative performance of larger xLAM-2-fc-r models (8B-70B, trained with APIGen-MT data) against state-of-the-art baselines on function-calling (BFCL v3, as of 04/02/2025) and agentic (τ-bench) capabilities.</i></small>
</p>

## Table of Contents
- [Usage](#usage)
  - [Basic Usage with Huggingface Chat Template](#basic-usage-with-huggingface-chat-template)
  - [Using vLLM for Inference](#using-vllm-for-inference)
    - [Setup and Serving](#setup-and-serving)
    - [Testing with OpenAI API](#testing-with-openai-api)
- [Benchmark Results](#benchmark-results)
- [Citation](#citation)

---

## Model Series

The [xLAM](https://huggingface.co/collections/Salesforce/xlam-models-65f00e2a0a63bbcd1c2dade4) series is significantly better at many things, including general tasks and function calling.
For the same number of parameters, the models have been fine-tuned across a wide range of agent tasks and scenarios, all while preserving the capabilities of the original model.

| Model | # Total Params | Context Length | Category | Download Model | Download GGUF files |
|------------------------|----------------|------------|-------|----------------|----------|
| Llama-xLAM-2-70b-fc-r | 70B | 128k | Multi-turn Conversation, Function-calling | [🤗 Link](https://huggingface.co/Salesforce/Llama-xLAM-2-70b-fc-r) | NA |
| Llama-xLAM-2-8b-fc-r | 8B | 128k | Multi-turn Conversation, Function-calling | [🤗 Link](https://huggingface.co/Salesforce/Llama-xLAM-2-8b-fc-r) | [🤗 Link](https://huggingface.co/Salesforce/Llama-xLAM-2-8b-fc-r-gguf) |
| xLAM-2-32b-fc-r | 32B | 32k (max 128k)* | Multi-turn Conversation, Function-calling | [🤗 Link](https://huggingface.co/Salesforce/xLAM-2-32b-fc-r) | NA |
| xLAM-2-3b-fc-r | 3B | 32k (max 128k)* | Multi-turn Conversation, Function-calling | [🤗 Link](https://huggingface.co/Salesforce/xLAM-2-3b-fc-r) | [🤗 Link](https://huggingface.co/Salesforce/xLAM-2-3b-fc-r-gguf) |
| xLAM-2-1b-fc-r | 1B | 32k (max 128k)* | Multi-turn Conversation, Function-calling | [🤗 Link](https://huggingface.co/Salesforce/xLAM-2-1b-fc-r) | [🤗 Link](https://huggingface.co/Salesforce/xLAM-2-1b-fc-r-gguf) |

***Note:** The default context length for Qwen-2.5-based models is 32k, but you can use techniques like YaRN (Yet another RoPE extensioN method) to reach a maximum context length of 128k. Please refer to [here](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct#processing-long-texts) for more details.
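
As a rough sketch of how that is usually done for Qwen2.5-based checkpoints (per the Qwen docs linked above), you add a `rope_scaling` entry to the model's `config.json`. The path below is a placeholder for your local checkout, and static YaRN scales all inputs, so only enable it when you actually need long contexts:

```python
# Hypothetical sketch: enable YaRN context extension on a local copy of a
# Qwen2.5-based xLAM-2 model by patching its config.json.
import json

config_path = "xLAM-2-32b-fc-r/config.json"  # placeholder: your local model path

with open(config_path) as f:
    config = json.load(f)

config["rope_scaling"] = {
    "factor": 4.0,  # 4.0 x the 32k native context = 128k
    "original_max_position_embeddings": 32768,
    "type": "yarn",
}

with open(config_path, "w") as f:
    json.dump(config, f, indent=2)
```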

You can also explore our previous xLAM series [here](https://huggingface.co/collections/Salesforce/xlam-models-65f00e2a0a63bbcd1c2dade4).

The `-fc` suffix indicates that the models are fine-tuned for **function calling** tasks, while the `-r` suffix signifies a **research** release.

✅ All models are fully compatible with vLLM and Transformers-based inference frameworks.

## Usage

### Framework versions

- Transformers 4.46.1 (or later)
- PyTorch 2.5.1+cu124 (or later)
- Datasets 3.1.0 (or later)
- Tokenizers 0.20.3 (or later)
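
To install versions matching these minimums, something like this should work (CUDA wheel selection for PyTorch is left to your environment):

```bash
pip install "transformers>=4.46.1" "torch>=2.5.1" "datasets>=3.1.0" "tokenizers>=0.20.3"
```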

### Basic Usage with Huggingface Chat Template

The new xLAM models are designed to work seamlessly with the Hugging Face Transformers library and utilize natural chat templates for an easy and intuitive conversational experience. Below are examples of how to use these models.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/Llama-xLAM-2-3b-fc-r")
model = AutoModelForCausalLM.from_pretrained("Salesforce/Llama-xLAM-2-3b-fc-r", torch_dtype=torch.bfloat16, device_map="auto")

# Example conversation with a tool call
messages = [
    {"role": "user", "content": "Hi, how are you?"},
    {"role": "assistant", "content": "Thanks. I am doing well. How can I help you?"},
    {"role": "user", "content": "What's the weather like in London?"},
]

tools = [
    {
        "name": "get_weather",
        "description": "Get the current weather for a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"},
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"], "description": "The unit of temperature to return"}
            },
            "required": ["location"]
        }
    }
]

print("====== prompt after applying chat template ======")
print(tokenizer.apply_chat_template(messages, tools=tools, add_generation_prompt=True, tokenize=False))

inputs = tokenizer.apply_chat_template(messages, tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
input_ids_len = inputs["input_ids"].shape[-1]  # Get the length of the input tokens
inputs = {k: v.to(model.device) for k, v in inputs.items()}
print("====== model response ======")
outputs = model.generate(**inputs, max_new_tokens=256)
generated_tokens = outputs[:, input_ids_len:]  # Slice the output to get only the newly generated tokens
print(tokenizer.decode(generated_tokens[0], skip_special_tokens=True))
```

### Using vLLM for Inference

The xLAM models can also be efficiently served using vLLM for high-throughput inference. Please use `vllm>=0.6.5`, since earlier versions will cause degraded performance for Qwen-based models.

#### Setup and Serving

1. Install vLLM with the required version:
```bash
pip install "vllm>=0.6.5"
```

2. Download the tool parser plugin to your local path:
```bash
wget https://huggingface.co/Salesforce/xLAM-2-1b-fc-r/raw/main/xlam_tool_call_parser.py
```

3. Start the OpenAI API-compatible endpoint:
```bash
vllm serve Salesforce/xLAM-2-1b-fc-r \
  --enable-auto-tool-choice \
  --tool-parser-plugin ./xlam_tool_call_parser.py \
  --tool-call-parser xlam \
  --tensor-parallel-size 1
```

Note: Ensure that the tool parser plugin file is downloaded and that the path specified in `--tool-parser-plugin` correctly points to your local copy of the file. The xLAM series models all utilize the **same** tool call parser, so you only need to download it **once** for all models.

#### Testing with OpenAI API

Here's a minimal example to test tool usage with the served endpoint:

```python
import openai
import json

# Configure the client to use your local vLLM endpoint
client = openai.OpenAI(
    base_url="http://localhost:8000/v1",  # Default vLLM server URL
    api_key="empty"  # Can be any string
)

# Define a tool/function
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The unit of temperature to return"
                    }
                },
                "required": ["location"]
            }
        }
    }
]

# Create a chat completion
response = client.chat.completions.create(
    model="Salesforce/xLAM-2-1b-fc-r",  # Should match the model name being served by vLLM
    messages=[
        {"role": "system", "content": "You are a helpful assistant that can use tools."},
        {"role": "user", "content": "What's the weather like in San Francisco?"}
    ],
    tools=tools,
    tool_choice="auto"
)

# Print the response
print("Assistant's response:")
print(json.dumps(response.model_dump(), indent=2))
```
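
When the model decides to call `get_weather`, the reply arrives as structured `tool_calls` rather than text. Here's a minimal, hypothetical sketch of completing the round trip, continuing from the variables above (`client`, `tools`, `response`, `json`); the local `get_weather` stub stands in for a real weather API:

```python
# Execute the returned tool call locally, then send the result back to the
# model for a final natural-language answer.
message = response.choices[0].message

def get_weather(location: str, unit: str = "fahrenheit") -> str:
    # Hypothetical stub; a real implementation would query a weather service.
    return f"It is 64 degrees {unit} and foggy in {location}."

if message.tool_calls:
    tool_messages = []
    for call in message.tool_calls:
        args = json.loads(call.function.arguments)  # arguments arrive as a JSON string
        tool_messages.append({
            "role": "tool",
            "tool_call_id": call.id,
            "content": get_weather(**args),
        })

    followup = client.chat.completions.create(
        model="Salesforce/xLAM-2-1b-fc-r",
        messages=[
            {"role": "system", "content": "You are a helpful assistant that can use tools."},
            {"role": "user", "content": "What's the weather like in San Francisco?"},
            message,  # the assistant turn that contains the tool calls
            *tool_messages,
        ],
        tools=tools,
    )
    print(followup.choices[0].message.content)
```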

For more advanced configurations and deployment options, please refer to the [vLLM documentation](https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html).

## Benchmark Results

### Berkeley Function-Calling Leaderboard (BFCL v3)
<p align="center">
<img width="80%" alt="BFCL Results" src="https://github.com/apigen-mt/apigen-mt.github.io/blob/main/img/bfcl-result.png?raw=true">
<br>
<small><i>Performance comparison of different models on the [BFCL leaderboard](https://gorilla.cs.berkeley.edu/leaderboard.html). The rank is based on the overall accuracy, which is a weighted average of different evaluation categories. "FC" stands for function-calling mode, in contrast to using a customized "prompt" to extract the function calls.</i></small>
</p>

### τ-bench Benchmark

<p align="center">
<img width="80%" alt="Tau-bench Results" src="https://github.com/apigen-mt/apigen-mt.github.io/blob/main/img/taubench-result.png?raw=true">
<br>
<small><i>Success rate (pass@1) on the τ-bench benchmark, averaged across at least 5 trials. Our xLAM-2-70b-fc-r model achieves an overall success rate of 56.2% on τ-bench, significantly outperforming the base Llama 3.1 70B Instruct model (38.2%) and other open-source models like DeepSeek v3 (40.6%). Notably, our best model even outperforms proprietary models such as GPT-4o (52.9%) and approaches the performance of more recent models like Claude 3.5 Sonnet (new) (60.1%).</i></small>
</p>

<p align="center">
<img width="80%" alt="Pass^k curves" src="https://github.com/apigen-mt/apigen-mt.github.io/blob/main/img/pass_k_curves_retail_airline.png?raw=true">
<br>
<small><i>Pass^k curves measuring the probability that all 5 independent trials succeed for a given task, averaged across all tasks for the τ-retail (left) and τ-airline (right) domains. Higher values indicate better consistency of the models.</i></small>
</p>

## Ethical Considerations

This release is for research purposes only in support of an academic paper. Our models, datasets, and code are not specifically designed or evaluated for all downstream purposes. We strongly recommend users evaluate and address potential concerns related to accuracy, safety, and fairness before deploying this model. We encourage users to consider the common limitations of AI, comply with applicable laws, and leverage best practices when selecting use cases, particularly for high-risk scenarios where errors or misuse could significantly impact people's lives, rights, or safety. For further guidance on use cases, refer to our AUP and AI AUP.

### Model Licenses

For all Llama-related models, please also follow the corresponding Llama license and terms. Meta Llama 3 is licensed under the Meta Llama 3 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.

## Citation

If you use our model or dataset in your work, please cite our paper:

```bibtex
@article{prabhakar2025apigen,
  title={APIGen-MT: Agentic Pipeline for Multi-Turn Data Generation via Simulated Agent-Human Interplay},
  author={Prabhakar, Akshara and Liu, Zuxin and Zhu, Ming and Zhang, Jianguo and Awalgaonkar, Tulika and Wang, Shiyu and Liu, Zhiwei and Chen, Haolin and Hoang, Thai and others},
  journal={arXiv preprint arXiv:2504.03601},
  year={2025}
}
```

Additionally, please check our other awesome related works regarding the xLAM series and consider citing them as well:

```bibtex
@article{zhang2025actionstudio,
  title={ActionStudio: A Lightweight Framework for Data and Training of Action Models},
  author={Zhang, Jianguo and Hoang, Thai and Zhu, Ming and Liu, Zuxin and Wang, Shiyu and Awalgaonkar, Tulika and Prabhakar, Akshara and Chen, Haolin and Yao, Weiran and Liu, Zhiwei and others},
  journal={arXiv preprint arXiv:2503.22673},
  year={2025}
}
```

```bibtex
@article{zhang2024xlam,
  title={xLAM: A Family of Large Action Models to Empower AI Agent Systems},
  author={Zhang, Jianguo and Lan, Tian and Zhu, Ming and Liu, Zuxin and Hoang, Thai and Kokane, Shirley and Yao, Weiran and Tan, Juntao and Prabhakar, Akshara and Chen, Haolin and others},
  journal={arXiv preprint arXiv:2409.03215},
  year={2024}
}
```

```bibtex
@article{liu2024apigen,
  title={APIGen: Automated Pipeline for Generating Verifiable and Diverse Function-Calling Datasets},
  author={Liu, Zuxin and Hoang, Thai and Zhang, Jianguo and Zhu, Ming and Lan, Tian and Tan, Juntao and Yao, Weiran and Liu, Zhiwei and Feng, Yihao and RN, Rithesh and others},
  journal={Advances in Neural Information Processing Systems},
  volume={37},
  pages={54463--54482},
  year={2024}
}
```

```bibtex
@article{zhang2024agentohana,
  title={AgentOhana: Design Unified Data and Training Pipeline for Effective Agent Learning},
  author={Zhang, Jianguo and Lan, Tian and Murthy, Rithesh and Liu, Zhiwei and Yao, Weiran and Tan, Juntao and Hoang, Thai and Yang, Liangwei and Feng, Yihao and Liu, Zuxin and others},
  journal={arXiv preprint arXiv:2402.15506},
  year={2024}
}
```

<!--End Original Model Card-->

---

# <span id="testllm" style="color: #7F7FFF;">🚀 If you find these models useful</span>

Help me test my **AI-Powered Quantum Network Monitor Assistant** with **quantum-ready security checks**:

👉 [Quantum Network Monitor](https://readyforquantum.com/?assistant=open&utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme)

The full open-source code for the Quantum Network Monitor service is available in my GitHub repos (repos with NetworkMonitor in the name): [Source Code Quantum Network Monitor](https://github.com/Mungert69). You will also find the code I use to quantize the models if you want to do it yourself: [GGUFModelBuilder](https://github.com/Mungert69/GGUFModelBuilder)

💬 **How to test**:
Choose an **AI assistant type**:
- `TurboLLM` (GPT-4.1-mini)
- `HugLLM` (Hugging Face open-source models)
- `TestLLM` (Experimental CPU-only)

### **What I'm Testing**
I'm pushing the limits of **small open-source models for AI network monitoring**, specifically:
- **Function calling** against live network services
- **How small can a model go** while still handling:
  - Automated **Nmap security scans**
  - **Quantum-readiness checks**
  - **Network monitoring tasks**

🟡 **TestLLM** – Current experimental model (llama.cpp on 2 CPU threads on a Hugging Face Docker space):
- ✅ **Zero-configuration setup**
- ⏳ 30s load time (slow inference but **no API costs**). No token limit, as the cost is low.
- 🔧 **Help wanted!** If you're into **edge-device AI**, let's collaborate!

### **Other Assistants**
🟢 **TurboLLM** – Uses **gpt-4.1-mini**:
- **It performs very well, but unfortunately OpenAI charges per token, so token usage is limited.**
- **Create custom cmd processors to run .net code on Quantum Network Monitor Agents**
- **Real-time network diagnostics and monitoring**
- **Security audits**
- **Penetration testing** (Nmap/Metasploit)

🔵 **HugLLM** – Latest open-source models:
- 🌐 Runs on the Hugging Face Inference API. Performs pretty well using the latest models hosted on Novita.

### 💡 **Example commands you could test**:
1. `"Give me info on my website's SSL certificate"`
2. `"Check if my server is using quantum safe encryption for communication"`
3. `"Run a comprehensive security audit on my server"`
4. `"Create a cmd processor to .. (whatever you want)"` Note: you need to install a [Quantum Network Monitor Agent](https://readyforquantum.com/Download/?utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme) to run the .net code on. This is a very flexible and powerful feature. Use with caution!

### Final Word

I fund the servers used to create these model files, run the Quantum Network Monitor service, and pay for inference from Novita and OpenAI, all out of my own pocket. All the code behind the model creation and the Quantum Network Monitor project is [open source](https://github.com/Mungert69). Feel free to use whatever you find helpful.

If you appreciate the work, please consider [buying me a coffee](https://www.buymeacoffee.com/mahadeva) ☕. Your support helps cover service costs and allows me to raise token limits for everyone.

I'm also open to job opportunities or sponsorship.

Thank you! 😊
bf16/xLAM-2-32b-fc-r-bf16-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:839d03e3bbf79b49b3189a00c1ca4451f9fc2988066fd0b7adfc3648dc73ac41
size 45902462976

bf16/xLAM-2-32b-fc-r-bf16-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e50643d6720516613e2cf45ef8fd4b0c3b9b79a2490dad6296ffe24fc3c0c205
size 19633507584

f16/xLAM-2-32b-fc-r-f16-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:492b765485d79f91ab2d037dede8a7b9de26b0722308e68f37afce50aa326ed9
size 45902462976

f16/xLAM-2-32b-fc-r-f16-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3a62e4fb1fa8248318e3a54a289341cc0ddf22b04c7356f08169ef374ac38e04
size 19633507584

xLAM-2-32b-fc-r-bf16_q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5ec9f29e671280193b597592d12a206f9289b9a93d64533c175fdb7d4b41b534
size 46661602560

xLAM-2-32b-fc-r-f16_q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:919047932ecb7a75b3ae3d3b83360b1bc0e0ee20db5fbf9b81d26fd7ee5bf719
size 46661602560

xLAM-2-32b-fc-r-iq1_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20d26b27c96aab1cac6baabd12df120b42edefe4adaad93d5b57bdbb13d0f7a3
size 9742306880

xLAM-2-32b-fc-r-iq1_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0f37b7f26915700be706b78700fc7dd9ce6f2e849ebc1ee0c474d736cfe2691
size 8992493120

xLAM-2-32b-fc-r-iq2_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ded1f655144c82ebbdc5d2dd2ffa82d6ce3a633504429d490e6db08482f48afb
size 11985849920

xLAM-2-32b-fc-r-iq2_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f748a2362ccf110e292041ee0aa910ddad34935bad4240e2784629124e70254
size 11382263360

xLAM-2-32b-fc-r-iq2_xs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9125eac54c6b251b2ccb24046f89c403e1abf14c806092b9fad578df17cb4955
size 11016326720

xLAM-2-32b-fc-r-iq2_xxs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f1bcb66e01d2afcbb9696640472ae37c1036f1842d730f5f1be0f99595f06c2
size 10124955200

xLAM-2-32b-fc-r-iq3_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:de5aded935701c5c5561b9bbe66c1efe27ef8f19747dceb97962e71e06b0feb7
size 15545355840

xLAM-2-32b-fc-r-iq3_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5a9411dd1c5baf587d93d9fc21f5f64f75981f02bffc1221b6ebdf2d69f3bead
size 15545355840

xLAM-2-32b-fc-r-iq3_xs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:191255ea0a4b0a99f0a3164ee369cbb9a4dbdb8958b1c43e8a07e03ab5b05a0e
size 13793087040

xLAM-2-32b-fc-r-iq3_xxs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8bcd7f56afc87fcd055330fe3999fe395cab144ff6143e161a651dcdcf051a2a
size 13628837440

xLAM-2-32b-fc-r-iq4_nl.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bcca3a611564a10dc4fe5ab2a0eaf1fc2da05b66cf877773bdbb21817279418a
size 18682175040

xLAM-2-32b-fc-r-iq4_xs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31f705fec08321baa38fd52910725a557cd57641f9eaef374aecd2adf3fc6d31
size 17693154880

xLAM-2-32b-fc-r-q2_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a445f9f288f2882f364e129e175cbed4fa075a104f2f8f7b7dd445e74a630acd
size 12573011520

xLAM-2-32b-fc-r-q2_k_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:da53164adb3d302cdabe7e9dd39f4647121300bcda355dcbd2652e1f1ff6c3dc
size 11547414080

xLAM-2-32b-fc-r-q3_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:91aec5540c8fd950ea2b467f41e7034d3940525440e18963b74ef0bc0c0143b1
size 16091393600

xLAM-2-32b-fc-r-q3_k_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d3960b4df33db41da72edd950a5c86a5ba29b02014d44595f9c4a3a07e6aea6
size 14735658560

xLAM-2-32b-fc-r-q4_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2edaa3fbe3504bf1dd1301a8ab2cde2763a203ca00e165737b5890d89cd342f8
size 18439507520

xLAM-2-32b-fc-r-q4_1.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3769b1f6d2df616f6da7d8b5ff7182bf0877055cd234378962985f335032a43b
size 20487179840

xLAM-2-32b-fc-r-q4_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:96cd5b6bdf742d0642b706a965efcb335d6e4b6265607eb304ef71a5da258c44
size 19824979520

xLAM-2-32b-fc-r-q4_k_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f0bb1459a97f26f346e8b6a82070a035db283f04e6e047dac419118e2257bba
size 19161427520

xLAM-2-32b-fc-r-q5_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:89e7a53cb2137ec719258e76dd8bfaa735d263ebf3bfbe5a72f5ecbff103faff
size 22534852160

xLAM-2-32b-fc-r-q5_1.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f66775c17a88ac007812e9e6070f9472c240d3a35e475dd0528de4e55b3c1d8
size 24582524480

xLAM-2-32b-fc-r-q5_k_l.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:28dea24038dbe53573d7f17e54e27c12dea8cff235dbec1a63119b9dda104b72
size 23791423040

xLAM-2-32b-fc-r-q5_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c93474b3067bb537e10ee8c160962f3b6e7f17afa0eff69b82c20a068dc475c9
size 23414304320

xLAM-2-32b-fc-r-q5_k_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee2e1d4685ea727e59770d6da3e4e58a0d693d1ad87325f8e28ddcb3c01b99a0
size 23063359040

xLAM-2-32b-fc-r-q6_k_l.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bc05be4c0cc44b5ccb46ad4f09fc02e174dedba055fe63a25697178ba084c895
size 27263274560

xLAM-2-32b-fc-r-q6_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5565877c4e3d69bb550326d72aacd8d4dbceb4e6f4b20cf8fcb12c886caef4b6
size 26886155840

xLAM-2-32b-fc-r-q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0e0d6a1c8c8772028ee960a5299a1321041bdc4a924037fa069bc6e901dd3365
size 34820885760

xLAM-2-32b-fc-r.imatrix ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:494dfe54c7a05b6b174b028ef7e76eae4e3ff6b8102067915f7eab625fea5271
size 14957132