# MiniMax-M2-AWQ-4bit / recipe.yaml
default_stage:
  default_modifiers:
    AWQModifier:
      config_groups:
        group_0:
          targets: [Linear]
          # 4-bit symmetric integer weight quantization, grouped in blocks of 32
          weights:
            num_bits: 4
            type: int
            symmetric: true
            group_size: 32
            strategy: group
            block_structure: null
            dynamic: false
            actorder: null
            observer: minmax
            observer_kwargs: {}
          input_activations: null
          output_activations: null
          format: null
      targets: [Linear]
      # modules kept in full precision: embeddings, MoE gates and score-correction bias,
      # layer norms, q/k norms, the final norm, and the LM head
      ignore: [model.embed_tokens, 're:.*block_sparse_moe[.]e_score_correction_bias$', 're:.*block_sparse_moe[.]gate$',
        're:.*input_layernorm$', 're:.*post_attention_layernorm$', 're:.*k_norm$', 're:.*q_norm$',
        model.norm, lm_head]
      # AWQ scale mappings: each smooth_layer's activation scale is absorbed by its balance_layers
      mappings:
      - smooth_layer: re:.*input_layernorm$
        balance_layers: ['re:.*q_proj$', 're:.*k_proj$', 're:.*v_proj$']
      - smooth_layer: re:.*v_proj$
        balance_layers: ['re:.*o_proj$']
      - smooth_layer: re:.*post_attention_layernorm$
        balance_layers: ['re:.*w1$', 're:.*w3$']
      - smooth_layer: re:.*w3$
        balance_layers: ['re:.*w2$']
      # serialized torch.device: target for offloading cached calibration data
      offload_device: !!python/object/apply:torch.device [cpu]
      duo_scaling: true
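
# Usage sketch (not part of the recipe, kept as comments so this file stays valid YAML):
# one way a recipe like this might be applied with llm-compressor's oneshot entry point.
# The import path can vary by version, and the model id, dataset, and sample counts
# below are illustrative assumptions, not values taken from this repository.
#
#   from llmcompressor import oneshot
#
#   oneshot(
#       model="MiniMaxAI/MiniMax-M2",        # assumed base model id
#       recipe="recipe.yaml",                # this file
#       dataset="open_platypus",             # any text calibration set
#       max_seq_length=2048,
#       num_calibration_samples=256,
#       output_dir="MiniMax-M2-AWQ-4bit",
#   )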