quant_stage:
  quant_modifiers:
    GPTQModifier:
      dampening_frac: 0.05
      ignore: ['re:.*lm_head.*', 're:.*embed_tokens.*', 're:vision_tower.*', 're:multi_modal_projector.*']
      sequential_targets: [Gemma3DecoderLayer]
      sequential_update: true
      config_groups:
        group_0:
          targets: [Linear]
          weights: {num_bits: 4, group_size: 128, type: int, symmetric: false, strategy: group, actorder: weight, observer: mse}
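
A recipe like this is normally applied with llm-compressor's `oneshot` entrypoint. The sketch below is an illustrative assumption, not part of the uploaded files: the base model ID, calibration dataset, sequence length, sample count, and output path are all placeholders, and only the recipe itself comes from this file.

```python
# Hypothetical usage sketch for applying the GPTQ recipe above with llm-compressor.
# Model ID, dataset, and calibration settings are assumptions, not values from this repo.
from llmcompressor import oneshot

oneshot(
    model="google/gemma-3-4b-it",        # assumed Gemma 3 checkpoint (recipe targets Gemma3DecoderLayer)
    dataset="open_platypus",             # assumed calibration dataset
    recipe="recipe.yaml",                # the recipe shown above, saved locally
    max_seq_length=2048,                 # assumed calibration sequence length
    num_calibration_samples=512,         # assumed number of calibration samples
    output_dir="gemma-3-4b-it-W4A16",    # assumed output directory for the quantized model
)
```

The recipe leaves `lm_head`, the embeddings, the vision tower, and the multimodal projector unquantized, so only the decoder's Linear layers are compressed to 4-bit grouped weights (group size 128, MSE observer, weight-ordered activation reordering).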