Merge remote-tracking branch 'thu/main'

Files changed:
- config.json               +1   -0
- configuration_chatglm.py  +7   -0
- modeling_chatglm.py       +252 -111
- quantization.py           +46  -32
- tokenization_chatglm.py   +105 -12
config.json CHANGED

@@ -10,6 +10,7 @@
   },
   "bos_token_id": 130004,
   "eos_token_id": 130005,
+  "pad_token_id": 3,
   "hidden_size": 4096,
   "inner_hidden_size": 16384,
   "layernorm_epsilon": 1e-05,
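With "pad_token_id" now declared, downstream code can pad batches without guessing the id. A minimal sketch of reading the new field (assumes the transformers library; "THUDM/chatglm-6b" is the hub id usually associated with this repo):

from transformers import AutoConfig

config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
print(config.pad_token_id)  # 3, from the entry added above
print(config.bos_token_id)  # 130004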
configuration_chatglm.py CHANGED

@@ -71,6 +71,9 @@ class ChatGLMConfig(PretrainedConfig):
         max_sequence_length=2048,
         inner_hidden_size=16384,
         position_encoding_2d=True,
+        quantization_bit=0,
+        pre_seq_len=None,
+        prefix_projection=False,
         **kwargs
     ):
         self.num_layers = num_layers

@@ -85,6 +88,10 @@ class ChatGLMConfig(PretrainedConfig):
         self.eos_token_id = eos_token_id
         self.pad_token_id = pad_token_id
         self.position_encoding_2d = position_encoding_2d
+        self.quantization_bit = quantization_bit
+        self.pre_seq_len = pre_seq_len
+        self.prefix_projection = prefix_projection
+
         super().__init__(
             pad_token_id=pad_token_id,
             bos_token_id=bos_token_id,
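The three new constructor arguments wire quantization and P-Tuning v2 through the config. A minimal sketch of setting them (values are illustrative, not repo defaults):

from configuration_chatglm import ChatGLMConfig

config = ChatGLMConfig(
    pre_seq_len=128,          # train a 128-token learned prefix, backbone frozen
    prefix_projection=False,  # False: plain embedding; True: two-layer MLP encoder
    quantization_bit=4,       # quantize weights to 4 bits at load time; 0 disables
)
assert config.pre_seq_len == 128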
modeling_chatglm.py CHANGED

@@ -13,7 +13,7 @@ import torch.nn.functional as F
 from torch import nn
 from torch.nn import CrossEntropyLoss, LayerNorm
 from torch.nn.utils import skip_init
-from typing import Optional, Tuple, Union, List, Callable
+from typing import Optional, Tuple, Union, List, Callable, Dict, Any

 from transformers.utils import (
     add_code_sample_docstrings,

@@ -28,7 +28,7 @@ from transformers.modeling_outputs import (
 from transformers.modeling_utils import PreTrainedModel
 from transformers.utils import logging
 from transformers.generation.logits_process import LogitsProcessor
-from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig
+from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput

 from .configuration_chatglm import ChatGLMConfig
@@ -134,6 +134,36 @@ def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path):
     return model


+class PrefixEncoder(torch.nn.Module):
+    """
+    The torch.nn model to encode the prefix
+    Input shape: (batch-size, prefix-length)
+    Output shape: (batch-size, prefix-length, 2*layers*hidden)
+    """
+
+    def __init__(self, config):
+        super().__init__()
+        self.prefix_projection = config.prefix_projection
+        if self.prefix_projection:
+            # Use a two-layer MLP to encode the prefix
+            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)
+            self.trans = torch.nn.Sequential(
+                torch.nn.Linear(config.hidden_size, config.hidden_size),
+                torch.nn.Tanh(),
+                torch.nn.Linear(config.hidden_size, config.num_layers * config.hidden_size * 2)
+            )
+        else:
+            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.num_layers * config.hidden_size * 2)
+
+    def forward(self, prefix: torch.Tensor):
+        if self.prefix_projection:
+            prefix_tokens = self.embedding(prefix)
+            past_key_values = self.trans(prefix_tokens)
+        else:
+            past_key_values = self.embedding(prefix)
+        return past_key_values
+
+
 @torch.jit.script
 def gelu_impl(x):
     """OpenAI's gelu implementation."""
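A quick shape check of the encoder above (standalone sketch; the config stub is a hypothetical stand-in carrying only the fields PrefixEncoder reads):

import torch
from modeling_chatglm import PrefixEncoder  # assumes a local copy of the file above

class _Cfg:  # hypothetical stand-in for ChatGLMConfig
    prefix_projection = False
    pre_seq_len = 16
    hidden_size = 4096
    num_layers = 28

enc = PrefixEncoder(_Cfg())
prefix = torch.arange(16).unsqueeze(0)  # (batch=1, prefix_len=16)
print(enc(prefix).shape)  # torch.Size([1, 16, 229376]); 229376 = 2 * 28 * 4096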
@@ -188,6 +218,13 @@ class RotaryEmbedding(torch.nn.Module):
         self.cos_cached, self.sin_cached = cos_cached, sin_cached
         return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]

+    def _apply(self, fn):
+        if self.cos_cached is not None:
+            self.cos_cached = fn(self.cos_cached)
+        if self.sin_cached is not None:
+            self.sin_cached = fn(self.sin_cached)
+        return super()._apply(fn)
+

 def rotate_half(x):
     x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
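The rotary cos/sin caches are plain tensor attributes rather than registered buffers, so model.half(), .cuda() and .to() would silently skip them; _apply is the internal hook all of those funnel through. A self-contained illustration of the pattern:

import torch

class CachedModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.cache = torch.zeros(4)  # plain attribute: .half()/.cuda() ignore it

    def _apply(self, fn):
        self.cache = fn(self.cache)  # convert/move the cache by hand
        return super()._apply(fn)

m = CachedModule().half()
print(m.cache.dtype)  # torch.float16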
@@ -216,7 +253,7 @@ def attention_fn(
     use_cache=False,
 ):
     if layer_past is not None:
-        past_key, past_value = layer_past
+        past_key, past_value = layer_past[0], layer_past[1]
         key_layer = torch.cat((past_key, key_layer), dim=0)
         value_layer = torch.cat((past_value, value_layer), dim=0)

@@ -616,10 +653,10 @@ class ChatGLMPreTrainedModel(PreTrainedModel):
     """

     is_parallelizable = False
-    supports_gradient_checkpointing =
+    supports_gradient_checkpointing = True
     config_class = ChatGLMConfig
     base_model_prefix = "transformer"
-    _no_split_modules = ["
+    _no_split_modules = ["GLMBlock"]

     def __init__(self, *inputs, **kwargs):
         super().__init__(*inputs, **kwargs)
@@ -628,6 +665,43 @@ class ChatGLMPreTrainedModel(PreTrainedModel):
         """Initialize the weights."""
         return

+    def get_masks(self, input_ids, device):
+        batch_size, seq_length = input_ids.shape
+        context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]
+        attention_mask = torch.ones((batch_size, seq_length, seq_length), device=device)
+        attention_mask.tril_()
+        for i, context_length in enumerate(context_lengths):
+            attention_mask[i, :, :context_length] = 1
+        attention_mask.unsqueeze_(1)
+        attention_mask = (attention_mask < 0.5).bool()
+
+        return attention_mask
+
+    def get_position_ids(self, input_ids, mask_positions, device, gmask=False):
+        batch_size, seq_length = input_ids.shape
+        context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]
+        if self.position_encoding_2d:
+            position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
+            for i, context_length in enumerate(context_lengths):
+                position_ids[i, context_length:] = mask_positions[i]
+            block_position_ids = [torch.cat((
+                torch.zeros(context_length, dtype=torch.long, device=device),
+                torch.arange(seq_length - context_length, dtype=torch.long, device=device) + 1
+            )) for context_length in context_lengths]
+            block_position_ids = torch.stack(block_position_ids, dim=0)
+            position_ids = torch.stack((position_ids, block_position_ids), dim=1)
+        else:
+            position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
+            if not gmask:
+                for i, context_length in enumerate(context_lengths):
+                    position_ids[context_length:] = mask_positions[i]
+
+        return position_ids
+
+    def _set_gradient_checkpointing(self, module, value=False):
+        if isinstance(module, ChatGLMModel):
+            module.gradient_checkpointing = value
+

 CHATGLM_6B_START_DOCSTRING = r"""
     This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
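Both helpers now work on a whole batch at once: everything before each sequence's <bos> (the prompt) attends bidirectionally, positions after it are causal, and True in the returned mask marks positions to block. A standalone rendition of the mask logic with toy sizes:

import torch

bos_index, seq_length = 3, 6  # <bos> at index 3 of a length-6 sequence
mask = torch.ones(1, seq_length, seq_length).tril_()
mask[0, :, :bos_index] = 1          # the prompt region is fully visible
mask = (mask.unsqueeze(1) < 0.5)    # True = blocked
print(mask[0, 0].int())             # 1s only for future tokens at or past <bos>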
@@ -724,12 +798,15 @@ class ChatGLMModel(ChatGLMPreTrainedModel):
         self.inner_hidden_size = config.inner_hidden_size
         self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
         self.position_encoding_2d = config.position_encoding_2d
+        self.pre_seq_len = config.pre_seq_len
+        self.prefix_projection = config.prefix_projection

         self.word_embeddings = skip_init(
             torch.nn.Embedding,
             num_embeddings=self.vocab_size, embedding_dim=self.hidden_size,
             dtype=self.params_dtype
         )
+        self.gradient_checkpointing = False

         def get_layer(layer_id):
             return GLMBlock(
@@ -752,43 +829,38 @@ class ChatGLMModel(ChatGLMPreTrainedModel):
         # Final layer norm before output.
         self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon)

+        if self.pre_seq_len is not None:
+            for param in self.parameters():
+                param.requires_grad = False
+            self.prefix_tokens = torch.arange(self.pre_seq_len).long()
+            self.prefix_encoder = PrefixEncoder(config)
+            self.dropout = torch.nn.Dropout(0.1)
+
+            # total_params = sum(p.numel() for p in self.parameters())
+            # trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
+            # print("Using p-tuning v2: # trainable_params = {} / {}".format(trainable_params, total_params))
+
     def get_input_embeddings(self):
         return self.word_embeddings

     def set_input_embeddings(self, new_embeddings: torch.Tensor):
         self.word_embeddings = new_embeddings

-    def get_position_ids(…):  # single-sequence version, replaced by the batched base-class helper; opening lines lost in extraction
-        …
-            position_ids = torch.arange(context_length, dtype=torch.long, device=device)
-            if not gmask:
-                position_ids[seq_length:] = mask_position
-            block_position_ids = torch.cat((
-                torch.zeros(seq_length, dtype=torch.long, device=device),
-                torch.arange(context_length - seq_length, dtype=torch.long, device=device) + 1
-            ))
-            position_ids = torch.stack((position_ids, block_position_ids), dim=0)
-        else:
-            position_ids = torch.arange(context_length, dtype=torch.long, device=device)
-            if not gmask:
-                position_ids[context_length - 1:] = mask_position
-
-        position_ids = position_ids.unsqueeze(0)
-
-        return position_ids
+    def get_prompt(self, batch_size, device, dtype=torch.half):
+        prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
+        past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
+        past_key_values = past_key_values.view(
+            batch_size,
+            self.pre_seq_len,
+            self.num_layers * 2,
+            self.num_attention_heads,
+            self.hidden_size // self.num_attention_heads
+        )
+        # seq_len, b, nh, hidden_size
+        past_key_values = self.dropout(past_key_values)
+        past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
+        # past_key_values = [(v[0], v[1]) for v in past_key_values]
+        return past_key_values

     @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
     @add_code_sample_docstrings(
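get_prompt reshapes the encoded prefix into the same past_key_values layout the attention cache uses. A shape walk-through with toy sizes (2 layers, 4 heads, head dim 8):

import torch

batch, pre_seq_len, num_layers, heads, head_dim = 3, 5, 2, 4, 8
pkv = torch.zeros(batch, pre_seq_len, num_layers * 2, heads, head_dim)
pkv = pkv.permute([2, 1, 0, 3, 4]).split(2)
print(len(pkv), pkv[0].shape)
# 2 torch.Size([2, 5, 3, 4, 8]): one stacked (key, value) pair per layer,
# each of shape (pre_seq_len, batch, heads, head_dim)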
@@ -816,6 +888,13 @@ class ChatGLMModel(ChatGLMPreTrainedModel):
         use_cache = use_cache if use_cache is not None else self.config.use_cache
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict

+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
         if input_ids is not None and inputs_embeds is not None:
             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
         elif input_ids is not None:

@@ -825,31 +904,41 @@ class ChatGLMModel(ChatGLMPreTrainedModel):
         else:
             raise ValueError("You have to specify either input_ids or inputs_embeds")

+        if inputs_embeds is None:
+            inputs_embeds = self.word_embeddings(input_ids)
+
         if past_key_values is None:
-            …
+            if self.pre_seq_len is not None:
+                past_key_values = self.get_prompt(batch_size=input_ids.shape[0], device=input_ids.device,
+                                                  dtype=inputs_embeds.dtype)
+            else:
+                past_key_values = tuple([None] * len(self.layers))

         if attention_mask is None:
             attention_mask = self.get_masks(
-                …
+                input_ids,
                 device=input_ids.device
             )

         if position_ids is None:
             MASK, gMASK = 130000, 130001
             mask_token = MASK if MASK in input_ids else gMASK
             use_gmask = False if MASK in input_ids else gMASK

-            …
+            mask_positions = [seq.tolist().index(mask_token) for seq in input_ids]
             position_ids = self.get_position_ids(
-                …
+                input_ids,
+                mask_positions=mask_positions,
                 device=input_ids.device,
                 gmask=use_gmask
             )

-        if …
+        if self.pre_seq_len is not None and attention_mask is not None:
+            prefix_attention_mask = torch.ones(batch_size, 1, input_ids.size(-1), self.pre_seq_len).to(
+                attention_mask.device)
+            prefix_attention_mask = (prefix_attention_mask < 0.5).bool()
+            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=3)

         # [seq_len, batch, hidden_size]
         hidden_states = inputs_embeds.transpose(0, 1)

@@ -858,11 +947,6 @@ class ChatGLMModel(ChatGLMPreTrainedModel):
         all_self_attentions = () if output_attentions else None
         all_hidden_states = () if output_hidden_states else None

-        seq_length_with_past = seq_length
-        past_key_values_length = 0
-        if past_key_values[0] is not None:
-            past_key_values_length = past_key_values[0][0].shape[0]
-            seq_length_with_past = seq_length_with_past + past_key_values_length
         if attention_mask is None:
             attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()

@@ -873,16 +957,29 @@ class ChatGLMModel(ChatGLMPreTrainedModel):

             if output_hidden_states:
                 all_hidden_states = all_hidden_states + (hidden_states,)
-            …  (the old direct layer call was lost in extraction)
+            layer_past = past_key_values[i]
+
+            if self.gradient_checkpointing and self.training:
+                layer_ret = torch.utils.checkpoint.checkpoint(
+                    layer,
+                    hidden_states,
+                    position_ids,
+                    attention_mask,
+                    torch.tensor(i),
+                    layer_past,
+                    use_cache,
+                    output_attentions
+                )
+            else:
+                layer_ret = layer(
+                    hidden_states,
+                    position_ids=position_ids,
+                    attention_mask=attention_mask,
+                    layer_id=torch.tensor(i),
+                    layer_past=layer_past,
+                    use_cache=use_cache,
+                    output_attentions=output_attentions
+                )

             hidden_states = layer_ret[0]
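With checkpointing on, each GLMBlock's activations are recomputed during the backward pass instead of being stored, which is why it cannot coexist with use_cache. A hedged usage sketch (standard transformers loading path for this repo):

from transformers import AutoModel

model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model.gradient_checkpointing_enable()  # reaches ChatGLMModel.gradient_checkpointing
                                       # through _set_gradient_checkpointing above
model.train()  # memory drops at the cost of one extra forward pass per block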
@@ -910,7 +1007,7 @@ class ChatGLMModel(ChatGLMPreTrainedModel):


 class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
-    def __init__(self, config):
+    def __init__(self, config: ChatGLMConfig):
         super().__init__(config)

         # self.hidden_size = config.hidden_size

@@ -930,37 +1027,53 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
             dtype=torch.half
         )

+        self.config = config
+
+        self.quantized = False
+
+        if self.config.quantization_bit:
+            self.quantize(self.config.quantization_bit, empty_init=True)
+
     def get_output_embeddings(self):
         return self.lm_head

     def set_output_embeddings(self, new_embeddings):
         self.lm_head = new_embeddings

-    def …  (removed helper; most of its body was lost in extraction)
-        if not …
-            position_ids = torch.arange(context_length, dtype=torch.long, device=device)
-            if not gmask:
-                position_ids[context_length - 1:] = mask_position
-        return …
+    def _update_model_kwargs_for_generation(
+            self,
+            outputs: ModelOutput,
+            model_kwargs: Dict[str, Any],
+            is_encoder_decoder: bool = False,
+            standardize_cache_format: bool = False,
+    ) -> Dict[str, Any]:
+        # update past_key_values
+        model_kwargs["past_key_values"] = self._extract_past_from_model_output(
+            outputs, standardize_cache_format=standardize_cache_format
+        )
+
+        # update attention mask
+        if "attention_mask" in model_kwargs:
+            attention_mask = model_kwargs["attention_mask"]
+            if attention_mask is not None and attention_mask.dtype == torch.bool:
+                attention_mask = torch.cat(
+                    [attention_mask, attention_mask.new_ones((*attention_mask.shape[:3], 1))], dim=3)
+                new_attention_mask = attention_mask[:, :, -1:].clone()
+                new_attention_mask[..., -1] = False
+                model_kwargs["attention_mask"] = torch.cat(
+                    [attention_mask, new_attention_mask], dim=2
+                )
+
+        # update position ids
+        if "position_ids" in model_kwargs:
+            position_ids = model_kwargs["position_ids"]
+            new_position_id = position_ids[..., -1:].clone()
+            new_position_id[:, 1, :] += 1
+            model_kwargs["position_ids"] = torch.cat(
+                [position_ids, new_position_id], dim=-1
+            )
+
+        return model_kwargs

     def prepare_inputs_for_generation(
             self,
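The override grows the cached boolean attention mask and the 2D position ids by one column per decoding step. For the position ids, row 0 (absolute position) stays pinned at the mask position while row 1 (block position) counts up:

import torch

position_ids = torch.tensor([[[3], [1]]])  # (batch=1, 2, seq=1)
new_position_id = position_ids[..., -1:].clone()
new_position_id[:, 1, :] += 1
position_ids = torch.cat([position_ids, new_position_id], dim=-1)
print(position_ids)  # tensor([[[3, 3], [1, 2]]])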
@@ -968,27 +1081,34 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
             past: Optional[torch.Tensor] = None,
             past_key_values: Optional[torch.Tensor] = None,
             attention_mask: Optional[torch.Tensor] = None,
+            position_ids: Optional[torch.Tensor] = None,
             **kwargs
     ) -> dict:
-        …
+        batch_size, seq_length = input_ids.shape
         MASK, gMASK = 130000, 130001
         mask_token = MASK if MASK in input_ids else gMASK
         use_gmask = False if MASK in input_ids else gMASK
-        …
-        if mask_token not in seq:
-            raise ValueError("You have to add either [MASK] or [gMASK] in your input")
+        seqs = input_ids.tolist()
+        mask_positions = [seq.index(mask_token) for seq in seqs]

         # only last token for input_ids if past is not None
         if past is not None or past_key_values is not None:
-            context_length = seq.index(self.config.bos_token_id)
             last_token = input_ids[:, -1].unsqueeze(-1)
-            if …
-                …
-                device=input_ids.device)
+            if attention_mask is not None and attention_mask.dtype == torch.bool:
+                attention_mask = attention_mask[:, :, -1:]
             else:
-                …
+                attention_mask = None
+            if position_ids is not None:
+                position_ids = position_ids[..., -1:]
+            else:
+                context_lengths = [seq.index(self.config.bos_token_id) for seq in seqs]
+                if self.position_encoding_2d:
+                    position_ids = torch.tensor(
+                        [[mask_position, seq_length - context_length] for mask_position, context_length in
+                         zip(mask_positions, context_lengths)], dtype=torch.long, device=input_ids.device).unsqueeze(-1)
+                else:
+                    position_ids = torch.tensor([mask_position for mask_position in mask_positions], dtype=torch.long,
+                                                device=input_ids.device).unsqueeze(-1)

             if past is None:
                 past = past_key_values

@@ -996,15 +1116,24 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
                 "input_ids": last_token,
                 "past_key_values": past,
                 "position_ids": position_ids,
+                "attention_mask": attention_mask
             }
         else:
-            attention_mask …
-            …
+            if attention_mask is not None and attention_mask.dtype != torch.bool:
+                logger.warning_once(f"The dtype of attention mask ({attention_mask.dtype}) is not bool")
+                attention_mask = None
+            if attention_mask is None:
+                attention_mask = self.get_masks(
+                    input_ids,
+                    device=input_ids.device
+                )
+            if position_ids is None:
+                position_ids = self.get_position_ids(
+                    input_ids,
+                    device=input_ids.device,
+                    mask_positions=mask_positions,
+                    gmask=use_gmask
+                )

             return {
                 "input_ids": input_ids,
@@ -1053,7 +1182,7 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
             shift_logits = lm_logits[..., :-1, :].contiguous()
             shift_labels = labels[..., 1:].contiguous()
             # Flatten the tokens
-            loss_fct = CrossEntropyLoss()
+            loss_fct = CrossEntropyLoss(ignore_index=-100)
             loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

             lm_logits = lm_logits.to(hidden_states.dtype)

@@ -1122,10 +1251,10 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
         for i, (old_query, response) in enumerate(history):
             prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
         prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
-        …
-        …
-        outputs = self.generate(**…
-        outputs = outputs.tolist()[0][len(…
+        inputs = tokenizer([prompt], return_tensors="pt")
+        inputs = inputs.to(self.device)
+        outputs = self.generate(**inputs, **gen_kwargs)
+        outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
         response = tokenizer.decode(outputs)
         response = self.process_response(response)
         history = history + [(query, response)]

@@ -1148,10 +1277,10 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
         for i, (old_query, response) in enumerate(history):
             prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
         prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
-        …
-        …
-        for outputs in self.stream_generate(**…
-            outputs = outputs.tolist()[0][len(…
+        inputs = tokenizer([prompt], return_tensors="pt")
+        inputs = inputs.to(self.device)
+        for outputs in self.stream_generate(**inputs, **gen_kwargs):
+            outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
             response = tokenizer.decode(outputs)
             response = self.process_response(response)
             new_history = history + [(query, response)]
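Both methods now tokenize through a single BatchEncoding and slice the prompt off the output. A hedged usage sketch (standard transformers loading path for this repo):

from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda().eval()

response, history = model.chat(tokenizer, "你好", history=[])
print(response)
for response, history in model.stream_chat(tokenizer, "你好", history=[]):
    pass  # yields a lengthening partial response as generation proceeds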
@@ -1259,7 +1388,19 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
                 break
             yield input_ids

-    def quantize(self, bits: int):
+    def quantize(self, bits: int, empty_init=False, **kwargs):
+        if bits == 0:
+            return
+
         from .quantization import quantize
-        …
+
+        if self.quantized:
+            logger.info("Already quantized.")
+            return self
+
+        self.quantized = True
+
+        self.config.quantization_bit = bits
+
+        self.transformer = quantize(self.transformer, bits, empty_init=empty_init, **kwargs)
         return self
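Quantization can now be requested two ways: at load time through config.quantization_bit (which passes empty_init=True, so no fp16 weights are materialized first), or explicitly after loading. A hedged sketch:

from transformers import AutoModel

model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.quantize(4)  # swap Linear layers for 4-bit QuantizedLinear
model = model.quantize(4)  # guarded: logs "Already quantized." and returns self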
quantization.py CHANGED

@@ -5,20 +5,51 @@ import bz2
 import torch
 import base64
 import ctypes
+from transformers.utils import logging

 from typing import List
-from …
+from functools import partial
+
+logger = logging.get_logger(__name__)
+
+try:
+    from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
+
+    class Kernel:
+        def __init__(self, code: bytes, function_names: List[str]):
+            self.code = code
+            self._function_names = function_names
+            self._cmodule = LazyKernelCModule(self.code)
+
+            for name in self._function_names:
+                setattr(self, name, KernelFunction(self._cmodule, name))
+
+    quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//…"  # multi-kilobyte base64 blob, elided here; unchanged apart from moving into the try block
+
+    kernels = Kernel(
+        bz2.decompress(base64.b64decode(quantization_code)),
+        [
+            "int4WeightCompression",
+            "int4WeightExtractionFloat",
+            "int4WeightExtractionHalf",
+            "int8WeightExtractionFloat",
+            "int8WeightExtractionHalf",
+        ],
+    )
+except Exception as exception:
+    kernels = None
+    logger.warning("Failed to load cpm_kernels:" + str(exception))


 class W8A16Linear(torch.autograd.Function):
     @staticmethod
     def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
         ctx.inp_shape = inp.size()
-        ctx.weight_shape = quant_w.size()
         ctx.weight_bit_width = weight_bit_width
         out_features = quant_w.size(0)
         inp = inp.contiguous().view(-1, inp.size(-1))
         weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
+        ctx.weight_shape = weight.size()
         output = inp.mm(weight.t())
         ctx.save_for_backward(inp, quant_w, scale_w)
         return output.view(*(ctx.inp_shape[:-1] + (out_features,)))
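W8A16Linear dequantizes int8/int4 weights to fp16 on the fly and runs a plain matmul. The quantization scheme itself (used by QuantizedLinear below) is symmetric and per-output-row; a standalone sketch of the arithmetic:

import torch

w = torch.randn(4, 8)  # (out_features, in_features), fp32 for illustration
bits = 8
scale = w.abs().max(dim=-1).values / (2 ** (bits - 1) - 1)  # one scale per row
q = torch.round(w / scale[:, None]).to(torch.int8)          # quantize
w_hat = q.half() * scale[:, None].half()                    # dequantize
print((w - w_hat.float()).abs().max())  # small reconstruction error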
@@ -30,31 +61,7 @@ class W8A16Linear(torch.autograd.Function):
         grad_output = grad_output.contiguous().view(-1, weight.size(0))
         grad_input = grad_output.mm(weight)
         grad_weight = grad_output.t().mm(inp)
-        return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None
+        return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None


-class Kernel:
-    def __init__(self, code: bytes, function_names: List[str]):
-        self.code = code
-        self._function_names = function_names
-        self._cmodule = LazyKernelCModule(self.code)
-
-        for name in self._function_names:
-            setattr(self, name, KernelFunction(self._cmodule, name))
-
-
-quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//…"  # same blob as above, previously at module level
-
-kernels = Kernel(
-    bz2.decompress(base64.b64decode(quantization_code)),
-    [
-        "int4WeightCompression",
-        "int4WeightExtractionFloat",
-        "int4WeightExtractionHalf",
-        "int8WeightExtractionFloat",
-        "int8WeightExtractionHalf",
-    ],
-)


 def compress_int4_weight(weight: torch.Tensor):  # (n, m)
@@ -111,18 +118,18 @@ def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):


 class QuantizedLinear(Linear):
-    def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, *args, **kwargs):
+    def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, empty_init=False, *args, **kwargs):
         super(QuantizedLinear, self).__init__(*args, **kwargs)
         self.weight_bit_width = weight_bit_width

         shape = self.weight.shape
         del self.weight

-        if weight_tensor is None:
+        if weight_tensor is None or empty_init:
             self.weight = torch.empty(
                 shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs["device"]
             )
-            self.weight_scale = torch.empty(shape[0], dtype=kwargs["…
+            self.weight_scale = torch.empty(shape[0], dtype=kwargs["dtype"], device=kwargs["device"])
         else:
             self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half()
             self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8)

@@ -131,7 +138,10 @@ class QuantizedLinear(Linear):

         self.weight = Parameter(self.weight.to(kwargs["device"]), requires_grad=False)
         self.weight_scale = Parameter(self.weight_scale.to(kwargs["device"]), requires_grad=False)
-        …
+        if bias_tensor is not None:
+            self.bias = Parameter(bias_tensor.to(kwargs["device"]), requires_grad=False)
+        else:
+            self.bias = None

     def forward(self, input):
         output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)

@@ -140,7 +150,7 @@ class QuantizedLinear(Linear):
         return output


-def quantize(model, weight_bit_width):
+def quantize(model, weight_bit_width, empty_init=False, **kwargs):
    """Replace fp16 linear with quantized linear"""

     for layer in model.layers:

@@ -153,6 +163,7 @@ def quantize(model, weight_bit_width):
             bias=True,
             dtype=torch.half,
             device=layer.attention.query_key_value.weight.device,
+            empty_init=empty_init
         )
         layer.attention.dense = QuantizedLinear(
             weight_bit_width=weight_bit_width,

@@ -163,6 +174,7 @@ def quantize(model, weight_bit_width):
             bias=True,
             dtype=torch.half,
             device=layer.attention.dense.weight.device,
+            empty_init=empty_init
         )
         layer.mlp.dense_h_to_4h = QuantizedLinear(
             weight_bit_width=weight_bit_width,

@@ -173,6 +185,7 @@ def quantize(model, weight_bit_width):
             bias=True,
             dtype=torch.half,
             device=layer.mlp.dense_h_to_4h.weight.device,
+            empty_init=empty_init
         )
         layer.mlp.dense_4h_to_h = QuantizedLinear(
             weight_bit_width=weight_bit_width,

@@ -183,5 +196,6 @@ def quantize(model, weight_bit_width):
             bias=True,
             dtype=torch.half,
             device=layer.mlp.dense_4h_to_h.weight.device,
+            empty_init=empty_init
         )
     return model
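The empty_init flag exists for loading checkpoints that already contain quantized weights: there is no fp16 tensor to quantize at construction time, so only empty int8 buffers are allocated and load_state_dict() fills them afterwards. A hedged sketch of that flow (the checkpoint path is hypothetical):

import torch

# model.transformer holds the GLM blocks whose Linear layers get swapped
model.transformer = quantize(model.transformer, 4, empty_init=True)
state_dict = torch.load("chatglm-6b-int4.pt")  # hypothetical quantized checkpoint
model.load_state_dict(state_dict)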
| 5 |
import torch
|
| 6 |
import base64
|
| 7 |
import ctypes
|
| 8 |
+
from transformers.utils import logging
|
| 9 |
|
| 10 |
from typing import List
|
| 11 |
+
from functools import partial
|
| 12 |
+
|
| 13 |
+
logger = logging.get_logger(__name__)
|
| 14 |
+
|
| 15 |
+
try:
|
| 16 |
+
from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
|
| 17 |
+
|
| 18 |
+
class Kernel:
|
| 19 |
+
def __init__(self, code: bytes, function_names: List[str]):
|
| 20 |
+
self.code = code
|
| 21 |
+
self._function_names = function_names
|
| 22 |
+
self._cmodule = LazyKernelCModule(self.code)
|
| 23 |
+
|
| 24 |
+
for name in self._function_names:
|
| 25 |
+
setattr(self, name, KernelFunction(self._cmodule, name))
|
| 26 |
+
|
| 27 |
+
quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+
9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpi
x+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ"
|
| 28 |
+
|
| 29 |
+
kernels = Kernel(
|
| 30 |
+
bz2.decompress(base64.b64decode(quantization_code)),
|
| 31 |
+
[
|
| 32 |
+
"int4WeightCompression",
|
| 33 |
+
"int4WeightExtractionFloat",
|
| 34 |
+
"int4WeightExtractionHalf",
|
| 35 |
+
"int8WeightExtractionFloat",
|
| 36 |
+
"int8WeightExtractionHalf",
|
| 37 |
+
],
|
| 38 |
+
)
|
| 39 |
+
except Exception as exception:
|
| 40 |
+
kernels = None
|
| 41 |
+
logger.warning("Failed to load cpm_kernels:" + str(exception))
|
| 42 |
|
| 43 |
|
| 44 |
class W8A16Linear(torch.autograd.Function):
|
| 45 |
@staticmethod
|
| 46 |
def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
|
| 47 |
ctx.inp_shape = inp.size()
|
|
|
|
| 48 |
ctx.weight_bit_width = weight_bit_width
|
| 49 |
out_features = quant_w.size(0)
|
| 50 |
inp = inp.contiguous().view(-1, inp.size(-1))
|
| 51 |
weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
|
| 52 |
+
ctx.weight_shape = weight.size()
|
| 53 |
output = inp.mm(weight.t())
|
| 54 |
ctx.save_for_backward(inp, quant_w, scale_w)
|
| 55 |
return output.view(*(ctx.inp_shape[:-1] + (out_features,)))
|
|
|
|
| 61 |
grad_output = grad_output.contiguous().view(-1, weight.size(0))
|
| 62 |
grad_input = grad_output.mm(weight)
|
| 63 |
grad_weight = grad_output.t().mm(inp)
|
| 64 |
+
return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 65 |
|
| 66 |
|
| 67 |
def compress_int4_weight(weight: torch.Tensor): # (n, m)
|
|
|
|
| 118 |
|
| 119 |
|
| 120 |
class QuantizedLinear(Linear):
|
| 121 | +     def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, empty_init=False, *args, **kwargs):
| 122 |         super(QuantizedLinear, self).__init__(*args, **kwargs)
| 123 |         self.weight_bit_width = weight_bit_width
| 124 |
| 125 |         shape = self.weight.shape
| 126 |         del self.weight
| 127 |
| 128 | +         if weight_tensor is None or empty_init:
| 129 |             self.weight = torch.empty(
| 130 |                 shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs["device"]
| 131 |             )
| 132 | +             self.weight_scale = torch.empty(shape[0], dtype=kwargs["dtype"], device=kwargs["device"])
| 133 |         else:
| 134 |             self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half()
| 135 |             self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8)
|
| 138 |
| 139 |         self.weight = Parameter(self.weight.to(kwargs["device"]), requires_grad=False)
| 140 |         self.weight_scale = Parameter(self.weight_scale.to(kwargs["device"]), requires_grad=False)
| 141 | +         if bias_tensor is not None:
| 142 | +             self.bias = Parameter(bias_tensor.to(kwargs["device"]), requires_grad=False)
| 143 | +         else:
| 144 | +             self.bias = None
| 145 |
| 146 |     def forward(self, input):
| 147 |         output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
|
| 150 |         return output
| 151 |
| 152 |
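The `else` branch is plain symmetric absmax quantization, done per output row: the scale is the row's largest absolute weight divided by the largest representable integer, 2^(bit_width - 1) - 1 (127 for int8, 7 for int4), and the weights are rounded to integer multiples of that scale. A sketch of the round trip, mirroring the two lines above:

    import torch

    def absmax_quantize(weight: torch.Tensor, weight_bit_width: int = 8):
        # Per-row scale: the largest |w| maps to the largest representable integer.
        weight_scale = weight.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)
        quant_w = torch.round(weight / weight_scale[:, None]).to(torch.int8)
        return quant_w, weight_scale

    w = torch.randn(4, 16)
    q, scale = absmax_quantize(w, weight_bit_width=8)
    w_hat = q.float() * scale[:, None]        # dequantize
    print((w - w_hat).abs().max())            # round-off bounded by scale / 2

Note that even at `weight_bit_width=4` the rounded values still occupy one int8 each at this point; the packing down to two-per-byte presumably happens in the elided lines via `compress_int4_weight` above.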
| 153 | + def quantize(model, weight_bit_width, empty_init=False, **kwargs):
| 154 |     """Replace fp16 linear with quantized linear"""
| 155 |
| 156 |     for layer in model.layers:
|
| 163 |             bias=True,
| 164 |             dtype=torch.half,
| 165 |             device=layer.attention.query_key_value.weight.device,
| 166 | +             empty_init=empty_init
| 167 |         )
| 168 |         layer.attention.dense = QuantizedLinear(
| 169 |             weight_bit_width=weight_bit_width,
|
| 174 |             bias=True,
| 175 |             dtype=torch.half,
| 176 |             device=layer.attention.dense.weight.device,
| 177 | +             empty_init=empty_init
| 178 |         )
| 179 |         layer.mlp.dense_h_to_4h = QuantizedLinear(
| 180 |             weight_bit_width=weight_bit_width,
|
| 185 |             bias=True,
| 186 |             dtype=torch.half,
| 187 |             device=layer.mlp.dense_h_to_4h.weight.device,
| 188 | +             empty_init=empty_init
| 189 |         )
| 190 |         layer.mlp.dense_4h_to_h = QuantizedLinear(
| 191 |             weight_bit_width=weight_bit_width,
|
| 196 |             bias=True,
| 197 |             dtype=torch.half,
| 198 |             device=layer.mlp.dense_4h_to_h.weight.device,
| 199 | +             empty_init=empty_init
| 200 |         )
| 201 |     return model
|
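Threading `empty_init` through every `QuantizedLinear` means `quantize()` can now be used in two modes. A hedged usage sketch (assuming a model object whose transformer layers sit at `model.layers`, as the loop above expects):

    # Mode 1: quantize real fp16 weights in place (rounding happens now).
    model = quantize(model, weight_bit_width=4)

    # Mode 2: allocate empty int8 buffers only, to be filled later by
    # load_state_dict() from an already-quantized checkpoint.
    model = quantize(model, weight_bit_width=4, empty_init=True)

The second mode is what makes loading a pre-quantized checkpoint possible without first materializing the full-precision weights.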
tokenization_chatglm.py
CHANGED
|
@@ -1,17 +1,14 @@
|
| 1 | """Tokenization classes for ChatGLM."""
| 2 | - import sys
| 3 | - import unicodedata
| 4 | from typing import List, Optional, Union
| 5 | - from functools import lru_cache
| 6 | import os
| 7 | - import collections
| 8 | - import re
| 9 |
| 10 | from transformers.tokenization_utils import PreTrainedTokenizer
| 11 | from icetk.text_tokenizer import TextTokenizer
| 12 | - from icetk.utils import auto_create
| 13 | import icetk.sentencepiece_model_pb2 as sp_model
| 14 | - from transformers.utils import logging
| 15 |
| 16 | logger = logging.get_logger(__name__)
| 17 |
@@ -170,7 +167,7 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
|
| 170 |
| 171 |     vocab_files_names = {"vocab_file": "ice_text.model"}
| 172 |     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
| 173 | -     model_input_names = ["input_ids"]
| 174 |
| 175 |     def __init__(
| 176 |         self,
|
@@ -200,7 +197,7 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
|
| 200 |         self.eos_token = eos_token
| 201 |         self.eop_token = eop_token
| 202 |         self.mask_token = mask_token
| 203 | -         self.gMASK_token = gmask_token
| 204 |
| 205 |         self.sp_tokenizer = SPTokenizer(vocab_file)
| 206 |
|
@@ -321,10 +318,9 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
|
| 321 |         Returns:
| 322 |             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
| 323 |         """
| 324 | -         if token_ids_1 is not None:
| 325 | -             token_ids_0 += token_ids_1
| 326 |         mask_ids = self.sp_tokenizer[self.mask_token]
| 327 | -         gmask_ids = self.sp_tokenizer[self.gMASK_token]
| 328 |         if mask_ids not in token_ids_0 and gmask_ids not in token_ids_0:
| 329 |             token_ids_0 += [gmask_ids]
| 330 |
|
@@ -333,4 +329,101 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
|
| 333 |
| 334 |         token_ids_0 += [self.sp_tokenizer[self.bos_token]]
| 335 |
| 336 |         return token_ids_0
|
| 1 | """Tokenization classes for ChatGLM."""
| 2 | from typing import List, Optional, Union
| 3 | import os
| 4 |
| 5 | from transformers.tokenization_utils import PreTrainedTokenizer
| 6 | from icetk.text_tokenizer import TextTokenizer
| 7 | import icetk.sentencepiece_model_pb2 as sp_model
| 8 | + from transformers.utils import logging, PaddingStrategy
| 9 | + from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
| 10 | + from typing import Dict
| 11 | + import numpy as np
| 12 |
| 13 | logger = logging.get_logger(__name__)
| 14 |
|
| 167 |
| 168 |     vocab_files_names = {"vocab_file": "ice_text.model"}
| 169 |     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
| 170 | +     model_input_names = ["input_ids", "attention_mask", "position_ids"]
| 171 |
| 172 |     def __init__(
| 173 |         self,
|
| 197 |         self.eos_token = eos_token
| 198 |         self.eop_token = eop_token
| 199 |         self.mask_token = mask_token
| 200 | +         self.gmask_token = gmask_token
| 201 |
| 202 |         self.sp_tokenizer = SPTokenizer(vocab_file)
| 203 |
|
| 318 |         Returns:
| 319 |             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
| 320 |         """
| 321 |         mask_ids = self.sp_tokenizer[self.mask_token]
| 322 | +         gmask_ids = self.sp_tokenizer[self.gmask_token]
| 323 | +         eop_id = self.sp_tokenizer[self.eop_token]
| 324 |         if mask_ids not in token_ids_0 and gmask_ids not in token_ids_0:
| 325 |             token_ids_0 += [gmask_ids]
| 326 |
|
| 329 |
| 330 |         token_ids_0 += [self.sp_tokenizer[self.bos_token]]
| 331 |
| 332 | +         if token_ids_1 is not None:
| 333 | +             if not token_ids_1 or token_ids_1[-1] != eop_id:
| 334 | +                 token_ids_1 += [eop_id]
| 335 | +             token_ids_0 += token_ids_1
| 336 | +
| 337 |         return token_ids_0
|
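Net effect of this hunk: the response segment is no longer spliced in before the special tokens; instead the prompt is closed with gMASK and BOS, and `token_ids_1` follows, guaranteed to end in EOP. A small symbolic sketch of the new layout (string stand-ins for the real sp_tokenizer ids):

    GMASK, BOS, EOP = "[gMASK]", "<sop>", "<eop>"   # symbolic stand-ins for token ids

    def build_pair(token_ids_0, token_ids_1=None):
        # Simplified mirror of the new-side logic (the real code also checks [MASK]).
        if GMASK not in token_ids_0:
            token_ids_0 = token_ids_0 + [GMASK]
        token_ids_0 = token_ids_0 + [BOS]
        if token_ids_1 is not None:
            if not token_ids_1 or token_ids_1[-1] != EOP:
                token_ids_1 = token_ids_1 + [EOP]
            token_ids_0 = token_ids_0 + token_ids_1
        return token_ids_0

    print(build_pair(["q1", "q2"], ["a1", "a2"]))
    # ['q1', 'q2', '[gMASK]', '<sop>', 'a1', 'a2', '<eop>']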
| 338 | +
| 339 | +     def _pad(
| 340 | +         self,
| 341 | +         encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
| 342 | +         max_length: Optional[int] = None,
| 343 | +         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
| 344 | +         pad_to_multiple_of: Optional[int] = None,
| 345 | +         return_attention_mask: Optional[bool] = None,
| 346 | +     ) -> dict:
| 347 | +         """
| 348 | +         Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
| 349 | +
| 350 | +         Args:
| 351 | +             encoded_inputs:
| 352 | +                 Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
| 353 | +             max_length: maximum length of the returned list and optionally padding length (see below).
| 354 | +                 Will truncate by taking into account the special tokens.
| 355 | +             padding_strategy: PaddingStrategy to use for padding.
| 356 | +
| 357 | +                 - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
| 358 | +                 - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
| 359 | +                 - PaddingStrategy.DO_NOT_PAD: Do not pad
| 360 | +                 The tokenizer padding sides are defined in self.padding_side:
| 361 | +
| 362 | +                 - 'left': pads on the left of the sequences
| 363 | +                 - 'right': pads on the right of the sequences
| 364 | +             pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
| 365 | +                 This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
| 366 | +                 `>= 7.5` (Volta).
| 367 | +             return_attention_mask:
| 368 | +                 (optional) Set to False to avoid returning attention mask (default: set to model specifics)
| 369 | +         """
| 370 | +         # Load from model defaults
| 371 | +         bos_token_id = self.sp_tokenizer[self.bos_token]
| 372 | +         mask_token_id = self.sp_tokenizer[self.mask_token]
| 373 | +         gmask_token_id = self.sp_tokenizer[self.gmask_token]
| 374 | +         assert self.padding_side == "left"
| 375 | +
| 376 | +         required_input = encoded_inputs[self.model_input_names[0]]
| 377 | +         seq_length = len(required_input)
| 378 | +
| 379 | +         if padding_strategy == PaddingStrategy.LONGEST:
| 380 | +             max_length = len(required_input)
| 381 | +
| 382 | +         if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
| 383 | +             max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
| 384 | +
| 385 | +         needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
| 386 | +
| 387 | +         # Initialize attention mask if not present.
| 388 | +         if max_length is not None:
| 389 | +             if "attention_mask" not in encoded_inputs:
| 390 | +                 if bos_token_id in required_input:
| 391 | +                     context_length = required_input.index(bos_token_id)
| 392 | +                 else:
| 393 | +                     context_length = seq_length
| 394 | +                 attention_mask = np.ones((1, seq_length, seq_length))
| 395 | +                 attention_mask = np.tril(attention_mask)
| 396 | +                 attention_mask[:, :, :context_length] = 1
| 397 | +                 attention_mask = np.bool_(attention_mask < 0.5)
| 398 | +                 encoded_inputs["attention_mask"] = attention_mask
| 399 | +
| 400 | +             if "position_ids" not in encoded_inputs:
| 401 | +                 position_ids = np.arange(seq_length, dtype=np.int64)
| 402 | +                 mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id
| 403 | +                 if mask_token in required_input:
| 404 | +                     mask_position = required_input.index(mask_token)
| 405 | +                     position_ids[context_length:] = mask_position
| 406 | +                 block_position_ids = np.concatenate(
| 407 | +                     [np.zeros(context_length, dtype=np.int64),
| 408 | +                      np.arange(1, seq_length - context_length + 1, dtype=np.int64)])
| 409 | +                 encoded_inputs["position_ids"] = np.stack([position_ids, block_position_ids], axis=0)
| 410 | +
| 411 | +         if needs_to_be_padded:
| 412 | +             difference = max_length - len(required_input)
| 413 | +
| 414 | +             if "attention_mask" in encoded_inputs:
| 415 | +                 encoded_inputs["attention_mask"] = np.pad(encoded_inputs["attention_mask"],
| 416 | +                                                           pad_width=[(0, 0), (difference, 0), (difference, 0)],
| 417 | +                                                           mode='constant', constant_values=True)
| 418 | +             if "token_type_ids" in encoded_inputs:
| 419 | +                 encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
| 420 | +                     "token_type_ids"
| 421 | +                 ]
| 422 | +             if "special_tokens_mask" in encoded_inputs:
| 423 | +                 encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
| 424 | +             if "position_ids" in encoded_inputs:
| 425 | +                 encoded_inputs["position_ids"] = np.pad(encoded_inputs["position_ids"],
| 426 | +                                                         pad_width=[(0, 0), (difference, 0)])
| 427 | +             encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
| 428 | +
| 429 | +         return encoded_inputs
|
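The interesting part of `_pad` is the GLM-style 2-D position encoding it materializes: row one counts positions but freezes at the mask position for everything after BOS, while row two is zero over the context and counts generation steps. A standalone numpy sketch of just that construction, with illustrative token ids:

    import numpy as np

    # Illustrative sequence: three context tokens, then [gMASK] and <sop>/BOS,
    # then two generated tokens. The ids are examples, not the vocabulary's.
    gmask_id, bos_id = 130001, 130004
    seq = [10, 11, 12, gmask_id, bos_id, 20, 21]

    seq_length = len(seq)
    context_length = seq.index(bos_id)      # everything before BOS is context
    mask_position = seq.index(gmask_id)

    position_ids = np.arange(seq_length, dtype=np.int64)
    position_ids[context_length:] = mask_position        # row 1 freezes at [gMASK]

    block_position_ids = np.concatenate([
        np.zeros(context_length, dtype=np.int64),        # row 2: 0 over the context,
        np.arange(1, seq_length - context_length + 1, dtype=np.int64),  # then 1..n
    ])

    print(np.stack([position_ids, block_position_ids], axis=0))
    # [[0 1 2 3 3 3 3]
    #  [0 0 0 0 1 2 3]]

Because padding is asserted to be on the left, the pad ids and the extra mask/position columns are all prepended, so these indices stay aligned with the real tokens at the right edge of each batch row.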