hemantn committed
Commit ddafba1 · 1 parent: 7ebbadf

Remove ablang2 folder - repository now fully self-contained

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. {ablang2/models/ablang2/__pycache__ → __pycache__}/ablang.cpython-310.pyc +0 -0
  2. ablang2/__init__.py +0 -1
  3. ablang2/__pycache__/__init__.cpython-310.pyc +0 -0
  4. ablang2/__pycache__/adapter.cpython-310.pyc +0 -0
  5. ablang2/__pycache__/configuration_ablang2paired.cpython-310.pyc +0 -0
  6. ablang2/__pycache__/load_model.cpython-310.pyc +0 -0
  7. ablang2/__pycache__/pretrained.cpython-310.pyc +0 -0
  8. ablang2/adapter.py +0 -306
  9. ablang2/alignment.py +0 -87
  10. ablang2/config.json +0 -18
  11. ablang2/configuration_ablang2paired.py +0 -31
  12. ablang2/encodings.py +0 -97
  13. ablang2/environment.yaml +0 -44
  14. ablang2/extra_utils.py +0 -165
  15. ablang2/hparams.json +0 -1
  16. ablang2/load_model.py +0 -119
  17. ablang2/model.pt +0 -3
  18. ablang2/modeling_ablang2paired.py +0 -81
  19. ablang2/models/__init__.py +0 -0
  20. ablang2/models/__pycache__/__init__.cpython-310.pyc +0 -0
  21. ablang2/models/__pycache__/__init__.cpython-312.pyc +0 -0
  22. ablang2/models/ablang1/__init__.py +0 -3
  23. ablang2/models/ablang1/__pycache__/__init__.cpython-310.pyc +0 -0
  24. ablang2/models/ablang1/__pycache__/__init__.cpython-312.pyc +0 -0
  25. ablang2/models/ablang1/__pycache__/embedding.cpython-310.pyc +0 -0
  26. ablang2/models/ablang1/__pycache__/embedding.cpython-312.pyc +0 -0
  27. ablang2/models/ablang1/__pycache__/encoderblocks.cpython-310.pyc +0 -0
  28. ablang2/models/ablang1/__pycache__/encoderblocks.cpython-312.pyc +0 -0
  29. ablang2/models/ablang1/__pycache__/extra_fns.cpython-310.pyc +0 -0
  30. ablang2/models/ablang1/__pycache__/extra_fns.cpython-312.pyc +0 -0
  31. ablang2/models/ablang1/__pycache__/fairseq_mha.cpython-310.pyc +0 -0
  32. ablang2/models/ablang1/__pycache__/fairseq_mha.cpython-312.pyc +0 -0
  33. ablang2/models/ablang1/__pycache__/model.cpython-310.pyc +0 -0
  34. ablang2/models/ablang1/__pycache__/model.cpython-312.pyc +0 -0
  35. ablang2/models/ablang1/__pycache__/pretrained.cpython-310.pyc +0 -0
  36. ablang2/models/ablang1/__pycache__/pretrained.cpython-312.pyc +0 -0
  37. ablang2/models/ablang1/__pycache__/tokenizers.cpython-310.pyc +0 -0
  38. ablang2/models/ablang1/__pycache__/tokenizers.cpython-312.pyc +0 -0
  39. ablang2/models/ablang1/embedding.py +0 -36
  40. ablang2/models/ablang1/encoderblocks.py +0 -141
  41. ablang2/models/ablang1/extra_fns.py +0 -26
  42. ablang2/models/ablang1/fairseq_mha.py +0 -1306
  43. ablang2/models/ablang1/model.py +0 -102
  44. ablang2/models/ablang1/pretrained.py +0 -358
  45. ablang2/models/ablang1/tokenizers.py +0 -50
  46. ablang2/models/ablang2/__init__.py +0 -0
  47. ablang2/models/ablang2/__pycache__/__init__.cpython-310.pyc +0 -0
  48. ablang2/models/ablang2/__pycache__/__init__.cpython-312.pyc +0 -0
  49. ablang2/models/ablang2/__pycache__/ablang.cpython-312.pyc +0 -0
  50. ablang2/models/ablang2/__pycache__/encoderblock.cpython-310.pyc +0 -0
{ablang2/models/ablang2/__pycache__ → __pycache__}/ablang.cpython-310.pyc RENAMED
Binary files a/ablang2/models/ablang2/__pycache__/ablang.cpython-310.pyc and b/__pycache__/ablang.cpython-310.pyc differ
 
ablang2/__init__.py DELETED
@@ -1 +0,0 @@
- from .pretrained import pretrained
 
 
ablang2/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (166 Bytes)
 
ablang2/__pycache__/adapter.cpython-310.pyc DELETED
Binary file (10.4 kB)
 
ablang2/__pycache__/configuration_ablang2paired.cpython-310.pyc DELETED
Binary file (1.06 kB)
 
ablang2/__pycache__/load_model.cpython-310.pyc DELETED
Binary file (3.22 kB)
 
ablang2/__pycache__/pretrained.cpython-310.pyc DELETED
Binary file (3.27 kB)
 
ablang2/adapter.py DELETED
@@ -1,306 +0,0 @@
- from ablang2.pretrained_utils.restoration import AbRestore
- from ablang2.pretrained_utils.encodings import AbEncoding
- from ablang2.pretrained_utils.alignment import AbAlignment
- from ablang2.pretrained_utils.scores import AbScores
- import torch
- import numpy as np
- from ablang2.pretrained_utils.extra_utils import res_to_seq, res_to_list
-
- class HuggingFaceTokenizerAdapter:
-     def __init__(self, tokenizer, device):
-         self.tokenizer = tokenizer
-         self.device = device
-         self.pad_token_id = tokenizer.pad_token_id
-         self.mask_token_id = getattr(tokenizer, 'mask_token_id', None) or tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
-         self.vocab = tokenizer.get_vocab() if hasattr(tokenizer, 'get_vocab') else tokenizer.vocab
-         self.inv_vocab = {v: k for k, v in self.vocab.items()}
-         self.all_special_tokens = tokenizer.all_special_tokens
-
-     def __call__(self, seqs, pad=True, w_extra_tkns=False, device=None, mode=None):
-         tokens = self.tokenizer(seqs, padding=True, return_tensors='pt')
-         input_ids = tokens['input_ids'].to(self.device if device is None else device)
-         if mode == 'decode':
-             # seqs is a tensor of token ids
-             if isinstance(seqs, torch.Tensor):
-                 seqs = seqs.cpu().numpy()
-             decoded = []
-             for i, seq in enumerate(seqs):
-                 chars = [self.inv_vocab.get(int(t), '') for t in seq if self.inv_vocab.get(int(t), '') not in {'-', '*', '<', '>'} and self.inv_vocab.get(int(t), '') != '']
-                 # Use res_to_seq for formatting, pass (sequence, length) tuple as in original code
-                 # The length is not always available, so use len(chars) as fallback
-                 formatted = res_to_seq([ ''.join(chars), len(chars) ], mode='restore')
-                 decoded.append(formatted)
-             return decoded
-         return input_ids
-
- class HFAbRestore(AbRestore):
-     def __init__(self, hf_model, hf_tokenizer, spread=11, device='cpu', ncpu=1):
-         super().__init__(spread=spread, device=device, ncpu=ncpu)
-         self.used_device = device
-         self._hf_model = hf_model
-         self.tokenizer = HuggingFaceTokenizerAdapter(hf_tokenizer, device)
-
-     @property
-     def AbLang(self):
-         def model_call(x):
-             output = self._hf_model(x)
-             if hasattr(output, 'last_hidden_state'):
-                 return output.last_hidden_state
-             return output
-         return model_call
-
- def add_angle_brackets(seq):
-     # Assumes input is 'VH|VL' or 'VH|' or '|VL'
-     if '|' in seq:
-         vh, vl = seq.split('|', 1)
-     else:
-         vh, vl = seq, ''
-     return f"<{vh}>|<{vl}>"
-
- class AbLang2PairedHuggingFaceAdapter(AbEncoding, AbRestore, AbAlignment, AbScores):
-     """
-     Adapter to use pretrained utilities with a HuggingFace-loaded ablang2_paired model and tokenizer.
-     Automatically uses CUDA if available, otherwise CPU.
-     """
-     def __init__(self, model, tokenizer, device=None, ncpu=1):
-         super().__init__()
-         if device is None:
-             self.used_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-         else:
-             self.used_device = torch.device(device)
-         self.AbLang = model  # HuggingFace model instance
-         self.tokenizer = tokenizer
-         self.AbLang.to(self.used_device)
-         self.AbLang.eval()
-         # Always get AbRep from the underlying model
-         if hasattr(self.AbLang, 'model') and hasattr(self.AbLang.model, 'AbRep'):
-             self.AbRep = self.AbLang.model.AbRep
-         else:
-             raise AttributeError("Could not find AbRep in the HuggingFace model or its underlying model.")
-         self.ncpu = ncpu
-         self.spread = 11  # For compatibility with original utilities
-         # The following is no longer needed since all_special_tokens now returns IDs directly
-         # self.tokenizer.all_special_token_ids = [
-         #     self.tokenizer.convert_tokens_to_ids(tok) for tok in self.tokenizer.all_special_tokens
-         # ]
-         # self.tokenizer._all_special_tokens_str = self.tokenizer.all_special_tokens
-         # self.tokenizer.all_special_tokens = [
-         #     self.tokenizer.convert_tokens_to_ids(tok) for tok in self.tokenizer._all_special_tokens_str
-         # ]
-
-     def freeze(self):
-         self.AbLang.eval()
-
-     def unfreeze(self):
-         self.AbLang.train()
-
-     def _encode_sequences(self, seqs):
-         # Use HuggingFace-style padding and return PyTorch tensors
-         tokens = self.tokenizer(seqs, padding=True, return_tensors='pt')
-         tokens = extract_input_ids(tokens, self.used_device)
-         return self.AbRep(tokens).last_hidden_states.detach()
-
-     def _predict_logits(self, seqs):
-         tokens = self.tokenizer(seqs, padding=True, return_tensors='pt')
-         tokens = extract_input_ids(tokens, self.used_device)
-         output = self.AbLang(tokens)
-         if hasattr(output, 'last_hidden_state'):
-             return output.last_hidden_state.detach()
-         return output.detach()
-
-     def _preprocess_labels(self, labels):
-         labels = extract_input_ids(labels, self.used_device)
-         return labels
-
-     def __call__(self, seqs, mode='seqcoding', align=False, stepwise_masking=False, fragmented=False, batch_size=50):
-         """
-         Use different modes for different usecases, mimicking the original pretrained class.
-         """
-         from ablang2.pretrained import format_seq_input
-
-         valid_modes = [
-             'rescoding', 'seqcoding', 'restore', 'likelihood', 'probability',
-             'pseudo_log_likelihood', 'confidence'
-         ]
-         if mode not in valid_modes:
-             raise SyntaxError(f"Given mode doesn't exist. Please select one of the following: {valid_modes}.")
-
-         seqs, chain = format_seq_input(seqs, fragmented=fragmented)
-
-         if align:
-             numbered_seqs, seqs, number_alignment = self.number_sequences(
-                 seqs, chain=chain, fragmented=fragmented
-             )
-         else:
-             numbered_seqs = None
-             number_alignment = None
-
-         subset_list = []
-         for subset in [seqs[x:x+batch_size] for x in range(0, len(seqs), batch_size)]:
-             subset_list.append(getattr(self, mode)(subset, align=align, stepwise_masking=stepwise_masking))
-
-         return self.reformat_subsets(
-             subset_list,
-             mode=mode,
-             align=align,
-             numbered_seqs=numbered_seqs,
-             seqs=seqs,
-             number_alignment=number_alignment,
-         )
-
-     def pseudo_log_likelihood(self, seqs, **kwargs):
-         """
-         Original (non-vectorized) pseudo log-likelihood computation matching notebook behavior.
-         """
-         # Format input: join VH and VL with '|'
-         formatted_seqs = []
-         for s in seqs:
-             if isinstance(s, (list, tuple)):
-                 formatted_seqs.append('|'.join(s))
-             else:
-                 formatted_seqs.append(s)
-
-         # Tokenize all sequences in batch
-         labels = self.tokenizer(
-             formatted_seqs, padding=True, return_tensors='pt'
-         )
-         labels = extract_input_ids(labels, self.used_device)
-
-         # Convert special tokens to IDs
-         if isinstance(self.tokenizer.all_special_tokens[0], int):
-             special_token_ids = set(self.tokenizer.all_special_tokens)
-         else:
-             special_token_ids = set(self.tokenizer.convert_tokens_to_ids(tok) for tok in self.tokenizer.all_special_tokens)
-         pad_token_id = self.tokenizer.pad_token_id
-
-         mask_token_id = getattr(self.tokenizer, 'mask_token_id', None)
-         if mask_token_id is None:
-             mask_token_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
-
-         plls = []
-         with torch.no_grad():
-             for i, seq_label in enumerate(labels):
-                 seq_pll = []
-                 for j, token_id in enumerate(seq_label):
-                     if token_id.item() in special_token_ids or token_id.item() == pad_token_id:
-                         continue
-                     masked = seq_label.clone()
-                     masked[j] = mask_token_id
-                     logits = self.AbLang(masked.unsqueeze(0))
-                     if hasattr(logits, 'last_hidden_state'):
-                         logits = logits.last_hidden_state
-                     logits = logits[0, j]
-                     nll = torch.nn.functional.cross_entropy(
-                         logits.unsqueeze(0), token_id.unsqueeze(0), reduction="none"
-                     )
-                     seq_pll.append(-nll.item())
-                 if seq_pll:
-                     plls.append(np.mean(seq_pll))
-                 else:
-                     plls.append(float('nan'))
-         return np.array(plls)
-
-     def confidence(self, seqs, **kwargs):
-         """Confidence calculation - match original ablang2 implementation by excluding all special tokens from loss."""
-         # Format input: join VH and VL with '|'
-         formatted_seqs = []
-         for s in seqs:
-             if isinstance(s, (list, tuple)):
-                 formatted_seqs.append('|'.join(s))
-             else:
-                 formatted_seqs.append(s)
-
-         plls = []
-         for seq in formatted_seqs:
-             tokens = self.tokenizer([seq], padding=True, return_tensors='pt')
-             input_ids = extract_input_ids(tokens, self.used_device)
-
-             with torch.no_grad():
-                 output = self.AbLang(input_ids)
-                 if hasattr(output, 'last_hidden_state'):
-                     logits = output.last_hidden_state
-                 else:
-                     logits = output
-
-             # Get the sequence (remove batch dimension)
-             logits = logits[0]  # [seq_len, vocab_size]
-             input_ids = input_ids[0]  # [seq_len]
-
-             # Exclude all special tokens (pad, mask, etc.)
-             if isinstance(self.tokenizer.all_special_tokens[0], int):
-                 special_token_ids = set(self.tokenizer.all_special_tokens)
-             else:
-                 special_token_ids = set(self.tokenizer.convert_tokens_to_ids(tok) for tok in self.tokenizer.all_special_tokens)
-             valid_mask = ~torch.isin(input_ids, torch.tensor(list(special_token_ids), device=input_ids.device))
-
-             if valid_mask.sum() > 0:
-                 valid_logits = logits[valid_mask]
-                 valid_labels = input_ids[valid_mask]
-
-                 # Calculate cross-entropy loss
-                 nll = torch.nn.functional.cross_entropy(
-                     valid_logits,
-                     valid_labels,
-                     reduction="mean"
-                 )
-                 pll = -nll.item()
-             else:
-                 pll = 0.0
-
-             plls.append(pll)
-
-         return np.array(plls, dtype=np.float32)
-
-     def probability(self, seqs, align=False, stepwise_masking=False, **kwargs):
-         """
-         Probability of mutations - applies softmax to logits to get probabilities
-         """
-         # Format input: join VH and VL with '|'
-         formatted_seqs = []
-         for s in seqs:
-             if isinstance(s, (list, tuple)):
-                 formatted_seqs.append('|'.join(s))
-             else:
-                 formatted_seqs.append(s)
-
-         # Get logits
-         if stepwise_masking:
-             # For stepwise masking, we need to implement it similar to likelihood
-             # This is a simplified version - you might want to implement full stepwise masking
-             logits = self._predict_logits(formatted_seqs)
-         else:
-             logits = self._predict_logits(formatted_seqs)
-
-         # Apply softmax to get probabilities
-         probs = logits.softmax(-1).cpu().numpy()
-
-         if align:
-             return probs
-         else:
-             # Return residue-level probabilities (excluding special tokens)
-             return [res_to_list(state, seq) for state, seq in zip(probs, formatted_seqs)]
-
-     def restore(self, seqs, align=False, **kwargs):
-         hf_abrestore = HFAbRestore(self.AbLang, self.tokenizer, spread=self.spread, device=self.used_device, ncpu=self.ncpu)
-         restored = hf_abrestore.restore(seqs, align=align)
-         # Apply angle brackets formatting
-         if isinstance(restored, np.ndarray):
-             restored = np.array([add_angle_brackets(seq) for seq in restored])
-         else:
-             restored = [add_angle_brackets(seq) for seq in restored]
-         return restored
-
- def extract_input_ids(tokens, device):
-     if hasattr(tokens, 'input_ids'):
-         return tokens.input_ids.to(device)
-     elif isinstance(tokens, dict):
-         if 'input_ids' in tokens:
-             return tokens['input_ids'].to(device)
-         else:
-             for v in tokens.values():
-                 if hasattr(v, 'ndim') or torch.is_tensor(v):
-                     return v.to(device)
-     elif torch.is_tensor(tokens):
-         return tokens.to(device)
-     else:
-         raise ValueError("Could not extract input_ids from tokenizer output")
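Note: the `pseudo_log_likelihood` method above is the core scoring routine of this adapter: each non-special position is masked in turn and the true token is scored from the masked prediction. A minimal standalone sketch of that per-position masking loop, assuming `model` returns raw logits of shape `[1, seq_len, vocab_size]` (the names here are illustrative, not part of the deleted file):

    import numpy as np
    import torch

    def masked_pll(model, input_ids, mask_token_id, special_ids):
        # Mask one position at a time and score the held-out token.
        scores = []
        with torch.no_grad():
            for j, tok in enumerate(input_ids):
                if int(tok) in special_ids:
                    continue
                masked = input_ids.clone()
                masked[j] = mask_token_id
                logits = model(masked.unsqueeze(0))[0, j]  # [vocab_size]
                scores.append(torch.log_softmax(logits, -1)[tok].item())
        return float(np.mean(scores)) if scores else float("nan")

The cross-entropy call in the deleted code computes exactly this `-log_softmax[target]` term, so averaging the negated losses gives the same pseudo log-likelihood.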
ablang2/alignment.py DELETED
@@ -1,87 +0,0 @@
- from dataclasses import dataclass
- import numpy as np
- import torch
-
- from .extra_utils import paired_msa_numbering, unpaired_msa_numbering, create_alignment
-
-
- class AbAlignment:
-
-     def __init__(self, device = 'cpu', ncpu = 1):
-
-         self.device = device
-         self.ncpu = ncpu
-
-     def number_sequences(self, seqs, chain = 'H', fragmented = False):
-         if chain == 'HL':
-             numbered_seqs, seqs, number_alignment = paired_msa_numbering(seqs, fragmented = fragmented, n_jobs = self.ncpu)
-         else:
-             assert chain == 'HL', 'Currently "Align==True" only works for paired sequences. \nPlease use paired sequences or Align=False.'
-             numbered_seqs, seqs, number_alignment = unpaired_msa_numbering(
-                 seqs, chain = chain, fragmented = fragmented, n_jobs = self.ncpu
-             )
-
-         return numbered_seqs, seqs, number_alignment
-
-     def align_encodings(self, encodings, numbered_seqs, seqs, number_alignment):
-
-         aligned_encodings = np.concatenate(
-             [[
-                 create_alignment(
-                     res_embed, numbered_seq, seq, number_alignment
-                 ) for res_embed, numbered_seq, seq in zip(encodings, numbered_seqs, seqs)
-             ]], axis=0
-         )
-         return aligned_encodings
-
-
-     def reformat_subsets(
-         self,
-         subset_list,
-         mode = 'seqcoding',
-         align = False,
-         numbered_seqs = None,
-         seqs = None,
-         number_alignment = None,
-     ):
-
-         if mode in [
-             'seqcoding',
-             'restore',
-             'pseudo_log_likelihood',
-             'confidence'
-         ]:
-             return np.concatenate(subset_list)
-         elif align:
-             subset_list = [
-                 self.align_encodings(
-                     subset,
-                     numbered_seqs[num*len(subset):(num+1)*len(subset)],
-                     seqs[num*len(subset):(num+1)*len(subset)],
-                     number_alignment
-                 ) for num, subset in enumerate(subset_list)
-             ]
-
-             subset = np.concatenate(subset_list)
-
-             return aligned_results(
-                 aligned_seqs = [''.join(alist) for alist in subset[:,:,-1]],
-                 aligned_embeds = subset[:,:,:-1].astype(float),
-                 number_alignment=number_alignment.apply(lambda x: '{}{}'.format(*x[0]), axis=1).values
-             )
-
-         elif not align:
-             return sum(subset_list, [])
-         else:
-             return np.concatenate(subset_list) # this needs to be changed
-
-
- @dataclass
- class aligned_results():
-     """
-     Dataclass used to store output.
-     """
-
-     aligned_seqs: None
-     aligned_embeds: None
-     number_alignment: None
ablang2/config.json DELETED
@@ -1,18 +0,0 @@
- {
-     "model_type": "ablang2-paired",
-     "vocab_size": 26,
-     "hidden_embed_size": 480,
-     "n_attn_heads": 20,
-     "n_encoder_blocks": 12,
-     "padding_tkn": 21,
-     "mask_tkn": 23,
-     "layer_norm_eps": 1e-12,
-     "a_fn": "swiglu",
-     "dropout": 0.0,
-     "tokenizer_class": "AbLang2PairedTokenizer",
-     "auto_map": {
-         "AutoConfig": "configuration_ablang2paired.AbLang2PairedConfig",
-         "AutoModel": "modeling_ablang2paired.AbLang2PairedHFModel",
-         "AutoTokenizer": ["tokenizer_ablang2paired.AbLang2PairedTokenizer", "tokenizer_ablang2paired.AbLang2PairedTokenizer"]
-     }
- }
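Note: the `auto_map` block is what let the Hugging Face Auto classes resolve these custom classes directly from the repository. A usage sketch (the path is a placeholder for wherever this config lived):

    from transformers import AutoConfig, AutoModel

    # trust_remote_code=True is required so the auto_map entries above are honored
    config = AutoConfig.from_pretrained("path/to/ablang2_paired", trust_remote_code=True)
    model = AutoModel.from_pretrained("path/to/ablang2_paired", trust_remote_code=True)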
ablang2/configuration_ablang2paired.py DELETED
@@ -1,31 +0,0 @@
- from transformers import PretrainedConfig
-
- class AbLang2PairedConfig(PretrainedConfig):
-     model_type = "ablang2-paired"
-
-     def __init__(
-         self,
-         vocab_size=26,
-         hidden_embed_size=480,
-         n_attn_heads=20,
-         n_encoder_blocks=12,
-         padding_tkn=21,
-         mask_tkn=23,
-         layer_norm_eps=1e-12,
-         a_fn="swiglu",
-         dropout=0.0,
-         **kwargs
-     ):
-         super().__init__(**kwargs)
-         self.vocab_size = vocab_size
-         self.hidden_embed_size = hidden_embed_size
-         self.hidden_size = hidden_embed_size  # Add this for Hugging Face compatibility
-         self.n_attn_heads = n_attn_heads
-         self.num_attention_heads = n_attn_heads  # Add this for Hugging Face compatibility
-         self.num_hidden_layers = n_encoder_blocks  # Add this for Hugging Face compatibility
-         self.n_encoder_blocks = n_encoder_blocks
-         self.padding_tkn = padding_tkn
-         self.mask_tkn = mask_tkn
-         self.layer_norm_eps = layer_norm_eps
-         self.a_fn = a_fn
-         self.dropout = dropout
ablang2/encodings.py DELETED
@@ -1,97 +0,0 @@
- import numpy as np
- import torch
-
- from .extra_utils import res_to_list, res_to_seq
-
-
- class AbEncoding:
-
-     def __init__(self, device = 'cpu', ncpu = 1):
-
-         self.device = device
-         self.ncpu = ncpu
-
-     def _initiate_abencoding(self, model, tokenizer):
-         self.AbLang = model
-         self.tokenizer = tokenizer
-
-     def _encode_sequences(self, seqs):
-         tokens = self.tokenizer(seqs, pad=True, w_extra_tkns=False, device=self.used_device)
-         with torch.no_grad():
-             return self.AbLang.AbRep(tokens).last_hidden_states
-
-     def _predict_logits(self, seqs):
-         tokens = self.tokenizer(seqs, pad=True, w_extra_tkns=False, device=self.used_device)
-         with torch.no_grad():
-             return self.AbLang(tokens)
-
-     def _predict_logits_with_step_masking(self, seqs):
-
-         tokens = self.tokenizer(seqs, pad=True, w_extra_tkns=False, device=self.used_device)
-
-         logits = []
-         for single_seq_tokens in tokens:
-
-             tkn_len = len(single_seq_tokens)
-             masked_tokens = single_seq_tokens.repeat(tkn_len, 1)
-             for num in range(tkn_len):
-                 masked_tokens[num, num] = self.tokenizer.mask_token
-
-             with torch.no_grad():
-                 logits_tmp = self.AbLang(masked_tokens)
-
-             logits_tmp = torch.stack([logits_tmp[num, num] for num in range(tkn_len)])
-
-             logits.append(logits_tmp)
-
-         return torch.stack(logits, dim=0)
-
-     def seqcoding(self, seqs, **kwargs):
-         """
-         Sequence specific representations
-         """
-
-         encodings = self._encode_sequences(seqs).cpu().numpy()
-
-         lens = np.vectorize(len)(seqs)
-         lens = np.tile(lens.reshape(-1,1,1), (encodings.shape[2], 1))
-
-         return np.apply_along_axis(res_to_seq, 2, np.c_[np.swapaxes(encodings,1,2), lens])
-
-     def rescoding(self, seqs, align=False, **kwargs):
-         """
-         Residue specific representations.
-         """
-         encodings = self._encode_sequences(seqs).cpu().numpy()
-
-         if align: return encodings
-
-         else: return [res_to_list(state, seq) for state, seq in zip(encodings, seqs)]
-
-     def likelihood(self, seqs, align=False, stepwise_masking=False, **kwargs):
-         """
-         Likelihood of mutations
-         """
-         if stepwise_masking:
-             logits = self._predict_logits_with_step_masking(seqs).cpu().numpy()
-         else:
-             logits = self._predict_logits(seqs).cpu().numpy()
-
-         if align: return logits
-
-         else: return [res_to_list(state, seq) for state, seq in zip(logits, seqs)]
-
-     def probability(self, seqs, align=False, stepwise_masking=False, **kwargs):
-         """
-         Probability of mutations
-         """
-         if stepwise_masking:
-             logits = self._predict_logits_with_step_masking(seqs)
-         else:
-             logits = self._predict_logits(seqs)
-         probs = logits.softmax(-1).cpu().numpy()
-
-         if align: return probs
-
-         else: return [res_to_list(state, seq) for state, seq in zip(probs, seqs)]
-
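Note: `seqcoding` reduces the per-residue states to one vector per sequence by averaging over real residues only, which is what `res_to_seq` does in its 'mean' mode. A minimal equivalent, assuming `res_states` is a `[batch, max_len, hidden]` array and `seq_lens` holds the unpadded lengths (names are illustrative):

    import numpy as np

    def mean_seqcoding(res_states, seq_lens):
        # Average each sequence's residue states, ignoring padded positions.
        return np.stack([states[:n].mean(axis=0) for states, n in zip(res_states, seq_lens)])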
ablang2/environment.yaml DELETED
@@ -1,44 +0,0 @@
- name: AbLang
- channels:
-   - conda-forge
-   - pytorch
-   - bioconda
-   - defaults
- dependencies:
-   - python=3.10.18
-   - pip
-   - pytorch=2.5.1
-   - pytorch-cuda=12.4
-   - numpy=2.2.6
-   - pandas=2.3.1
-   - transformers=4.53.3
-   - anarci=2024.05.21
-   - jupyter=7.4.4
-   - notebook=7.4.4
-   - ipython=8.37.0
-   - ipykernel=6.29.5
-   - matplotlib-inline=0.1.7
-   - scikit-learn
-   - matplotlib
-   - seaborn
-   - biopython=1.85
-   - huggingface_hub=0.33.4
-   - tokenizers=0.21.3
-   - safetensors=0.5.3
-   - einops=0.8.1
-   - tqdm=4.67.1
-   - requests=2.32.4
-   - urllib3=2.5.0
-   - certifi=2025.7.14
-   - filelock=3.18.0
-   - fsspec=2025.3.0
-   - packaging=25.0
-   - regex=2024.11.6
-   - sympy=1.13.3
-   - networkx=3.4.2
-   - jinja2=3.1.6
-   - pyyaml=6.0.2
-   - typing_extensions=4.14.1
-   - pip:
-     - numba=0.61.2
-     - llvmlite=0.44.0
ablang2/extra_utils.py DELETED
@@ -1,165 +0,0 @@
- import string, re
- import numpy as np
-
-
- def res_to_list(logits, seq):
-     return logits[:len(seq)]
-
- def res_to_seq(a, mode='mean'):
-     """
-     Function for how we go from n_values for each amino acid to n_values for each sequence.
-
-     We leave out padding tokens.
-     """
-
-     if mode=='sum':
-         return a[0:(int(a[-1]))].sum()
-
-     elif mode=='mean':
-         return a[0:(int(a[-1]))].mean()
-
-     elif mode=='restore':
-         return a[0][0:(int(a[-1]))]
-
- def get_number_alignment(numbered_seqs):
-     """
-     Creates a number alignment from the anarci results.
-     """
-     import pandas as pd
-
-     alist = [pd.DataFrame(aligned_seq, columns = [0,1,'resi']) for aligned_seq in numbered_seqs]
-     unsorted_alignment = pd.concat(alist).drop_duplicates(subset=0)
-     max_alignment = get_max_alignment()
-
-     return max_alignment.merge(unsorted_alignment.query("resi!='-'"), left_on=0, right_on=0)[[0,1]]
-
- def get_max_alignment():
-     """
-     Create maximum possible alignment for sorting
-     """
-     import pandas as pd
-
-     sortlist = [[("<", "")]]
-     for num in range(1, 128+1):
-         if num in [33,61,112]:
-             for char in string.ascii_uppercase[::-1]:
-                 sortlist.append([(num, char)])
-
-             sortlist.append([(num,' ')])
-         else:
-             sortlist.append([(num,' ')])
-             for char in string.ascii_uppercase:
-                 sortlist.append([(num, char)])
-
-     return pd.DataFrame(sortlist + [[(">", "")]])
-
-
- def paired_msa_numbering(ab_seqs, fragmented = False, n_jobs = 10):
-
-     import pandas as pd
-
-     tmp_seqs = [pairs.replace(">", "").replace("<", "").split("|") for pairs in ab_seqs]
-
-     numbered_seqs_heavy, seqs_heavy, number_alignment_heavy = unpaired_msa_numbering(
-         [i[0] for i in tmp_seqs], 'H', fragmented = fragmented, n_jobs = n_jobs
-     )
-     numbered_seqs_light, seqs_light, number_alignment_light = unpaired_msa_numbering(
-         [i[1] for i in tmp_seqs], 'L', fragmented = fragmented, n_jobs = n_jobs
-     )
-
-     number_alignment = pd.concat([
-         number_alignment_heavy,
-         pd.DataFrame([[("|",""), "|"]]),
-         number_alignment_light]
-     ).reset_index(drop=True)
-
-     seqs = [f"{heavy}|{light}" for heavy, light in zip(seqs_heavy, seqs_light)]
-     numbered_seqs = [
-         heavy + [(("|",""), "|", "|")] + light for heavy, light in zip(numbered_seqs_heavy, numbered_seqs_light)
-     ]
-
-     return numbered_seqs, seqs, number_alignment
-
-
- def unpaired_msa_numbering(seqs, chain = 'H', fragmented = False, n_jobs = 10):
-
-     numbered_seqs = number_with_anarci(seqs, chain = chain, fragmented = fragmented, n_jobs = n_jobs)
-     number_alignment = get_number_alignment(numbered_seqs)
-     number_alignment[1] = chain
-
-     seqs = [''.join([i[2] for i in numbered_seq]).replace('-','') for numbered_seq in numbered_seqs]
-     return numbered_seqs, seqs, number_alignment
-
-
- def number_with_anarci(seqs, chain = 'H', fragmented = False, n_jobs = 1):
-
-     import anarci
-     import pandas as pd
-
-     anarci_out = anarci.run_anarci(
-         pd.DataFrame(seqs).reset_index().values.tolist(),
-         ncpu=n_jobs,
-         scheme='imgt',
-         allowed_species=['human', 'mouse'],
-     )
-
-     numbered_seqs = []
-     for onarci in anarci_out[1]:
-         numbered_seq = []
-         for i in onarci[0][0]:
-             if i[1] != '-':
-                 numbered_seq.append((i[0], chain, i[1]))
-
-         if fragmented:
-             numbered_seqs.append(numbered_seq)
-         else:
-             numbered_seqs.append([(("<",""), chain, "<")] + numbered_seq + [((">",""), chain, ">")])
-
-     return numbered_seqs
-
-
- def create_alignment(res_embeds, numbered_seqs, seq, number_alignment):
-
-     import pandas as pd
-
-     datadf = pd.DataFrame(numbered_seqs)
-     sequence_alignment = number_alignment.merge(datadf, how='left', on=[0, 1]).fillna('-')[2]
-
-     idxs = np.where(sequence_alignment.values == '-')[0]
-     idxs = [idx-num for num, idx in enumerate(idxs)]
-
-     aligned_embeds = pd.DataFrame(np.insert(res_embeds[:len(seq)], idxs , 0, axis=0))
-
-     return pd.concat([aligned_embeds, sequence_alignment], axis=1).values
-
-
- def get_spread_sequences(seq, spread, start_position):
-     """
-     Test sequences which are 8 positions shorter (position 10 + max CDR1 gap of 7) up to 2 positions longer (possible insertions).
-     """
-     spread_sequences = []
-
-     for diff in range(start_position-8, start_position+2+1):
-         spread_sequences.append('*'*diff+seq)
-
-     return np.array(spread_sequences)
-
- def get_sequences_from_anarci(out_anarci, max_position, spread):
-     """
-     Ensures correct masking on each side of sequence
-     """
-
-     if out_anarci == 'ANARCI_error':
-         return np.array(['ANARCI-ERR']*spread)
-
-     end_position = int(re.search(r'\d+', out_anarci[::-1]).group()[::-1])
-     # Fixes ANARCI error of poor numbering of the CDR1 region
-     start_position = int(re.search(r'\d+,\s\'.\'\),\s\'[^-]+\'\),\s\(\(\d+,\s\'.\'\),\s\'[^-]+\'\),\s\(\(\d+,\s\'.\'\),\s\'[^-]+\'\),\s\(\(\d+,\s\'.\'\),\s\'[^-]+',
-                                  out_anarci).group().split(',')[0]) - 1
-
-     sequence = "".join(re.findall(r"(?i)[A-Z*]", "".join(re.findall(r'\),\s\'[A-Z*]', out_anarci))))
-
-     sequence_j = ''.join(sequence).replace('-','').replace('X','*') + '*'*(max_position-int(end_position))
-
-     return get_spread_sequences(sequence_j, spread, start_position)
-
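Note: the index arithmetic in `create_alignment` (`idx - num`) compensates for the fact that `np.insert` positions refer to the original array, so each earlier insertion shifts later ones. A small worked example:

    import numpy as np

    embeds = np.arange(6).reshape(3, 2)           # three residues, two features
    gap_positions = [1, 2]                        # gap rows wanted at these aligned indices
    idxs = [idx - num for num, idx in enumerate(gap_positions)]
    aligned = np.insert(embeds, idxs, 0, axis=0)  # zero rows land at aligned indices 1 and 2
    print(aligned.shape)                          # (5, 2)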
ablang2/hparams.json DELETED
@@ -1 +0,0 @@
- {"name": "AbLang-2", "n_encoder_blocks": 12, "hidden_embed_size": 480, "n_attn_heads": 20, "a_fn": "swiglu", "layer_norm_eps": 1e-12, "pad_tkn": 21, "start_tkn": 0, "end_tkn": 22, "sep_tkn": 25, "mask_tkn": 23, "vocab_size": 26}
 
 
ablang2/load_model.py DELETED
@@ -1,119 +0,0 @@
- import os, subprocess, json, argparse, requests
- import torch
-
- list_of_models = {
-     "ablang1-heavy":["https://opig.stats.ox.ac.uk/data/downloads/ablang-heavy.tar.gz", "amodel.pt"],
-     "ablang1-light":["https://opig.stats.ox.ac.uk/data/downloads/ablang-light.tar.gz", "amodel.pt"],
-     "ablang2-paired":["https://zenodo.org/records/10185169/files/ablang2-weights.tar.gz", "model.pt"],
-     "tcrlang-paired":["https://zenodo.org/records/11208211/files/tcrlang-weights.tar.gz", "model.pt"],
- }
- ablang1_models = ["ablang1-heavy", "ablang1-light"]
- ablang2_models = ["ablang2-paired", "tcrlang-paired"]
-
-
- def load_model(model_to_use = "ablang2-paired", random_init = False, device = 'cpu'):
-
-     if model_to_use in ablang1_models:
-         AbLang, tokenizer, hparams = fetch_ablang1(
-             model_to_use,
-             random_init=random_init,
-             device=device
-         )
-     elif model_to_use in ablang2_models:
-         AbLang, tokenizer, hparams = fetch_ablang2(
-             model_to_use,
-             random_init=random_init,
-             device=device
-         )
-     elif "ABLANG-" in model_to_use:
-         AbLang, tokenizer, hparams = fetch_ablang2(
-             model_to_use,
-             random_init=random_init,
-             device=device
-         )
-     else:
-         assert False, f"The selected model to use ({model_to_use}) does not exist.\
-         Please select a valid model."
-
-     return AbLang, tokenizer, hparams
-
-
- def download_model(model_to_use = "ablang2-paired"):
-     """
-     If not already downloaded, download model inside environment.
-     """
-
-     local_model_folder = os.path.join(os.path.dirname(__file__), "model-weights-{}".format(model_to_use))
-     os.makedirs(local_model_folder, exist_ok = True)
-
-     file_w_weights, file_model = list_of_models[model_to_use] # modify list of models
-
-     if not os.path.isfile(os.path.join(local_model_folder, file_model)):
-         print("Downloading model ...")
-         tmp_file = os.path.join(local_model_folder, "tmp.tar.gz")
-
-         with open(tmp_file,'wb') as f: f.write(requests.get(file_w_weights).content)
-
-         subprocess.run(["tar", "-zxvf", tmp_file, "-C", local_model_folder], check = True)
-         os.remove(tmp_file)
-
-     return local_model_folder
-
-
- def fetch_ablang1(model_to_use, random_init=False, device='cpu'):
-
-     from .models.ablang1 import model as ablang_1_model
-     from .models.ablang1 import tokenizers as ablang_1_tokenizer
-
-     local_model_folder = download_model(model_to_use)
-
-     with open(os.path.join(local_model_folder, 'hparams.json'), 'r', encoding='utf-8') as f:
-         hparams = argparse.Namespace(**json.load(f))
-
-     AbLang = ablang_1_model.AbLang(hparams)
-     if not random_init:
-         AbLang.load_state_dict(
-             torch.load(
-                 os.path.join(local_model_folder, 'amodel.pt'),
-                 map_location=torch.device(device)
-             )
-         )
-     tokenizer = ablang_1_tokenizer.ABtokenizer(os.path.join(local_model_folder, 'vocab.json'))
-
-     return AbLang, tokenizer, hparams
-
-
- def fetch_ablang2(model_to_use, random_init=False, device='cpu'):
-
-     from .models.ablang2 import ablang
-     from .models.ablang2 import tokenizers
-
-     if model_to_use in ablang2_models:
-         local_model_folder = download_model(model_to_use)
-     else:
-         local_model_folder = model_to_use
-
-     with open(os.path.join(local_model_folder, 'hparams.json'), 'r', encoding='utf-8') as f:
-         hparams = argparse.Namespace(**json.load(f))
-
-     AbLang = ablang.AbLang(
-         vocab_size = hparams.vocab_size,
-         hidden_embed_size = hparams.hidden_embed_size,
-         n_attn_heads = hparams.n_attn_heads,
-         n_encoder_blocks = hparams.n_encoder_blocks,
-         padding_tkn = hparams.pad_tkn,
-         mask_tkn = hparams.mask_tkn,
-         layer_norm_eps = hparams.layer_norm_eps,
-         a_fn = hparams.a_fn,
-     )
-
-     if not random_init:
-         AbLang.load_state_dict(
-             torch.load(
-                 os.path.join(local_model_folder, 'model.pt'),
-                 map_location=torch.device(device)
-             )
-         )
-     tokenizer = tokenizers.ABtokenizer()
-
-     return AbLang, tokenizer, hparams
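Note: while this module still existed, the typical entry point was a single call that downloaded the weights on first use and returned the model, tokenizer, and hyperparameters (a sketch; assumes the module was importable at this path):

    from ablang2.load_model import load_model

    AbLang, tokenizer, hparams = load_model("ablang2-paired", device="cpu")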
ablang2/model.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:56d6f07862a6f824f88c8707bbc03e4026c9db762be2d3041e9767e2e6f86386
- size 179314477
 
 
 
 
ablang2/modeling_ablang2paired.py DELETED
@@ -1,81 +0,0 @@
- import torch
- import os
- from torch import nn
- from transformers import PreTrainedModel
- from ablang2.models.ablang2.ablang import AbLang as AbLang2
- from ablang2_paired.configuration_ablang2paired import AbLang2PairedConfig
-
- class AbLang2PairedHFModel(PreTrainedModel):
-     config_class = AbLang2PairedConfig
-     model_type = "ablang2-paired"
-
-     def __init__(self, config: AbLang2PairedConfig):
-         super().__init__(config)
-         self.model = AbLang2(
-             vocab_size=config.vocab_size,
-             hidden_embed_size=config.hidden_embed_size,
-             n_attn_heads=config.n_attn_heads,
-             n_encoder_blocks=config.n_encoder_blocks,
-             padding_tkn=config.padding_tkn,
-             mask_tkn=config.mask_tkn,
-             layer_norm_eps=config.layer_norm_eps,
-             a_fn=config.a_fn,
-             dropout=config.dropout,
-         )
-
-     def forward(self, input_ids=None, x=None, attention_mask=None, **kwargs):
-         # Handle both Hugging Face format (input_ids) and original format (x)
-         if input_ids is not None:
-             x = input_ids
-         elif x is None:
-             raise ValueError("Either input_ids or x must be provided")
-
-         # Get the output from the underlying model
-         output = self.model(x, attention_mask)
-
-         # Return as a simple object with last_hidden_state attribute
-         class ModelOutput:
-             def __init__(self, last_hidden_state):
-                 self.last_hidden_state = last_hidden_state
-
-         return ModelOutput(output)
-
-     @classmethod
-     def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
-         # Check if we have custom weights
-         model_path = pretrained_model_name_or_path
-         custom_weights_path = os.path.join(model_path, "model.pt")
-
-         if os.path.exists(custom_weights_path):
-             # Load config
-             config = kwargs.get("config")
-             if config is None:
-                 from transformers import AutoConfig
-                 config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
-
-             # Create model with only the config argument
-             model = cls(config)
-
-             # Load custom weights
-             state_dict = torch.load(custom_weights_path, map_location="cpu", weights_only=True)
-             model.model.load_state_dict(state_dict)
-
-             # Move model to appropriate device (GPU if available, otherwise CPU)
-             device = kwargs.get("device", None)
-             if device is None:
-                 device = "cuda" if torch.cuda.is_available() else "cpu"
-             model = model.to(device)
-
-             return model
-         else:
-             # Fall back to standard Hugging Face loading
-             return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
-
-     def save_pretrained(self, save_directory, **kwargs):
-         os.makedirs(save_directory, exist_ok=True)
-         # Save custom weights
-         torch.save(self.model.state_dict(), f"{save_directory}/model.pt")
-         # Save config
-         self.config.save_pretrained(save_directory)
-         # Call parent method for any additional saving
-         super().save_pretrained(save_directory, **kwargs)
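Note: with the custom `from_pretrained` above, loading and running the wrapper looked roughly like this (the local path and token ids are placeholders, not values from the repository):

    import torch
    from transformers import AutoModel

    model = AutoModel.from_pretrained("path/to/ablang2_paired", trust_remote_code=True)
    input_ids = torch.tensor([[0, 1, 2, 22]])                 # placeholder token ids
    input_ids = input_ids.to(next(model.parameters()).device)  # from_pretrained may have moved the model to CUDA
    out = model(input_ids=input_ids)
    print(out.last_hidden_state.shape)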
ablang2/models/__init__.py DELETED
File without changes
ablang2/models/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (142 Bytes)
 
ablang2/models/__pycache__/__init__.cpython-312.pyc DELETED
Binary file (146 Bytes)
 
ablang2/models/ablang1/__init__.py DELETED
@@ -1,3 +0,0 @@
- from .tokenizers import ABtokenizer
- from .model import AbLang, AbRep, AbHead
- from .pretrained import pretrained
 
 
 
 
ablang2/models/ablang1/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (298 Bytes)
 
ablang2/models/ablang1/__pycache__/__init__.cpython-312.pyc DELETED
Binary file (310 Bytes)
 
ablang2/models/ablang1/__pycache__/embedding.cpython-310.pyc DELETED
Binary file (1.59 kB)
 
ablang2/models/ablang1/__pycache__/embedding.cpython-312.pyc DELETED
Binary file (2.66 kB)
 
ablang2/models/ablang1/__pycache__/encoderblocks.cpython-310.pyc DELETED
Binary file (5 kB)
 
ablang2/models/ablang1/__pycache__/encoderblocks.cpython-312.pyc DELETED
Binary file (7.78 kB)
 
ablang2/models/ablang1/__pycache__/extra_fns.cpython-310.pyc DELETED
Binary file (1.07 kB)
 
ablang2/models/ablang1/__pycache__/extra_fns.cpython-312.pyc DELETED
Binary file (1.74 kB)
 
ablang2/models/ablang1/__pycache__/fairseq_mha.cpython-310.pyc DELETED
Binary file (30.3 kB)
 
ablang2/models/ablang1/__pycache__/fairseq_mha.cpython-312.pyc DELETED
Binary file (55 kB)
 
ablang2/models/ablang1/__pycache__/model.cpython-310.pyc DELETED
Binary file (3.44 kB)
 
ablang2/models/ablang1/__pycache__/model.cpython-312.pyc DELETED
Binary file (6.23 kB)
 
ablang2/models/ablang1/__pycache__/pretrained.cpython-310.pyc DELETED
Binary file (11.5 kB)
 
ablang2/models/ablang1/__pycache__/pretrained.cpython-312.pyc DELETED
Binary file (19.8 kB)
 
ablang2/models/ablang1/__pycache__/tokenizers.cpython-310.pyc DELETED
Binary file (2.61 kB)
 
ablang2/models/ablang1/__pycache__/tokenizers.cpython-312.pyc DELETED
Binary file (3.3 kB)
 
ablang2/models/ablang1/embedding.py DELETED
@@ -1,36 +0,0 @@
- import torch
-
-
- class AbEmbeddings(torch.nn.Module):
-     """
-     Residue embedding and Positional embedding
-     """
-
-     def __init__(self, hparams):
-         super().__init__()
-         self.pad_token_id = hparams.pad_token_id
-
-         self.AAEmbeddings = torch.nn.Embedding(hparams.vocab_size, hparams.hidden_size, padding_idx=self.pad_token_id)
-         self.PositionEmbeddings = torch.nn.Embedding(hparams.max_position_embeddings, hparams.hidden_size, padding_idx=0) # here padding_idx is always 0
-
-         self.LayerNorm = torch.nn.LayerNorm(hparams.hidden_size, eps=hparams.layer_norm_eps)
-         self.Dropout = torch.nn.Dropout(hparams.hidden_dropout_prob)
-
-     def forward(self, src):
-
-         inputs_embeds = self.AAEmbeddings(src)
-
-         position_ids = self.create_position_ids_from_input_ids(src, self.pad_token_id)
-         position_embeddings = self.PositionEmbeddings(position_ids)
-
-         embeddings = inputs_embeds + position_embeddings
-
-         return self.Dropout(self.LayerNorm(embeddings))
-
-     def create_position_ids_from_input_ids(self, input_ids, padding_idx):
-         """
-         Replace non-padding symbols with their position numbers. Padding idx will get position 0, which will be ignored later on.
-         """
-         mask = input_ids.ne(padding_idx).int()
-
-         return torch.cumsum(mask, dim=1).long() * mask
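Note: the `create_position_ids_from_input_ids` trick is easiest to see on concrete numbers: non-padding tokens get 1-based positions via a cumulative sum, and padding is zeroed back out:

    import torch

    input_ids = torch.tensor([[5, 9, 7, 21, 21]])  # 21 plays the padding id here
    mask = input_ids.ne(21).int()
    position_ids = torch.cumsum(mask, dim=1).long() * mask
    print(position_ids)                            # tensor([[1, 2, 3, 0, 0]])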
ablang2/models/ablang1/encoderblocks.py DELETED
@@ -1,141 +0,0 @@
- import math
- from typing import List, Optional, Tuple
- from dataclasses import dataclass
-
- import torch
- import torch.nn as nn
- #from fairseq.modules.multihead_attention import MultiheadAttention
- from .fairseq_mha import MultiheadAttention
-
- from .extra_fns import ACT2FN
-
-
- @dataclass
- class AbRepOutput():
-     """
-     Dataclass used to store AbRep output.
-     """
-
-     last_hidden_states: torch.FloatTensor
-     all_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
-     attentions: Optional[Tuple[torch.FloatTensor]] = None
-
-
- class EncoderBlocks(torch.nn.Module):
-     """
-     Wrapper for multiple EncoderBlocks (or a single).
-     """
-     def __init__(self, hparams):
-         super().__init__()
-         self.hparams = hparams
-         self.Layers = nn.ModuleList([EncoderBlock(hparams) for _ in range(hparams.num_hidden_layers)])
-
-     def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False):
-
-         all_hidden_states = () if output_hidden_states else None
-         all_self_attentions = () if output_attentions else None
-
-         for num_block, a_EncoderBlock in enumerate(self.Layers):
-
-             hidden_states, attentions = a_EncoderBlock(hidden_states, attention_mask, output_attentions)
-             #print(attentions)
-
-             if output_hidden_states:
-                 all_hidden_states = all_hidden_states + (hidden_states,) # Takes out each hidden states after each EncoderBlock
-
-             if output_attentions:
-                 all_self_attentions = all_self_attentions + (attentions,) # Takes out attention layers for analysis
-
-         return AbRepOutput(last_hidden_states=hidden_states, all_hidden_states=all_hidden_states, attentions=all_self_attentions)
-
-
- class EncoderBlock(torch.nn.Module):
-     """
-     Single EncoderBlock.
-
-     An EncoderBlock consists of a MultiHeadAttention and a IntermediateLayer.
-     """
-     def __init__(self, hparams):
-         super().__init__()
-
-         self.MultiHeadAttention = ThirdMultiHeadAttention(hparams)
-         self.MHADropout = nn.Dropout(hparams.hidden_dropout_prob)
-         self.MHALayerNorm = nn.LayerNorm(hparams.hidden_size, eps=hparams.layer_norm_eps)
-
-         self.IntermediateLayer = IntermediateLayer(hparams)
-
-     def forward(self, hidden_states, attention_mask=None, output_attentions=False):
-
-         MHAoutput, attentions = self.MultiHeadAttention(hidden_states, attention_mask, output_attentions=output_attentions)
-
-         output = self.MHADropout(MHAoutput)
-         output = self.MHALayerNorm(output + hidden_states) # HIDDEN_STATES ARE ADDED FOR RESIDUAL BLOCK EFFECT
-
-         output = self.IntermediateLayer(output) # INTERMEDIATELAYER HAS RESIDUAL BLOCK EFFECT INTERNALLY
-
-         #outputs = (layer_output,) + self_attention_outputs[1:] # if output_attentions=False then 1: is empty
-
-         return output, attentions
-
-
- class ThirdMultiHeadAttention(torch.nn.Module):
-     """
-     New MultiHeadAttention which can return the weights of the individual heads.
-     """
-
-     def __init__(self, hparams):
-         super().__init__()
-
-         self.Attention = MultiheadAttention(hparams.hidden_size, hparams.num_attention_heads, dropout=hparams.attention_probs_dropout_prob, self_attention=True)
-
-     def forward(self, hidden_states, attention_mask=None, output_attentions=False):
-
-         hidden_states = torch.transpose(hidden_states, 0, 1)
-
-         # static_kv is only True because there is currently a bug which doesn't return the head weights unaveraged unless its true
-         attn_output, attn_weights = self.Attention(hidden_states, hidden_states, hidden_states, key_padding_mask=attention_mask, static_kv=True,
-                                                    need_weights=output_attentions, need_head_weights=output_attentions)
-
-         return torch.transpose(attn_output, 0, 1), attn_weights
-
-
- class OldMultiHeadAttention(torch.nn.Module):
-     """
-     MultiHeadAttention contains a Scaled Dot Product Attention and a Linear Layer.
-     """
-     def __init__(self, config):
-         super().__init__()
-         self.Attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, config.attention_probs_dropout_prob)
-
-     def forward(self, hidden_states, attention_mask=None, output_attentions=False):
-
-         hidden_states = torch.transpose(hidden_states, 0, 1)
-         output, attentions = self.Attention(hidden_states, hidden_states, hidden_states, key_padding_mask=attention_mask, need_weights=output_attentions)
-
-         attention_output = torch.transpose(output, 0, 1)
-
-         return attention_output, attentions
-
-
- class IntermediateLayer(nn.Module):
-     """
-     Contains an expanding layer, while also functioning as a residual block ending with a drop-norm layer
-     """
-     def __init__(self, config):
-         super().__init__()
-         self.expand_dense = nn.Linear(config.hidden_size, config.intermediate_size)
-         self.intermediate_act_fn = ACT2FN[config.hidden_act]
-
-         self.dense_dense = nn.Linear(config.intermediate_size, config.hidden_size)
-         self.dropout = nn.Dropout(config.hidden_dropout_prob)
-         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-
-     def forward(self, hidden_states):
-         output = self.expand_dense(hidden_states)
-         output = self.intermediate_act_fn(output)
-
-         output = self.dense_dense(output)
-         output = self.dropout(output)
-         output = self.LayerNorm(output + hidden_states)
-
-         return output
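Note: both attention wrappers above transpose their input because fairseq-style and stock `torch.nn.MultiheadAttention` modules default to sequence-first `[seq, batch, hidden]` layout, while the rest of the model is batch-first. A sketch of the round trip with the stock PyTorch module:

    import torch

    mha = torch.nn.MultiheadAttention(embed_dim=480, num_heads=20)  # seq-first by default
    x = torch.randn(2, 10, 480)                                     # [batch, seq, hidden]
    x_t = x.transpose(0, 1)                                         # [seq, batch, hidden]
    out, _ = mha(x_t, x_t, x_t)
    out = out.transpose(0, 1)                                       # back to [batch, seq, hidden]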
ablang2/models/ablang1/extra_fns.py DELETED
@@ -1,26 +0,0 @@
- import torch
- import math
-
-
- def gelu_new(x):
-     """
-     Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
-     the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
-     """
-     return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
-
- def gelu_fast(x):
-     return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
-
- def mish(x):
-     return x * torch.tanh(torch.nn.functional.softplus(x))
-
- ACT2FN = {
-     "relu": torch.nn.functional.relu,
-     "gelu": torch.nn.functional.gelu,
-     "tanh": torch.tanh,
-     "gelu_new": gelu_new,
-     "gelu_fast": gelu_fast,
-     "mish": mish,
-     "sigmoid": torch.sigmoid,
- }
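Note: `gelu_new` is the tanh approximation of GELU; on recent PyTorch it matches the built-in with `approximate="tanh"`. A quick check, assuming PyTorch >= 1.12:

    import math
    import torch

    def gelu_new(x):
        # Tanh approximation of GELU, same formula as in the deleted file
        return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))

    x = torch.randn(4)
    print(torch.allclose(gelu_new(x), torch.nn.functional.gelu(x, approximate="tanh"), atol=1e-6))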
ablang2/models/ablang1/fairseq_mha.py DELETED
@@ -1,1306 +0,0 @@
1
- import math
2
- from typing import Dict, List, Optional, Tuple
3
- import uuid
4
-
5
- import torch
6
- import torch.nn.functional as F
7
- from torch import Tensor, nn
8
- from torch.nn import Parameter
9
-
10
- _xformers_available = False
11
-
12
- # TODO: move this into xformers?
13
- # TODO: uint8 input type should just output a bool
14
- def _mask_for_xformers(mask: Tensor, to_dtype: Optional[torch.dtype] = None):
15
- """
16
- call to pytorch multihead accepts three mask types:
17
- - ByteTensor where non-zero means to mask
18
- - FloatTensor which is an additive mask
19
- - BoolTensor where True means to mask
20
- xFormers currently accepts boolean and additive maks. For boolean masks
21
- the values have opposite meaning. For a BoolTensor True mean to keep the value.
22
- """
23
- float_types = [torch.float, torch.float16]
24
- # If an input mask is a float it is an additive mask. Otherwise it is either uint8 or bool.
25
- additive = mask.dtype in float_types
26
- # If to_dype is not specified, keep same dtype as mask.
27
- to_dtype = mask.dtype if to_dtype is None else to_dtype
28
- to_additive = to_dtype in float_types
29
-
30
- if additive:
31
- if to_additive:
32
- return mask.to(to_dtype)
33
- mask = mask < 0
34
-
35
- if to_additive:
36
- # return additive mask
37
- new_mask = torch.zeros_like(mask, dtype=to_dtype)
38
- new_mask = new_mask.masked_fill_(mask, -float("inf"))
39
- return new_mask
40
-
41
- # In xFormers True is value to keep rather than value to mask
42
- mask = ~mask.to(torch.bool)
43
- mask = mask.to(to_dtype)
44
- return mask
45
-
46
- class FairseqDecoder(nn.Module):
47
- """Base class for decoders."""
48
-
49
- def __init__(self, dictionary):
50
- super().__init__()
51
- self.dictionary = dictionary
52
- self.onnx_trace = False
53
- self.adaptive_softmax = None
54
-
55
- def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
56
- """
57
- Args:
58
- prev_output_tokens (LongTensor): shifted output tokens of shape
59
- `(batch, tgt_len)`, for teacher forcing
60
- encoder_out (dict, optional): output from the encoder, used for
61
- encoder-side attention
62
-
63
- Returns:
64
- tuple:
65
- - the decoder's output of shape `(batch, tgt_len, vocab)`
66
- - a dictionary with any model-specific outputs
67
- """
68
- x, extra = self.extract_features(
69
- prev_output_tokens, encoder_out=encoder_out, **kwargs
70
- )
71
- x = self.output_layer(x)
72
- return x, extra
73
-
74
- def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
75
- """
76
- Returns:
77
- tuple:
78
- - the decoder's features of shape `(batch, tgt_len, embed_dim)`
79
- - a dictionary with any model-specific outputs
80
- """
81
- raise NotImplementedError
82
-
83
- def output_layer(self, features, **kwargs):
84
- """
85
- Project features to the default output size, e.g., vocabulary size.
86
-
87
- Args:
88
- features (Tensor): features returned by *extract_features*.
89
- """
90
- raise NotImplementedError
91
-
92
- def get_normalized_probs(
93
- self,
94
- net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
95
- log_probs: bool,
96
- sample: Optional[Dict[str, Tensor]] = None,
97
- ):
98
- """Get normalized probabilities (or log probs) from a net's output."""
99
- return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
100
-
101
- # TorchScript doesn't support super() method so that the scriptable Subclass
102
- # can't access the base class model in Torchscript.
103
- # Current workaround is to add a helper function with different name and
104
- # call the helper function from scriptable Subclass.
105
- def get_normalized_probs_scriptable(
106
- self,
107
- net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
108
- log_probs: bool,
109
- sample: Optional[Dict[str, Tensor]] = None,
110
- ):
111
- """Get normalized probabilities (or log probs) from a net's output."""
112
-
113
- if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
114
- if sample is not None:
115
- assert "target" in sample
116
- target = sample["target"]
117
- else:
118
- target = None
119
- out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
120
- return out.exp_() if not log_probs else out
121
-
122
- logits = net_output[0]
123
- if log_probs:
124
- return log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
125
- else:
126
- return softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
127
-
128
- def max_positions(self):
129
- """Maximum input length supported by the decoder."""
130
- return 1e6 # an arbitrary large number
131
-
132
- def upgrade_state_dict_named(self, state_dict, name):
133
- """Upgrade old state dicts to work with newer code."""
134
- return state_dict
135
-
136
- def prepare_for_onnx_export_(self):
137
- self.onnx_trace = True
138
-
139
-
140
- class FairseqIncrementalState(object):
141
- def __init__(self, *args, **kwargs):
142
- super().__init__(*args, **kwargs)
143
- self.init_incremental_state()
144
-
145
- def init_incremental_state(self):
146
- self._incremental_state_id = str(uuid.uuid4())
147
-
148
- def _get_full_incremental_state_key(self, key: str) -> str:
149
- return "{}.{}".format(self._incremental_state_id, key)
150
-
151
- def get_incremental_state(
152
- self,
153
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
154
- key: str,
155
- ) -> Optional[Dict[str, Optional[Tensor]]]:
156
- """Helper for getting incremental state for an nn.Module."""
157
- full_key = self._get_full_incremental_state_key(key)
158
- if incremental_state is None or full_key not in incremental_state:
159
- return None
160
- return incremental_state[full_key]
161
-
162
- def set_incremental_state(
163
- self,
164
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
165
- key: str,
166
- value: Dict[str, Optional[Tensor]],
167
- ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
168
- """Helper for setting incremental state for an nn.Module."""
169
- if incremental_state is not None:
170
- full_key = self._get_full_incremental_state_key(key)
171
- incremental_state[full_key] = value
172
- return incremental_state
173
-
174
-
175
- def with_incremental_state(cls):
176
- cls.__bases__ = (FairseqIncrementalState,) + tuple(
177
- b for b in cls.__bases__ if b != FairseqIncrementalState
178
- )
179
- return cls
180
-
181
-
- @with_incremental_state
- class FairseqIncrementalDecoder(FairseqDecoder):
-     """Base class for incremental decoders.
- 
-     Incremental decoding is a special mode at inference time where the Model
-     only receives a single timestep of input corresponding to the previous
-     output token (for teacher forcing) and must produce the next output
-     *incrementally*. Thus the model must cache any long-term state that is
-     needed about the sequence, e.g., hidden states, convolutional states, etc.
- 
-     Compared to the standard :class:`FairseqDecoder` interface, the incremental
-     decoder interface allows :func:`forward` functions to take an extra keyword
-     argument (*incremental_state*) that can be used to cache state across
-     time-steps.
- 
-     The :class:`FairseqIncrementalDecoder` interface also defines the
-     :func:`reorder_incremental_state` method, which is used during beam search
-     to select and reorder the incremental state based on the selection of beams.
- 
-     To learn more about how incremental decoding works, refer to `this blog
-     <http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_.
-     """
- 
-     def __init__(self, dictionary):
-         super().__init__(dictionary)
- 
-     def forward(
-         self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
-     ):
-         """
-         Args:
-             prev_output_tokens (LongTensor): shifted output tokens of shape
-                 `(batch, tgt_len)`, for teacher forcing
-             encoder_out (dict, optional): output from the encoder, used for
-                 encoder-side attention
-             incremental_state (dict, optional): dictionary used for storing
-                 state during :ref:`Incremental decoding`
- 
-         Returns:
-             tuple:
-                 - the decoder's output of shape `(batch, tgt_len, vocab)`
-                 - a dictionary with any model-specific outputs
-         """
-         raise NotImplementedError
- 
-     def extract_features(
-         self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
-     ):
-         """
-         Returns:
-             tuple:
-                 - the decoder's features of shape `(batch, tgt_len, embed_dim)`
-                 - a dictionary with any model-specific outputs
-         """
-         raise NotImplementedError
- 
-     def reorder_incremental_state(
-         self,
-         incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
-         new_order: Tensor,
-     ):
-         """Reorder incremental state.
- 
-         This will be called when the order of the input has changed from the
-         previous time step. A typical use case is beam search, where the input
-         order changes between time steps based on the selection of beams.
-         """
-         pass
- 
-     def reorder_incremental_state_scripting(
-         self,
-         incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
-         new_order: Tensor,
-     ):
-         """Main entry point for reordering the incremental state.
- 
-         Due to limitations in TorchScript, we call this function in
-         :class:`fairseq.sequence_generator.SequenceGenerator` instead of
-         calling :func:`reorder_incremental_state` directly.
-         """
-         for module in self.modules():
-             if hasattr(module, "reorder_incremental_state"):
-                 result = module.reorder_incremental_state(incremental_state, new_order)
-                 if result is not None:
-                     incremental_state = result
- 
-     def set_beam_size(self, beam_size):
-         """Sets the beam size in the decoder and all children."""
-         if getattr(self, "_beam_size", -1) != beam_size:
-             seen = set()
- 
-             def apply_set_beam_size(module):
-                 if (
-                     module != self
-                     and hasattr(module, "set_beam_size")
-                     and module not in seen
-                 ):
-                     seen.add(module)
-                     module.set_beam_size(beam_size)
- 
-             self.apply(apply_set_beam_size)
-             self._beam_size = beam_size
- 
-
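Concretely, reordering the incremental state is just an index_select over the beam dimension of every cached tensor; a toy illustration (values are made up):

import torch
cached = torch.arange(6.0).view(3, 2)        # pretend cache: one row per beam
new_order = torch.tensor([2, 2, 0])          # beams chosen by the search this step
cached = cached.index_select(0, new_order)   # rows now follow the surviving beams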
- class MultiheadAttention(FairseqIncrementalDecoder):
-     """Multi-headed attention.
- 
-     See "Attention Is All You Need" for more details.
-     """
- 
-     def __init__(
-         self,
-         embed_dim,
-         num_heads,
-         kdim=None,
-         vdim=None,
-         dropout=0.0,
-         bias=True,
-         add_bias_kv=False,
-         add_zero_attn=False,
-         self_attention=False,
-         encoder_decoder_attention=False,
-         dictionary=None,
-         q_noise=0.0,
-         qn_block_size=8,
-         # TODO: pass in config rather than string.
-         # config defined in xformers.components.attention.AttentionConfig
-         xformers_att_config: Optional[str] = None,
-         xformers_blocksparse_layout: Optional[
-             torch.Tensor
-         ] = None,  # This should be part of the config
-         xformers_blocksparse_blocksize: Optional[
-             int
-         ] = 16,  # This should be part of the config
-     ):
-         super().__init__(dictionary)
- 
-         # xformers_att_config = utils.eval_str_dict(xformers_att_config)
-         self.use_xformers = False  # xformers_att_config is not None
-         if self.use_xformers and not _xformers_available:
-             raise ImportError("\n\n Please install xFormers.")
-         self.embed_dim = embed_dim
-         self.kdim = kdim if kdim is not None else embed_dim
-         self.vdim = vdim if vdim is not None else embed_dim
-         self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
- 
-         self.num_heads = num_heads
-         self.dropout_module = FairseqDropout(
-             dropout, module_name=self.__class__.__name__
-         )
- 
-         self.head_dim = embed_dim // num_heads
-         assert (
-             self.head_dim * num_heads == self.embed_dim
-         ), "embed_dim must be divisible by num_heads"
-         self.scaling = self.head_dim**-0.5
- 
-         self.self_attention = self_attention
-         self.encoder_decoder_attention = encoder_decoder_attention
- 
-         assert not self.self_attention or self.qkv_same_dim, (
-             "Self-attention requires query, key and " "value to be of the same size"
-         )
- 
-         self.k_proj = quant_noise(
-             nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
-         )
-         self.v_proj = quant_noise(
-             nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
-         )
-         self.q_proj = quant_noise(
-             nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
-         )
- 
-         self.out_proj = quant_noise(
-             nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
-         )
- 
-         if add_bias_kv:
-             self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
-             self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
-         else:
-             self.bias_k = self.bias_v = None
- 
-         self.add_zero_attn = add_zero_attn
-         self.beam_size = 1
-         self.reset_parameters()
- 
-         if self.use_xformers:
-             xformers_att_config["dropout"] = xformers_att_config.get("dropout", dropout)
-             xformers_att_config["num_heads"] = xformers_att_config.get(
-                 "num_heads", num_heads
-             )
- 
-             if xformers_blocksparse_layout is not None:
-                 # Could be part of a single config passed only once
-                 xformers_att_config["block_size"] = xformers_blocksparse_blocksize
-                 xformers_att_config["layout"] = xformers_blocksparse_layout
-                 xformers_att_config["name"] = "blocksparse"
- 
-             self.attention = build_attention(xformers_att_config)
- 
-         self.onnx_trace = False
-         self.skip_embed_dim_check = False
-         self.init_incremental_state()
- 
-     def prepare_for_onnx_export_(self):
-         self.onnx_trace = True
- 
-     def reset_parameters(self):
-         if self.qkv_same_dim:
-             # Empirically observed the convergence to be much better with
-             # the scaled initialization
-             nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
-             nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
-             nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
-         else:
-             nn.init.xavier_uniform_(self.k_proj.weight)
-             nn.init.xavier_uniform_(self.v_proj.weight)
-             nn.init.xavier_uniform_(self.q_proj.weight)
- 
-         nn.init.xavier_uniform_(self.out_proj.weight)
-         if self.out_proj.bias is not None:
-             nn.init.constant_(self.out_proj.bias, 0.0)
-         if self.bias_k is not None:
-             nn.init.xavier_normal_(self.bias_k)
-         if self.bias_v is not None:
-             nn.init.xavier_normal_(self.bias_v)
- 
-     def _get_reserve_head_index(self, num_heads_to_keep: int):
-         k_proj_heads_norm = []
-         q_proj_heads_norm = []
-         v_proj_heads_norm = []
- 
-         for i in range(self.num_heads):
-             start_idx = i * self.head_dim
-             end_idx = (i + 1) * self.head_dim
-             k_proj_heads_norm.append(
-                 torch.sum(
-                     torch.abs(
-                         self.k_proj.weight[
-                             start_idx:end_idx,
-                         ]
-                     )
-                 ).tolist()
-                 + torch.sum(torch.abs(self.k_proj.bias[start_idx:end_idx])).tolist()
-             )
-             q_proj_heads_norm.append(
-                 torch.sum(
-                     torch.abs(
-                         self.q_proj.weight[
-                             start_idx:end_idx,
-                         ]
-                     )
-                 ).tolist()
-                 + torch.sum(torch.abs(self.q_proj.bias[start_idx:end_idx])).tolist()
-             )
-             v_proj_heads_norm.append(
-                 torch.sum(
-                     torch.abs(
-                         self.v_proj.weight[
-                             start_idx:end_idx,
-                         ]
-                     )
-                 ).tolist()
-                 + torch.sum(torch.abs(self.v_proj.bias[start_idx:end_idx])).tolist()
-             )
- 
-         heads_norm = []
-         for i in range(self.num_heads):
-             heads_norm.append(
-                 k_proj_heads_norm[i] + q_proj_heads_norm[i] + v_proj_heads_norm[i]
-             )
- 
-         sorted_head_index = sorted(
-             range(self.num_heads), key=lambda k: heads_norm[k], reverse=True
-         )
-         reserve_head_index = []
-         for i in range(num_heads_to_keep):
-             start = sorted_head_index[i] * self.head_dim
-             end = (sorted_head_index[i] + 1) * self.head_dim
-             reserve_head_index.append((start, end))
-         return reserve_head_index
- 
-     def _adaptive_prune_heads(self, reserve_head_index: List[Tuple[int, int]]):
-         new_q_weight = []
-         new_q_bias = []
-         new_k_weight = []
-         new_k_bias = []
-         new_v_weight = []
-         new_v_bias = []
-         new_out_proj_weight = []
- 
-         for ele in reserve_head_index:
-             start_idx, end_idx = ele
-             new_q_weight.append(
-                 self.q_proj.weight[
-                     start_idx:end_idx,
-                 ]
-             )
-             new_q_bias.append(self.q_proj.bias[start_idx:end_idx])
- 
-             new_k_weight.append(
-                 self.k_proj.weight[
-                     start_idx:end_idx,
-                 ]
-             )
- 
-             new_k_bias.append(self.k_proj.bias[start_idx:end_idx])
- 
-             new_v_weight.append(
-                 self.v_proj.weight[
-                     start_idx:end_idx,
-                 ]
-             )
-             new_v_bias.append(self.v_proj.bias[start_idx:end_idx])
- 
-             new_out_proj_weight.append(self.out_proj.weight[:, start_idx:end_idx])
- 
-         new_q_weight = torch.cat(new_q_weight).detach()
-         new_k_weight = torch.cat(new_k_weight).detach()
-         new_v_weight = torch.cat(new_v_weight).detach()
-         new_out_proj_weight = torch.cat(new_out_proj_weight, dim=-1).detach()
-         new_q_weight.requires_grad = True
-         new_k_weight.requires_grad = True
-         new_v_weight.requires_grad = True
-         new_out_proj_weight.requires_grad = True
- 
-         new_q_bias = torch.cat(new_q_bias).detach()
-         new_q_bias.requires_grad = True
- 
-         new_k_bias = torch.cat(new_k_bias).detach()
-         new_k_bias.requires_grad = True
- 
-         new_v_bias = torch.cat(new_v_bias).detach()
-         new_v_bias.requires_grad = True
- 
-         self.q_proj.weight = torch.nn.Parameter(new_q_weight)
-         self.q_proj.bias = torch.nn.Parameter(new_q_bias)
- 
-         self.k_proj.weight = torch.nn.Parameter(new_k_weight)
-         self.k_proj.bias = torch.nn.Parameter(new_k_bias)
- 
-         self.v_proj.weight = torch.nn.Parameter(new_v_weight)
-         self.v_proj.bias = torch.nn.Parameter(new_v_bias)
- 
-         self.out_proj.weight = torch.nn.Parameter(new_out_proj_weight)
- 
-         self.num_heads = len(reserve_head_index)
-         self.embed_dim = self.head_dim * self.num_heads
-         self.q_proj.out_features = self.embed_dim
-         self.k_proj.out_features = self.embed_dim
-         self.v_proj.out_features = self.embed_dim
- 
-     def _set_skip_embed_dim_check(self):
-         self.skip_embed_dim_check = True
- 
-     def _pad_masks(
-         self,
-         key_padding_mask: Optional[Tensor],
-         attn_mask: Optional[Tensor],
-     ) -> Tuple[Optional[Tensor], Optional[Tensor]]:
-         if attn_mask is not None:
-             shape = attn_mask.size()[:-1] + torch.Size([1])
-             attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(shape)], dim=-1)
-         if key_padding_mask is not None:
-             shape = key_padding_mask.size()[:-1] + torch.Size([1])
-             key_padding_mask = torch.cat(
-                 [
-                     key_padding_mask,
-                     key_padding_mask.new_zeros(shape),
-                 ],
-                 dim=-1,
-             )
-         return key_padding_mask, attn_mask
- 
-     def _add_bias(
-         self,
-         k: Tensor,
-         v: Tensor,
-         key_padding_mask: Optional[Tensor],
-         attn_mask: Optional[Tensor],
-         bsz: int,
-     ) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]:
-         assert self.bias_k is not None
-         assert self.bias_v is not None
-         k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
-         v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
-         key_padding_mask, attn_mask = self._pad_masks(
-             key_padding_mask=key_padding_mask, attn_mask=attn_mask
-         )
-         return k, v, key_padding_mask, attn_mask
- 
-     def _append_zero_attn(
-         self,
-         k: Tensor,
-         v: Tensor,
-         key_padding_mask: Optional[Tensor],
-         attn_mask: Optional[Tensor],
-     ) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]:
-         zero_attn_shape = k.size()[:-2] + torch.Size([1]) + k.size()[-1:]
-         k = torch.cat(
-             [k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=-2
-         )
-         v = torch.cat(
-             [v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=-2
-         )
-         key_padding_mask, attn_mask = self._pad_masks(
-             key_padding_mask=key_padding_mask, attn_mask=attn_mask
-         )
-         return k, v, key_padding_mask, attn_mask
- 
-     def _xformers_attn_forward(
-         self,
-         query,
-         key: Optional[Tensor],
-         value: Optional[Tensor],
-         key_padding_mask: Optional[Tensor] = None,
-         need_weights: bool = True,
-         attn_mask: Optional[Tensor] = None,
-     ) -> Tuple[Tensor, Optional[Tensor]]:
- 
-         tgt_len, bsz, embed_dim = query.size()
- 
-         if key_padding_mask is not None:
-             assert key_padding_mask.size(0) == bsz
-             assert key_padding_mask.size(1) == tgt_len
- 
-         if self.self_attention:
-             key = query
-             value = query
-         elif self.encoder_decoder_attention:
-             value = key
- 
-         q = self.q_proj(query)
-         k = self.k_proj(key)
-         v = self.v_proj(value)
- 
-         if self.bias_k is not None:
-             assert self.bias_v is not None
-             k, v, attn_mask, key_padding_mask = self._add_bias(
-                 k, v, attn_mask, key_padding_mask, bsz
-             )
- 
-         def fold_heads(x):
-             return (
-                 x.contiguous()
-                 .view(-1, bsz * self.num_heads, self.head_dim)
-                 .transpose(0, 1)
-             )
- 
-         def split_heads(x):
-             return (
-                 x.contiguous()
-                 .view(-1, bsz, self.num_heads, self.head_dim)
-                 .transpose(0, 1)
-                 .transpose(1, 2)
-             )
- 
-         massage = split_heads if self.attention.requires_head_dimension else fold_heads
-         q = massage(q)
-         if k is not None:
-             k = massage(k)
-         if v is not None:
-             v = massage(v)
- 
-         if self.add_zero_attn:
-             k, v, key_padding_mask, attn_mask = self._append_zero_attn(
-                 k=k, v=v, key_padding_mask=key_padding_mask, attn_mask=attn_mask
-             )
- 
-         kwargs = {}
- 
-         if attn_mask is not None and self.attention.supports_attention_mask:
-             attn_mask = _mask_for_xformers(attn_mask, to_dtype=q.dtype)
-             kwargs["att_mask"] = attn_mask
- 
-         if key_padding_mask is not None:
-             key_padding_mask = _mask_for_xformers(key_padding_mask, to_dtype=torch.bool)
-             if not self.attention.requires_separate_masks:
-                 attn_mask = maybe_merge_masks(
-                     attn_mask,
-                     key_padding_mask,
-                     batch_size=bsz,
-                     src_len=k.size(-2),
-                     tgt_len=q.size(-2),
-                     num_heads=self.num_heads,
-                 )
-                 key_padding_mask = None
-                 kwargs["att_mask"] = attn_mask
-             if self.attention.supports_key_padding_mask:
-                 kwargs["key_padding_mask"] = key_padding_mask
- 
-         y = self.attention(q, k, v, **kwargs)
- 
-         y = (
-             y.view(bsz, self.num_heads, tgt_len, self.head_dim)
-             .transpose(1, 2)
-             .flatten(start_dim=2, end_dim=3)
-             .transpose(0, 1)
-         )
-         assert list(y.size()) == [tgt_len, bsz, embed_dim]
- 
-         # Dropout not needed because already applied in attention.
-         # It is applied to the attention weights before matmul with v.
-         y = self.out_proj(y)
- 
-         # TODO: support returning attention weights if needed.
-         return y, None
- 
-     def forward(
-         self,
-         query: Tensor,
-         key: Optional[Tensor],
-         value: Optional[Tensor],
-         key_padding_mask: Optional[Tensor] = None,
-         incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
-         need_weights: bool = True,
-         static_kv: bool = False,
-         attn_mask: Optional[Tensor] = None,
-         before_softmax: bool = False,
-         need_head_weights: bool = False,
-     ) -> Tuple[Tensor, Optional[Tensor]]:
-         """Input shape: Time x Batch x Channel
- 
-         Args:
-             key_padding_mask (ByteTensor, optional): mask to exclude
-                 keys that are pads, of shape `(batch, src_len)`, where
-                 padding elements are indicated by 1s.
-             need_weights (bool, optional): return the attention weights,
-                 averaged over heads (default: False).
-             attn_mask (ByteTensor, optional): typically used to
-                 implement causal attention, where the mask prevents the
-                 attention from looking forward in time (default: None).
-             before_softmax (bool, optional): return the raw attention
-                 weights and values before the attention softmax.
-             need_head_weights (bool, optional): return the attention
-                 weights for each head. Implies *need_weights*. Default:
-                 return the average attention weights over all heads.
-         """
-         if need_head_weights:
-             need_weights = True
- 
-         is_tpu = query.device.type == "xla"
- 
-         tgt_len, bsz, embed_dim = query.size()
-         src_len = tgt_len
-         if not self.skip_embed_dim_check:
-             assert (
-                 embed_dim == self.embed_dim
-             ), f"query dim {embed_dim} != {self.embed_dim}"
-         assert list(query.size()) == [tgt_len, bsz, embed_dim]
-         if key is not None:
-             src_len, key_bsz, _ = key.size()
-             if not torch.jit.is_scripting():
-                 assert value is not None
-                 assert src_len, key_bsz == value.shape[:2]
- 
-         if (
-             not self.onnx_trace
-             and not is_tpu  # don't use PyTorch version on TPUs
-             and incremental_state is None
-             and not static_kv
-             # A workaround for quantization to work. Otherwise JIT compilation
-             # treats bias in linear module as method.
-             and not torch.jit.is_scripting()
-             # The Multihead attention implemented in pytorch forces strong dimension check
-             # for input embedding dimension and K,Q,V projection dimension.
-             # Since pruning will break the dimension check and it is not easy to modify the pytorch API,
-             # it is preferred to bypass the pytorch MHA when we need to skip embed_dim_check
-             and not self.skip_embed_dim_check
-         ):
-             assert key is not None and value is not None
- 
-             if self.use_xformers:
-                 return self._xformers_attn_forward(
-                     query, key, value, key_padding_mask, need_weights, attn_mask
-                 )
- 
-             else:
-                 return F.multi_head_attention_forward(
-                     query,
-                     key,
-                     value,
-                     self.embed_dim,
-                     self.num_heads,
-                     torch.empty([0]),
-                     torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
-                     self.bias_k,
-                     self.bias_v,
-                     self.add_zero_attn,
-                     self.dropout_module.p,
-                     self.out_proj.weight,
-                     self.out_proj.bias,
-                     self.training or self.dropout_module.apply_during_inference,
-                     key_padding_mask.bool() if key_padding_mask is not None else None,
-                     need_weights,
-                     attn_mask,
-                     use_separate_proj_weight=True,
-                     q_proj_weight=self.q_proj.weight,
-                     k_proj_weight=self.k_proj.weight,
-                     v_proj_weight=self.v_proj.weight,
-                 )
- 
-         if incremental_state is not None:
-             saved_state = self._get_input_buffer(incremental_state)
-             if saved_state is not None and "prev_key" in saved_state:
-                 # previous time steps are cached - no need to recompute
-                 # key and value if they are static
-                 if static_kv:
-                     assert self.encoder_decoder_attention and not self.self_attention
-                     key = value = None
-         else:
-             saved_state = None
- 
-         if self.self_attention:
-             q = self.q_proj(query)
-             k = self.k_proj(query)
-             v = self.v_proj(query)
-         elif self.encoder_decoder_attention:
-             # encoder-decoder attention
-             q = self.q_proj(query)
-             if key is None:
-                 assert value is None
-                 k = v = None
-             else:
-                 if self.beam_size > 1 and bsz == key.size(1):
-                     # key is [T, bsz*beam_size, C], reduce to [T, bsz, C]
-                     key = key.view(key.size(0), -1, self.beam_size, key.size(2))[
-                         :, :, 0, :
-                     ]
-                     if key_padding_mask is not None:
-                         key_padding_mask = key_padding_mask.view(
-                             -1, self.beam_size, key_padding_mask.size(1)
-                         )[:, 0, :]
-                 k = self.k_proj(key)
-                 v = self.v_proj(key)
- 
-         else:
-             assert key is not None and value is not None
-             q = self.q_proj(query)
-             k = self.k_proj(key)
-             v = self.v_proj(value)
-         q *= self.scaling
- 
-         if self.bias_k is not None:
-             assert self.bias_v is not None
-             k, v, attn_mask, key_padding_mask = self._add_bias(
-                 k, v, attn_mask, key_padding_mask, bsz
-             )
- 
-         q = (
-             q.contiguous()
-             .view(tgt_len, bsz * self.num_heads, self.head_dim)
-             .transpose(0, 1)
-         )
-         kv_bsz = bsz  # need default value for scripting
-         if k is not None:
-             kv_bsz = k.size(1)
-             k = (
-                 k.contiguous()
-                 .view(-1, kv_bsz * self.num_heads, self.head_dim)
-                 .transpose(0, 1)
-             )
-         if v is not None:
-             v = (
-                 v.contiguous()
-                 .view(-1, kv_bsz * self.num_heads, self.head_dim)
-                 .transpose(0, 1)
-             )
- 
-         if saved_state is not None:
-             # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
-             if "prev_key" in saved_state:
-                 _prev_key = saved_state["prev_key"]
-                 assert _prev_key is not None
-                 kv_bsz = _prev_key.size(0)
-                 prev_key = _prev_key.view(kv_bsz * self.num_heads, -1, self.head_dim)
-                 if static_kv:
-                     k = prev_key
-                 else:
-                     assert k is not None
-                     k = torch.cat([prev_key, k], dim=1)
-                 src_len = k.size(1)
-             if "prev_value" in saved_state:
-                 _prev_value = saved_state["prev_value"]
-                 assert _prev_value is not None
-                 assert kv_bsz == _prev_value.size(0)
-                 prev_value = _prev_value.view(
-                     kv_bsz * self.num_heads, -1, self.head_dim
-                 )
-                 if static_kv:
-                     v = prev_value
-                 else:
-                     assert v is not None
-                     v = torch.cat([prev_value, v], dim=1)
-             prev_key_padding_mask: Optional[Tensor] = None
-             if "prev_key_padding_mask" in saved_state:
-                 prev_key_padding_mask = saved_state["prev_key_padding_mask"]
-             assert k is not None and v is not None
-             key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
-                 key_padding_mask=key_padding_mask,
-                 prev_key_padding_mask=prev_key_padding_mask,
-                 batch_size=kv_bsz,
-                 src_len=k.size(1),
-                 static_kv=static_kv,
-             )
- 
-             saved_state["prev_key"] = k.view(kv_bsz, self.num_heads, -1, self.head_dim)
-             saved_state["prev_value"] = v.view(
-                 kv_bsz, self.num_heads, -1, self.head_dim
-             )
-             saved_state["prev_key_padding_mask"] = key_padding_mask
-             # In this branch incremental_state is never None
-             assert incremental_state is not None
-             incremental_state = self._set_input_buffer(incremental_state, saved_state)
-         assert k is not None
-         assert k.size(1) == src_len
- 
-         # This is part of a workaround to get around fork/join parallelism
-         # not supporting Optional types.
-         if key_padding_mask is not None and key_padding_mask.dim() == 0:
-             key_padding_mask = None
- 
-         if key_padding_mask is not None:
-             assert key_padding_mask.size(0) == kv_bsz
-             assert key_padding_mask.size(1) == src_len
- 
-         if self.add_zero_attn:
-             assert v is not None
-             src_len += 1
-             k, v, key_padding_mask, attn_mask = self._append_zero_attn(
-                 k=k, v=v, key_padding_mask=key_padding_mask, attn_mask=attn_mask
-             )
- 
-         if self.encoder_decoder_attention and bsz != kv_bsz:
-             attn_weights = torch.einsum(
-                 "bxhtd,bhsd->bxhts",
-                 q.view((kv_bsz, -1, self.num_heads) + q.size()[1:]),
-                 k.view((kv_bsz, self.num_heads) + k.size()[1:]),
-             )
-             attn_weights = attn_weights.reshape((-1,) + attn_weights.size()[-2:])
-         else:
-             attn_weights = torch.bmm(q, k.transpose(1, 2))
-         attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
- 
-         assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
- 
-         if attn_mask is not None:
-             attn_mask = attn_mask.unsqueeze(0)
-             if self.onnx_trace:
-                 attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
-             attn_weights += attn_mask
- 
-         if key_padding_mask is not None:
-             # don't attend to padding symbols
-             attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
-             if not is_tpu:
-                 attn_weights = attn_weights.view(
-                     kv_bsz, -1, self.num_heads, tgt_len, src_len
-                 )
-                 attn_weights = attn_weights.masked_fill(
-                     key_padding_mask.unsqueeze(1)
-                     .unsqueeze(2)
-                     .unsqueeze(3)
-                     .to(torch.bool),
-                     float("-inf"),
-                 )
-             else:
-                 attn_weights = attn_weights.transpose(0, 2)
-                 attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
-                 attn_weights = attn_weights.transpose(0, 2)
-             attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
- 
-         if before_softmax:
-             return attn_weights, v
- 
-         attn_weights_float = softmax(
-             attn_weights, dim=-1, onnx_trace=self.onnx_trace
-         )
-         attn_weights = attn_weights_float.type_as(attn_weights)
-         attn_probs = self.dropout_module(attn_weights)
- 
-         assert v is not None
-         attn: Optional[Tensor] = None
-         if self.encoder_decoder_attention and bsz != kv_bsz:
-             attn = torch.einsum(
-                 "bxhts,bhsd->bxhtd",
-                 attn_probs.view(
-                     (
-                         kv_bsz,
-                         -1,
-                         self.num_heads,
-                     )
-                     + attn_probs.size()[1:]
-                 ),
-                 v.view(
-                     (
-                         kv_bsz,
-                         self.num_heads,
-                     )
-                     + v.size()[1:]
-                 ),
-             )
-             attn = attn.reshape((-1,) + attn.size()[-2:])
-         else:
-             attn = torch.bmm(attn_probs, v)
-         assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
-         if self.onnx_trace and attn.size(1) == 1:
-             # when ONNX tracing a single decoder step (sequence length == 1)
-             # the transpose is a no-op copy before view, thus unnecessary
-             attn = attn.contiguous().view(tgt_len, bsz, self.embed_dim)
-         else:
-             attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim)
-         attn = self.out_proj(attn)
-         attn_weights: Optional[Tensor] = None
-         if need_weights:
-             attn_weights = attn_weights_float.view(
-                 bsz, self.num_heads, tgt_len, src_len
-             ).transpose(1, 0)
-             if not need_head_weights:
-                 # average attention weights over heads
-                 attn_weights = attn_weights.mean(dim=0)
- 
-         return attn, attn_weights
- 
-     @staticmethod
-     def _append_prev_key_padding_mask(
-         key_padding_mask: Optional[Tensor],
-         prev_key_padding_mask: Optional[Tensor],
-         batch_size: int,
-         src_len: int,
-         static_kv: bool,
-     ) -> Optional[Tensor]:
-         # saved key padding masks have shape (bsz, seq_len)
-         if prev_key_padding_mask is not None and static_kv:
-             new_key_padding_mask = prev_key_padding_mask
-         elif prev_key_padding_mask is not None and key_padding_mask is not None:
-             new_key_padding_mask = torch.cat(
-                 [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
-             )
-         # During incremental decoding, as the padding token enters and
-         # leaves the frame, there will be a time when prev or current
-         # is None
-         elif prev_key_padding_mask is not None:
-             if src_len > prev_key_padding_mask.size(1):
-                 filler = torch.zeros(
-                     (batch_size, src_len - prev_key_padding_mask.size(1)),
-                     device=prev_key_padding_mask.device,
-                 )
-                 new_key_padding_mask = torch.cat(
-                     [prev_key_padding_mask.float(), filler.float()], dim=1
-                 )
-             else:
-                 new_key_padding_mask = prev_key_padding_mask.float()
-         elif key_padding_mask is not None:
-             if src_len > key_padding_mask.size(1):
-                 filler = torch.zeros(
-                     (batch_size, src_len - key_padding_mask.size(1)),
-                     device=key_padding_mask.device,
-                 )
-                 new_key_padding_mask = torch.cat(
-                     [filler.float(), key_padding_mask.float()], dim=1
-                 )
-             else:
-                 new_key_padding_mask = key_padding_mask.float()
-         else:
-             new_key_padding_mask = prev_key_padding_mask
-         return new_key_padding_mask
- 
-     @torch.jit.export
-     def reorder_incremental_state(
-         self,
-         incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
-         new_order: Tensor,
-     ):
-         """Reorder buffered internal state (for incremental generation)."""
-         input_buffer = self._get_input_buffer(incremental_state)
-         if input_buffer is not None:
-             for k in input_buffer.keys():
-                 input_buffer_k = input_buffer[k]
-                 if input_buffer_k is not None:
-                     if self.encoder_decoder_attention:
-                         if input_buffer_k.size(0) * self.beam_size == new_order.size(0):
-                             return incremental_state
-                         elif self.beam_size > 1:
-                             input_buffer[k] = input_buffer_k.index_select(
-                                 0,
-                                 new_order.reshape(-1, self.beam_size)[:, 0]
-                                 // self.beam_size,
-                             )
-                         else:
-                             input_buffer[k] = input_buffer_k.index_select(0, new_order)
-                     else:
-                         input_buffer[k] = input_buffer_k.index_select(0, new_order)
-             incremental_state = self._set_input_buffer(incremental_state, input_buffer)
-         return incremental_state
- 
-     def set_beam_size(self, beam_size):
-         """Used for efficient beamable enc-dec attention"""
-         self.beam_size = beam_size
- 
-     def _get_input_buffer(
-         self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
-     ) -> Dict[str, Optional[Tensor]]:
-         result = self.get_incremental_state(incremental_state, "attn_state")
-         if result is not None:
-             return result
-         else:
-             empty_result: Dict[str, Optional[Tensor]] = {}
-             return empty_result
- 
-     def _set_input_buffer(
-         self,
-         incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
-         buffer: Dict[str, Optional[Tensor]],
-     ):
-         return self.set_incremental_state(incremental_state, "attn_state", buffer)
- 
-     def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
-         return attn_weights
- 
-     def upgrade_state_dict_named(self, state_dict, name):
-         prefix = name + "." if name != "" else ""
-         items_to_add = {}
-         keys_to_remove = []
-         for k in state_dict.keys():
-             if k.endswith(prefix + "in_proj_weight"):
-                 # in_proj_weight used to be q + k + v with same dimensions
-                 dim = int(state_dict[k].shape[0] / 3)
-                 items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
-                 items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
-                 items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
- 
-                 keys_to_remove.append(k)
- 
-                 k_bias = prefix + "in_proj_bias"
-                 if k_bias in state_dict.keys():
-                     dim = int(state_dict[k].shape[0] / 3)
-                     items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
-                     items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
-                         dim : 2 * dim
-                     ]
-                     items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
- 
-                     keys_to_remove.append(prefix + "in_proj_bias")
- 
-         for k in keys_to_remove:
-             del state_dict[k]
- 
-         for key, value in items_to_add.items():
-             state_dict[key] = value
- 
-
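A quick smoke test of the class above, as it might be driven outside fairseq (a sketch only; shapes follow the Time x Batch x Channel convention documented in forward, and the call below takes the F.multi_head_attention_forward fast path):

import torch
mha = MultiheadAttention(embed_dim=16, num_heads=4, self_attention=True)
x = torch.randn(10, 2, 16)                        # (tgt_len, batch, embed_dim)
out, weights = mha(x, x, x, need_weights=True)    # weights averaged over heads
assert out.shape == (10, 2, 16)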
- class FairseqDropout(nn.Module):
-     def __init__(self, p, module_name=None):
-         super().__init__()
-         self.p = p
-         self.module_name = module_name
-         self.apply_during_inference = False
- 
-     def forward(self, x, inplace: bool = False):
-         if self.p > 0 and (self.training or self.apply_during_inference):
-             return F.dropout(x, p=self.p, training=True, inplace=inplace)
-         else:
-             return x
- 
-     def make_generation_fast_(
-         self,
-         name: str,
-         retain_dropout: bool = False,
-         retain_dropout_modules: Optional[List[str]] = None,
-         **kwargs
-     ):
-         if retain_dropout:
-             if retain_dropout_modules is not None and self.module_name is None:
-                 logger.warning(
-                     "Cannot enable dropout during inference for module {} "
-                     "because module_name was not set".format(name)
-                 )
-             elif (
-                 retain_dropout_modules is None  # if None, apply to all modules
-                 or self.module_name in retain_dropout_modules
-             ):
-                 logger.info(
-                     "Enabling dropout during inference for module: {}".format(name)
-                 )
-                 self.apply_during_inference = True
-             else:
-                 logger.info("Disabling dropout for module: {}".format(name))
-
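One practical use of apply_during_inference is Monte-Carlo dropout at test time. A sketch (in fairseq the flag is flipped by make_generation_fast_; here it is set directly for brevity):

import torch
drop = FairseqDropout(0.1, module_name="demo")
drop.eval()                        # module is in eval mode...
drop.apply_during_inference = True
y = drop(torch.ones(4))            # ...yet dropout still fires, enabling MC-dropout sampling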
- def quant_noise(module, p, block_size):
-     """
-     Wraps modules and applies quantization noise to the weights for
-     subsequent quantization with Iterative Product Quantization as
-     described in "Training with Quantization Noise for Extreme Model Compression"
- 
-     Args:
-         - module: nn.Module
-         - p: amount of Quantization Noise
-         - block_size: size of the blocks for subsequent quantization with iPQ
- 
-     Remarks:
-         - Module weights must have the right sizes wrt the block size
-         - Only Linear, Embedding and Conv2d modules are supported for the moment
-         - For more detail on how to quantize by blocks with convolutional weights,
-           see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
-         - We implement the simplest form of noise here as stated in the paper,
-           which consists of randomly dropping blocks
-     """
- 
-     # if no quantization noise, don't register hook
-     if p <= 0:
-         return module
- 
-     # supported modules
-     assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))
- 
-     # test whether module.weight has the right sizes wrt block_size
-     is_conv = module.weight.ndim == 4
- 
-     # 2D matrix
-     if not is_conv:
-         assert (
-             module.weight.size(1) % block_size == 0
-         ), "Input features must be a multiple of block sizes"
- 
-     # 4D matrix
-     else:
-         # 1x1 convolutions
-         if module.kernel_size == (1, 1):
-             assert (
-                 module.in_channels % block_size == 0
-             ), "Input channels must be a multiple of block sizes"
-         # regular convolutions
-         else:
-             k = module.kernel_size[0] * module.kernel_size[1]
-             assert k % block_size == 0, "Kernel size must be a multiple of block size"
- 
-     def _forward_pre_hook(mod, input):
-         # no noise for evaluation
-         if mod.training:
-             if not is_conv:
-                 # gather weight and sizes
-                 weight = mod.weight
-                 in_features = weight.size(1)
-                 out_features = weight.size(0)
- 
-                 # split weight matrix into blocks and randomly drop selected blocks
-                 mask = torch.zeros(
-                     in_features // block_size * out_features, device=weight.device
-                 )
-                 mask.bernoulli_(p)
-                 mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
- 
-             else:
-                 # gather weight and sizes
-                 weight = mod.weight
-                 in_channels = mod.in_channels
-                 out_channels = mod.out_channels
- 
-                 # split weight matrix into blocks and randomly drop selected blocks
-                 if mod.kernel_size == (1, 1):
-                     mask = torch.zeros(
-                         int(in_channels // block_size * out_channels),
-                         device=weight.device,
-                     )
-                     mask.bernoulli_(p)
-                     mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
-                 else:
-                     mask = torch.zeros(
-                         weight.size(0), weight.size(1), device=weight.device
-                     )
-                     mask.bernoulli_(p)
-                     mask = (
-                         mask.unsqueeze(2)
-                         .unsqueeze(3)
-                         .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
-                     )
- 
-             # scale weights and apply mask
-             mask = mask.to(
-                 torch.bool
-             )  # x.bool() is not currently supported in TorchScript
-             s = 1 / (1 - p)
-             mod.weight.data = s * weight.masked_fill(mask, 0)
- 
-     module.register_forward_pre_hook(_forward_pre_hook)
-     return module
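Usage sketch: wrap a projection so that, during training only, random weight blocks are zeroed and the survivors rescaled by 1/(1-p):

import torch
from torch import nn
layer = quant_noise(nn.Linear(32, 32), p=0.1, block_size=8)  # 32 % 8 == 0, as the assert requires
layer.train()
_ = layer(torch.randn(4, 32))    # pre-hook drops ~10% of the 8-wide weight blocks
layer.eval()
_ = layer(torch.randn(4, 32))    # no noise at evaluation time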
- 
- 
- def softmax(x, dim: int, onnx_trace: bool = False):
-     if onnx_trace:
-         return F.softmax(x.float(), dim=dim)
-     else:
-         return F.softmax(x, dim=dim, dtype=torch.float32)
- 
- 
- def log_softmax(x, dim: int, onnx_trace: bool = False):
-     if onnx_trace:
-         return F.log_softmax(x.float(), dim=dim)
-     else:
-         return F.log_softmax(x, dim=dim, dtype=torch.float32)
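Both helpers upcast to float32 before normalizing, which keeps mixed-precision logits from overflowing. A tiny illustration of why the fused log_softmax is preferred over composing log with softmax (values chosen to force the failure):

import torch
import torch.nn.functional as F
x = torch.tensor([[1000.0, 0.0]])        # extreme logits
print(torch.log(F.softmax(x, dim=-1)))   # tensor([[0., -inf]])  -- underflows
print(F.log_softmax(x, dim=-1))          # tensor([[0., -1000.]]) -- stays finite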
ablang2/models/ablang1/model.py DELETED
@@ -1,102 +0,0 @@
- import torch
- 
- from .extra_fns import ACT2FN
- from .encoderblocks import EncoderBlocks
- from .embedding import AbEmbeddings
- 
- 
- class AbLang(torch.nn.Module):
-     """
-     Pretraining model: includes AbRep and the head model used for training.
-     """
-     def __init__(self, hparams):
-         super().__init__()
-         self.hparams = hparams
- 
-         self.AbRep = AbRep(self.hparams)
-         self.AbHead = AbHead(self.hparams)
- 
-     def forward(self, x, attention_mask=None):
- 
-         representations = self.AbRep(x, attention_mask)
- 
-         output = self.AbHead(representations.last_hidden_states)
- 
-         return output
- 
-     def get_aa_embeddings(self):
-         "This function is used to extract the trained aa_embeddings."
-         return self.AbRep.AbEmbeddings.aa_embeddings  # ().weight.detach()
- 
- 
- class AbRep(torch.nn.Module):
-     """
-     This is the AbRep model.
-     """
-     def __init__(self, hparams):
-         super().__init__()
-         self.hparams = hparams
- 
-         self.AbEmbeddings = AbEmbeddings(self.hparams)
-         self.EncoderBlocks = EncoderBlocks(self.hparams)
- 
-         self.init_weights()
- 
-     def forward(self, src, attention_mask=None, output_attentions=False):
- 
-         attention_mask = torch.zeros(*src.shape, device=src.device).masked_fill(src == self.hparams.pad_token_id, 1)
- 
-         src = self.AbEmbeddings(src)
- 
-         output = self.EncoderBlocks(src, attention_mask=attention_mask, output_attentions=output_attentions)
- 
-         return output
- 
-     def _init_weights(self, module):
-         """Initialize the weights"""
-         if isinstance(module, (torch.nn.Linear, torch.nn.Embedding)):
-             module.weight.data.normal_(mean=0.0, std=self.hparams.initializer_range)
-         elif isinstance(module, torch.nn.LayerNorm):
-             module.bias.data.zero_()
-             module.weight.data.fill_(1.0)
-         if isinstance(module, torch.nn.Linear) and module.bias is not None:
-             module.bias.data.zero_()
- 
-     def init_weights(self):
-         """
-         Initializes and prunes weights if needed.
-         """
-         # Initialize weights
-         self.apply(self._init_weights)
- 
- 
- class AbHead(torch.nn.Module):
-     """
-     Head for masked sequence prediction.
-     """
- 
-     def __init__(self, hparams):
-         super().__init__()
-         self.hparams = hparams
-         self.dense = torch.nn.Linear(self.hparams.hidden_size, self.hparams.hidden_size)
-         self.layer_norm = torch.nn.LayerNorm(self.hparams.hidden_size, eps=self.hparams.layer_norm_eps)
- 
-         self.decoder = torch.nn.Linear(self.hparams.hidden_size, self.hparams.vocab_size, bias=False)
-         self.bias = torch.nn.Parameter(torch.zeros(self.hparams.vocab_size))
- 
-         self.activation = ACT2FN[self.hparams.hidden_act]
- 
-         ## self.init_weights() - need to have a function doing this
- 
-         self.decoder.bias = self.bias  # Link the two variables so the bias is correctly resized with `resize_token_embeddings`
- 
-     def forward(self, features, **kwargs):
-         x = self.dense(features)
- 
-         x = self.activation(x)
-         x = self.layer_norm(x)
- 
-         # project back to size of vocabulary with bias
-         x = self.decoder(x)
- 
-         return x
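A forward-pass sketch for the three modules above. Every hparams value here is a hypothetical placeholder for shape-checking only; the shipped configuration lives in hparams.json, and AbRep also needs the sibling embedding/encoder modules to be importable:

import argparse
import torch

hparams = argparse.Namespace(          # hypothetical values, not the released config
    vocab_size=24, hidden_size=768, initializer_range=0.02,
    layer_norm_eps=1e-12, hidden_act="gelu", pad_token_id=21,
)
model = AbLang(hparams)                # AbRep (embeddings + encoder) feeding AbHead
tokens = torch.randint(0, 21, (1, 120))   # one tokenized antibody sequence
logits = model(tokens)                 # (batch, seq_len, vocab_size) residue logits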
ablang2/models/ablang1/pretrained.py DELETED
@@ -1,358 +0,0 @@
- import os, json, argparse, string, subprocess, re
- from dataclasses import dataclass
- 
- from numba import jit
- from numba.typed import Dict, List
- from numba.types import unicode_type, DictType
- 
- import numpy as np
- import torch
- import requests
- 
- from . import tokenizers, model
- 
- 
- class pretrained:
-     """
-     Initializes AbLang for heavy or light chains.
-     """
- 
-     def __init__(self, chain="heavy", model_folder="download", random_init=False, ncpu=7, device='cpu'):
-         super().__init__()
- 
-         self.used_device = torch.device(device)
- 
-         if model_folder == "download":
-             # Download model and save to a specific place - if already downloaded, do not download again
-             model_folder = os.path.join(os.path.dirname(__file__), "model-weights-{}".format(chain))
-             os.makedirs(model_folder, exist_ok=True)
- 
-             if not os.path.isfile(os.path.join(model_folder, "amodel.pt")):
-                 print("Downloading model ...")
- 
-                 url = "https://opig.stats.ox.ac.uk/data/downloads/ablang-{}.tar.gz".format(chain)
-                 tmp_file = os.path.join(model_folder, "tmp.tar.gz")
- 
-                 with open(tmp_file, 'wb') as f: f.write(requests.get(url).content)
- 
-                 subprocess.run(["tar", "-zxvf", tmp_file, "-C", model_folder], check=True)
- 
-                 os.remove(tmp_file)
- 
-         self.hparams_file = os.path.join(model_folder, 'hparams.json')
-         self.model_file = os.path.join(model_folder, 'amodel.pt')
- 
-         with open(self.hparams_file, 'r', encoding='utf-8') as f:
-             self.hparams = argparse.Namespace(**json.load(f))
- 
-         self.AbLang = model.AbLang(self.hparams)
-         self.AbLang.to(self.used_device)
- 
-         if not random_init:
-             self.AbLang.load_state_dict(torch.load(self.model_file, map_location=self.used_device))
- 
-         self.tokenizer = tokenizers.ABtokenizer(os.path.join(model_folder, 'vocab.json'))
-         self.AbRep = self.AbLang.AbRep
- 
-         self.ncpu = ncpu
-         self.spread = 11  # Based on get_spread_sequences function
-         if chain == 'heavy':
-             self.max_position = 128
-         else:
-             self.max_position = 127
- 
-     def freeze(self):
-         self.AbLang.eval()
- 
-     def unfreeze(self):
-         self.AbLang.train()
- 
-     def __call__(self, sequence, mode='seqcoding', align=False, splitSize=50):
-         """
-         Mode: seqcoding, rescoding, restore or likelihood.
-         """
-         if mode not in ['rescoding', 'seqcoding', 'restore', 'likelihood']:
-             raise SyntaxError("Given mode doesn't exist.")
- 
-         if isinstance(sequence, str): sequence = [sequence]
- 
-         if align and mode == 'restore':
-             sequence = self.sequence_aligning(sequence)
-             splitSize = ((splitSize // self.spread) + 1) * self.spread
- 
-         aList = []
-         for sequence_part in [sequence[x:x + splitSize] for x in range(0, len(sequence), splitSize)]:
-             aList.append(getattr(self, mode)(sequence_part, align))
- 
-         if mode == 'rescoding':
-             if align:
-                 return aList
- 
-             return sum(aList, [])
- 
-         return np.concatenate(aList)
- 
-     def seqcoding(self, seqs, align=False):
-         """
-         Sequence specific representations
-         """
- 
-         tokens = self.tokenizer(seqs, pad=True, device=self.used_device)
- 
-         residue_states = self.AbRep(tokens).last_hidden_states
- 
-         if torch.is_tensor(residue_states): residue_states = residue_states.cpu().detach().numpy()
- 
-         lens = np.vectorize(len)(seqs)
- 
-         lens = np.tile(lens.reshape(-1, 1, 1), (residue_states.shape[2], 1))
- 
-         seq_codings = np.apply_along_axis(res_to_seq, 2, np.c_[np.swapaxes(residue_states, 1, 2), lens])
- 
-         del lens
-         del residue_states
- 
-         return seq_codings
- 
-     def restore(self, seqs, align=False):
-         """
-         Restore sequences
-         """
- 
-         if align:
-             nr_seqs = len(seqs) // self.spread
- 
-             tokens = self.tokenizer(seqs, pad=True, device=self.used_device)
-             predictions = self.AbLang(tokens)[:, :, 1:21]
- 
-             # Reshape
-             tokens = tokens.reshape(nr_seqs, self.spread, -1)
-             predictions = predictions.reshape(nr_seqs, self.spread, -1, 20)
-             seqs = seqs.reshape(nr_seqs, -1)
- 
-             # Find index of best predictions
-             best_seq_idx = torch.argmax(torch.max(predictions, -1).values[:, :, 1:2].mean(2), -1)
- 
-             # Select best predictions
-             tokens = tokens.gather(1, best_seq_idx.view(-1, 1).unsqueeze(1).repeat(1, 1, tokens.shape[-1])).squeeze(1)
-             predictions = predictions[range(predictions.shape[0]), best_seq_idx]
-             seqs = np.take_along_axis(seqs, best_seq_idx.view(-1, 1).cpu().numpy(), axis=1)
- 
-         else:
-             tokens = self.tokenizer(seqs, pad=True, device=self.used_device)
-             predictions = self.AbLang(tokens)[:, :, 1:21]
- 
-         predicted_tokens = torch.max(predictions, -1).indices + 1
-         restored_tokens = torch.where(tokens == 23, predicted_tokens, tokens)
- 
-         restored_seqs = self.tokenizer(restored_tokens, encode=False)
- 
-         return np.array([res_to_seq(seq, 'reconstruct') for seq in np.c_[restored_seqs, np.vectorize(len)(seqs)]])
- 
-     def likelihood(self, seqs, align=False):
-         """
-         Possible Mutations
-         """
- 
-         tokens = self.tokenizer(seqs, pad=True, device=self.used_device)
- 
-         predictions = self.AbLang(tokens)[:, :, 1:21]
- 
-         if torch.is_tensor(predictions): predictions = predictions.cpu().detach().numpy()
- 
-         return predictions
- 
-     def rescoding(self, seqs, align=False):
-         """
-         Residue specific representations.
-         """
- 
-         if align:
- 
-             import pandas as pd
-             import anarci
- 
-             anarci_out = anarci.run_anarci(pd.DataFrame(seqs).reset_index().values.tolist(), ncpu=7, scheme='imgt')
-             number_alignment = get_number_alignment(anarci_out)
- 
-             seqs = np.array([''.join([i[1] for i in onarci[0][0]]).replace('-', '') for onarci in anarci_out[1]])
- 
-             tokens = self.tokenizer(seqs, pad=True, device=self.used_device)
-             residue_states = self.AbRep(tokens).last_hidden_states
- 
-             if torch.is_tensor(residue_states): residue_states = residue_states.cpu().detach().numpy()
- 
-             residue_output = np.array([create_alignment(res_embed, oanarci, seq, number_alignment) for res_embed, oanarci, seq in zip(residue_states, anarci_out[1], seqs)])
-             del residue_states
-             del tokens
- 
-             return output(aligned_embeds=residue_output, number_alignment=number_alignment.apply(lambda x: '{}{}'.format(*x[0]), axis=1).values)
- 
-         else:
- 
-             tokens = self.tokenizer(seqs, pad=True, device=self.used_device)
-             residue_states = self.AbRep(tokens).last_hidden_states
- 
-             if torch.is_tensor(residue_states): residue_states = residue_states.cpu().detach().numpy()
- 
-             residue_output = [res_to_list(state, seq) for state, seq in zip(residue_states, seqs)]
- 
-             return residue_output
- 
-     def sequence_aligning(self, seqs):
- 
-         import pandas as pd
-         import anarci
- 
-         anarci_out = anarci.run_anarci(
-             pd.DataFrame([seq.replace('*', 'X') for seq in seqs]).reset_index().values.tolist(),
-             ncpu=self.ncpu,
-             scheme='imgt'
-         )  # , allowed_species=['human', 'mouse']
-         anarci_data = pd.DataFrame([str(anarci[0][0]) if anarci else 'ANARCI_error' for anarci in anarci_out[1]], columns=['anarci']).astype('<U90')
- 
-         seqs = anarci_data.apply(lambda x: get_sequences_from_anarci(x.anarci,
-                                                                      self.max_position,
-                                                                      self.spread), axis=1, result_type='expand').to_numpy().reshape(-1)
- 
-         return seqs
- 
-
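How this class is typically driven, mirroring the original AbLang release's README-style API (a sketch; the import path assumes the package root exposes this class as in the original ablang package):

import ablang  # assumption: installed as the original ablang package

heavy_ablang = ablang.pretrained("heavy")   # downloads weights on first use
heavy_ablang.freeze()                        # eval mode for inference

seqs = ["EV*LVESGGGLVQPGGSLRLSCAAS"]          # '*' marks residues to restore (illustrative)
seqcodings = heavy_ablang(seqs, mode="seqcoding")  # one vector per sequence
restored = heavy_ablang(seqs, mode="restore")      # fills in the masked residues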
226
-
227
- @dataclass
228
- class output():
229
- """
230
- Dataclass used to store output.
231
- """
232
-
233
- aligned_embeds: None
234
- number_alignment: None
235
-
236
-
237
- def res_to_list(state, seq):
238
- return state[1:1+len(seq)]
239
-
240
- def res_to_seq(a, mode='mean'):
241
- """
242
- Function for how we go from n_values for each amino acid to n_values for each sequence.
243
-
244
- We leave out the start, end and padding tokens.
245
- """
246
- if mode=='sum':
247
- return a[1:(1+int(a[-1]))].sum()
248
-
249
- elif mode=='mean':
250
- return a[1:(1+int(a[-1]))].mean()
251
-
252
- elif mode=='reconstruct':
253
-
254
- return a[0][1:(1+int(a[-1]))]
255
-
256
- def get_number_alignment(oanarci):
257
- """
258
- Creates a number alignment from the anarci results.
259
- """
260
-
261
- import pandas as pd
262
-
263
- alist = []
264
-
265
- for aligned_seq in oanarci[1]:
266
- alist.append(pd.DataFrame(aligned_seq[0][0])[0])
267
-
268
- unsorted_alignment = pd.concat(alist).drop_duplicates()
269
- max_alignment = get_max_alignment()
270
-
271
- return max_alignment.merge(unsorted_alignment.to_frame(), left_on=0, right_on=0)
272
-
273
- def get_max_alignment():
274
- """
275
- Create maximum possible alignment for sorting
276
- """
277
-
278
- import pandas as pd
279
-
280
- sortlist = []
281
-
282
- for num in range(1, 128+1):
283
-
284
- if num==112:
285
- for char in string.ascii_uppercase[::-1]:
286
- sortlist.append([(num, char)])
287
-
288
- sortlist.append([(num,' ')])
289
-
290
- else:
291
- sortlist.append([(num,' ')])
292
- for char in string.ascii_uppercase:
293
- sortlist.append([(num, char)])
294
-
295
- return pd.DataFrame(sortlist)
296
-
297
-
298
- def create_alignment(res_embeds, oanarci, seq, number_alignment):
299
-
300
- import pandas as pd
301
-
302
- datadf = pd.DataFrame(oanarci[0][0])
303
-
304
- sequence_alignment = number_alignment.merge(datadf, how='left', on=0).fillna('-')[1]
305
-
306
- idxs = np.where(sequence_alignment.values == '-')[0]
307
-
308
- idxs = [idx-num for num, idx in enumerate(idxs)]
309
-
310
- aligned_embeds = pd.DataFrame(np.insert(res_embeds[1:1+len(seq)], idxs , 0, axis=0))
311
-
312
- return pd.concat([aligned_embeds, sequence_alignment], axis=1).values
313
-
314
- def turn_into_numba(anarcis):
315
- """
316
- Turns the nested anarci dictionary into a numba item, allowing us to use numba on it.
317
- """
318
-
319
- anarci_list = List.empty_list(unicode_type)
320
- [anarci_list.append(str(anarci)) for anarci in anarcis]
321
-
322
- return anarci_list
323
-
324
- @jit(nopython=True)
325
- def get_spread_sequences(seq, spread, start_position, numbaList):
326
- """
327
- Test sequences which are 8 positions shorter (position 10 + max CDR1 gap of 7) up to 2 positions longer (possible insertions).
328
- """
329
-
330
- for diff in range(start_position-8, start_position+2+1):
331
- numbaList.append('*'*diff+seq)
332
-
333
- return numbaList
334
-
335
- def get_sequences_from_anarci(out_anarci, max_position, spread):
336
- """
337
- Ensures correct masking on each side of sequence
338
- """
339
-
340
- if out_anarci == 'ANARCI_error':
341
- return np.array(['ANARCI-ERR']*spread)
342
-
343
- end_position = int(re.search(r'\d+', out_anarci[::-1]).group()[::-1])
344
- # Fixes ANARCI error of poor numbering of the CDR1 region
345
- start_position = int(re.search(r'\d+,\s\'.\'\),\s\'[^-]+\'\),\s\(\(\d+,\s\'.\'\),\s\'[^-]+\'\),\s\(\(\d+,\s\'.\'\),\s\'[^-]+\'\),\s\(\(\d+,\s\'.\'\),\s\'[^-]+',
346
- out_anarci).group().split(',')[0]) - 1
347
-
348
- sequence = "".join(re.findall(r"(?i)[A-Z*]", "".join(re.findall(r'\),\s\'[A-Z*]', out_anarci))))
349
-
350
- sequence_j = ''.join(sequence).replace('-','').replace('X','*') + '*'*(max_position-int(end_position))
351
-
352
- numba_list = List.empty_list(unicode_type)
353
-
354
- spread_seqs = np.array(get_spread_sequences(sequence_j, spread, start_position, numba_list))
355
-
356
- return spread_seqs
357
-
358
-
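For intuition, a plain-Python equivalent of the spread trick above (the jitted version takes a numba typed List instead of a Python list):

seq, start_position = "EVQLVESGG", 10
spread_seqs = ['*' * diff + seq
               for diff in range(start_position - 8, start_position + 2 + 1)]
len(spread_seqs)   # 11, matching self.spread in the pretrained class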
ablang2/models/ablang1/tokenizers.py DELETED
@@ -1,50 +0,0 @@
- import json
- import torch
- 
- 
- class ABtokenizer():
-     """
-     Tokenizer for proteins. Both aa to token and token to aa.
-     """
- 
-     def __init__(self, vocab_dir):
-         self.set_vocabs(vocab_dir)
-         self.pad_token = self.vocab_to_token['-']
- 
-     def __call__(self, sequenceList, encode=True, pad=False, device='cpu'):
-         # assert isinstance(sequenceList, list)
- 
-         if encode:
-             data = [self.encode(seq, device=device) for seq in sequenceList]
-             if pad: return torch.nn.utils.rnn.pad_sequence(data, batch_first=True, padding_value=self.pad_token)
-             else: return data
- 
-         else: return [self.decode(token) for token in sequenceList]
- 
-     def set_vocabs(self, vocab_dir):
-         with open(vocab_dir, encoding="utf-8") as vocab_handle:
-             self.vocab_to_token = json.load(vocab_handle)
- 
-         self.vocab_to_aa = {v: k for k, v in self.vocab_to_token.items()}
- 
-     def encode(self, sequence, device='cpu'):
-         try:
-             encoded = [self.vocab_to_token["<"]] + [self.vocab_to_token[resn] for resn in sequence] + [self.vocab_to_token[">"]]
-         except KeyError as e:
- 
-             wrong_aa = e.args
- 
-             e.args = (f"Following character(s) not accepted in sequences: {wrong_aa}. "
-                       "Please only use amino acids (MRHKDESTNQCGPAVIFYWL) or the mask token (*).",)
-             raise
- 
-         return torch.tensor(encoded, dtype=torch.long, device=device)
-         # Start and Stop token should probably not be added here, but instead earlier
- 
-     def decode(self, seqtokens):
- 
-         if torch.is_tensor(seqtokens): seqtokens = seqtokens.cpu().numpy()
- 
-         return ''.join([self.vocab_to_aa[token] for token in seqtokens])
-
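A round-trip sketch of the tokenizer (the vocab.json path is illustrative; the real file ships alongside the model weights):

tok = ABtokenizer("vocab.json")                 # vocab maps residues <-> integer tokens
batch = tok(["EVQLVESGG", "EVQ"], pad=True)     # padded LongTensor; < and > are added
seqs = tok(batch, encode=False)                 # back to strings, e.g. '<EVQLVESGG>'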
ablang2/models/ablang2/__init__.py DELETED
File without changes
ablang2/models/ablang2/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (150 Bytes)
 
ablang2/models/ablang2/__pycache__/__init__.cpython-312.pyc DELETED
Binary file (154 Bytes)
 
ablang2/models/ablang2/__pycache__/ablang.cpython-312.pyc DELETED
Binary file (6.39 kB)
 
ablang2/models/ablang2/__pycache__/encoderblock.cpython-310.pyc DELETED
Binary file (4.57 kB)