akaarr committed
Commit 625448f · verified · 1 Parent(s): 04b0398

Upload crfBLSTM_Model.py

Files changed (1):
  1. crfBLSTM_Model.py +197 -0
crfBLSTM_Model.py ADDED
@@ -0,0 +1,197 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import pandas as pd
+ from TorchCRF import CRF
+ from sklearn.model_selection import train_test_split
+ from sklearn.metrics import classification_report, confusion_matrix
+ import seaborn as sns
+ import matplotlib.pyplot as plt
+ from torch.nn.utils.rnn import pad_sequence
+ from torch.utils.data import Dataset, DataLoader
+ from torch.cuda.amp import autocast, GradScaler
+
+ # Set device
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(f"Using device: {device}")
+
+
+ # Define the BiLSTM-CRF model with Layer Normalization
+ class BiLSTMCRFModel(nn.Module):
+     def __init__(self, vocab_size, embedding_dim, hidden_dim, num_labels):
+         super(BiLSTMCRFModel, self).__init__()
+         # padding_idx=0 keeps the <PAD> embedding fixed at zero
+         self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
+         self.lstm = nn.LSTM(embedding_dim, hidden_dim, bidirectional=True, batch_first=True)
+         self.layer_norm = nn.LayerNorm(hidden_dim * 2)  # Layer Normalization
+         self.fc = nn.Linear(hidden_dim * 2, num_labels)
+         self.crf = CRF(num_labels)
+
+     def forward(self, words, attention_mask, labels=None):
+         embedded = self.embedding(words)
+         lstm_out, _ = self.lstm(embedded)
+         lstm_out = self.layer_norm(lstm_out)  # Stabilize outputs
+         emissions = self.fc(lstm_out)
+
+         if labels is not None:
+             # TorchCRF returns the log-likelihood per sequence; negate it for a loss
+             loss = -self.crf(emissions, labels, mask=attention_mask.bool())
+             return loss
+         else:
+             return self.crf.viterbi_decode(emissions, mask=attention_mask.bool())
+
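+ # Quick shape check of the model's two paths (a minimal sketch; the sizes
+ # below are arbitrary and only for illustration):
+ #   m = BiLSTMCRFModel(vocab_size=10, embedding_dim=8, hidden_dim=4, num_labels=3)
+ #   w = torch.randint(1, 10, (2, 5)); mask = torch.ones(2, 5, dtype=torch.bool)
+ #   m(w, mask)                                       # decode: list of 2 label-id sequences
+ #   m(w, mask, torch.zeros(2, 5, dtype=torch.long))  # training: per-sequence loss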
+
+ # Dataset class
+ class NERDataset(Dataset):
+     def __init__(self, words, tags):
+         self.words = words
+         self.tags = tags
+
+     def __len__(self):
+         return len(self.words)
+
+     def __getitem__(self, idx):
+         return torch.tensor(self.words[idx]), torch.tensor(self.tags[idx])
+
+
+ # Proper collate function for DataLoader
+ def collate_fn(batch):
+     words, tags = zip(*batch)  # Unpack batch into separate tuples
+     # id 0 is reserved for <PAD>, so it is safe as the padding value
+     words_padded = pad_sequence(words, batch_first=True, padding_value=0)
+     tags_padded = pad_sequence(tags, batch_first=True, padding_value=0)
+     return words_padded, tags_padded
+
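+ # Example (hypothetical numbers): two sentences of lengths 3 and 5 collate
+ # into a (2, 5) batch; the shorter row is right-padded with 0, which the
+ # mask (batch_words != 0) later treats as padding.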
+
+ # Load and preprocess data
+ def prepare_data(df):
+     df['Tag'] = df['Tag'].fillna('O').astype(str).apply(lambda x: x.strip().upper())
+
+     # Reserve id 0 for padding so that (batch_words != 0) is a valid mask;
+     # real word ids therefore start at 1
+     word_to_id = {word: idx + 1 for idx, word in enumerate(set(df['Word']))}
+     word_to_id['<PAD>'] = 0
+     word_to_id['<UNK>'] = len(word_to_id)
+
+     tag_to_id = {tag: idx for idx, tag in enumerate(set(df['Tag']))}
+     id_to_tag = {idx: tag for tag, idx in tag_to_id.items()}
+
+     words, tags = [], []
+     for _, group in df.groupby('Sentence'):
+         words.append([word_to_id.get(w, word_to_id['<UNK>']) for w in group['Word']])
+         tags.append([tag_to_id[t] for t in group['Tag']])
+
+     return words, tags, word_to_id, tag_to_id, id_to_tag
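+
+ # Example of prepare_data's output (hypothetical ids): a four-word sentence
+ # yields one entry in `words` such as [5, 12, 7, 3] and a parallel entry in
+ # `tags` such as [2, 0, 1, 0].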
+
+
+ # Load dataset
+ df = pd.read_excel('Augmented_Dataset.xlsx', engine='openpyxl')
+
+ # Note: the dataset is not shuffled row-by-row here; with one word per row,
+ # that would scramble word order inside each sentence. train_test_split
+ # below already shuffles at the sentence level.
+ words, tags, word_to_id, tag_to_id, id_to_tag = prepare_data(df)
+
+ # Split into train and test
+ train_words, test_words, train_tags, test_tags = train_test_split(
+     words, tags, test_size=0.2, random_state=42, shuffle=True)
+
+ # Create PyTorch DataLoaders
+ train_dataset = NERDataset(train_words, train_tags)
+ test_dataset = NERDataset(test_words, test_tags)
+
+ train_loader = DataLoader(train_dataset, batch_size=256, shuffle=True, collate_fn=collate_fn)
+ test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False, collate_fn=collate_fn)
+
+ # Model initialization
+ vocab_size = len(word_to_id)
+ embedding_dim = 100
+ hidden_dim = 128
+ num_labels = len(tag_to_id)
+
+ model = BiLSTMCRFModel(vocab_size, embedding_dim, hidden_dim, num_labels).to(device)
+ optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=1e-5)
+ scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2)
+ scaler = GradScaler()  # Mixed precision training
+
+ # Training loop with optimizations
+ num_epochs = 10
+ accumulation_steps = 4
+ best_loss = float('inf')
+
+ print("Starting Training...")
+ for epoch in range(num_epochs):
+     model.train()
+     total_loss = 0
+     optimizer.zero_grad()
+
+     for i, (batch_words, batch_tags) in enumerate(train_loader):
+         batch_words, batch_tags = batch_words.to(device), batch_tags.to(device)
+         attention_mask = (batch_words != 0).to(device)
+
+         with autocast():  # Mixed precision forward pass
+             loss = model(batch_words, attention_mask, batch_tags)
+             loss = loss.mean() / accumulation_steps  # Scale loss for accumulation
+
+         scaler.scale(loss).backward()  # Scale gradients
+
+         if (i + 1) % accumulation_steps == 0:
+             scaler.step(optimizer)
+             scaler.update()
+             optimizer.zero_grad()
+
+         total_loss += loss.item()
+
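+     # Flush any remaining accumulated gradients before ending the epoch (an
+     # added safeguard: len(train_loader) may not be a multiple of
+     # accumulation_steps, and the step above would then skip the last batches)
+     if (i + 1) % accumulation_steps != 0:
+         scaler.step(optimizer)
+         scaler.update()
+         optimizer.zero_grad()
+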
+     avg_loss = total_loss / len(train_loader)
+     scheduler.step(avg_loss)
+
+     print(f"Epoch {epoch + 1}, Loss: {avg_loss:.4f}, LR: {optimizer.param_groups[0]['lr']}")
+
+     if avg_loss < best_loss:
+         best_loss = avg_loss
+         torch.save(model.state_dict(), "best_model.pth")
+         print(f"New best model saved with loss: {best_loss:.4f}")
+
+     torch.cuda.empty_cache()  # Free GPU memory
+
+ print("Training Complete!")
+
+ # Evaluate model
+ def evaluate_model(model, test_loader, id_to_tag):
+     model.eval()
+     true_labels, pred_labels = [], []
+
+     with torch.no_grad():
+         for batch_words, batch_tags in test_loader:
+             batch_words, batch_tags = batch_words.to(device), batch_tags.to(device)
+             attention_mask = (batch_words != 0).to(device)  # Mask out padding tokens
+
+             pred_tags = model(batch_words, attention_mask)
+
+             for i in range(batch_words.shape[0]):  # Iterate over the batch
+                 # Use the mask to recover the unpadded length; viterbi_decode
+                 # already returns only the unmasked positions
+                 seq_len = int(attention_mask[i].sum().item())
+                 true_seq = batch_tags[i][:seq_len].tolist()
+                 pred_seq = pred_tags[i][:seq_len]
+
+                 true_labels.extend(id_to_tag[t] for t in true_seq)
+                 pred_labels.extend(id_to_tag[p] for p in pred_seq)
+
+     # Sanity check: every true label must have a prediction
+     assert len(true_labels) == len(pred_labels), "Mismatch in true and predicted label counts!"
+
+     print("Classification Report:")
+     print(classification_report(true_labels, pred_labels))
+
+     cm = confusion_matrix(true_labels, pred_labels, labels=list(id_to_tag.values()))
+     plt.figure(figsize=(10, 8))
+     sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
+                 xticklabels=list(id_to_tag.values()), yticklabels=list(id_to_tag.values()))
+     plt.xlabel('Predicted')
+     plt.ylabel('True')
+     plt.title('Confusion Matrix')
+     plt.show()
+
+
+ # Evaluate
+ print("\nFinal Evaluation:")
+ evaluate_model(model, test_loader, id_to_tag)
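+
+ # A minimal inference sketch (added illustration, not part of the training
+ # run above): tag one new sentence with the in-memory model. The helper name
+ # and the example sentence are hypothetical.
+ def tag_sentence(sentence_words, model, word_to_id, id_to_tag):
+     ids = [word_to_id.get(w, word_to_id['<UNK>']) for w in sentence_words]
+     words = torch.tensor([ids], device=device)
+     mask = torch.ones_like(words)  # a single sentence has no padding
+     model.eval()
+     with torch.no_grad():
+         pred = model(words, mask)[0]  # decode path (viterbi_decode)
+     return [id_to_tag[p] for p in pred]
+
+ # Example: tag_sentence(["John", "lives", "in", "Paris"], model, word_to_id, id_to_tag)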