Yaxin committed on
Commit
32929a0
·
1 Parent(s): 46319c5

Update SemEval2015.py

Browse files

添加使用nltk对text分词,并加入了ATESP_BIEOS_tags,ATESP_BIO_tags,ATE_BIEOS_tags,ATE_BIO_tags 4种标记方法。

Files changed (1) hide show
  1. SemEval2015.py +97 -5
SemEval2015.py CHANGED
@@ -42,7 +42,7 @@ _CONFIG = [
42
  "laptops"
43
  ]
44
 
45
- _VERSION = "0.0.1"
46
 
47
  _HOMEPAGE_URL = "https://alt.qcri.org/semeval2015/task12/index.php?id=data-and-tools/"
48
  _DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2015Task12Corrected/{split}/{domain}_{split}.xml"
@@ -89,6 +89,12 @@ class SemEval2015(datasets.GeneratorBasedBuilder):
89
  'target': datasets.Value(dtype='string'),
90
  'to': datasets.Value(dtype='string')}
91
  ],
 
 
 
 
 
 
92
  'domain': datasets.Value(dtype='string'),
93
  'reviewId': datasets.Value(dtype='string'),
94
  'sentenceId': datasets.Value(dtype='string')
@@ -173,12 +179,98 @@ class SemEvalXMLDataset():
173
 
174
  Opinions.sort(key=lambda x: x["from"])
175
  # 从小到大排序
176
-
177
- self.SentenceWithOpinions.append({
178
  "text": text,
179
  "opinions": Opinions,
180
  "domain": domain,
181
  "reviewId": reviewId,
182
  "sentenceId": sentenceId
183
- }
184
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  "laptops"
43
  ]
44
 
45
+ _VERSION = "0.1.0"
46
 
47
  _HOMEPAGE_URL = "https://alt.qcri.org/semeval2015/task12/index.php?id=data-and-tools/"
48
  _DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2015Task12Corrected/{split}/{domain}_{split}.xml"
 
89
  'target': datasets.Value(dtype='string'),
90
  'to': datasets.Value(dtype='string')}
91
  ],
92
+ 'tokens': [datasets.Value(dtype='string')],
93
+ 'ATESP_BIEOS_tags': [datasets.Value(dtype='string')],
94
+ 'ATESP_BIO_tags': [datasets.Value(dtype='string')],
95
+ 'ATE_BIEOS_tags': [datasets.Value(dtype='string')],
96
+ 'ATE_BIO_tags': [datasets.Value(dtype='string')],
97
+
98
  'domain': datasets.Value(dtype='string'),
99
  'reviewId': datasets.Value(dtype='string'),
100
  'sentenceId': datasets.Value(dtype='string')
 
179
 
180
  Opinions.sort(key=lambda x: x["from"])
181
  # 从小到大排序
182
+ example = {
 
183
  "text": text,
184
  "opinions": Opinions,
185
  "domain": domain,
186
  "reviewId": reviewId,
187
  "sentenceId": sentenceId
188
+ }
189
+ example = addTokenAndLabel(example)
190
+ self.SentenceWithOpinions.append(example)
191
+
192
+ import nltk
193
+
194
def clearOpinion(example):
    """Filter an example's opinion list down to usable aspect spans.

    Drops NULL / empty-span opinions, sorts the remainder by start
    offset, and removes duplicate or overlapping spans so the kept
    intervals are pairwise disjoint — a precondition for the BIEOS/BIO
    tagging done in addTokenAndLabel.

    Args:
        example: dict with an 'opinions' list; each opinion is a dict
            carrying 'target', 'from' and 'to' (character offsets stored
            as strings).

    Returns:
        List of opinion dicts, sorted by start offset, non-overlapping.
    """
    opinions = example['opinions']

    # Skip opinions with no concrete target span: explicit NULL targets,
    # empty targets, or zero-length offsets.
    skipNullOpinions = []
    for opinion in opinions:
        target = opinion['target']
        if target.lower() == 'null' or target == '' or opinion['from'] == opinion['to']:
            continue
        skipNullOpinions.append(opinion)

    # Sort by start offset, ascending (offsets are strings, so compare as ints).
    skipNullOpinions.sort(key=lambda x: int(x['from']))

    # Keep a span only if it starts at or after the end of the previously
    # kept span.  This removes exact duplicates AND partially overlapping
    # spans; the previous `from != last_from and to != last_to` check let
    # overlapping spans with both endpoints different through, which
    # corrupted token/tag alignment downstream.
    UniOpinions = []
    for opinion in skipNullOpinions:
        if not UniOpinions or int(opinion['from']) >= int(UniOpinions[-1]['to']):
            UniOpinions.append(opinion)
    return UniOpinions
218
+
219
+
220
def addTokenAndLabel(example):
    """Tokenize example['text'] with nltk and attach aligned tag sequences.

    Using the disjoint aspect spans returned by clearOpinion(example),
    adds a 'tokens' list plus four tag sequences of the same length:
      - 'ATESP_BIEOS_tags': B/I/E/S-<POL> inside aspect spans, O elsewhere
      - 'ATESP_BIO_tags':   BIO variant (E -> I, S -> B)
      - 'ATE_BIEOS_tags':   BIEOS letters only, polarity suffix stripped
      - 'ATE_BIO_tags':     BIO letters only, polarity suffix stripped
    where <POL> is the first three letters of the opinion polarity,
    upper-cased (e.g. POS / NEG / NEU).

    Args:
        example: dict with at least 'text' and 'opinions' (see clearOpinion).

    Returns:
        The same dict, mutated in place with the five new fields.

    Raises:
        ValueError: if an aspect span yields no tokens (should not happen
            after clearOpinion's filtering).
    """
    tokens = []
    labels = []

    text = example['text']
    UniOpinions = clearOpinion(example)
    text_begin = 0

    for aspect in UniOpinions:
        polarity = aspect['polarity'][:3].upper()
        start, end = int(aspect['from']), int(aspect['to'])

        # Tokens strictly before the aspect span are outside any aspect: tag O.
        pre_O_tokens = nltk.word_tokenize(text[text_begin:start])
        tokens.extend(pre_O_tokens)
        labels.extend(['O'] * len(pre_O_tokens))

        # Tokens inside the aspect span get B/I/E/S-<POL> tags.
        BIES_tokens = nltk.word_tokenize(text[start:end])
        if not BIES_tokens:
            # Was `assert ..., print(...)`: the message evaluated to None and
            # the check vanished under -O.  Raise explicitly instead.
            raise ValueError(f'aspect span [{start}:{end}] produced no tokens')
        tokens.extend(BIES_tokens)

        if len(BIES_tokens) == 1:
            labels.append('S-' + polarity)
        else:
            labels.append('B-' + polarity)
            labels.extend(['I-' + polarity] * (len(BIES_tokens) - 2))
            labels.append('E-' + polarity)

        text_begin = end

    # Everything after the last aspect span is tagged O.
    tail_tokens = nltk.word_tokenize(text[text_begin:])
    tokens.extend(tail_tokens)
    labels.extend(['O'] * len(tail_tokens))

    example['tokens'] = tokens
    example['ATESP_BIEOS_tags'] = labels

    # BIO variant: E -> I, S -> B.  Prefix replacement is safe because the
    # 3-letter polarity suffix never contains 'E-' or 'S-'.
    ATESP_BIO_labels = [label.replace('E-', 'I-').replace('S-', 'B-') for label in labels]
    example['ATESP_BIO_tags'] = ATESP_BIO_labels

    # Aspect-term-only variants keep just the leading B/I/E/S/O letter.
    example['ATE_BIEOS_tags'] = [label[0] for label in labels]
    example['ATE_BIO_tags'] = [label[0] for label in ATESP_BIO_labels]

    return example