# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The SemEval2015 Task12 Reviews Corpus"""

from xml.dom.minidom import parse

import datasets
import nltk

_CITATION = """\
@inproceedings{pontiki2015semeval,
  title={Semeval-2015 task 12: Aspect based sentiment analysis},
  author={Pontiki, Maria and Galanis, Dimitrios and Papageorgiou, Harris and Manandhar, Suresh and Androutsopoulos, Ion},
  booktitle={Proceedings of the 9th international workshop on semantic evaluation (SemEval 2015)},
  pages={486--495},
  year={2015}
}
"""

_LICENSE = """\
    Please click on the homepage URL for license details.
"""

_DESCRIPTION = """\
A collection of SemEval2015 specifically designed to aid research in Aspect Based Sentiment Analysis.
"""

_CONFIG = [
    # Restaurants domain
    "restaurants",
    # Consumer Electronics domain
    "laptops",
]

_VERSION = "0.1.0"

_HOMEPAGE_URL = "https://alt.qcri.org/semeval2015/task12/index.php?id=data-and-tools/"
_DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2015Task12Corrected/{split}/{domain}_{split}.xml"


class SemEval2015Config(datasets.BuilderConfig):
    """BuilderConfig for SemEval2015."""

    def __init__(self, _CONFIG, **kwargs):
        super(SemEval2015Config, self).__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.configs = _CONFIG


class SemEval2015(datasets.GeneratorBasedBuilder):
    """The SemEval-2015 Task 12 Aspect Based Sentiment Analysis corpus."""

    BUILDER_CONFIGS = [
        SemEval2015Config(
            name="All",
            _CONFIG=_CONFIG,
            description="A collection of SemEval2015 specifically designed to aid research in Aspect Based Sentiment Analysis.",
        )
    ] + [
        SemEval2015Config(
            name=config,
            _CONFIG=[config],
            description=f"The {config} domain of SemEval2015, specifically designed to aid research in Aspect Based Sentiment Analysis.",
        )
        for config in _CONFIG
    ]
    
    BUILDER_CONFIG_CLASS = SemEval2015Config
    DEFAULT_CONFIG_NAME = "All"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    'text': datasets.Value(dtype='string'),
                    'opinions': [
                        {
                            'category': datasets.Value(dtype='string'),
                            'from': datasets.Value(dtype='string'),
                            'polarity': datasets.Value(dtype='string'),
                            'target': datasets.Value(dtype='string'),
                            'to': datasets.Value(dtype='string'),
                        }
                    ],
                    'tokens': [datasets.Value(dtype='string')],
                    'ATESP_BIEOS_tags': [datasets.Value(dtype='string')],
                    'ATESP_BIO_tags': [datasets.Value(dtype='string')],
                    'ATE_BIEOS_tags': [datasets.Value(dtype='string')],
                    'ATE_BIO_tags': [datasets.Value(dtype='string')],
                    'domain': datasets.Value(dtype='string'),
                    'reviewId': datasets.Value(dtype='string'),
                    'sentenceId': datasets.Value(dtype='string'),
                }
            ),
            supervised_keys=None,
            license=_LICENSE,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )
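
    # A hypothetical example of a single generated record (values invented for
    # illustration; the tag columns come from addTokenAndLabel at the bottom of
    # this file):
    #     {
    #         "text": "The fajitas were great.",
    #         "opinions": [{"category": "FOOD#QUALITY", "from": "4", "polarity": "positive",
    #                       "target": "fajitas", "to": "11"}],
    #         "tokens": ["The", "fajitas", "were", "great", "."],
    #         "ATESP_BIEOS_tags": ["O", "S-POS", "O", "O", "O"],
    #         "ATESP_BIO_tags": ["O", "B-POS", "O", "O", "O"],
    #         "ATE_BIEOS_tags": ["O", "S", "O", "O", "O"],
    #         "ATE_BIO_tags": ["O", "B", "O", "O", "O"],
    #         "domain": "restaurants",
    #         "reviewId": "1004293",
    #         "sentenceId": "1004293:0",
    #     }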

    def _split_generators(self, dl_manager):
        
        train_urls = [_DOWNLOAD_URL.format(split="train", domain=config) for config in self.config.configs]
        dev_urls = [_DOWNLOAD_URL.format(split="trial", domain=config) for config in self.config.configs]
        test_urls = [_DOWNLOAD_URL.format(split="test", domain=config) for config in self.config.configs]

        train_paths = dl_manager.download_and_extract(train_urls)
        dev_paths = dl_manager.download_and_extract(dev_urls)
        test_paths = dl_manager.download_and_extract(test_urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths, "domain_list": self.config.configs}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths, "domain_list": self.config.configs}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths, "domain_list": self.config.configs}),
        ]

    def _generate_examples(self, file_paths, domain_list):
        assert len(file_paths) == len(domain_list)

        row_count = 0
        for file_path, domain in zip(file_paths, domain_list):
            semEvalDataset = SemEvalXMLDataset(file_path, domain)
            for example in semEvalDataset.SentenceWithOpinions:
                yield row_count, example
                row_count += 1


# Input: the path of an XML file.
# Output: a dataset where each example contains [reviewId, sentenceId, text, Opinions];
#         the Opinions list of each example holds the details of every single opinion.

class SemEvalXMLDataset():
    def __init__(self, file_name, domain):
        # Build SentenceWithOpinions: a list of examples with (reviewId, sentenceId, text, Opinions).

        self.SentenceWithOpinions = []
        self.xml_path = file_name

        self.sentenceXmlList = parse(self.xml_path).getElementsByTagName('sentence')
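
        # Illustrative sketch (hypothetical values) of the XML shape this parser
        # expects, based on the tags and attributes read below:
        #
        #   <sentence id="1004293:0">
        #       <text>The fajitas were great.</text>
        #       <Opinions>
        #           <Opinion target="fajitas" category="FOOD#QUALITY"
        #                    polarity="positive" from="4" to="11"/>
        #       </Opinions>
        #   </sentence>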

        for sentenceXml in self.sentenceXmlList:
            reviewId = sentenceXml.getAttribute("id").split(':')[0]
            sentenceId = sentenceXml.getAttribute("id")
            if len(sentenceXml.getElementsByTagName("text")[0].childNodes) < 1:
                # skip sentences that carry no review text
                continue
            text = sentenceXml.getElementsByTagName("text")[0].childNodes[0].nodeValue
            OpinionXmlList = sentenceXml.getElementsByTagName("Opinion")
            Opinions = []
            for opinionXml in OpinionXmlList:
                # some sentences may have no opinions at all
                target = opinionXml.getAttribute("target")
                category = opinionXml.getAttribute("category")
                polarity = opinionXml.getAttribute("polarity")
                from_ = opinionXml.getAttribute("from")
                to = opinionXml.getAttribute("to")

                opinionDict = {
                    "target": target,
                    "category": category,
                    "polarity": polarity,
                    "from": from_,
                    "to": to
                }
                Opinions.append(opinionDict)
                
            # Sort opinions by start offset, ascending. Cast to int so multi-digit
            # offsets sort numerically; "or 0" guards opinions without offset attributes.
            Opinions.sort(key=lambda x: int(x["from"] or 0))
            example = {
                    "text": text, 
                    "opinions": Opinions, 
                    "domain": domain, 
                    "reviewId": reviewId, 
                    "sentenceId": sentenceId
                }
            example = addTokenAndLabel(example)
            self.SentenceWithOpinions.append(example)


def clearOpinion(example):
    opinions = example['opinions']
    skipNullOpinions = []
    # drop NULL opinions
    for opinion in opinions:
        target = opinion['target']
        from_ = opinion['from']
        to = opinion['to']
        # skip NULL or empty targets and empty spans
        if target.lower() == 'null' or target == '' or from_ == to:
            continue
        skipNullOpinions.append(opinion)

    # drop duplicate opinions
    skipNullOpinions.sort(key=lambda x: int(x['from']))  # sort by start offset, ascending
    UniOpinions = []
    for opinion in skipNullOpinions:
        if len(UniOpinions) < 1:
            UniOpinions.append(opinion)
        else:
            # keep an opinion only if both offsets differ from the previous one
            # (this also drops overlapping spans that share an endpoint)
            if opinion['from'] != UniOpinions[-1]['from'] and opinion['to'] != UniOpinions[-1]['to']:
                UniOpinions.append(opinion)
    return UniOpinions
    

def addTokenAndLabel(example):
    # Tokenize the sentence and derive ATE/ATESP tag sequences in both BIEOS and BIO schemes.
    tokens = []
    labels = []

    text = example['text']
    UniOpinions = clearOpinion(example)
    text_begin = 0
    
    for aspect in UniOpinions:
        polarity = aspect['polarity'][:3].upper()  # e.g. 'positive' -> 'POS'

        # tokens before the aspect span are tagged 'O'
        pre_O_tokens = nltk.word_tokenize(text[text_begin: int(aspect['from'])])
        tokens.extend(pre_O_tokens)
        labels.extend(['O'] * len(pre_O_tokens))

        # tokens inside the aspect span get B/I/E/S boundary tags plus the polarity
        BIES_tokens = nltk.word_tokenize(text[int(aspect['from']): int(aspect['to'])])
        tokens.extend(BIES_tokens)

        assert len(BIES_tokens) > 0, 'error in BIES_tokens length'

        if len(BIES_tokens) == 1:
            labels.append('S-' + polarity)
        elif len(BIES_tokens) == 2:
            labels.append('B-' + polarity)
            labels.append('E-' + polarity)
        else:
            labels.append('B-' + polarity)
            labels.extend(['I-' + polarity] * (len(BIES_tokens) - 2))
            labels.append('E-' + polarity)

        text_begin = int(aspect['to'])
    

    # tokens after the last aspect span are all tagged 'O'
    tail_O_tokens = nltk.word_tokenize(text[text_begin:])
    labels.extend(['O'] * len(tail_O_tokens))
    tokens.extend(tail_O_tokens)

    example['tokens'] = tokens
    example['ATESP_BIEOS_tags'] = labels

    # BIO variant: map E- to I- and S- to B-
    ATESP_BIO_labels = []
    for label in labels:
        ATESP_BIO_labels.append(label.replace('E-', 'I-').replace('S-', 'B-'))
    example['ATESP_BIO_tags'] = ATESP_BIO_labels

    # ATE-only variants: keep just the boundary letter and drop the polarity suffix
    ATE_BIEOS_labels = []
    for label in labels:
        ATE_BIEOS_labels.append(label[0])
    example['ATE_BIEOS_tags'] = ATE_BIEOS_labels

    ATE_BIO_labels = []
    for label in ATESP_BIO_labels:
        ATE_BIO_labels.append(label[0])
    example['ATE_BIO_tags'] = ATE_BIO_labels

    return example
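

# A minimal usage sketch, assuming this file is saved locally as
# "semeval2015.py" and the NLTK tokenizer models are available
# (e.g. after nltk.download('punkt')). Names below are illustrative.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("semeval2015.py", name="restaurants")
    sample = dataset["train"][0]
    print(sample["text"])
    print(list(zip(sample["tokens"], sample["ATESP_BIEOS_tags"])))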