Datasets:
Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: Japanese
Size: 10K - 100K
ArXiv:
Tags: question-generation
License:
import re
from typing import List

import spacy

__all__ = 'SentSplit'


class JASplitter:
    """ JA sentence splitter from https://github.com/himkt/konoha/blob/master/konoha/sentence_tokenizer.py """

    PERIOD = "。"
    PERIOD_SPECIAL = "__PERIOD__"
    # Periods inside full-width parentheses or 「」 quotes must not end a sentence.
    PATTERNS = [re.compile(r"（.*?）"), re.compile(r"「.*?」")]

    @staticmethod
    def conv_period(item) -> str:
        # Mask periods inside a bracketed/quoted span so they survive the split below.
        return item.group(0).replace(JASplitter.PERIOD, JASplitter.PERIOD_SPECIAL)

    def __call__(self, document) -> List[str]:
        # Protect periods that appear inside brackets and quotes.
        for pattern in JASplitter.PATTERNS:
            document = re.sub(pattern, self.conv_period, document)

        result = []
        for line in document.split("\n"):
            line = line.rstrip()
            line = line.replace("\n", "")
            line = line.replace("\r", "")
            # Insert a line break after every sentence-final period.
            line = line.replace("。", "。\n")
            sentences = line.split("\n")
            for sentence in sentences:
                if not sentence:
                    continue
                # Restore the periods that were masked inside brackets/quotes.
                period_special = JASplitter.PERIOD_SPECIAL
                period = JASplitter.PERIOD
                sentence = sentence.replace(period_special, period)
                result.append(sentence)
        return result
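
As a quick illustration (a minimal sketch, not part of the original file; the input sentence is invented for demonstration), the splitter is called directly on a raw Japanese string and returns one string per sentence, leaving the period inside the 「」 quote untouched:

splitter = JASplitter()
sentences = splitter("私は学生です。「今日は。」と彼は言った。元気ですか。")
# ['私は学生です。', '「今日は。」と彼は言った。', '元気ですか。']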