import re
import torch
import torchaudio.functional as F
import torchaudio
import uroman as ur
import logging
import traceback

def convert_to_list_with_punctuation_mixed(text):
    """处理中文文本(可能包含英文单词) - 中文按字符分割,英文单词保持完整"""
    result = []
    text = text.strip()
    
    if not text:
        return result
    
    def is_chinese(char):
        """检查是否是汉字"""
        return '\u4e00' <= char <= '\u9fff'
    
    # Split the text with a single regex that matches, in order: English words
    # (letters, optionally followed by digits), single Chinese characters, and
    # punctuation. Note: a token must start with a letter, so standalone digit
    # runs are not matched by this pattern.
    pattern = r'[a-zA-Z]+[a-zA-Z0-9]*|[\u4e00-\u9fff]|[^\w\s\u4e00-\u9fff]'
    tokens = re.findall(pattern, text)
    
    for token in tokens:
        if not token.strip():  # skip empty tokens
            continue
        
        if re.match(r'^[a-zA-Z]+[a-zA-Z0-9]*$', token):  # English word (possibly containing digits)
            result.append(token)
        elif is_chinese(token):  # single Chinese character
            result.append(token)
        else:  # punctuation or other characters
            # attach punctuation to the preceding token
            if result:
                result[-1] += token
            else:
                # punctuation at the start of the text becomes its own entry
                result.append(token)
    
    return result
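
# Illustrative example (input is assumed, not from the original code):
#   convert_to_list_with_punctuation_mixed("你好world!")
#   -> ['你', '好', 'world!']  # Chinese per character, English word kept whole,
#                              # trailing punctuation merged into the previous token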

def split_and_merge_punctuation(text):
    """处理英文 - 按单词分割,保持单词完整性"""
    # Split the text on whitespace first
    elements = text.split()
    
    # Final token list
    result = []
    
    for ele in elements:
        # Extract runs of letters/digits and runs of punctuation
        parts = re.findall(r'[a-zA-Z0-9]+|[^\w\s]+', ele)
        
        merged_parts = []
        
        for part in parts:
            if re.match(r'^[a-zA-Z0-9]+$', part):  # a letter/digit run starts a new token
                merged_parts.append(part)
            else:
                # A punctuation run is merged into the preceding token. The
                # content is checked directly rather than relying on even/odd
                # position, which breaks when an element starts with punctuation.
                if merged_parts:
                    merged_parts[-1] += part
                else:
                    merged_parts.append(part)
        
        # Append the merged parts to the final result
        result.extend(merged_parts)
    
    return result
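
# Illustrative example (input is assumed, not from the original code):
#   split_and_merge_punctuation("Hello, world!")
#   -> ['Hello,', 'world!']  # punctuation merged into the preceding word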


def get_aligned_result_text_with_punctuation(alignment_result, text, language):
    """
    将对齐结果转换为正确的文本tokens,英文保持单词级别,中文保持字符级别(但英文单词完整)
    """
    logging.info("start change text to text_tokens")
    
    if language == "EN":
        text_tokens = split_and_merge_punctuation(text)  # 英文按单词分词
    elif language == "ZH":
        text_tokens = convert_to_list_with_punctuation_mixed(text)  # 中文按字符分割,但英文单词保持完整
    else:
        raise ValueError(f"Unsupported language: {language}")

    logging.info(f"Text tokens count: {len(text_tokens)}, Alignment result count: {len(alignment_result)}")
    
    punctuations = set(',.!?;:()[]<>\'"…·,。;:!?()【】《》‘’“”、')

    logging.info("start get align result text with punctuation")
    updated_alignment_result = []
    token_idx = 0
    
    for index, align_item in enumerate(alignment_result):
        if token_idx >= len(text_tokens):
            # text_tokens exhausted but alignment results remain; stop here
            logging.warning(f"Text tokens exhausted at index {token_idx}, but alignment has more items")
            break
            
        start = align_item["start"]
        end = align_item["end"]
        text_token = text_tokens[token_idx]
        
        # Check whether standalone punctuation tokens follow this one (ZH only)
        if language == "ZH":
            while token_idx + 1 < len(text_tokens) and text_tokens[token_idx + 1] in punctuations:
                # In theory this branch is unreachable: the ZH tokenizer already
                # merges punctuation into the preceding token.
                assert False, "unexpected standalone punctuation token"
                text_token += text_tokens[token_idx + 1]  # merge the punctuation
                token_idx += 1
        else:
            # English needs no special punctuation handling here; it was already
            # done in split_and_merge_punctuation.
            pass
        
        # Build the updated alignment entry
        updated_item = {
            "start": start,
            "end": end,
            "transcript": text_token
        }
        updated_item.update({key: align_item[key] for key in align_item if key not in ["start", "end", "transcript"]})
        
        updated_alignment_result.append(updated_item)
        token_idx += 1

    logging.info("end get align result text with punctuation")
    return updated_alignment_result
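
# Illustrative call (values are assumed, not from the original code):
#   get_aligned_result_text_with_punctuation(
#       [{"start": 0.0, "end": 0.4}, {"start": 0.4, "end": 0.9}], "你好!", "ZH")
#   -> [{"start": 0.0, "end": 0.4, "transcript": "你"},
#       {"start": 0.4, "end": 0.9, "transcript": "好!"}]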


class AlignmentModel:
    def __init__(self, device, model_dir='/data-mnt/data/wy/X-Codec-2.0/checkpoints'):
        """
        初始化对齐模型并加载必要的资源
        :param device: 设备类型 ("cuda" 或 "cpu")
        :param model_dir: 模型目录路径
        """
        self.device = torch.device(device)
        self.bundle = torchaudio.pipelines.MMS_FA
        self.align_model = self.bundle.get_model(with_star=False, dl_kwargs={'model_dir': model_dir}).to(self.device)
        self.uroman = ur.Uroman()
        self.DICTIONARY = self.bundle.get_dict()

    def align(self, emission, tokens):
        """
        执行强对齐
        :param emission: 模型的输出
        :param tokens: 目标 tokens
        :return: 对齐的 tokens 和分数
        """
        alignments, scores = F.forced_align(
            log_probs=emission,
            targets=tokens,
            blank=0
        )
        alignments, scores = alignments[0], scores[0]
        scores = scores.exp()
        return alignments, scores

    def unflatten(self, list_, lengths):
        """
        将一个长列表按照长度拆分成子列表
        :param list_: 长列表
        :param lengths: 各子列表的长度
        :return: 拆分后的子列表
        """
        assert len(list_) == sum(lengths)
        i = 0
        ret = []
        for l in lengths:
            ret.append(list_[i:i + l])
            i += l
        return ret
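
        # Example (values are assumed):
        #   self.unflatten([1, 2, 3, 4, 5], [2, 3]) -> [[1, 2], [3, 4, 5]]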

    def preview_word(self, waveform, spans, num_frames, transcript, sample_rate):
        """
        预览每个单词的开始时间和结束时间
        :param waveform: 音频波形
        :param spans: 单词的跨度
        :param num_frames: 帧数
        :param transcript: 转录文本
        :param sample_rate: 采样率
        :return: 单词的对齐信息
        """
        end = 0
        alignment_result = []
        for span, trans in zip(spans, transcript):
            ratio = waveform.size(1) / num_frames
            x0 = int(ratio * span[0].start)
            x1 = int(ratio * span[-1].end)
            align_info = {
                "transcript": trans,
                "start": round(x0 / sample_rate, 3),
                "end": round(x1 / sample_rate, 3)
            }
            align_info["pause"] = round(align_info["start"] - end, 3)
            align_info["duration"] = round(align_info["end"] - align_info["start"], 3)
            end = align_info["end"]
            alignment_result.append(align_info)
        return alignment_result
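
        # Each returned entry looks like (values are illustrative, times in seconds):
        #   {"transcript": "hello", "start": 0.12, "end": 0.48,
        #    "pause": 0.12, "duration": 0.36}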

    def make_wav_batch(self, wav_list):
        """
        将 wav_list 中的每个 wav 张量填充为相同的长度,返回填充后的张量和每个张量的原始长度。
        :param wav_list: wav 文件列表
        :return: 填充后的音频张量和原始长度
        """
        wav_lengths = torch.tensor([wav.size(0) for wav in wav_list], dtype=torch.long)
        max_length = max(wav_lengths)
        # Allocate the padded batch directly on the target device
        wavs_tensors = torch.zeros(len(wav_list), max_length, device=self.device)
        for i, wav in enumerate(wav_list):
            wav = wav.to(self.device)  # move each wav to the target device
            wavs_tensors[i, :wav_lengths[i]] = wav
        return wavs_tensors, wav_lengths.to(self.device)
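
        # Example (shapes are assumed): two wavs of 16000 and 8000 samples yield
        # a zero-padded (2, 16000) tensor and lengths tensor([16000, 8000]).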

    def get_target(self, transcript, language):
        """
        获取给定转录文本的目标 tokens - 修正版本,保持英文单词完整性
        """
        original_transcript = transcript  # keep the original text for logging
        
        if language == "ZH":
            # Chinese: keep English words intact and romanize only the Chinese
            # characters, using the same tokenization pattern as the ZH tokenizer
            pattern = r'[a-zA-Z]+[a-zA-Z0-9]*|[\u4e00-\u9fff]|[^\w\s\u4e00-\u9fff]'
            tokens = re.findall(pattern, transcript)
            
            # Handle Chinese characters and English words separately
            processed_parts = []
            for token in tokens:
                if not token.strip():
                    continue
                elif re.match(r'^[a-zA-Z]+[a-zA-Z0-9]*$', token):  # English word
                    # Keep English words as-is (lowercased), no romanization
                    processed_parts.append(token.lower())
                elif '\u4e00' <= token <= '\u9fff':  # Chinese character
                    # Romanize only Chinese characters
                    romanized = self.uroman.romanize_string(token)
                    processed_parts.append(romanized)
                else:  # punctuation etc.
                    # Kept here, but removed by the punctuation cleanup below
                    processed_parts.append(token)
            
            # Join all parts with spaces
            transcript = ' '.join(processed_parts)
        
        elif language == "EN":
            # English: keep the word structure; punctuation is stripped below
            pass
        else:
            raise ValueError(f"Unsupported language: {language}")

        # Strip punctuation
        transcript = re.sub(r'[^\w\s]', ' ', transcript)
        TRANSCRIPT = transcript.lower().split()
        
        # Fetch the star (OOV) token from the dictionary up front
        star_token = self.DICTIONARY['*']
        tokenized_transcript = []

        # Uniform character-level tokenization
        for word in TRANSCRIPT:
            # Map each character of the word to its dictionary token
            word_tokens = []
            for c in word:
                if c in self.DICTIONARY and c != '-':
                    word_tokens.append(self.DICTIONARY[c])
                else:
                    word_tokens.append(star_token)
            tokenized_transcript.extend(word_tokens)
        
        logging.info(f"Original transcript: {original_transcript}")
        logging.info(f"Processed transcript: {transcript}")
        logging.info(f"Final TRANSCRIPT: {TRANSCRIPT}")
        
        return torch.tensor([tokenized_transcript], dtype=torch.int32, device=self.device)
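
        # Illustrative call (romanization output is assumed): get_target("你好 ok", "ZH")
        # romanizes "你好" to something like "ni hao", keeps "ok" as-is, and returns
        # a (1, num_chars) int32 tensor of dictionary indices; OOV characters map to '*'.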

    def get_alignment_result(self, emission_padded, emission_length, aligned_tokens, alignment_scores, transcript, waveform, language):
        """
        根据给定的 emission 和对齐信息生成对齐结果 - 修正版本
        """
        original_transcript = transcript  # keep the original text for logging
        
        if language == "ZH":
            # Use the same preprocessing as get_target
            pattern = r'[a-zA-Z]+[a-zA-Z0-9]*|[\u4e00-\u9fff]|[^\w\s\u4e00-\u9fff]'
            tokens = re.findall(pattern, transcript)
            
            processed_parts = []
            for token in tokens:
                if not token.strip():
                    continue
                elif re.match(r'^[a-zA-Z]+[a-zA-Z0-9]*$', token):  # English word
                    processed_parts.append(token.lower())
                elif '\u4e00' <= token <= '\u9fff':  # Chinese character
                    romanized = self.uroman.romanize_string(token)
                    processed_parts.append(romanized)
                else:  # punctuation etc.
                    processed_parts.append(token)
            
            transcript = ' '.join(processed_parts)
        elif language == "EN":
            pass
        else:
            raise ValueError(f"Unsupported language: {language}")
        
        transcript = re.sub(r'[^\w\s]', ' ', transcript)
        emission = emission_padded[:emission_length, :].unsqueeze(0)
        TRANSCRIPT = transcript.lower().split()
        
        token_spans = F.merge_tokens(aligned_tokens, alignment_scores)
        
        # Group the token spans into words (one span list per word)
        word_spans = self.unflatten(token_spans, [len(word) for word in TRANSCRIPT])
        
        num_frames = emission.size(1)
        
        logging.info(f"Original transcript for alignment: {original_transcript}")
        logging.info(f"Processed TRANSCRIPT: {TRANSCRIPT}")
        
        return self.preview_word(waveform.unsqueeze(0), word_spans, num_frames, TRANSCRIPT, self.bundle.sample_rate)

    def batch_alignment(self, wav_list, transcript_list, language_list):
        """
        批量对齐
        :param wav_list: wav 文件列表
        :param transcript_list: 转录文本列表
        :param language_list: 语言类型列表
        :return: 对齐结果列表
        """
        wavs_tensors, wavs_lengths_tensor = self.make_wav_batch(wav_list)
        logging.info("start alignment model forward")
        with torch.inference_mode():
            emission, emission_lengths = self.align_model(wavs_tensors.to(self.device), wavs_lengths_tensor)
            # Append a zero "star" (OOV) score dimension to the emission so it
            # matches the dictionary's '*' entry
            star_dim = torch.zeros((emission.shape[0], emission.size(1), 1), dtype=emission.dtype, device=self.device)
            emission = torch.cat((emission, star_dim), dim=-1)
        
        logging.info("end alignment model forward")
        
        target_list = [self.get_target(transcript, language) for transcript, language in zip(transcript_list, language_list)]
        
        logging.info("align success")
        align_results = [
            self.align(emission_padded[:emission_length, :].unsqueeze(0), target)
            for emission_padded, emission_length, target in zip(emission, emission_lengths, target_list)
        ]
        
        logging.info("get align result")
        batch_aligned_tokens = [align_result[0] for align_result in align_results]
        batch_alignment_scores = [align_result[1] for align_result in align_results]

        alignment_result_list = [
            self.get_alignment_result(emission_padded, emission_length, aligned_tokens, alignment_scores, transcript, waveform, language)
            for emission_padded, emission_length, aligned_tokens, alignment_scores, transcript, waveform, language
            in zip(emission, emission_lengths, batch_aligned_tokens, batch_alignment_scores, transcript_list, wav_list, language_list)
        ]
        logging.info("get align result success")
        return alignment_result_list
    

def batch_get_alignment_result(alignment_model, wav_list, transcript_list, language_list):
    """
    批量获取对齐结果的便捷函数
    """
    alignment_results = alignment_model.batch_alignment(
        wav_list=wav_list, 
        transcript_list=transcript_list, 
        language_list=language_list
    )
    
    alignments_results_with_text_and_punctuation = []
    for alignment_result, transcript, language in zip(alignment_results, transcript_list, language_list):
        try:
            result = get_aligned_result_text_with_punctuation(alignment_result, transcript, language)
            alignments_results_with_text_and_punctuation.append(result)
        except Exception:  # fall back to the raw alignment result on failure
            logger = logging.getLogger("tokenize")
            logger.error(f"Error in processing {alignment_result}")
            traceback.print_exc()
            alignments_results_with_text_and_punctuation.append(alignment_result)
    return alignments_results_with_text_and_punctuation
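

# --- Usage sketch. Assumptions not from the original code: "sample.wav" is a
# placeholder path, and the transcript/language values are illustrative. MMS_FA
# operates at the bundle's sample rate (16 kHz), so the audio is resampled. ---
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    model = AlignmentModel(device="cuda" if torch.cuda.is_available() else "cpu")

    # Load a waveform, downmix to mono 1-D, and resample to the bundle rate
    wav, sr = torchaudio.load("sample.wav")  # placeholder path
    wav = F.resample(wav.mean(0), sr, model.bundle.sample_rate)

    results = batch_get_alignment_result(
        alignment_model=model,
        wav_list=[wav],
        transcript_list=["你好, world!"],
        language_list=["ZH"],
    )
    for item in results[0]:
        print(item["transcript"], item["start"], item["end"])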