aarvis committed on
Commit d2a01f1 · 1 Parent(s): f474fc0

initial-commit

.gitattributes CHANGED
@@ -33,4 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
 demo.mp4 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,525 @@
- ---
- license: cc-by-4.0
- ---
---
base_model: snorbyte/snorTTS-Indic-v0
tags:
- text-to-speech
- tts
- transformers
- unsloth
- llama
- audio
- speech-synthesis
license: apache-2.0
language:
- hi
- gu
- mr
- pa
- bn
- te
- kn
- ml
- ta
---

# snorTTS-Indic-v0

**Open-source multilingual Indic TTS model**
A human-sounding Indic TTS model, fine-tuned by Snorbyte in multiple stages on 140 hrs of proprietary speech across 9 Indic languages. The base model is a LLaMA-3.2-3B Instruct model pretrained on 100k hours of English and fine-tuned on Hindi by canopylabs.

---

## Capabilities

- Human-sounding speech
- Natural, human-like delivery of colloquial transcripts (with English mixing and disfluencies)
- Multilingual code-switching

---
## Model Overview

| Item | Details |
|------------------------|----------------------------------------------------------------------------------------------------------------------------|
| **Base model**          | `canopylabs/3b-hi-pretrain-research_release` |
| **Architecture**        | LLaMA-3.2-3B-Instruct (`transformers`) |
| **Audio codec**         | SNAC @ 24 kHz, 3 codebooks (12,288 new tokens) |
| **Training toolkit**    | [Unsloth](https://github.com/unslothai/unsloth) + HF TRL |
| **Languages**           | Hindi (hi), Gujarati (gu), Marathi (mr), Punjabi (pa), Bengali (bn), Telugu (te), Kannada (kn), Malayalam (ml), Tamil (ta) |

---
## Inference
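
The snippets below expect `model`, `tokenizer`, and a few special-token IDs to already be in scope. A minimal setup sketch, assuming the released checkpoint on the Hub and plain `transformers` loading (the dtype and device placement are assumptions, not part of the original card; the token IDs are derived exactly as in the Training Code section):

```python
import torch
import soundfile as sf          # used below to write the generated waveform
from loguru import logger
from snac import SNAC
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "snorbyte/snorTTS-Indic-v0"  # this repo
MAX_SEQ_LENGTH = 2048                   # matches the training configuration below

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16).to("cuda")

# Special-token IDs appended after the base vocabulary (see the Training Code section).
tokeniser_length = 128256
end_of_speech_id = tokeniser_length + 2   # <custom_token_2>, used as EOS during generation
audio_start_id = tokeniser_length + 10    # first SNAC audio token id
```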

```python
# Load SNAC Model
snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
logger.success("Loaded SNAC model for audio decoding.")
```

```python
# Reconstruct a waveform from the SNAC codes generated by the model.
def generate_audio(
    row, model, user=False, temperature=0.4, top_p=0.9, repetition_penalty=1.05
):
    if user:
        prompt = row["eval_text_user"]
    else:
        prompt = row["eval_text_no_user"]
    inputs = tokenizer(prompt, add_special_tokens=False, return_tensors="pt")
    max_tokens = MAX_SEQ_LENGTH - inputs.input_ids.shape[1]
    output = model.generate(
        input_ids=inputs.input_ids.to("cuda"),
        attention_mask=inputs.attention_mask.to("cuda"),
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        eos_token_id=end_of_speech_id,
    )
    # Keep only audio tokens (everything at or above audio_start_id).
    audio_ids = []
    for token_id in output[0]:
        if token_id >= audio_start_id:
            audio_ids.append(token_id.item())
    # Re-base the ids so each 7-token frame starts at 0 (drop any trailing partial frame).
    clean_audio_ids = []
    for i in range(len(audio_ids) // 7):
        for j in range(7):
            clean_audio_ids += [audio_ids[7 * i + j] - audio_start_id]
    # De-interleave the 7 slots per frame into SNAC's 3 codebooks (1 + 2 + 4 codes).
    codes = [[], [], []]
    for i in range(len(clean_audio_ids) // 7):
        codes[0].append(clean_audio_ids[7 * i])
        codes[1].append(clean_audio_ids[7 * i + 1] - 4096)
        codes[2].append(clean_audio_ids[7 * i + 2] - (2 * 4096))
        codes[2].append(clean_audio_ids[7 * i + 3] - (3 * 4096))
        codes[1].append(clean_audio_ids[7 * i + 4] - (4 * 4096))
        codes[2].append(clean_audio_ids[7 * i + 5] - (5 * 4096))
        codes[2].append(clean_audio_ids[7 * i + 6] - (6 * 4096))
    codes = [
        torch.tensor(codes[0]).unsqueeze(0),
        torch.tensor(codes[1]).unsqueeze(0),
        torch.tensor(codes[2]).unsqueeze(0),
    ]
    try:
        audio = snac_model.decode(codes)
    except Exception as e:
        logger.error(f"Error decoding audio: {e}")
        return None
    return audio.detach().squeeze().to("cpu").numpy()
```
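
The de-interleaving above relies on the flattened frame layout used at training time: each 24 kHz SNAC frame contributes one code from codebook 1, two from codebook 2, and four from codebook 3, and each of the seven slots gets its own 4096-wide id range. A small sketch of the id arithmetic (constants mirror the Training Code section; the helper is illustrative, not part of the original card):

```python
TOKENISER_LENGTH = 128256
AUDIO_START_ID = TOKENISER_LENGTH + 10  # first audio token id
CODEBOOK_SIZE = 4096

# slot:      0  1  2  3  4  5  6
# codebook:  1  2  3  3  2  3  3   (matches the flatten / de-interleave order)
def audio_token_id(slot: int, code: int) -> int:
    # Hypothetical helper: map a (slot, code) pair to its vocabulary id.
    return AUDIO_START_ID + slot * CODEBOOK_SIZE + code
```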

```python
prompt = {
    "eval_text_no_user": "<custom_token_3><|begin_of_text|>நிச்சயமா. ரோம் ல் இரவு நேரம் ரொம்ப அழகா இருக்கு—piazzaகள் சுத்துறதுக்கு நல்ல நேரம்.<|eot_id|><custom_token_4><custom_token_5><custom_token_1>"
}
train_sample = generate_audio(prompt, model)  # user=False: the dict carries "eval_text_no_user"
if train_sample is None:
    logger.error("Failed to generate audio")
else:
    sf.write("output.wav", train_sample, 24000)
    logger.success("Generated and saved audio as output.wav")
```
---

## Types of Prompts

For better results, use the recommended speaker IDs listed below when generating audio.

- **Normal prompt:** just pass the transcript in the format below.
```python
{
    "eval_text_no_user": "<custom_token_3><|begin_of_text|>நிச்சயமா. ரோம் ல் இரவு நேரம் ரொம்ப அழகா இருக்கு—piazzaகள் சுத்துறதுக்கு நல்ல நேரம்.<|eot_id|><custom_token_4><custom_token_5><custom_token_1>"
}
```
- **Speaker-specific prompt:** stick to the same format, but prepend `<language>{speakerId}: ` to the transcript. You can make any speaker speak any of the 9 languages.
```python
{
    "eval_text_user": "<custom_token_3><|begin_of_text|>hindi159: चलते रहो इस सफर में बिना रुके, क्योंकि मंज़िलें खुद राह दिखाने लगती हैं <|eot_id|><custom_token_4><custom_token_5><custom_token_1>"
}
```
---

### Recommended Speaker IDs

| Language  | Speakers       |
|-----------|----------------|
| Hindi     | [159, 49]      |
| Tamil     | [188, 128]     |
| Bengali   | [125]          |
| Malayalam | [189, 124]     |
| Kannada   | [142, 138]     |
| Telugu    | [69, 133]      |
| Punjabi   | [191, 67, 201] |
| Gujarati  | [62, 190, 187] |
| Marathi   | [205, 82]      |

- **Multilingual transcript prompt:** stick to the same format, prepend `<language>{speakerId}: ` using the speaker's native language, and freely mix languages within the transcript itself (see the helper sketch below).
```python
{
    "eval_text_user": "<custom_token_3><|begin_of_text|>bengali125: मुझे तो लगा वो आएगा, ஆனா அவன் வந்து full drama பண்ணிட்டான், আর শেষে আবার আমাকে দোষ দিচ্ছে <|eot_id|><custom_token_4><custom_token_5><custom_token_1>"
}
```
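
All three prompt styles share one wrapper; only the optional `<language>{speakerId}: ` prefix differs. A small helper, not part of the original card, that assembles the format shown above (the special tokens follow the mapping in the Training Code section):

```python
def build_prompt(text: str, language: str | None = None, speaker_id: int | None = None) -> str:
    """Hypothetical helper: wrap a transcript in the expected special-token format.

    <custom_token_3> = start of human, <custom_token_4> = end of human,
    <custom_token_5> = start of AI,    <custom_token_1> = start of speech.
    """
    prefix = f"{language}{speaker_id}: " if language and speaker_id is not None else ""
    return (
        "<custom_token_3><|begin_of_text|>"
        f"{prefix}{text}<|eot_id|>"
        "<custom_token_4><custom_token_5><custom_token_1>"
    )

# Speaker-specific Hindi prompt, equivalent to the example above.
row = {"eval_text_user": build_prompt("चलते रहो इस सफर में बिना रुके", "hindi", 159)}
```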

---

## Training Details
- **Dataset:** [indic-tts-sample-snac-encoded](https://huggingface.co/datasets/snorbyte/indic-tts-sample-snac-encoded), curated by [snorbyte](https://snorbyte.com); loadable as sketched below
  - 135 hours (~68k samples) split into:
    - stage_1: text reading (47k scripted) + semi-spontaneous dialogue (16k)
    - stage_2: colloquial conversational snippets (4.4k)
    - eval: evaluation samples for training (200)
  - 9 Indic languages, balanced across high-/low-quality speakers
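
The splits can be pulled straight from the Hub; a minimal sketch (the exact split/config names on the dataset repo are an assumption based on the list above):

```python
from datasets import load_dataset

# stage_1 / stage_2 / eval, as described above.
dataset = load_dataset("snorbyte/indic-tts-sample-snac-encoded")
print(dataset)
```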

- **Hyperparameters:**
  - LoRA rank: 192
  - LoRA alpha: 384
  - Per-device train batch size: 8
  - Gradient accumulation steps: 4 (effective batch size 8 × 4 = 32)
  - Optimizer: adamw_8bit
  - Learning rate: 2e-5
  - Scheduler: cosine
  - Warmup ratio: 0.02
  - Epochs: 2
  - Max sequence length: 2048
  - SFT trainer packing: True

- **Compute**
  - GPU: 1 NVIDIA H100 on Vast.ai
---

## Training Code

```bash
pip install torch unsloth datasets loguru snac trl soundfile wandb transformers
```

```python
# Import unsloth first so its patches are applied before transformers loads.
from unsloth import FastLanguageModel

import ast
import os

from datasets import load_dataset
from loguru import logger
from snac import SNAC
from trl import SFTConfig, SFTTrainer
import soundfile as sf
import torch
import wandb
from transformers import AutoModelForCausalLM, AutoTokenizer
```

```python
# Set up constants and configurations.
BASE_MODEL = "canopylabs/3b-hi-pretrain-research_release"
STAGE = 1  # 1 or 2, depending on which dataset stage you are training on
if STAGE == 1:
    TRAIN_CSV_PATH = ""  # path to the stage_1 csv dataset
else:
    TRAIN_CSV_PATH = ""  # path to the stage_2 csv dataset
VALID_CSV_PATH = ""  # path to the eval csv dataset (referenced below)
TRAIN_NUM_SAMPLES = None  # optionally cap the number of training samples
EVAL_NUM_SAMPLES = None  # optionally cap the number of evaluation samples

MAX_SEQ_LENGTH = 2048
PER_DEVICE_TRAIN_BATCH_SIZE = 8
GRADIENT_ACCUMULATION_STEPS = 4
HUGGINGFACE_TOKEN = ""  # pass your Hugging Face token
MODEL_NAME = "snorTTS-indic"
WANDB_USERNAME = ""  # pass your wandb username
WANDB_PROJECT = "snorTTS-indic"
WANDB_LOG_MODEL = "checkpoint"
WANDB_RUN_NAME = "run-0"
WANDB_RUN_ID = None
SEED = 3407

# Set up environment variables for Weights & Biases.
os.environ["WANDB_PROJECT"] = WANDB_PROJECT
os.environ["WANDB_LOG_MODEL"] = WANDB_LOG_MODEL
```

```python
# Load the model and tokenizer.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=BASE_MODEL,
    load_in_4bit=True,
    max_seq_length=MAX_SEQ_LENGTH,
    token=HUGGINGFACE_TOKEN,
)
logger.success(f"Loaded model: {BASE_MODEL}")

# Get the parameter-efficient fine-tuning (LoRA) model.
model = FastLanguageModel.get_peft_model(
    model,
    r=192,
    target_modules=[
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "up_proj",
        "down_proj",
        "gate_proj",
        "lm_head",
        "embed_tokens",
    ],
    lora_alpha=384,
    random_state=SEED,
)

# Define the special-token IDs appended after the base vocabulary.
tokeniser_length = 128256

start_of_text_id = 128000
end_of_text_id = 128009
start_of_speech_id = tokeniser_length + 1
end_of_speech_id = tokeniser_length + 2
start_of_human_id = tokeniser_length + 3
end_of_human_id = tokeniser_length + 4
start_of_ai_id = tokeniser_length + 5
end_of_ai_id = tokeniser_length + 6
pad_token_id = tokeniser_length + 7
audio_start_id = tokeniser_length + 10

start_of_text_token = tokenizer.decode([start_of_text_id])
end_of_text_token = tokenizer.decode([end_of_text_id])
start_of_speech_token = tokenizer.decode([start_of_speech_id])
end_of_speech_token = tokenizer.decode([end_of_speech_id])
start_of_human_token = tokenizer.decode([start_of_human_id])
end_of_human_token = tokenizer.decode([end_of_human_id])
start_of_ai_token = tokenizer.decode([start_of_ai_id])
end_of_ai_token = tokenizer.decode([end_of_ai_id])
pad_token = tokenizer.decode([pad_token_id])
audio_start_token = tokenizer.decode([audio_start_id])

logger.success("Loaded special tokens for the tokenizer.")

# Set the padding token and padding side.
tokenizer.pad_token = pad_token
tokenizer.padding_side = "left"
logger.success("Set padding token and padding side for the tokenizer.")
```

```python
# Load training and validation datasets.
train_dataset = load_dataset("csv", data_files=TRAIN_CSV_PATH)["train"]
eval_dataset = load_dataset("csv", data_files=VALID_CSV_PATH)["train"]

if TRAIN_NUM_SAMPLES:
    train_dataset = train_dataset.shuffle(seed=SEED).select(
        range(min(TRAIN_NUM_SAMPLES, len(train_dataset)))
    )

if EVAL_NUM_SAMPLES:
    eval_dataset = eval_dataset.shuffle(seed=SEED).select(
        range(min(EVAL_NUM_SAMPLES, len(eval_dataset)))
    )

logger.success(
    f"Loaded datasets: {len(train_dataset)} training samples, {len(eval_dataset)} evaluation samples."
)
```

```python
# Flatten (interleave) the 3 SNAC codebooks into 7 token IDs per frame.
def flatten_and_get_audio_input_ids(row):
    audio_codes = row["snac_codes"]
    if isinstance(audio_codes, str):
        # Codes are stored as a string in the CSV; parse them safely.
        audio_codes = ast.literal_eval(audio_codes)
    snac_token_ids = []
    # Per frame: 1 code from codebook 1, 2 from codebook 2, 4 from codebook 3.
    # 128266 = tokeniser_length (128256) + 10 = audio_start_id.
    for i in range(len(audio_codes[0])):
        snac_token_ids.append(audio_codes[0][i] + 128266)
        snac_token_ids.append(audio_codes[1][2 * i] + 128266 + 4096)
        snac_token_ids.append(audio_codes[2][4 * i] + 128266 + (2 * 4096))
        snac_token_ids.append(audio_codes[2][(4 * i) + 1] + 128266 + (3 * 4096))
        snac_token_ids.append(audio_codes[1][(2 * i) + 1] + 128266 + (4 * 4096))
        snac_token_ids.append(audio_codes[2][(4 * i) + 2] + 128266 + (5 * 4096))
        snac_token_ids.append(audio_codes[2][(4 * i) + 3] + 128266 + (6 * 4096))
    row["snac_token_ids"] = snac_token_ids
    return row


train_dataset = train_dataset.map(flatten_and_get_audio_input_ids)
eval_dataset = eval_dataset.map(flatten_and_get_audio_input_ids)
logger.success("Flattened and extracted SNAC token IDs from audio codes.")
```

```python
# Filter out rows with empty or None audio codes.
train_dataset = train_dataset.filter(
    lambda x: x["snac_token_ids"] is not None and len(x["snac_token_ids"]) > 0
)
eval_dataset = eval_dataset.filter(
    lambda x: x["snac_token_ids"] is not None and len(x["snac_token_ids"]) > 0
)
logger.success("Filtered datasets to remove rows with empty or None audio codes.")
```

```python
# Remove consecutive duplicate frames (compared by their first codebook value).
def remove_duplicate_frames(row):
    vals = row["snac_token_ids"]
    if len(vals) % 7 != 0:
        raise ValueError("Input list length must be divisible by 7")
    result = vals[:7]
    for i in range(7, len(vals), 7):
        current_first = vals[i]
        previous_first = result[-7]
        if current_first != previous_first:
            result.extend(vals[i : i + 7])
    row["snac_token_ids"] = result
    return row


train_dataset = train_dataset.map(remove_duplicate_frames)
eval_dataset = eval_dataset.map(remove_duplicate_frames)
logger.success("Removed duplicate frames from audio codes.")
```

```python
# Define a function to format the prompt for each row in the dataset.
def format_text(row):
    # Full training example: prompt plus the target audio tokens.
    text = (
        f"{start_of_human_token}{start_of_text_token}{row['language']}{row['user']}: {row['utterance']}{end_of_text_token}"
        f"{end_of_human_token}{start_of_ai_token}{start_of_speech_token}"
        f"{tokenizer.decode(row['snac_token_ids'])}{end_of_speech_token}{end_of_ai_token}"
    )
    # Evaluation prompt with the speaker prefix; generation starts at start-of-speech.
    eval_text_user = (
        f"{start_of_human_token}{start_of_text_token}{row['language']}{row['user']}: {row['utterance']}{end_of_text_token}"
        f"{end_of_human_token}{start_of_ai_token}{start_of_speech_token}"
    )
    # Evaluation prompt without the speaker prefix.
    eval_text_no_user = (
        f"{start_of_human_token}{start_of_text_token}{row['utterance']}{end_of_text_token}"
        f"{end_of_human_token}{start_of_ai_token}{start_of_speech_token}"
    )
    row["text"] = text
    row["eval_text_user"] = eval_text_user
    row["eval_text_no_user"] = eval_text_no_user
    return row


train_dataset = train_dataset.map(format_text)
eval_dataset = eval_dataset.map(format_text)
logger.success("Formatted text for training and evaluation datasets.")
```

```python
# Tokenize the text in the datasets without adding special tokens.
def tokenize_function(example):
    return tokenizer(
        example["text"],
        add_special_tokens=False,
        truncation=True,
        max_length=MAX_SEQ_LENGTH,
    )


train_dataset = train_dataset.map(tokenize_function)
eval_dataset = eval_dataset.map(tokenize_function)
logger.success("Tokenized text in the datasets without adding special tokens.")
```

```python
# Set training arguments.
training_args = SFTConfig(
    num_train_epochs=2,
    per_device_train_batch_size=PER_DEVICE_TRAIN_BATCH_SIZE,
    gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
    optim="adamw_8bit",
    learning_rate=2e-5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.02,
    do_eval=True,
    eval_strategy="steps",
    eval_steps=50,
    logging_strategy="steps",
    logging_steps=1,
    save_strategy="no",
    save_only_model=True,
    # save_steps=250,
    output_dir="outputs",
    report_to="wandb",
    run_name=WANDB_RUN_NAME,
    seed=SEED,
)

# Initialize the SFTTrainer.
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    dataset_text_field="text",
    max_seq_length=MAX_SEQ_LENGTH,
    dataset_num_proc=2,
    packing=True,
    args=training_args,
)

logger.success("Initialized SFTTrainer with the specified configuration.")
```

```python
# Start the training process.
logger.info("Starting the training process...")

run = wandb.init()

if WANDB_RUN_ID:
    logger.info(f"Resuming from Weights & Biases run ID: {WANDB_RUN_ID}")

    artifact = run.use_artifact(
        f"{WANDB_USERNAME}/{WANDB_PROJECT}/{WANDB_RUN_ID}", type="model"
    )

    artifact_dir = artifact.download()

    trainer.train(resume_from_checkpoint=artifact_dir)
else:
    try:
        logger.info("Attempting to resume training from the last checkpoint...")

        trainer.train(resume_from_checkpoint=True)
    except Exception as err:
        logger.warning(f"No checkpoint to resume from ({err}); starting a fresh run.")
        trainer.train()

# Finish the Weights & Biases run.
wandb.finish()

logger.success("Training completed successfully.")
```
---

## Citation

BibTeX:

```bibtex
@misc{indictextaudio2025,
  title={snorTTS-Indic-v0: Multilingual Indic TTS},
  author={snorbyte},
  year={2025},
  howpublished={\url{snorbyte/snorTTS-Indic-v0}},
  note={Apache-2.0}
}
```
chat_template.jinja ADDED
@@ -0,0 +1,93 @@
{{- bos_token }}
{%- if custom_tools is defined %}
{%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
{%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
{%- if strftime_now is defined %}
{%- set date_string = strftime_now("%d %b %Y") %}
{%- else %}
{%- set date_string = "26 Jul 2024" %}
{%- endif %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}

{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{%- set system_message = "" %}
{%- endif %}

{#- System message #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if tools is not none %}
{{- "Environment: ipython\n" }}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
{{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}

{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
{#- Extract the first user message so we can plug it in here #}
{%- if messages | length != 0 %}
{%- set first_user_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
{%- endif %}
{{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
{{- "Given the following functions, please respond with a JSON for a function call " }}
{{- "with its proper arguments that best answers the given prompt.\n\n" }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{{- first_user_message + "<|eot_id|>"}}
{%- endif %}

{%- for message in messages %}
{%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
{%- elif 'tool_calls' in message %}
{%- if not message.tool_calls|length == 1 %}
{{- raise_exception("This model only supports single tool-calls at once!") }}
{%- endif %}
{%- set tool_call = message.tool_calls[0].function %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- '{"name": "' + tool_call.name + '", ' }}
{{- '"parameters": ' }}
{{- tool_call.arguments | tojson }}
{{- "}" }}
{{- "<|eot_id|>" }}
{%- elif message.role == "tool" or message.role == "ipython" %}
{{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
{%- if message.content is mapping or message.content is iterable %}
{{- message.content | tojson }}
{%- else %}
{{- message.content }}
{%- endif %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %}
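
This is the standard Llama-3 instruct template (text chat and tool calls), exercised through `tokenizer.apply_chat_template`. A minimal sketch (the message content is illustrative only; TTS prompts for this model use the special-token format from the README instead):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("snorbyte/snorTTS-Indic-v0")
rendered = tok.apply_chat_template(
    [{"role": "user", "content": "Hello!"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(rendered)
```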
config.json ADDED
@@ -0,0 +1,37 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 128000,
  "eos_token_id": 128001,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 3072,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 24,
  "num_hidden_layers": 28,
  "num_key_value_heads": 8,
  "pad_token_id": 128004,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "factor": 32.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_type": "llama3"
  },
  "rope_theta": 500000.0,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.53.1",
  "unsloth_version": "2025.6.12",
  "use_cache": true,
  "vocab_size": 156940
}
generation_config.json ADDED
@@ -0,0 +1,11 @@
{
  "_from_model_config": true,
  "bos_token_id": 128000,
  "do_sample": true,
  "eos_token_id": 128001,
  "max_length": 131072,
  "pad_token_id": 128004,
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "4.53.1"
}
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c3df5ddcdf658979751c511457760629635b59a74268a968c3f984da836a75b2
size 3438608176
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eec4f6982f07ae393c3b8891190530de25916db7c8ba0a6e4c91d47e3afe480b
size 2466558064
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2157d2c55bd9948478f99016bd6b2cee9770ca1d950ba8924e9f2aea5161c836
size 1661186464
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b999ed243b570750bc19b4fd3d513664e8552a38d79ed29439dfee7ecb346a36
size 1928478848
model.safetensors.index.json ADDED
@@ -0,0 +1,262 @@
{
  "metadata": {
    "total_size": 15131947008
  },
  "weight_map": {
    "lm_head.weight": "model-00004-of-00004.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.norm.weight": "model-00003-of-00004.safetensors"
  }
}
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
{
  "additional_special_tokens": [
    "<|audio|>"
  ],
  "bos_token": {
    "content": "<|begin_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|eot_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<custom_token_7>"
}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:044e2a10201774018db120391980464472baabf223bd353cea49b17da0b66abc
size 22849546
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff