Upload canary-1b-v2

- canary-1b-v2/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- canary-1b-v2/AudioEncoder.mlmodelc/coremldata.bin +3 -0
- canary-1b-v2/AudioEncoder.mlmodelc/metadata.json +87 -0
- canary-1b-v2/AudioEncoder.mlmodelc/model.mil +0 -0
- canary-1b-v2/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- canary-1b-v2/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
- canary-1b-v2/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
- canary-1b-v2/MelSpectrogram.mlmodelc/metadata.json +77 -0
- canary-1b-v2/MelSpectrogram.mlmodelc/model.mil +86 -0
- canary-1b-v2/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
- canary-1b-v2/config.json +37 -0
- canary-1b-v2/preprocessor_config.json +13 -0
- canary-1b-v2/tokenizer.json +0 -0
- canary-1b-v2/tokenizer_config.json +0 -0
- canary-1b-v2/vocab.json +0 -0
canary-1b-v2/AudioEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea2bb79ff3927283c2cd29756ff5c9dc53036e6038ef53e22d93494e80f7df2f
+size 243
canary-1b-v2/AudioEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef5ed889fcb32e62f1c0d8c172fff4161bcb429d449a46834f3c7a2376d2d7be
+size 374
canary-1b-v2/AudioEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,87 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (6 bits), Sparse)",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 16385 × 1 × 188)",
+        "shortDescription" : "",
+        "shape" : "[1, 16385, 1, 188]",
+        "name" : "ctc_head_raw_output",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 8,
+    "mlProgramOperationTypeHistogram" : {
+      "Pad" : 24,
+      "Ios17.mul" : 121,
+      "Split" : 24,
+      "Ios17.transpose" : 1,
+      "Ios17.sub" : 1,
+      "Ios16.constexprLutToDense" : 266,
+      "Ios17.conv" : 560,
+      "Ios17.matmul" : 72,
+      "Ios16.sigmoid" : 24,
+      "Ios17.add" : 433,
+      "Ios17.sliceByIndex" : 48,
+      "Ios16.constexprSparseToDense" : 265,
+      "Ios16.relu" : 3,
+      "Ios17.batchNorm" : 120,
+      "Ios16.softmax" : 24,
+      "Ios17.reshape" : 193,
+      "Ios17.layerNorm" : 120,
+      "Ios16.silu" : 72
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "14.0",
+      "tvOS" : "17.0",
+      "visionOS" : "1.0",
+      "watchOS" : "10.0",
+      "iOS" : "17.0",
+      "macCatalyst" : "17.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.0",
+      "com.github.apple.coremltools.version" : "8.3.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 1501 × 128)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 1501, 128]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 1, 1]",
+        "name" : "input_1",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "AudioEncoder_mixedBitPalettized_6_bit_6_bit",
+    "method" : "predict"
+  }
+]
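Per this metadata, the encoder takes two Float16 inputs, melspectrogram_features of shape [1, 1, 1501, 128] and a [1, 1, 1, 1] input_1 whose purpose the schema leaves undescribed, and returns ctc_head_raw_output of shape [1, 16385, 1, 188]. A minimal Swift sketch of driving the compiled bundle with plain Core ML follows; the local path and the zero placeholder for input_1 are assumptions, not something this commit documents.

```swift
import CoreML

// Minimal sketch (assumed paths, not code from this commit): load the compiled
// AudioEncoder and run one prediction with the shapes listed in metadata.json.
// The role of "input_1" is not documented here, so a zero placeholder is used.
func runAudioEncoder(melFeatures: MLMultiArray, modelDir: URL) throws -> MLMultiArray? {
    let config = MLModelConfiguration()
    config.computeUnits = .all   // allow CPU, GPU and Neural Engine scheduling
    let encoder = try MLModel(contentsOf: modelDir.appendingPathComponent("AudioEncoder.mlmodelc"),
                              configuration: config)

    // Float16 [1, 1, 1, 1] per the inputSchema; semantics unknown, value assumed.
    let aux = try MLMultiArray(shape: [1, 1, 1, 1], dataType: .float16)
    aux[0] = 0

    let inputs = try MLDictionaryFeatureProvider(dictionary: [
        "melspectrogram_features": MLFeatureValue(multiArray: melFeatures), // Float16 [1, 1, 1501, 128]
        "input_1": MLFeatureValue(multiArray: aux)
    ])
    let out = try encoder.prediction(from: inputs)
    // Float16 [1, 16385, 1, 188]: raw CTC logits over the 16385-entry vocabulary.
    return out.featureValue(for: "ctc_head_raw_output")?.multiArrayValue
}
```

Requesting .all compute units lets Core ML place the palettized and sparse mlprogram on the Neural Engine where supported, which is the usual reason for shipping a 6-bit weight layout like this one.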
canary-1b-v2/AudioEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
canary-1b-v2/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:804ea36c2fdf98a99af26e1314e20f7d5b3b1835409bbbde539d441e04d9b0d6
+size 569490114
canary-1b-v2/MelSpectrogram.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18a67d3ece5958dea8f020020a16d72c2d6763103e5a671a2641444167b1c38d
+size 243
canary-1b-v2/MelSpectrogram.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:319a6328b0dd0cade7f4249ed7bb68b50aa5ffde93e968fb796cf0f5ad735c9c
+size 331
canary-1b-v2/MelSpectrogram.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,77 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float32",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 1501 × 128)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 1501, 128]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 8,
+    "mlProgramOperationTypeHistogram" : {
+      "Identity" : 1,
+      "Ios17.mul" : 2,
+      "Ios17.sqrt" : 1,
+      "Ios17.square" : 3,
+      "Ios17.transpose" : 1,
+      "Ios17.sub" : 2,
+      "Ios17.matmul" : 1,
+      "Ios17.conv" : 2,
+      "Ios17.log" : 1,
+      "Ios17.sliceByIndex" : 2,
+      "Ios17.add" : 3,
+      "Ios16.reduceMean" : 2,
+      "Ios17.realDiv" : 1,
+      "Ios17.expandDims" : 4,
+      "Ios17.squeeze" : 2,
+      "Ios17.reshape" : 2,
+      "Ios17.cast" : 2,
+      "Pad" : 2
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "14.0",
+      "tvOS" : "17.0",
+      "visionOS" : "1.0",
+      "watchOS" : "10.0",
+      "iOS" : "17.0",
+      "macCatalyst" : "17.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.0",
+      "com.github.apple.coremltools.version" : "8.3.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 240000)",
+        "shortDescription" : "",
+        "shape" : "[240000]",
+        "name" : "audio",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "MelSpectrogram",
+    "method" : "predict"
+  }
+]
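The front end takes a fixed window of 240000 Float16 samples (15 s at the 16 kHz rate declared in preprocessor_config.json) and emits melspectrogram_features with exactly the name and shape the AudioEncoder expects, so the two bundles chain directly. A sketch of that first hop, again with plain Core ML and an assumed local directory layout:

```swift
import CoreML

// Illustrative first hop of the pipeline (assumed directory layout): 240 000
// Float16 samples in, a [1, 1, 1501, 128] mel feature array out. The output
// name and shape match the AudioEncoder's main input, so the bundles chain directly.
func melFeatures(for samples: [Float], modelDir: URL) throws -> MLMultiArray? {
    precondition(samples.count == 240_000, "the model expects a fixed 15 s window at 16 kHz")
    let mel = try MLModel(contentsOf: modelDir.appendingPathComponent("MelSpectrogram.mlmodelc"))

    let audio = try MLMultiArray(shape: [240_000], dataType: .float16)
    for (i, sample) in samples.enumerated() {
        audio[i] = NSNumber(value: sample)   // stored as Float16 by the array
    }

    let out = try mel.prediction(from: MLDictionaryFeatureProvider(dictionary: [
        "audio": MLFeatureValue(multiArray: audio)
    ]))
    return out.featureValue(for: "melspectrogram_features")?.multiArrayValue
}
```

The returned array can then be handed to the encoder sketch shown after AudioEncoder.mlmodelc/metadata.json.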
canary-1b-v2/MelSpectrogram.mlmodelc/model.mil
ADDED
@@ -0,0 +1,86 @@
+program(1.0)
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3404.16.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.3.0"}})]
+{
+    func main<ios17>(tensor<fp16, [240000]> audio) {
+        tensor<string, []> cast_0_dtype_0 = const()[name = tensor<string, []>("cast_0_dtype_0"), val = tensor<string, []>("fp32")];
+        tensor<fp32, [128, 257]> mel_filters = const()[name = tensor<string, []>("mel_filters"), val = tensor<fp32, [128, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+        tensor<int32, [1]> var_8_begin_0 = const()[name = tensor<string, []>("op_8_begin_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, [1]> var_8_end_0 = const()[name = tensor<string, []>("op_8_end_0"), val = tensor<int32, [1]>([240000])];
+        tensor<bool, [1]> var_8_end_mask_0 = const()[name = tensor<string, []>("op_8_end_mask_0"), val = tensor<bool, [1]>([true])];
+        tensor<fp32, [240000]> cast_0 = cast(dtype = cast_0_dtype_0, x = audio)[name = tensor<string, []>("cast_9")];
+        tensor<fp32, [239999]> var_8 = slice_by_index(begin = var_8_begin_0, end = var_8_end_0, end_mask = var_8_end_mask_0, x = cast_0)[name = tensor<string, []>("op_8")];
+        tensor<int32, [1]> var_13_begin_0 = const()[name = tensor<string, []>("op_13_begin_0"), val = tensor<int32, [1]>([0])];
+        tensor<int32, [1]> var_13_end_0 = const()[name = tensor<string, []>("op_13_end_0"), val = tensor<int32, [1]>([239999])];
+        tensor<bool, [1]> var_13_end_mask_0 = const()[name = tensor<string, []>("op_13_end_mask_0"), val = tensor<bool, [1]>([false])];
+        tensor<fp32, [239999]> var_13 = slice_by_index(begin = var_13_begin_0, end = var_13_end_0, end_mask = var_13_end_mask_0, x = cast_0)[name = tensor<string, []>("op_13")];
+        tensor<fp32, []> var_14 = const()[name = tensor<string, []>("op_14"), val = tensor<fp32, []>(0x1.f0a3d8p-1)];
+        tensor<fp32, [239999]> var_15 = mul(x = var_13, y = var_14)[name = tensor<string, []>("op_15")];
+        tensor<fp32, [239999]> input_1 = sub(x = var_8, y = var_15)[name = tensor<string, []>("input_1")];
+        tensor<fp32, []> const_0 = const()[name = tensor<string, []>("const_0"), val = tensor<fp32, []>(0x0p+0)];
+        tensor<int32, [2]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [2]>([1, 0])];
+        tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("constant")];
+        tensor<fp32, [240000]> input_3 = pad(constant_val = const_0, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1)[name = tensor<string, []>("input_3")];
+        tensor<int32, [3]> var_30 = const()[name = tensor<string, []>("op_30"), val = tensor<int32, [3]>([1, 1, 240000])];
+        tensor<fp32, [1, 1, 240000]> input_5 = reshape(shape = var_30, x = input_3)[name = tensor<string, []>("input_5")];
+        tensor<fp32, []> const_2 = const()[name = tensor<string, []>("const_2"), val = tensor<fp32, []>(0x0p+0)];
+        tensor<int32, [6]> input_7_pad_0 = const()[name = tensor<string, []>("input_7_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 256, 256])];
+        tensor<string, []> input_7_mode_0 = const()[name = tensor<string, []>("input_7_mode_0"), val = tensor<string, []>("reflect")];
+        tensor<fp32, [1, 1, 240512]> input_7 = pad(constant_val = const_2, mode = input_7_mode_0, pad = input_7_pad_0, x = input_5)[name = tensor<string, []>("input_7")];
+        tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([240512])];
+        tensor<fp32, [240512]> input = reshape(shape = var_42, x = input_7)[name = tensor<string, []>("input")];
+        tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp32, [1, 240512]> expand_dims_0 = expand_dims(axes = expand_dims_0_axes_0, x = input)[name = tensor<string, []>("expand_dims_0")];
+        tensor<fp32, [257, 1, 512]> expand_dims_1 = const()[name = tensor<string, []>("expand_dims_1"), val = tensor<fp32, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(131712)))];
+        tensor<fp32, [257, 1, 512]> expand_dims_2 = const()[name = tensor<string, []>("expand_dims_2"), val = tensor<fp32, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(658112)))];
+        tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+        tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp32, [1, 1, 240512]> expand_dims_4 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0)[name = tensor<string, []>("expand_dims_4")];
+        tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp32, [1, 257, 1501]> conv_0 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1, x = expand_dims_4)[name = tensor<string, []>("conv_0")];
+        tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp32, [1, 257, 1501]> conv_1 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2, x = expand_dims_4)[name = tensor<string, []>("conv_1")];
+        tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp32, [257, 1501]> squeeze_0 = squeeze(axes = squeeze_0_axes_0, x = conv_0)[name = tensor<string, []>("squeeze_0")];
+        tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp32, [257, 1501]> squeeze_1 = squeeze(axes = squeeze_1_axes_0, x = conv_1)[name = tensor<string, []>("squeeze_1")];
+        tensor<fp32, [257, 1501]> square_1 = square(x = squeeze_0)[name = tensor<string, []>("square_1")];
+        tensor<fp32, [257, 1501]> square_2 = square(x = squeeze_1)[name = tensor<string, []>("square_2")];
+        tensor<fp32, [257, 1501]> add_1 = add(x = square_1, y = square_2)[name = tensor<string, []>("add_1")];
+        tensor<fp32, [257, 1501]> magnitudes = identity(x = add_1)[name = tensor<string, []>("magnitudes")];
+        tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
+        tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
+        tensor<fp32, [128, 1501]> mel_spec_1 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters, y = magnitudes)[name = tensor<string, []>("mel_spec_1")];
+        tensor<fp32, []> var_56 = const()[name = tensor<string, []>("op_56"), val = tensor<fp32, []>(0x1p-24)];
+        tensor<fp32, [128, 1501]> mel_spec_3 = add(x = mel_spec_1, y = var_56)[name = tensor<string, []>("mel_spec_3")];
+        tensor<fp32, []> mel_spec_5_epsilon_0 = const()[name = tensor<string, []>("mel_spec_5_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
+        tensor<fp32, [128, 1501]> mel_spec_5 = log(epsilon = mel_spec_5_epsilon_0, x = mel_spec_3)[name = tensor<string, []>("mel_spec_5")];
+        tensor<int32, [1]> per_feature_mean_axes_0 = const()[name = tensor<string, []>("per_feature_mean_axes_0"), val = tensor<int32, [1]>([-1])];
+        tensor<bool, []> per_feature_mean_keep_dims_0 = const()[name = tensor<string, []>("per_feature_mean_keep_dims_0"), val = tensor<bool, []>(true)];
+        tensor<fp32, [128, 1]> per_feature_mean = reduce_mean(axes = per_feature_mean_axes_0, keep_dims = per_feature_mean_keep_dims_0, x = mel_spec_5)[name = tensor<string, []>("per_feature_mean")];
+        tensor<fp32, [128, 1501]> sub_0 = sub(x = mel_spec_5, y = per_feature_mean)[name = tensor<string, []>("sub_0")];
+        tensor<fp32, [128, 1501]> square_0 = square(x = sub_0)[name = tensor<string, []>("square_0")];
+        tensor<int32, [1]> reduce_mean_1_axes_0 = const()[name = tensor<string, []>("reduce_mean_1_axes_0"), val = tensor<int32, [1]>([-1])];
+        tensor<bool, []> reduce_mean_1_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_1_keep_dims_0"), val = tensor<bool, []>(true)];
+        tensor<fp32, [128, 1]> reduce_mean_1 = reduce_mean(axes = reduce_mean_1_axes_0, keep_dims = reduce_mean_1_keep_dims_0, x = square_0)[name = tensor<string, []>("reduce_mean_1")];
+        tensor<fp32, []> real_div_0 = const()[name = tensor<string, []>("real_div_0"), val = tensor<fp32, []>(0x1.002bbp+0)];
+        tensor<fp32, [128, 1]> mul_0 = mul(x = reduce_mean_1, y = real_div_0)[name = tensor<string, []>("mul_0")];
+        tensor<fp32, [128, 1]> sqrt_0 = sqrt(x = mul_0)[name = tensor<string, []>("sqrt_0")];
+        tensor<fp32, []> var_70 = const()[name = tensor<string, []>("op_70"), val = tensor<fp32, []>(0x1.4f8b58p-17)];
+        tensor<fp32, [128, 1]> per_feature_std = add(x = sqrt_0, y = var_70)[name = tensor<string, []>("per_feature_std")];
+        tensor<fp32, [128, 1501]> mel_spec = real_div(x = sub_0, y = per_feature_std)[name = tensor<string, []>("mel_spec")];
+        tensor<int32, [2]> var_75_perm_0 = const()[name = tensor<string, []>("op_75_perm_0"), val = tensor<int32, [2]>([1, 0])];
+        tensor<int32, [1]> var_77_axes_0 = const()[name = tensor<string, []>("op_77_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp32, [1501, 128]> var_75 = transpose(perm = var_75_perm_0, x = mel_spec)[name = tensor<string, []>("transpose_0")];
+        tensor<fp32, [1, 1501, 128]> var_77 = expand_dims(axes = var_77_axes_0, x = var_75)[name = tensor<string, []>("op_77")];
+        tensor<int32, [1]> var_79_axes_0 = const()[name = tensor<string, []>("op_79_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp32, [1, 1, 1501, 128]> var_79 = expand_dims(axes = var_79_axes_0, x = var_77)[name = tensor<string, []>("op_79")];
+        tensor<string, []> cast_7_dtype_0 = const()[name = tensor<string, []>("cast_7_dtype_0"), val = tensor<string, []>("fp16")];
+        tensor<fp16, [1, 1, 1501, 128]> melspectrogram_features = cast(dtype = cast_7_dtype_0, x = var_79)[name = tensor<string, []>("cast_8")];
+    } -> (melspectrogram_features);
+}
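Read top to bottom, the MIL program is a conventional log-mel front end: pre-emphasis with coefficient 0x1.f0a3d8p-1 (about 0.97, matching preprocessor_config.json), reflect padding by 256 samples, a pair of 512-tap stride-160 convolutions standing in for the real and imaginary STFT, squared magnitudes, a 128 × 257 mel filterbank matmul, log with a 2^-24 offset, and per-feature normalization over the 1501 frames. That last step scales the variance by 0x1.002bbp+0 (about 1501/1500) and adds roughly 1e-5 to the standard deviation. Below is a plain-Swift reference of just the normalization step, useful for checking exported features against the graph; it is my reading of ops 63 through 76, not code shipped in this repo.

```swift
// Reference sketch of the per-feature normalization at the end of the MIL program
// (reduce_mean, square, reduce_mean, sqrt, real_div). Each of the 128 mel rows is
// normalized over its 1501 frames; the n/(n-1) factor matches the constant
// 0x1.002bbp+0 (about 1501/1500) and the epsilon matches 0x1.4f8b58p-17 (about 1e-5).
func normalizePerFeature(_ melRows: [[Float]]) -> [[Float]] {
    melRows.map { row in
        let n = Float(row.count)                                   // 1501 frames per row
        let mean = row.reduce(0, +) / n
        let centered = row.map { $0 - mean }
        let biasedVariance = centered.map { $0 * $0 }.reduce(0, +) / n
        let std = (biasedVariance * n / (n - 1)).squareRoot() + 1e-5
        return centered.map { $0 / std }
    }
}
```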
canary-1b-v2/MelSpectrogram.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1570ad9beb3077cf61314606d8e4c30fc1d7b971e3bb73ee9444467faa9dd671
+size 1184512
canary-1b-v2/config.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "architectures": [
+    "ParakeetForCTC"
+  ],
+  "ctc_loss_reduction": "mean",
+  "ctc_zero_infinity": true,
+  "dtype": "float32",
+  "encoder_config": {
+    "activation_dropout": 0.1,
+    "attention_bias": true,
+    "attention_dropout": 0.1,
+    "conv_kernel_size": 9,
+    "dropout": 0.1,
+    "dropout_positions": 0.0,
+    "hidden_act": "silu",
+    "hidden_size": 1024,
+    "initializer_range": 0.02,
+    "intermediate_size": 4096,
+    "layerdrop": 0.1,
+    "max_position_embeddings": 5000,
+    "model_type": "parakeet_encoder",
+    "num_attention_heads": 8,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 8,
+    "num_mel_bins": 128,
+    "scale_input": false,
+    "subsampling_conv_channels": 256,
+    "subsampling_conv_kernel_size": 3,
+    "subsampling_conv_stride": 2,
+    "subsampling_factor": 8
+  },
+  "initializer_range": 0.02,
+  "model_type": "parakeet_ctc",
+  "pad_token_id": 1024,
+  "transformers_version": "4.57.1",
+  "vocab_size": 16385
+}
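The vocab_size of 16385 matches the second dimension of the encoder's ctc_head_raw_output [1, 16385, 1, 188], and 188 frames is consistent with subsampling_factor 8 applied to the 1501 mel frames (ceil(1501 / 8) = 188). A hedged sketch of greedy CTC decoding over that output follows; it assumes the logits have been rearranged to [frame][vocab] and that the blank symbol is the last class (16384), a common CTC convention that this config does not state, so verify against tokenizer.json before relying on it.

```swift
// Hedged sketch of greedy CTC decoding over ctc_head_raw_output. It assumes the
// [1, 16385, 1, 188] array has been rearranged to [frame][vocab] and that the
// blank symbol is the last class (16384); the config does not state the blank id.
func greedyCTCDecode(logits: [[Float]], blank: Int = 16_384) -> [Int] {
    var tokens: [Int] = []
    var previous = -1
    for frame in logits {
        guard let best = frame.indices.max(by: { frame[$0] < frame[$1] }) else { continue }
        // standard CTC rule: drop blanks and collapse consecutive repeats
        if best != blank && best != previous {
            tokens.append(best)
        }
        previous = best
    }
    return tokens   // token ids, to be mapped to text with tokenizer.json / vocab.json
}
```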
canary-1b-v2/preprocessor_config.json
ADDED
@@ -0,0 +1,13 @@
+{
+  "feature_extractor_type": "ParakeetFeatureExtractor",
+  "feature_size": 128,
+  "hop_length": 160,
+  "n_fft": 512,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "preemphasis": 0.97,
+  "processor_class": "ParakeetProcessor",
+  "return_attention_mask": true,
+  "sampling_rate": 16000,
+  "win_length": 400
+}
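These values account for every fixed shape seen above: 15 s at sampling_rate 16000 is the 240000-sample audio input, hop_length 160 yields 240000 / 160 + 1 = 1501 frames, n_fft 512 gives 257 spectral bins (the 257 × 1 × 512 convolution weights in the MIL program), and feature_size 128 is the mel-bin count in every [..., 1501, 128] shape. A small Swift check of that bookkeeping (constant names are illustrative, not from the repo):

```swift
// Sanity-check of the shape bookkeeping implied by preprocessor_config.json.
let sampleRate = 16_000        // "sampling_rate"
let hopLength = 160            // "hop_length"
let nFFT = 512                 // "n_fft"
let windowSamples = 240_000    // fixed audio input of MelSpectrogram.mlmodelc

let seconds = windowSamples / sampleRate      // 15
let frames = windowSamples / hopLength + 1    // 1501, the time axis of melspectrogram_features
let freqBins = nFFT / 2 + 1                   // 257, rows of the STFT conv weights in model.mil
print(seconds, frames, freqBins)              // 15 1501 257
```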
canary-1b-v2/tokenizer.json
ADDED
The diff for this file is too large to render.
canary-1b-v2/tokenizer_config.json
ADDED
The diff for this file is too large to render.
canary-1b-v2/vocab.json
ADDED
The diff for this file is too large to render.