bweng committed
Commit a93ce63 · verified · 1 Parent(s): b10f709

Upload 5 files

Preprocessor.mlmodelc/analytics/coremldata.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cca5dad4b924708d2c78c0596f73bbc55518e3d0170896bf573f5a3d33859a43
+ oid sha256:c9beeb989c8d66f8be11df59bc6df277ec76cee404f6865b46243835ef562f6d
  size 243
Preprocessor.mlmodelc/coremldata.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7b610eb955ab8c5fe20c3588a6f1e043cf77482f1d0af9f8448399e11df83f42
- size 485
+ oid sha256:dbde3f2300842c1fd51ef3ff948a0bcffe65ffd2dca10707f2509f32c1d65b1d
+ size 486
Preprocessor.mlmodelc/metadata.json CHANGED
@@ -1,7 +1,7 @@
  [
  {
  "metadataOutputVersion" : "3.0",
- "shortDescription" : "Parakeet preprocessor (15 s window)",
+ "shortDescription" : "int8-linear quantized - preprocessor",
  "outputSchema" : [
  {
  "hasShapeFlexibility" : "0",
@@ -24,7 +24,7 @@
  "type" : "MultiArray"
  }
  ],
- "storagePrecision" : "Float16",
+ "storagePrecision" : "Int8",
  "modelParameters" : [

  ],
@@ -32,7 +32,6 @@
  "specificationVersion" : 8,
  "mlProgramOperationTypeHistogram" : {
  "Ios17.reshape" : 2,
- "Identity" : 1,
  "Ios17.matmul" : 1,
  "Select" : 3,
  "Ios17.expandDims" : 8,
@@ -42,6 +41,7 @@
  "Ios16.reduceSum" : 4,
  "Pad" : 1,
  "Ios17.log" : 1,
+ "Ios16.constexprAffineDequantize" : 4,
  "Ios17.less" : 1,
  "Ios17.sub" : 4,
  "Ios17.conv" : 2,
Preprocessor.mlmodelc/model.mil CHANGED
@@ -1,5 +1,5 @@
  program(1.0)
- [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3500.14.1"}, {"coremlc-version", "3500.32.1"}, {"coremltools-component-torch", "2.7.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "9.0b1"}})]
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3500.14.1"}, {"coremlc-version", "3500.32.1"}})]
  {
  func main<ios17>(tensor<int32, [1]> audio_length, tensor<fp32, [1, 240000]> audio_signal) {
  tensor<int32, []> var_9 = const()[name = tensor<string, []>("op_9"), val = tensor<int32, []>(1)];
@@ -11,7 +11,7 @@ program(1.0)
  tensor<int32, [1]> floor_div_0 = floor_div(x = var_37, y = var_10)[name = tensor<string, []>("floor_div_0")];
  tensor<string, []> var_38_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_38_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
  tensor<fp16, []> var_39_promoted_to_fp16 = const()[name = tensor<string, []>("op_39_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
- tensor<fp16, [1]> floor_div_0_to_fp16 = cast(dtype = var_38_to_fp16_dtype_0, x = floor_div_0)[name = tensor<string, []>("cast_14")];
+ tensor<fp16, [1]> floor_div_0_to_fp16 = cast(dtype = var_38_to_fp16_dtype_0, x = floor_div_0)[name = tensor<string, []>("cast_4")];
  tensor<fp16, [1]> seq_len_1_cast_fp16 = add(x = floor_div_0_to_fp16, y = var_39_promoted_to_fp16)[name = tensor<string, []>("seq_len_1_cast_fp16")];
  tensor<string, []> seq_len_dtype_0 = const()[name = tensor<string, []>("seq_len_dtype_0"), val = tensor<string, []>("int32")];
  tensor<int32, [2]> var_43_begin_0 = const()[name = tensor<string, []>("op_43_begin_0"), val = tensor<int32, [2]>([0, 0])];
@@ -19,7 +19,7 @@ program(1.0)
  tensor<bool, [2]> var_43_end_mask_0 = const()[name = tensor<string, []>("op_43_end_mask_0"), val = tensor<bool, [2]>([true, false])];
  tensor<bool, [2]> var_43_squeeze_mask_0 = const()[name = tensor<string, []>("op_43_squeeze_mask_0"), val = tensor<bool, [2]>([false, true])];
  tensor<string, []> audio_signal_to_fp16_dtype_0 = const()[name = tensor<string, []>("audio_signal_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
- tensor<fp16, [1, 240000]> audio_signal_to_fp16 = cast(dtype = audio_signal_to_fp16_dtype_0, x = audio_signal)[name = tensor<string, []>("cast_12")];
+ tensor<fp16, [1, 240000]> audio_signal_to_fp16 = cast(dtype = audio_signal_to_fp16_dtype_0, x = audio_signal)[name = tensor<string, []>("cast_3")];
  tensor<fp16, [1]> var_43_cast_fp16 = slice_by_index(begin = var_43_begin_0, end = var_43_end_0, end_mask = var_43_end_mask_0, squeeze_mask = var_43_squeeze_mask_0, x = audio_signal_to_fp16)[name = tensor<string, []>("op_43_cast_fp16")];
  tensor<int32, [1]> var_44_axes_0 = const()[name = tensor<string, []>("op_44_axes_0"), val = tensor<int32, [1]>([1])];
  tensor<fp16, [1, 1]> var_44_cast_fp16 = expand_dims(axes = var_44_axes_0, x = var_43_cast_fp16)[name = tensor<string, []>("op_44_cast_fp16")];
@@ -51,14 +51,14 @@ program(1.0)
  tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
  tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
  tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
- tensor<fp16, [257, 1, 512]> expand_dims_3_to_fp16 = const()[name = tensor<string, []>("expand_dims_3_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
- tensor<fp16, [1, 257, 1501]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_5, weight = expand_dims_3_to_fp16, x = expand_dims_6_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+ tensor<fp16, [257, 1, 512]> expand_dims_3_to_fp16_quantized = constexpr_affine_dequantize()[axis = tensor<int32, []>(0), name = tensor<string, []>("expand_dims_3_to_fp16_quantized"), quantized_data = tensor<int8, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), scale = tensor<fp16, [257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132096))), zero_point = tensor<int8, [257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(131712)))];
+ tensor<fp16, [1, 257, 1501]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_5, weight = expand_dims_3_to_fp16_quantized, x = expand_dims_6_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
  tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
  tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
  tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
  tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
- tensor<fp16, [257, 1, 512]> expand_dims_4_to_fp16 = const()[name = tensor<string, []>("expand_dims_4_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(263296)))];
- tensor<fp16, [1, 257, 1501]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_5, weight = expand_dims_4_to_fp16, x = expand_dims_6_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+ tensor<fp16, [257, 1, 512]> expand_dims_4_to_fp16_quantized = constexpr_affine_dequantize()[axis = tensor<int32, []>(0), name = tensor<string, []>("expand_dims_4_to_fp16_quantized"), quantized_data = tensor<int8, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132736))), scale = tensor<fp16, [257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(264768))), zero_point = tensor<int8, [257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(264384)))];
+ tensor<fp16, [1, 257, 1501]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_5, weight = expand_dims_4_to_fp16_quantized, x = expand_dims_6_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
  tensor<int32, []> stack_0_axis_0 = const()[name = tensor<string, []>("stack_0_axis_0"), val = tensor<int32, []>(-1)];
  tensor<fp16, [1, 257, 1501, 2]> stack_0_cast_fp16 = stack(axis = stack_0_axis_0, values = (conv_0_cast_fp16, conv_1_cast_fp16))[name = tensor<string, []>("stack_0_cast_fp16")];
  tensor<fp16, []> var_17_promoted_to_fp16 = const()[name = tensor<string, []>("op_17_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
@@ -66,33 +66,32 @@ program(1.0)
  tensor<int32, [1]> var_69_axes_0 = const()[name = tensor<string, []>("op_69_axes_0"), val = tensor<int32, [1]>([-1])];
  tensor<bool, []> var_69_keep_dims_0 = const()[name = tensor<string, []>("op_69_keep_dims_0"), val = tensor<bool, []>(false)];
  tensor<fp16, [1, 257, 1501]> var_69_cast_fp16 = reduce_sum(axes = var_69_axes_0, keep_dims = var_69_keep_dims_0, x = var_67_cast_fp16)[name = tensor<string, []>("op_69_cast_fp16")];
- tensor<fp16, [1, 257, 1501]> x_9_cast_fp16 = identity(x = var_69_cast_fp16)[name = tensor<string, []>("x_9_cast_fp16")];
  tensor<bool, []> x_11_transpose_x_0 = const()[name = tensor<string, []>("x_11_transpose_x_0"), val = tensor<bool, []>(false)];
  tensor<bool, []> x_11_transpose_y_0 = const()[name = tensor<string, []>("x_11_transpose_y_0"), val = tensor<bool, []>(false)];
- tensor<fp16, [1, 128, 257]> const_6_to_fp16 = const()[name = tensor<string, []>("const_6_to_fp16"), val = tensor<fp16, [1, 128, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(526528)))];
- tensor<fp16, [1, 128, 1501]> x_11_cast_fp16 = matmul(transpose_x = x_11_transpose_x_0, transpose_y = x_11_transpose_y_0, x = const_6_to_fp16, y = x_9_cast_fp16)[name = tensor<string, []>("x_11_cast_fp16")];
+ tensor<fp16, [1, 128, 257]> const_6_to_fp16_quantized = constexpr_affine_dequantize()[axis = tensor<int32, []>(1), name = tensor<string, []>("const_6_to_fp16_quantized"), quantized_data = tensor<int8, [1, 128, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(265408))), scale = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(298560))), zero_point = tensor<int8, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(298368)))];
+ tensor<fp16, [1, 128, 1501]> x_11_cast_fp16 = matmul(transpose_x = x_11_transpose_x_0, transpose_y = x_11_transpose_y_0, x = const_6_to_fp16_quantized, y = var_69_cast_fp16)[name = tensor<string, []>("x_11_cast_fp16")];
  tensor<fp16, []> var_76_to_fp16 = const()[name = tensor<string, []>("op_76_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
  tensor<fp16, [1, 128, 1501]> var_77_cast_fp16 = add(x = x_11_cast_fp16, y = var_76_to_fp16)[name = tensor<string, []>("op_77_cast_fp16")];
  tensor<fp32, []> x_13_epsilon_0 = const()[name = tensor<string, []>("x_13_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
  tensor<fp16, [1, 128, 1501]> x_13_cast_fp16 = log(epsilon = x_13_epsilon_0, x = var_77_cast_fp16)[name = tensor<string, []>("x_13_cast_fp16")];
  tensor<int32, [1, 1501]> var_82 = const()[name = tensor<string, []>("op_82"), val = tensor<int32, [1, 1501]>([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 
710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 
1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500]])];
  tensor<int32, [1]> var_85_axes_0 = const()[name = tensor<string, []>("op_85_axes_0"), val = tensor<int32, [1]>([1])];
- tensor<int32, [1]> mel_length = cast(dtype = seq_len_dtype_0, x = seq_len_1_cast_fp16)[name = tensor<string, []>("cast_13")];
+ tensor<int32, [1]> mel_length = cast(dtype = seq_len_dtype_0, x = seq_len_1_cast_fp16)[name = tensor<string, []>("cast_2")];
  tensor<int32, [1, 1]> var_85 = expand_dims(axes = var_85_axes_0, x = mel_length)[name = tensor<string, []>("op_85")];
  tensor<bool, [1, 1501]> valid_mask = less(x = var_82, y = var_85)[name = tensor<string, []>("valid_mask")];
  tensor<int32, [1]> var_87_axes_0 = const()[name = tensor<string, []>("op_87_axes_0"), val = tensor<int32, [1]>([1])];
  tensor<bool, [1, 1, 1501]> var_87 = expand_dims(axes = var_87_axes_0, x = valid_mask)[name = tensor<string, []>("op_87")];
  tensor<int32, [3]> var_87_after_broadcast_reps_0 = const()[name = tensor<string, []>("op_87_after_broadcast_reps_0"), val = tensor<int32, [3]>([1, 128, 1])];
  tensor<bool, [1, 128, 1501]> var_87_after_broadcast = tile(reps = var_87_after_broadcast_reps_0, x = var_87)[name = tensor<string, []>("op_87_after_broadcast")];
- tensor<fp16, [1, 128, 1501]> var_24_after_broadcast_to_fp16 = const()[name = tensor<string, []>("op_24_after_broadcast_to_fp16"), val = tensor<fp16, [1, 128, 1501]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(592384)))];
- tensor<fp16, [1, 128, 1501]> var_88_cast_fp16 = select(a = x_13_cast_fp16, b = var_24_after_broadcast_to_fp16, cond = var_87_after_broadcast)[name = tensor<string, []>("op_88_cast_fp16")];
+ tensor<fp16, [1, 128, 1501]> op_24_after_broadcast_to_fp16_quantized = constexpr_affine_dequantize()[axis = tensor<int32, []>(0), name = tensor<string, []>("op_24_after_broadcast_to_fp16_quantized"), quantized_data = tensor<int8, [1, 128, 1501]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(298880))), scale = tensor<fp16, []>(0x0p+0), zero_point = tensor<int8, []>(0)];
+ tensor<fp16, [1, 128, 1501]> var_88_cast_fp16 = select(a = x_13_cast_fp16, b = op_24_after_broadcast_to_fp16_quantized, cond = var_87_after_broadcast)[name = tensor<string, []>("op_88_cast_fp16")];
  tensor<int32, [1]> x_mean_numerator_axes_0 = const()[name = tensor<string, []>("x_mean_numerator_axes_0"), val = tensor<int32, [1]>([2])];
  tensor<bool, []> x_mean_numerator_keep_dims_0 = const()[name = tensor<string, []>("x_mean_numerator_keep_dims_0"), val = tensor<bool, []>(false)];
  tensor<fp16, [1, 128]> x_mean_numerator_cast_fp16 = reduce_sum(axes = x_mean_numerator_axes_0, keep_dims = x_mean_numerator_keep_dims_0, x = var_88_cast_fp16)[name = tensor<string, []>("x_mean_numerator_cast_fp16")];
  tensor<int32, [1]> x_mean_denominator_axes_0 = const()[name = tensor<string, []>("x_mean_denominator_axes_0"), val = tensor<int32, [1]>([1])];
  tensor<bool, []> x_mean_denominator_keep_dims_0 = const()[name = tensor<string, []>("x_mean_denominator_keep_dims_0"), val = tensor<bool, []>(false)];
  tensor<string, []> cast_2_to_fp16_dtype_0 = const()[name = tensor<string, []>("cast_2_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
- tensor<fp16, [1, 1501]> valid_mask_to_fp16 = cast(dtype = cast_2_to_fp16_dtype_0, x = valid_mask)[name = tensor<string, []>("cast_11")];
+ tensor<fp16, [1, 1501]> valid_mask_to_fp16 = cast(dtype = cast_2_to_fp16_dtype_0, x = valid_mask)[name = tensor<string, []>("cast_1")];
  tensor<fp16, [1]> x_mean_denominator_cast_fp16 = reduce_sum(axes = x_mean_denominator_axes_0, keep_dims = x_mean_denominator_keep_dims_0, x = valid_mask_to_fp16)[name = tensor<string, []>("x_mean_denominator_cast_fp16")];
  tensor<int32, [1]> var_93_axes_0 = const()[name = tensor<string, []>("op_93_axes_0"), val = tensor<int32, [1]>([1])];
  tensor<fp16, [1, 1]> var_93_cast_fp16 = expand_dims(axes = var_93_axes_0, x = x_mean_denominator_cast_fp16)[name = tensor<string, []>("op_93_cast_fp16")];
@@ -100,7 +99,7 @@ program(1.0)
  tensor<int32, [1]> var_96_axes_0 = const()[name = tensor<string, []>("op_96_axes_0"), val = tensor<int32, [1]>([2])];
  tensor<fp16, [1, 128, 1]> var_96_cast_fp16 = expand_dims(axes = var_96_axes_0, x = x_mean_cast_fp16)[name = tensor<string, []>("op_96_cast_fp16")];
  tensor<fp16, [1, 128, 1501]> var_97_cast_fp16 = sub(x = x_13_cast_fp16, y = var_96_cast_fp16)[name = tensor<string, []>("op_97_cast_fp16")];
- tensor<fp16, [1, 128, 1501]> var_98_cast_fp16 = select(a = var_97_cast_fp16, b = var_24_after_broadcast_to_fp16, cond = var_87_after_broadcast)[name = tensor<string, []>("op_98_cast_fp16")];
+ tensor<fp16, [1, 128, 1501]> var_98_cast_fp16 = select(a = var_97_cast_fp16, b = op_24_after_broadcast_to_fp16_quantized, cond = var_87_after_broadcast)[name = tensor<string, []>("op_98_cast_fp16")];
  tensor<fp16, []> var_17_promoted_1_to_fp16 = const()[name = tensor<string, []>("op_17_promoted_1_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
  tensor<fp16, [1, 128, 1501]> var_99_cast_fp16 = pow(x = var_98_cast_fp16, y = var_17_promoted_1_to_fp16)[name = tensor<string, []>("op_99_cast_fp16")];
  tensor<int32, [1]> var_101_axes_0 = const()[name = tensor<string, []>("op_101_axes_0"), val = tensor<int32, [1]>([2])];
@@ -121,6 +120,6 @@ program(1.0)
  tensor<fp16, []> var_24_to_fp16 = const()[name = tensor<string, []>("op_24_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
  tensor<fp16, [1, 128, 1501]> processed_signal_cast_fp16 = select(a = var_24_to_fp16, b = x_cast_fp16, cond = var_119)[name = tensor<string, []>("processed_signal_cast_fp16")];
  tensor<string, []> processed_signal_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("processed_signal_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
- tensor<fp32, [1, 128, 1501]> mel = cast(dtype = processed_signal_cast_fp16_to_fp32_dtype_0, x = processed_signal_cast_fp16)[name = tensor<string, []>("cast_10")];
+ tensor<fp32, [1, 128, 1501]> mel = cast(dtype = processed_signal_cast_fp16_to_fp32_dtype_0, x = processed_signal_cast_fp16)[name = tensor<string, []>("cast_0")];
  } -> (mel, mel_length);
  }
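
For reference, each constexpr_affine_dequantize above reconstructs an fp16 weight from its int8 payload as scale * (quantized_data - zero_point), with scale and zero_point broadcast along the stated axis. A small numpy illustration with made-up values and the same shapes as the first conv weight (the real scales and zero points live in weight.bin):

```python
# Illustration of what constexpr_affine_dequantize computes; values are random.
import numpy as np

q = np.random.randint(-128, 128, size=(257, 1, 512), dtype=np.int8)  # quantized_data
scale = np.random.rand(257).astype(np.float16)                       # per-channel scale (axis = 0)
zero_point = np.zeros(257, dtype=np.int8)                            # per-channel zero point

# Broadcast scale/zero_point over axis 0, then dequantize to fp16.
w_fp16 = scale[:, None, None] * (q.astype(np.float16) - zero_point[:, None, None].astype(np.float16))
print(w_fp16.shape, w_fp16.dtype)  # (257, 1, 512) float16
```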
Preprocessor.mlmodelc/weights/weight.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f3c3d600f5fc6311ceff6406c10aeb4285a7f0fda4de6ac86b5bee3e76f80e32
- size 976704
+ oid sha256:129b76e3aeafa8afa3ea76d995b964b145fe83700d579f6ff42c4c38fa0968ea
+ size 491072