morriszms commited on
Commit
6b9d938
·
verified ·
1 Parent(s): 0f0ff0b

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Veena-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ Veena-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ Veena-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ Veena-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ Veena-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ Veena-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ Veena-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ Veena-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ Veena-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ Veena-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ Veena-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ Veena-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ language:
4
+ - en
5
+ - hi
6
+ library_name: transformers
7
+ tags:
8
+ - text-to-speech
9
+ - tts
10
+ - hindi
11
+ - english
12
+ - llama
13
+ - audio
14
+ - speech
15
+ - india
16
+ - TensorBlock
17
+ - GGUF
18
+ datasets:
19
+ - proprietary
20
+ pipeline_tag: text-to-speech
21
+ co2_eq_emissions:
22
+ emissions: 0
23
+ source: Not specified
24
+ training_type: unknown
25
+ geographical_location: unknown
26
+ base_model: maya-research/Veena
27
+ ---
28
+
29
+ <div style="width: auto; margin-left: auto; margin-right: auto">
30
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
31
+ </div>
32
+
33
+ [![Website](https://img.shields.io/badge/Website-tensorblock.co-blue?logo=google-chrome&logoColor=white)](https://tensorblock.co)
34
+ [![Twitter](https://img.shields.io/twitter/follow/tensorblock_aoi?style=social)](https://twitter.com/tensorblock_aoi)
35
+ [![Discord](https://img.shields.io/badge/Discord-Join%20Us-5865F2?logo=discord&logoColor=white)](https://discord.gg/Ej5NmeHFf2)
36
+ [![GitHub](https://img.shields.io/badge/GitHub-TensorBlock-black?logo=github&logoColor=white)](https://github.com/TensorBlock)
37
+ [![Telegram](https://img.shields.io/badge/Telegram-Group-blue?logo=telegram)](https://t.me/TensorBlock)
38
+
39
+
40
+ ## maya-research/Veena - GGUF
41
+
42
+ <div style="text-align: left; margin: 20px 0;">
43
+ <a href="https://discord.com/invite/Ej5NmeHFf2" style="display: inline-block; padding: 10px 20px; background-color: #5865F2; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
44
+ Join our Discord to learn more about what we're building ↗
45
+ </a>
46
+ </div>
47
+
48
+ This repo contains GGUF format model files for [maya-research/Veena](https://huggingface.co/maya-research/Veena).
49
+
50
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b5753](https://github.com/ggml-org/llama.cpp/commit/73e53dc834c0a2336cd104473af6897197b96277).
51
+
52
+ ## Our projects
53
+ <table border="1" cellspacing="0" cellpadding="10">
54
+ <tr>
55
+ <th colspan="2" style="font-size: 25px;">Forge</th>
56
+ </tr>
57
+ <tr>
58
+ <th colspan="2">
59
+ <img src="https://imgur.com/faI5UKh.jpeg" alt="Forge Project" width="900"/>
60
+ </th>
61
+ </tr>
62
+ <tr>
63
+ <th colspan="2">An OpenAI-compatible multi-provider routing layer.</th>
64
+ </tr>
65
+ <tr>
66
+ <th colspan="2">
67
+ <a href="https://github.com/TensorBlock/forge" target="_blank" style="
68
+ display: inline-block;
69
+ padding: 8px 16px;
70
+ background-color: #FF7F50;
71
+ color: white;
72
+ text-decoration: none;
73
+ border-radius: 6px;
74
+ font-weight: bold;
75
+ font-family: sans-serif;
76
+ ">🚀 Try it now! 🚀</a>
77
+ </th>
78
+ </tr>
79
+
80
+ <tr>
81
+ <th style="font-size: 25px;">Awesome MCP Servers</th>
82
+ <th style="font-size: 25px;">TensorBlock Studio</th>
83
+ </tr>
84
+ <tr>
85
+ <th><img src="https://imgur.com/2Xov7B7.jpeg" alt="MCP Servers" width="450"/></th>
86
+ <th><img src="https://imgur.com/pJcmF5u.jpeg" alt="Studio" width="450"/></th>
87
+ </tr>
88
+ <tr>
89
+ <th>A comprehensive collection of Model Context Protocol (MCP) servers.</th>
90
+ <th>A lightweight, open, and extensible multi-LLM interaction studio.</th>
91
+ </tr>
92
+ <tr>
93
+ <th>
94
+ <a href="https://github.com/TensorBlock/awesome-mcp-servers" target="_blank" style="
95
+ display: inline-block;
96
+ padding: 8px 16px;
97
+ background-color: #FF7F50;
98
+ color: white;
99
+ text-decoration: none;
100
+ border-radius: 6px;
101
+ font-weight: bold;
102
+ font-family: sans-serif;
103
+ ">👀 See what we built 👀</a>
104
+ </th>
105
+ <th>
106
+ <a href="https://github.com/TensorBlock/TensorBlock-Studio" target="_blank" style="
107
+ display: inline-block;
108
+ padding: 8px 16px;
109
+ background-color: #FF7F50;
110
+ color: white;
111
+ text-decoration: none;
112
+ border-radius: 6px;
113
+ font-weight: bold;
114
+ font-family: sans-serif;
115
+ ">👀 See what we built 👀</a>
116
+ </th>
117
+ </tr>
118
+ </table>
119
+
120
+ ## Prompt template
121
+
122
+ ```
123
+ <|begin_of_text|><|start_header_id|>system<|end_header_id|>
124
+
125
+ Cutting Knowledge Date: December 2023
126
+ Today Date: 21 Jul 2025
127
+
128
+ {system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>
129
+
130
+ {prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
131
+ ```
132
+
133
+ ## Model file specification
134
+
135
+ | Filename | Quant type | File Size | Description |
136
+ | -------- | ---------- | --------- | ----------- |
137
+ | [Veena-Q2_K.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q2_K.gguf) | Q2_K | 1.595 GB | smallest, significant quality loss - not recommended for most purposes |
138
+ | [Veena-Q3_K_S.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q3_K_S.gguf) | Q3_K_S | 1.823 GB | very small, high quality loss |
139
+ | [Veena-Q3_K_M.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q3_K_M.gguf) | Q3_K_M | 1.968 GB | very small, high quality loss |
140
+ | [Veena-Q3_K_L.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q3_K_L.gguf) | Q3_K_L | 2.096 GB | small, substantial quality loss |
141
+ | [Veena-Q4_0.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q4_0.gguf) | Q4_0 | 2.262 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
142
+ | [Veena-Q4_K_S.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q4_K_S.gguf) | Q4_K_S | 2.273 GB | small, greater quality loss |
143
+ | [Veena-Q4_K_M.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q4_K_M.gguf) | Q4_K_M | 2.364 GB | medium, balanced quality - recommended |
144
+ | [Veena-Q5_0.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q5_0.gguf) | Q5_0 | 2.674 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
145
+ | [Veena-Q5_K_S.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q5_K_S.gguf) | Q5_K_S | 2.674 GB | large, low quality loss - recommended |
146
+ | [Veena-Q5_K_M.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q5_K_M.gguf) | Q5_K_M | 2.727 GB | large, very low quality loss - recommended |
147
+ | [Veena-Q6_K.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q6_K.gguf) | Q6_K | 3.113 GB | very large, extremely low quality loss |
148
+ | [Veena-Q8_0.gguf](https://huggingface.co/tensorblock/maya-research_Veena-GGUF/blob/main/Veena-Q8_0.gguf) | Q8_0 | 4.029 GB | very large, extremely low quality loss - not recommended |
149
+
150
+
151
+ ## Downloading instruction
152
+
153
+ ### Command line
154
+
155
+ First, install the Hugging Face CLI client
156
+
157
+ ```shell
158
+ pip install -U "huggingface_hub[cli]"
159
+ ```
160
+
161
+ Then, download the individual model file to a local directory
162
+
163
+ ```shell
164
+ huggingface-cli download tensorblock/maya-research_Veena-GGUF --include "Veena-Q2_K.gguf" --local-dir MY_LOCAL_DIR
165
+ ```
166
+
167
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
168
+
169
+ ```shell
170
+ huggingface-cli download tensorblock/maya-research_Veena-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
171
+ ```
Veena-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4055f32b6cf29ae26e034fa8ef8a8d5b0289df947cd933f746349648621ee185
3
+ size 1595356896
Veena-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:def05c8f11f2c87ef2749298ce23a950508f8f4ddfb687d639e55db090b9f118
3
+ size 2095737568
Veena-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:145abb10c8c8b8428eb023df2457b6e70ec1b911bf3408da2cdad310add54efe
3
+ size 1967549152
Veena-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d172ac9b7e109e78d8ac802a343f500454d4c6f8fc40d90cb0b681cbf0471ef4
3
+ size 1823238880
Veena-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ad9c8e2fb036bbb0684a9352f4022c17be90fa00b77d0799f82fe28b1617172
3
+ size 2261616512
Veena-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1267d7ae99f81f6403cbb26705f425de65febb422ebac692e726b4cb3748ac30
3
+ size 2363803520
Veena-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28f9e2b9c12555f64186a077bc332c1ab63235b6b5a2c45865980a107a6712f4
3
+ size 2272626560
Veena-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c529ceafc20c64a4a0d791ab164cbbe9257c02751b1e919ff9b7ef5486fa63a
3
+ size 2674207232
Veena-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c19a01084ba8edc591a30e0f1af79dcfe564131dc70cd8b58aee9ef0cae12be
3
+ size 2726849024
Veena-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:015ebb0b8f1bc3c5795d612372ea2b26b144238f35e8448039bdd94b015bb237
3
+ size 2674207232
Veena-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86cef94d090f88051d796cd605b2ef7d375e4f4d5e89197ecee8b18a346bbe99
3
+ size 3112584896
Veena-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ede4b9698c0a72cf976d20f9c3c29b2567246287168477e0d92d6350a637a56
3
+ size 4028750912