stream : few updates to make it compatible for Vim usage (#99)
examples/stream/stream.cpp  CHANGED  (+57, -21)
@@ -17,6 +17,7 @@
 #include <string>
 #include <thread>
 #include <vector>
+#include <fstream>
 
 //  500 -> 00:05.000
 // 6000 -> 01:00.000
@@ -47,7 +48,7 @@ struct whisper_params {
 
     std::string language = "en";
     std::string model = "models/ggml-base.en.bin";
-    std::string
+    std::string fname_out = "";
 };
 
 void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
@@ -84,7 +85,7 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
         } else if (arg == "-m" || arg == "--model") {
            params.model = argv[++i];
        } else if (arg == "-f" || arg == "--file") {
-           params.
+           params.fname_out = argv[++i];
        } else if (arg == "-h" || arg == "--help") {
            whisper_print_usage(argc, argv, params);
            exit(0);
@@ -115,7 +116,7 @@ void whisper_print_usage(int argc, char ** argv, const whisper_params & params)
     fprintf(stderr, " -nt, --no_timestamps do not print timestamps\n");
     fprintf(stderr, " -l LANG, --language LANG spoken language (default: %s)\n", params.language.c_str());
     fprintf(stderr, " -m FNAME, --model FNAME model path (default: %s)\n", params.model.c_str());
-    fprintf(stderr, " -f FNAME, --file FNAME
+    fprintf(stderr, " -f FNAME, --file FNAME text output file name (default: no output to file)\n");
     fprintf(stderr, "\n");
 }
 
@@ -143,9 +144,9 @@ bool audio_sdl_init(const int capture_id) {
 
     {
        int nDevices = SDL_GetNumAudioDevices(SDL_TRUE);
-
+       fprintf(stderr, "%s: found %d capture devices:\n", __func__, nDevices);
        for (int i = 0; i < nDevices; i++) {
-
+           fprintf(stderr, "%s: - Capture device #%d: '%s'\n", __func__, i, SDL_GetAudioDeviceName(i, SDL_TRUE));
        }
    }
 }
@@ -163,21 +164,21 @@ bool audio_sdl_init(const int capture_id) {
     capture_spec_requested.samples = 1024;
 
     if (capture_id >= 0) {
-
+       fprintf(stderr, "%s: attempt to open capture device %d : '%s' ...\n", __func__, capture_id, SDL_GetAudioDeviceName(capture_id, SDL_TRUE));
        g_dev_id_in = SDL_OpenAudioDevice(SDL_GetAudioDeviceName(capture_id, SDL_TRUE), SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
    } else {
-
+       fprintf(stderr, "%s: attempt to open default capture device ...\n", __func__);
        g_dev_id_in = SDL_OpenAudioDevice(nullptr, SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
    }
    if (!g_dev_id_in) {
-
+       fprintf(stderr, "%s: couldn't open an audio device for capture: %s!\n", __func__, SDL_GetError());
        g_dev_id_in = 0;
    } else {
-
-
-
-
-
+       fprintf(stderr, "%s: obtained spec for input device (SDL Id = %d):\n", __func__, g_dev_id_in);
+       fprintf(stderr, "%s: - sample rate: %d\n", __func__, capture_spec_obtained.freq);
+       fprintf(stderr, "%s: - format: %d (required: %d)\n", __func__, capture_spec_obtained.format, capture_spec_requested.format);
+       fprintf(stderr, "%s: - channels: %d (required: %d)\n", __func__, capture_spec_obtained.channels, capture_spec_requested.channels);
+       fprintf(stderr, "%s: - samples per frame: %d\n", __func__, capture_spec_obtained.samples);
    }
 }
 
@@ -212,6 +213,7 @@ int main(int argc, char ** argv) {
     const int n_samples = (params.step_ms/1000.0)*WHISPER_SAMPLE_RATE;
     const int n_samples_len = (params.length_ms/1000.0)*WHISPER_SAMPLE_RATE;
     const int n_samples_30s = 30*WHISPER_SAMPLE_RATE;
+
     std::vector<float> pcmf32(n_samples_30s, 0.0f);
     std::vector<float> pcmf32_old;
 
@@ -219,15 +221,15 @@ int main(int argc, char ** argv) {
 
     // print some info about the processing
     {
-
+       fprintf(stderr, "\n");
        if (!whisper_is_multilingual(ctx)) {
            if (params.language != "en" || params.translate) {
                params.language = "en";
                params.translate = false;
-
+               fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
            }
        }
-
+       fprintf(stderr, "%s: processing %d samples (step = %.1f sec / len = %.1f sec), %d threads, lang = %s, task = %s, timestamps = %d ...\n",
               __func__,
               n_samples,
               float(n_samples)/WHISPER_SAMPLE_RATE,
@@ -237,8 +239,8 @@ int main(int argc, char ** argv) {
               params.translate ? "translate" : "transcribe",
               params.no_timestamps ? 0 : 1);
 
-
-
+       fprintf(stderr, "%s: n_new_line = %d\n", __func__, n_new_line);
+       fprintf(stderr, "\n");
    }
 
    SDL_PauseAudioDevice(g_dev_id_in, 0);
@@ -246,6 +248,18 @@ int main(int argc, char ** argv) {
     int n_iter = 0;
     bool is_running = true;
 
+    std::ofstream fout;
+    if (params.fname_out.length() > 0) {
+        fout.open(params.fname_out);
+        if (!fout.is_open()) {
+            fprintf(stderr, "%s: failed to open output file '%s'!\n", __func__, params.fname_out.c_str());
+            return 1;
+        }
+    }
+
+    printf("[Start speaking]");
+    fflush(stdout);
+
    // main audio loop
    while (is_running) {
        // process SDL events:
@@ -253,13 +267,18 @@ int main(int argc, char ** argv) {
        while (SDL_PollEvent(&event)) {
            switch (event.type) {
                case SDL_QUIT:
-
-
+                    {
+                        is_running = false;
+                    } break;
                default:
                    break;
            }
        }
 
+        if (!is_running) {
+            break;
+        }
+
        // process new audio
        if (n_iter > 0 && SDL_GetQueuedAudioSize(g_dev_id_in) > 2*n_samples*sizeof(float)) {
            fprintf(stderr, "\n\n%s: WARNING: cannot process audio fast enough, dropping audio ...\n\n", __func__);
@@ -312,20 +331,37 @@ int main(int argc, char ** argv) {
        {
            printf("\33[2K\r");
 
+            // print long empty line to clear the previous line
+            printf("%s", std::string(100, ' ').c_str());
+
+            printf("\33[2K\r");
+
            const int n_segments = whisper_full_n_segments(ctx);
            for (int i = 0; i < n_segments; ++i) {
                const char * text = whisper_full_get_segment_text(ctx, i);
 
                if (params.no_timestamps) {
-                    printf
+                    printf("%s", text);
                    fflush(stdout);
+
+                    if (params.fname_out.length() > 0) {
+                        fout << text;
+                    }
                } else {
                    const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
                    const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
 
                    printf ("[%s --> %s] %s\n", to_timestamp(t0).c_str(), to_timestamp(t1).c_str(), text);
+
+                    if (params.fname_out.length() > 0) {
+                        fout << "[" << to_timestamp(t0) << " --> " << to_timestamp(t1) << "] " << text << std::endl;
+                    }
                }
            }
+
+            if (params.fname_out.length() > 0) {
+                fout << std::endl;
+            }
        }
 
        ++n_iter;
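The diff above follows one consistent pattern: device and progress messages go to stderr, while the transcribed text goes to stdout and, when -f FNAME is given, to a text file opened with std::ofstream. Keeping stdout limited to the transcription is what makes the stream usable from an editor such as Vim. The sketch below is not part of the commit; it is a minimal, self-contained illustration of that output routing, and the names fname_out and text are reused from the diff purely for illustration.

// Illustrative sketch (not from the commit): route diagnostics to stderr and
// transcription text to stdout plus an optional output file, as the diff does.
#include <cstdio>
#include <fstream>
#include <string>

int main(int argc, char ** argv) {
    const std::string fname_out = argc > 1 ? argv[1] : ""; // optional output file, mirrors params.fname_out

    std::ofstream fout;
    if (!fname_out.empty()) {
        fout.open(fname_out);
        if (!fout.is_open()) {
            fprintf(stderr, "failed to open output file '%s'!\n", fname_out.c_str());
            return 1;
        }
    }

    fprintf(stderr, "starting ...\n");      // status goes to stderr, never to stdout

    const char * text = "example segment";  // stand-in for a transcribed segment
    printf("\33[2K\r");                     // erase the current terminal line before reprinting
    printf("%s", text);                     // transcription goes to stdout
    fflush(stdout);

    if (fout.is_open()) {
        fout << text << std::endl;          // and, optionally, to the file
    }

    return 0;
}

With the new option the stream example can be pointed at a file, e.g. ./stream -m models/ggml-base.en.bin -f out.txt (an illustrative invocation based on the flags shown above); the file can then be read or followed from the editor while stderr keeps the capture-device and progress messages out of the way.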