Spaces:
Running
Running
whisper.objc : fix context + broken readme links
Browse files
examples/command.wasm/README.md
CHANGED
|
@@ -5,7 +5,7 @@ It runs fully in the browser via WebAssembly.
|
|
| 5 |
|
| 6 |
Online demo: https://whisper.ggerganov.com/command/
|
| 7 |
|
| 8 |
-
Terminal version:
|
| 9 |
|
| 10 |
## Build instructions
|
| 11 |
|
|
|
|
| 5 |
|
| 6 |
Online demo: https://whisper.ggerganov.com/command/
|
| 7 |
|
| 8 |
+
Terminal version: [examples/command](/examples/command)
|
| 9 |
|
| 10 |
## Build instructions
|
| 11 |
|
examples/command/README.md
CHANGED
|
@@ -13,7 +13,7 @@ More info is available in [issue #171](https://github.com/ggerganov/whisper.cpp/
|
|
| 13 |
|
| 14 |
https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4
|
| 15 |
|
| 16 |
-
Web version:
|
| 17 |
|
| 18 |
## Building
|
| 19 |
|
|
|
|
| 13 |
|
| 14 |
https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4
|
| 15 |
|
| 16 |
+
Web version: [examples/command.wasm](/examples/command.wasm)
|
| 17 |
|
| 18 |
## Building
|
| 19 |
|
examples/whisper.objc/README.md
CHANGED
|
@@ -5,6 +5,10 @@ The inference runs locally, on-device.
|
|
| 5 |
|
| 6 |
https://user-images.githubusercontent.com/1991296/197385372-962a6dea-bca1-4d50-bf96-1d8c27b98c81.mp4
|
| 7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
## Usage
|
| 9 |
|
| 10 |
```java
|
|
|
|
| 5 |
|
| 6 |
https://user-images.githubusercontent.com/1991296/197385372-962a6dea-bca1-4d50-bf96-1d8c27b98c81.mp4
|
| 7 |
|
| 8 |
+
Real-time transcription demo:
|
| 9 |
+
|
| 10 |
+
https://user-images.githubusercontent.com/1991296/204126266-ce4177c6-6eca-4bd9-bca8-0e46d9da2364.mp4
|
| 11 |
+
|
| 12 |
## Usage
|
| 13 |
|
| 14 |
```java
|
examples/whisper.objc/whisper.objc/ViewController.m
CHANGED
|
@@ -198,6 +198,7 @@ void AudioInputCallback(void * inUserData,
|
|
| 198 |
params.language = "en";
|
| 199 |
params.n_threads = max_threads;
|
| 200 |
params.offset_ms = 0;
|
|
|
|
| 201 |
params.single_segment = self->stateInp.isRealtime;
|
| 202 |
|
| 203 |
CFTimeInterval startTime = CACurrentMediaTime();
|
|
@@ -228,8 +229,11 @@ void AudioInputCallback(void * inUserData,
|
|
| 228 |
result = [result stringByAppendingString:[NSString stringWithUTF8String:text_cur]];
|
| 229 |
}
|
| 230 |
|
|
|
|
|
|
|
| 231 |
// append processing time
|
| 232 |
-
result = [result stringByAppendingString:[NSString stringWithFormat:@"\n\n[processing time: %5.3f s]", endTime - startTime]];
|
|
|
|
| 233 |
|
| 234 |
// dispatch the result to the main thread
|
| 235 |
dispatch_async(dispatch_get_main_queue(), ^{
|
|
|
|
| 198 |
params.language = "en";
|
| 199 |
params.n_threads = max_threads;
|
| 200 |
params.offset_ms = 0;
|
| 201 |
+
params.no_context = true;
|
| 202 |
params.single_segment = self->stateInp.isRealtime;
|
| 203 |
|
| 204 |
CFTimeInterval startTime = CACurrentMediaTime();
|
|
|
|
| 229 |
result = [result stringByAppendingString:[NSString stringWithUTF8String:text_cur]];
|
| 230 |
}
|
| 231 |
|
| 232 |
+
const float tRecording = (float)self->stateInp.n_samples / (float)self->stateInp.dataFormat.mSampleRate;
|
| 233 |
+
|
| 234 |
// append processing time
|
| 235 |
+
result = [result stringByAppendingString:[NSString stringWithFormat:@"\n\n[recording time: %5.3f s]", tRecording]];
|
| 236 |
+
result = [result stringByAppendingString:[NSString stringWithFormat:@" \n[processing time: %5.3f s]", endTime - startTime]];
|
| 237 |
|
| 238 |
// dispatch the result to the main thread
|
| 239 |
dispatch_async(dispatch_get_main_queue(), ^{
|