Spaces:
Sleeping
Sleeping
talk-llama : fix n_gpu_layers usage again (#1442)
Browse files
examples/talk-llama/talk-llama.cpp
CHANGED
```diff
@@ -267,7 +267,7 @@ int main(int argc, char ** argv) {

     auto lmparams = llama_model_default_params();
     if (!params.use_gpu) {
-
+        lmparams.n_gpu_layers = 0;
     }

     struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
```
(The change fills in the previously empty `if (!params.use_gpu)` body so that CPU-only mode explicitly sets `n_gpu_layers = 0` on the model parameters before loading the model.)