jhenhong committed on
Commit
37d6862
·
unverified ·
1 Parent(s): a94a8ce

talk-llama : fix n_gpu_layers usage again (#1442)

Browse files
Files changed (1) hide show
  1. examples/talk-llama/talk-llama.cpp +1 -1
examples/talk-llama/talk-llama.cpp CHANGED
@@ -267,7 +267,7 @@ int main(int argc, char ** argv) {
267
 
268
  auto lmparams = llama_model_default_params();
269
  if (!params.use_gpu) {
270
- lcparams.lmparams = 0;
271
  }
272
 
273
  struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
 
267
 
268
  auto lmparams = llama_model_default_params();
269
  if (!params.use_gpu) {
270
+ lmparams.n_gpu_layers = 0;
271
  }
272
 
273
  struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);