ggerganov committed on
Commit
32cfce9
·
1 Parent(s): 6ef5667

examples : fix compile warnings [no ci] (#0)

Browse files
examples/common.cpp CHANGED
@@ -147,7 +147,6 @@ std::string gpt_random_prompt(std::mt19937 & rng) {
147
  case 7: return "He";
148
  case 8: return "She";
149
  case 9: return "They";
150
- default: return "To";
151
  }
152
 
153
  return "The";
 
147
  case 7: return "He";
148
  case 8: return "She";
149
  case 9: return "They";
 
150
  }
151
 
152
  return "The";
examples/talk-llama/talk-llama.cpp CHANGED
@@ -417,7 +417,7 @@ int main(int argc, char ** argv) {
417
 
418
  session_tokens.resize(llama_n_ctx(ctx_llama));
419
  size_t n_token_count_out = 0;
420
- if (!llama_load_session_file(ctx_llama, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
421
  fprintf(stderr, "%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
422
  return 1;
423
  }
@@ -709,7 +709,7 @@ int main(int argc, char ** argv) {
709
 
710
  if (!path_session.empty() && need_to_save_session) {
711
  need_to_save_session = false;
712
- llama_save_session_file(ctx_llama, path_session.c_str(), session_tokens.data(), session_tokens.size());
713
  }
714
 
715
  llama_token id = 0;
 
417
 
418
  session_tokens.resize(llama_n_ctx(ctx_llama));
419
  size_t n_token_count_out = 0;
420
+ if (!llama_state_load_file(ctx_llama, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
421
  fprintf(stderr, "%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
422
  return 1;
423
  }
 
709
 
710
  if (!path_session.empty() && need_to_save_session) {
711
  need_to_save_session = false;
712
+ llama_state_save_file(ctx_llama, path_session.c_str(), session_tokens.data(), session_tokens.size());
713
  }
714
 
715
  llama_token id = 0;