```yaml
name: gemma
model: gemma:7B
version: 1
# Results Preferences
stop:
  - <end_of_turn>
  - <eos>
top_p: 0.95
temperature: 0.7
frequency_penalty: 0
presence_penalty: 0
max_tokens: 4096 # Infer from base config.json -> max_position_embeddings
stream: true # true | false
# Engine / Model Settings
ngl: 33 # Number of layers to offload to GPU; infer from base config.json -> num_hidden_layers
ctx_len: 4096 # Infer from base config.json -> max_position_embeddings
engine: llama-cpp
prompt_template: "<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model"
```
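
These fields map almost one-to-one onto the llama.cpp completion API. Below is a minimal sketch of how they could be applied, assuming the llama-cpp-python bindings, a `model.yml` file on disk, and a hypothetical local GGUF path; none of these are specified by the config itself.

```python
# Sketch only: load model.yml and apply its settings via llama-cpp-python.
# The GGUF path and file name below are assumptions, not part of the config.
import yaml
from llama_cpp import Llama

with open("model.yml") as f:                  # hypothetical file name
    cfg = yaml.safe_load(f)

llm = Llama(
    model_path="gemma-7b.Q4_K_M.gguf",        # hypothetical local GGUF path
    n_ctx=cfg["ctx_len"],                     # 4096
    n_gpu_layers=cfg["ngl"],                  # 33
)

# Render the Gemma chat template: substitute the user message into {prompt}.
prompt = cfg["prompt_template"].replace("{prompt}", "Why is the sky blue?")

out = llm(
    prompt,
    max_tokens=cfg["max_tokens"],
    temperature=cfg["temperature"],
    top_p=cfg["top_p"],
    frequency_penalty=cfg["frequency_penalty"],
    presence_penalty=cfg["presence_penalty"],
    stop=cfg["stop"],                         # ["<end_of_turn>", "<eos>"]
    stream=False,                             # set True to iterate over chunks
)
print(out["choices"][0]["text"])
```

With `stream: true`, the call instead yields completion chunks that can be iterated over and printed incrementally; the stop sequences above end generation at Gemma's turn delimiters.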