litellm_settings:
  drop_params: true

model_list:
  - model_name: "HuggingFace: Mistral: Mistral 7B Instruct v0.1"
    litellm_params:
      model: huggingface/mistralai/Mistral-7B-Instruct-v0.1
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: "HuggingFace: Mistral: Mistral 7B Instruct v0.2"
    litellm_params:
      model: huggingface/mistralai/Mistral-7B-Instruct-v0.2
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: "HuggingFace: Meta: Llama 3 8B Instruct"
    litellm_params:
      model: huggingface/meta-llama/Meta-Llama-3-8B-Instruct
      api_key: os.environ/HF_TOKEN
      max_tokens: 2047
  - model_name: "HuggingFace: Mistral: Mixtral 8x7B Instruct v0.1"
    litellm_params:
      model: huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1
      api_key: os.environ/HF_TOKEN
      max_tokens: 8192
  - model_name: "HuggingFace: Microsoft: Phi-3 Mini-4K-Instruct"
    litellm_params:
      model: huggingface/microsoft/Phi-3-mini-4k-instruct
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: "HuggingFace: Google: Gemma 7B 1.1"
    litellm_params:
      model: huggingface/google/gemma-1.1-7b-it
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: "HuggingFace: Yi-1.5 34B Chat"
    litellm_params:
      model: huggingface/01-ai/Yi-1.5-34B-Chat
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: "HuggingFace: Nous Research: Nous Hermes 2 Mixtral 8x7B DPO"
    litellm_params:
      model: huggingface/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
      api_key: os.environ/HF_TOKEN
      max_tokens: 2048
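As a quick sanity check, here is a minimal sketch of calling one of these models through the proxy's OpenAI-compatible endpoint. It assumes the proxy was started with `litellm --config config.yaml` and is listening on LiteLLM's default port 4000; the placeholder `api_key` value only matters if the proxy is configured with a master key.

```python
# Minimal sketch: query the LiteLLM proxy with the OpenAI Python SDK (v1+).
# Assumptions: proxy running locally on the default port 4000, no master key enforced.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:4000",  # LiteLLM proxy endpoint (assumed default port)
    api_key="sk-anything",             # placeholder; only checked if a master_key is set
)

# The model argument must match one of the model_name entries in config.yaml.
response = client.chat.completions.create(
    model="HuggingFace: Mistral: Mistral 7B Instruct v0.1",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)

print(response.choices[0].message.content)
```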