Commit: Update llm.py

File changed: lightrag/llm.py (+8 −2)
```diff
@@ -350,7 +350,10 @@ async def ollama_model_if_cache(
     timeout = kwargs.pop("timeout", None)
     kwargs.pop("hashing_kv", None)
     api_key = kwargs.pop("api_key", None)
-    headers = {
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}"
+    } if api_key else {"Content-Type": "application/json"}
     ollama_client = ollama.AsyncClient(host=host, timeout=timeout, headers=headers)
     messages = []
     if system_prompt:
@@ -383,7 +386,10 @@ async def lollms_model_if_cache(

     stream = True if kwargs.get("stream") else False
     api_key = kwargs.pop("api_key", None)
-    headers = {
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}"
+    } if api_key else {"Content-Type": "application/json"}

     # Extract lollms specific parameters
     request_data = {
```
(Note: the removed line in each hunk appears truncated to `headers = {` in the original capture; its full original content was not preserved by the extraction.)
|