ParisNeo committed on
Commit
ee58ad3
·
1 Parent(s): 58d4eab

fixed linting

Browse files
extra/OpenWebuiTool/openwebui_tool.py CHANGED
@@ -23,12 +23,8 @@ Requirements:
23
  - Lightrag
24
  """
25
 
26
- from pathlib import Path
27
- from typing import Optional, List, Dict, Union, Any
28
- from datetime import datetime
29
-
30
  # Tool version
31
  __version__ = "1.0.0"
32
  __author__ = "ParisNeo"
33
  __author_email__ = "[email protected]"
34
- __description__ = "Lightrag integration for OpenWebui"
 
23
  - Lightrag
24
  """
25
 
 
 
 
 
26
  # Tool version
27
  __version__ = "1.0.0"
28
  __author__ = "ParisNeo"
29
  __author_email__ = "[email protected]"
30
+ __description__ = "Lightrag integration for OpenWebui"
lightrag/api/lightrag_server.py CHANGED
@@ -297,15 +297,13 @@ def parse_args() -> argparse.Namespace:
297
  default=default_llm_host,
298
  help=f"llm server host URL (default: from env or {default_llm_host})",
299
  )
300
-
301
- default_llm_api_key = get_env_value(
302
- "LLM_BINDING_API_KEY", None
303
- )
304
-
305
  parser.add_argument(
306
  "--llm-binding-api-key",
307
  default=default_llm_api_key,
308
- help=f"llm server API key (default: from env or empty string)",
309
  )
310
 
311
  parser.add_argument(
@@ -323,14 +321,12 @@ def parse_args() -> argparse.Namespace:
323
  default=default_embedding_host,
324
  help=f"embedding server host URL (default: from env or {default_embedding_host})",
325
  )
326
-
327
- default_embedding_api_key = get_env_value(
328
- "EMBEDDING_BINDING_API_KEY", ""
329
- )
330
  parser.add_argument(
331
  "--embedding-binding-api-key",
332
  default=default_embedding_api_key,
333
- help=f"embedding server API key (default: from env or empty string)",
334
  )
335
 
336
  parser.add_argument(
@@ -649,26 +645,26 @@ def create_app(args):
649
  texts,
650
  embed_model=args.embedding_model,
651
  host=args.embedding_binding_host,
652
- api_key = args.embedding_binding_api_key
653
  )
654
  if args.embedding_binding == "lollms"
655
  else ollama_embed(
656
  texts,
657
  embed_model=args.embedding_model,
658
  host=args.embedding_binding_host,
659
- api_key = args.embedding_binding_api_key
660
  )
661
  if args.embedding_binding == "ollama"
662
  else azure_openai_embedding(
663
  texts,
664
  model=args.embedding_model, # no host is used for openai,
665
- api_key = args.embedding_binding_api_key
666
  )
667
  if args.embedding_binding == "azure_openai"
668
  else openai_embedding(
669
  texts,
670
  model=args.embedding_model, # no host is used for openai,
671
- api_key = args.embedding_binding_api_key
672
  ),
673
  )
674
 
@@ -686,7 +682,7 @@ def create_app(args):
686
  "host": args.llm_binding_host,
687
  "timeout": args.timeout,
688
  "options": {"num_ctx": args.max_tokens},
689
- "api_key": args.llm_binding_api_key
690
  },
691
  embedding_func=embedding_func,
692
  )
 
297
  default=default_llm_host,
298
  help=f"llm server host URL (default: from env or {default_llm_host})",
299
  )
300
+
301
+ default_llm_api_key = get_env_value("LLM_BINDING_API_KEY", None)
302
+
 
 
303
  parser.add_argument(
304
  "--llm-binding-api-key",
305
  default=default_llm_api_key,
306
+ help="llm server API key (default: from env or empty string)",
307
  )
308
 
309
  parser.add_argument(
 
321
  default=default_embedding_host,
322
  help=f"embedding server host URL (default: from env or {default_embedding_host})",
323
  )
324
+
325
+ default_embedding_api_key = get_env_value("EMBEDDING_BINDING_API_KEY", "")
 
 
326
  parser.add_argument(
327
  "--embedding-binding-api-key",
328
  default=default_embedding_api_key,
329
+ help="embedding server API key (default: from env or empty string)",
330
  )
331
 
332
  parser.add_argument(
 
645
  texts,
646
  embed_model=args.embedding_model,
647
  host=args.embedding_binding_host,
648
+ api_key=args.embedding_binding_api_key,
649
  )
650
  if args.embedding_binding == "lollms"
651
  else ollama_embed(
652
  texts,
653
  embed_model=args.embedding_model,
654
  host=args.embedding_binding_host,
655
+ api_key=args.embedding_binding_api_key,
656
  )
657
  if args.embedding_binding == "ollama"
658
  else azure_openai_embedding(
659
  texts,
660
  model=args.embedding_model, # no host is used for openai,
661
+ api_key=args.embedding_binding_api_key,
662
  )
663
  if args.embedding_binding == "azure_openai"
664
  else openai_embedding(
665
  texts,
666
  model=args.embedding_model, # no host is used for openai,
667
+ api_key=args.embedding_binding_api_key,
668
  ),
669
  )
670
 
 
682
  "host": args.llm_binding_host,
683
  "timeout": args.timeout,
684
  "options": {"num_ctx": args.max_tokens},
685
+ "api_key": args.llm_binding_api_key,
686
  },
687
  embedding_func=embedding_func,
688
  )
lightrag/llm.py CHANGED
@@ -349,8 +349,8 @@ async def ollama_model_if_cache(
349
  host = kwargs.pop("host", None)
350
  timeout = kwargs.pop("timeout", None)
351
  kwargs.pop("hashing_kv", None)
352
- api_key = kwargs.pop("api_key", None)
353
- headers={'Authorization': f'Bearer {api_key}'} if api_key else None
354
  ollama_client = ollama.AsyncClient(host=host, timeout=timeout, headers=headers)
355
  messages = []
356
  if system_prompt:
@@ -382,8 +382,8 @@ async def lollms_model_if_cache(
382
  """Client implementation for lollms generation."""
383
 
384
  stream = True if kwargs.get("stream") else False
385
- api_key = kwargs.pop("api_key", None)
386
- headers={'Authorization': f'Bearer {api_key}'} if api_key else None
387
 
388
  # Extract lollms specific parameters
389
  request_data = {
@@ -412,7 +412,7 @@ async def lollms_model_if_cache(
412
  request_data["prompt"] = full_prompt
413
  timeout = aiohttp.ClientTimeout(total=kwargs.get("timeout", None))
414
 
415
- async with aiohttp.ClientSession(timeout=timeout,headers=headers) as session:
416
  if stream:
417
 
418
  async def inner():
@@ -626,7 +626,12 @@ async def nvidia_openai_complete(
626
 
627
 
628
  async def azure_openai_complete(
629
- model: str = "gpt-4o-mini", prompt="", system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 
 
 
 
 
630
  ) -> str:
631
  keyword_extraction = kwargs.pop("keyword_extraction", None)
632
  result = await azure_openai_complete_if_cache(
@@ -1152,9 +1157,13 @@ async def ollama_embedding(texts: list[str], embed_model, **kwargs) -> np.ndarra
1152
 
1153
 
1154
  async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
1155
- api_key = kwargs.pop("api_key",None)
1156
- headers = {"Authorization": api_key, "Content-Type": "application/json"} if api_key else None
1157
- kwargs["headers"]=headers
 
 
 
 
1158
  ollama_client = ollama.Client(**kwargs)
1159
  data = ollama_client.embed(model=embed_model, input=texts)
1160
  return data["embeddings"]
@@ -1175,15 +1184,20 @@ async def lollms_embed(
1175
  Returns:
1176
  np.ndarray: Array of embeddings
1177
  """
1178
- api_key = kwargs.pop("api_key",None)
1179
- headers = {"Authorization": api_key, "Content-Type": "application/json"} if api_key else None
 
 
 
 
1180
  async with aiohttp.ClientSession(headers=headers) as session:
1181
  embeddings = []
1182
  for text in texts:
1183
  request_data = {"text": text}
1184
 
1185
  async with session.post(
1186
- f"{base_url}/lollms_embed", json=request_data,
 
1187
  ) as response:
1188
  result = await response.json()
1189
  embeddings.append(result["vector"])
 
349
  host = kwargs.pop("host", None)
350
  timeout = kwargs.pop("timeout", None)
351
  kwargs.pop("hashing_kv", None)
352
+ api_key = kwargs.pop("api_key", None)
353
+ headers = {"Authorization": f"Bearer {api_key}"} if api_key else None
354
  ollama_client = ollama.AsyncClient(host=host, timeout=timeout, headers=headers)
355
  messages = []
356
  if system_prompt:
 
382
  """Client implementation for lollms generation."""
383
 
384
  stream = True if kwargs.get("stream") else False
385
+ api_key = kwargs.pop("api_key", None)
386
+ headers = {"Authorization": f"Bearer {api_key}"} if api_key else None
387
 
388
  # Extract lollms specific parameters
389
  request_data = {
 
412
  request_data["prompt"] = full_prompt
413
  timeout = aiohttp.ClientTimeout(total=kwargs.get("timeout", None))
414
 
415
+ async with aiohttp.ClientSession(timeout=timeout, headers=headers) as session:
416
  if stream:
417
 
418
  async def inner():
 
626
 
627
 
628
  async def azure_openai_complete(
629
+ model: str = "gpt-4o-mini",
630
+ prompt="",
631
+ system_prompt=None,
632
+ history_messages=[],
633
+ keyword_extraction=False,
634
+ **kwargs,
635
  ) -> str:
636
  keyword_extraction = kwargs.pop("keyword_extraction", None)
637
  result = await azure_openai_complete_if_cache(
 
1157
 
1158
 
1159
  async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
1160
+ api_key = kwargs.pop("api_key", None)
1161
+ headers = (
1162
+ {"Authorization": api_key, "Content-Type": "application/json"}
1163
+ if api_key
1164
+ else None
1165
+ )
1166
+ kwargs["headers"] = headers
1167
  ollama_client = ollama.Client(**kwargs)
1168
  data = ollama_client.embed(model=embed_model, input=texts)
1169
  return data["embeddings"]
 
1184
  Returns:
1185
  np.ndarray: Array of embeddings
1186
  """
1187
+ api_key = kwargs.pop("api_key", None)
1188
+ headers = (
1189
+ {"Authorization": api_key, "Content-Type": "application/json"}
1190
+ if api_key
1191
+ else None
1192
+ )
1193
  async with aiohttp.ClientSession(headers=headers) as session:
1194
  embeddings = []
1195
  for text in texts:
1196
  request_data = {"text": text}
1197
 
1198
  async with session.post(
1199
+ f"{base_url}/lollms_embed",
1200
+ json=request_data,
1201
  ) as response:
1202
  result = await response.json()
1203
  embeddings.append(result["vector"])