Larfii committed
Commit 90bbd37
1 Parent(s): bd50a3a
Files changed (2)
  1. lightrag/lightrag.py +1 -5
  2. lightrag/llm.py +2 -15
lightrag/lightrag.py CHANGED
@@ -97,11 +97,7 @@ class LightRAG:
     addon_params: dict = field(default_factory=dict)
     convert_response_to_json_func: callable = convert_response_to_json
 
-    def __post_init__(self):
-        # use proxy
-        os.environ['http_proxy'] = 'http://127.0.0.1:7890'
-        os.environ['https_proxy'] = 'http://127.0.0.1:7890'
-
+    def __post_init__(self):
         log_file = os.path.join(self.working_dir, "lightrag.log")
         set_logger(log_file)
         logger.info(f"Logger initialized for working directory: {self.working_dir}")
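
Note: the hardcoded proxy was dropped from __post_init__, so LightRAG no longer forces traffic through 127.0.0.1:7890. Anyone who still needs a proxy can set it in their own environment before constructing the client. A minimal sketch, assuming LightRAG is importable from the package root and that working_dir is the only argument needed (both are assumptions, not part of this commit):

    import os

    # Optional: route HTTP(S) traffic through your own proxy.
    # The address below is only an example; the library no longer sets one for you.
    os.environ["http_proxy"] = "http://127.0.0.1:7890"
    os.environ["https_proxy"] = "http://127.0.0.1:7890"

    from lightrag import LightRAG  # assumed re-export at the package root

    rag = LightRAG(working_dir="./lightrag_cache")  # working_dir value is illustrative
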
lightrag/llm.py CHANGED
@@ -17,7 +17,7 @@ from .utils import compute_args_hash, wrap_embedding_func_with_attrs
     retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
 )
 async def openai_complete_if_cache(
-    model, prompt, api_key='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'
+    model, prompt, api_key=''
     , system_prompt=None, history_messages=[], **kwargs
 ) -> str:
     openai_async_client = AsyncOpenAI(api_key=api_key)
@@ -72,26 +72,13 @@ async def gpt_4o_mini_complete(
     wait=wait_exponential(multiplier=1, min=4, max=10),
     retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
 )
-async def openai_embedding(texts: list[str]) -> np.ndarray:
-    api_key = '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'
+async def openai_embedding(texts: list[str], api_key='') -> np.ndarray:
     openai_async_client = AsyncOpenAI(api_key=api_key)
     response = await openai_async_client.embeddings.create(
         model="text-embedding-3-small", input=texts, encoding_format="float"
     )
     return np.array([dp.embedding for dp in response.data])
 
-async def moonshot_complete(
-    prompt, system_prompt=None, history_messages=[], **kwargs
-) -> str:
-    return await openai_complete_if_cache(
-        "moonshot-v1-128k",
-        prompt,
-        api_key='sk-OsvLvHgFFH3tz6Yhym3OAhcTfZ9y7rHEgQ3JDLmnuLpTw9C0',
-        system_prompt=system_prompt,
-        history_messages=history_messages,
-        **kwargs,
-    )
-
 if __name__ == "__main__":
     import asyncio
 
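
Note: with the hardcoded credentials removed, api_key now defaults to an empty string in both openai_complete_if_cache and openai_embedding, so callers have to supply their own key. A minimal usage sketch, assuming the key lives in an OPENAI_API_KEY environment variable and the functions are imported from lightrag.llm; the model name and prompts are illustrative:

    import os
    import asyncio

    from lightrag.llm import openai_complete_if_cache, openai_embedding

    async def main():
        # No key ships with the library any more; supply your own.
        api_key = os.environ["OPENAI_API_KEY"]

        answer = await openai_complete_if_cache(
            "gpt-4o-mini",                      # model name is illustrative
            "Summarize what LightRAG does.",
            api_key=api_key,
            system_prompt="You are a concise assistant.",
        )
        vectors = await openai_embedding(["hello", "world"], api_key=api_key)

        print(answer)
        print(vectors.shape)  # one 1536-dim row per input for text-embedding-3-small

    asyncio.run(main())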