LarFii
committed on
Commit
·
8f067b7
1
Parent(s):
2678ed8
fix bug
Browse files- examples/lightrag_hf_demo.py +5 -3
- examples/lightrag_openai_demo.py +1 -1
- lightrag/__init__.py +1 -1
- lightrag/llm.py +0 -5
examples/lightrag_hf_demo.py
CHANGED
@@ -16,11 +16,13 @@ rag = LightRAG(
|
|
16 |
llm_model_func=hf_model_complete,
|
17 |
llm_model_name='meta-llama/Llama-3.1-8B-Instruct',
|
18 |
embedding_func=EmbeddingFunc(
|
19 |
-
tokenizer=AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2"),
|
20 |
-
embed_model=AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2"),
|
21 |
embedding_dim=384,
|
22 |
max_token_size=5000,
|
23 |
-
func=hf_embedding
|
|
|
|
|
|
|
|
|
24 |
),
|
25 |
)
|
26 |
|
|
|
16 |
llm_model_func=hf_model_complete,
|
17 |
llm_model_name='meta-llama/Llama-3.1-8B-Instruct',
|
18 |
embedding_func=EmbeddingFunc(
|
|
|
|
|
19 |
embedding_dim=384,
|
20 |
max_token_size=5000,
|
21 |
+
func=lambda texts: hf_embedding(
|
22 |
+
texts,
|
23 |
+
tokenizer=AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2"),
|
24 |
+
embed_model=AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
|
25 |
+
)
|
26 |
),
|
27 |
)
|
28 |
|
examples/lightrag_openai_demo.py
CHANGED
@@ -5,7 +5,7 @@ from lightrag import LightRAG, QueryParam
|
|
5 |
from lightrag.llm import gpt_4o_mini_complete, gpt_4o_complete
|
6 |
from transformers import AutoModel,AutoTokenizer
|
7 |
|
8 |
-
WORKING_DIR = "
|
9 |
|
10 |
if not os.path.exists(WORKING_DIR):
|
11 |
os.mkdir(WORKING_DIR)
|
|
|
5 |
from lightrag.llm import gpt_4o_mini_complete, gpt_4o_complete
|
6 |
from transformers import AutoModel,AutoTokenizer
|
7 |
|
8 |
+
WORKING_DIR = "./dickens"
|
9 |
|
10 |
if not os.path.exists(WORKING_DIR):
|
11 |
os.mkdir(WORKING_DIR)
|
lightrag/__init__.py
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
from .lightrag import LightRAG, QueryParam
|
2 |
|
3 |
-
__version__ = "0.0.4"
|
4 |
__author__ = "Zirui Guo"
|
5 |
__url__ = "https://github.com/HKUDS/LightRAG"
|
|
|
1 |
from .lightrag import LightRAG, QueryParam
|
2 |
|
3 |
+
__version__ = "0.0.5"
|
4 |
__author__ = "Zirui Guo"
|
5 |
__url__ = "https://github.com/HKUDS/LightRAG"
|
lightrag/llm.py
CHANGED
@@ -141,11 +141,6 @@ async def openai_embedding(texts: list[str]) -> np.ndarray:
|
|
141 |
return np.array([dp.embedding for dp in response.data])
|
142 |
|
143 |
|
144 |
-
|
145 |
-
@wrap_embedding_func_with_attrs(
|
146 |
-
embedding_dim=384,
|
147 |
-
max_token_size=5000,
|
148 |
-
)
|
149 |
async def hf_embedding(texts: list[str], tokenizer, embed_model) -> np.ndarray:
|
150 |
input_ids = tokenizer(texts, return_tensors='pt', padding=True, truncation=True).input_ids
|
151 |
with torch.no_grad():
|
|
|
141 |
return np.array([dp.embedding for dp in response.data])
|
142 |
|
143 |
|
|
|
|
|
|
|
|
|
|
|
144 |
async def hf_embedding(texts: list[str], tokenizer, embed_model) -> np.ndarray:
|
145 |
input_ids = tokenizer(texts, return_tensors='pt', padding=True, truncation=True).input_ids
|
146 |
with torch.no_grad():
|