yangdx
committed on
Commit
·
b5cae37
1
Parent(s):
424b7ca
Increase embedding priority for query request
Browse files
- lightrag/kg/chroma_impl.py +3 -1
- lightrag/kg/faiss_impl.py +3 -1
- lightrag/kg/milvus_impl.py +3 -1
- lightrag/kg/mongo_impl.py +3 -1
- lightrag/kg/nano_vector_db_impl.py +4 -2
- lightrag/kg/postgres_impl.py +3 -1
- lightrag/kg/qdrant_impl.py +3 -1
- lightrag/kg/tidb_impl.py +3 -1
lightrag/kg/chroma_impl.py
CHANGED
@@ -161,7 +161,9 @@ class ChromaVectorDBStorage(BaseVectorStorage):
|
|
161 |
self, query: str, top_k: int, ids: list[str] | None = None
|
162 |
) -> list[dict[str, Any]]:
|
163 |
try:
|
164 |
-
embedding = await self.embedding_func(
|
|
|
|
|
165 |
|
166 |
results = self._collection.query(
|
167 |
query_embeddings=embedding.tolist()
|
|
|
161 |
self, query: str, top_k: int, ids: list[str] | None = None
|
162 |
) -> list[dict[str, Any]]:
|
163 |
try:
|
164 |
+
embedding = await self.embedding_func(
|
165 |
+
[query], _priority=5
|
166 |
+
) # higher priority for query
|
167 |
|
168 |
results = self._collection.query(
|
169 |
query_embeddings=embedding.tolist()
|
lightrag/kg/faiss_impl.py
CHANGED
@@ -175,7 +175,9 @@ class FaissVectorDBStorage(BaseVectorStorage):
|
|
175 |
"""
|
176 |
Search by a textual query; returns top_k results with their metadata + similarity distance.
|
177 |
"""
|
178 |
-
embedding = await self.embedding_func(
|
|
|
|
|
179 |
# embedding is shape (1, dim)
|
180 |
embedding = np.array(embedding, dtype=np.float32)
|
181 |
faiss.normalize_L2(embedding) # we do in-place normalization
|
|
|
175 |
"""
|
176 |
Search by a textual query; returns top_k results with their metadata + similarity distance.
|
177 |
"""
|
178 |
+
embedding = await self.embedding_func(
|
179 |
+
[query], _priority=5
|
180 |
+
) # higher priority for query
|
181 |
# embedding is shape (1, dim)
|
182 |
embedding = np.array(embedding, dtype=np.float32)
|
183 |
faiss.normalize_L2(embedding) # we do in-place normalization
|
lightrag/kg/milvus_impl.py
CHANGED
@@ -104,7 +104,9 @@ class MilvusVectorDBStorage(BaseVectorStorage):
|
|
104 |
async def query(
|
105 |
self, query: str, top_k: int, ids: list[str] | None = None
|
106 |
) -> list[dict[str, Any]]:
|
107 |
-
embedding = await self.embedding_func(
|
|
|
|
|
108 |
results = self._client.search(
|
109 |
collection_name=self.namespace,
|
110 |
data=embedding,
|
|
|
104 |
async def query(
|
105 |
self, query: str, top_k: int, ids: list[str] | None = None
|
106 |
) -> list[dict[str, Any]]:
|
107 |
+
embedding = await self.embedding_func(
|
108 |
+
[query], _priority=5
|
109 |
+
) # higher priority for query
|
110 |
results = self._client.search(
|
111 |
collection_name=self.namespace,
|
112 |
data=embedding,
|
lightrag/kg/mongo_impl.py
CHANGED
@@ -1032,7 +1032,9 @@ class MongoVectorDBStorage(BaseVectorStorage):
|
|
1032 |
) -> list[dict[str, Any]]:
|
1033 |
"""Queries the vector database using Atlas Vector Search."""
|
1034 |
# Generate the embedding
|
1035 |
-
embedding = await self.embedding_func(
|
|
|
|
|
1036 |
|
1037 |
# Convert numpy array to a list to ensure compatibility with MongoDB
|
1038 |
query_vector = embedding[0].tolist()
|
|
|
1032 |
) -> list[dict[str, Any]]:
|
1033 |
"""Queries the vector database using Atlas Vector Search."""
|
1034 |
# Generate the embedding
|
1035 |
+
embedding = await self.embedding_func(
|
1036 |
+
[query], _priority=5
|
1037 |
+
) # higher priority for query
|
1038 |
|
1039 |
# Convert numpy array to a list to ensure compatibility with MongoDB
|
1040 |
query_vector = embedding[0].tolist()
|
lightrag/kg/nano_vector_db_impl.py
CHANGED
@@ -124,8 +124,10 @@ class NanoVectorDBStorage(BaseVectorStorage):
|
|
124 |
async def query(
|
125 |
self, query: str, top_k: int, ids: list[str] | None = None
|
126 |
) -> list[dict[str, Any]]:
|
127 |
-
# Execute embedding outside of lock to avoid
|
128 |
-
embedding = await self.embedding_func(
|
|
|
|
|
129 |
embedding = embedding[0]
|
130 |
|
131 |
client = await self._get_client()
|
|
|
124 |
async def query(
|
125 |
self, query: str, top_k: int, ids: list[str] | None = None
|
126 |
) -> list[dict[str, Any]]:
|
127 |
+
# Execute embedding outside of lock to improve concurrency
|
128 |
+
embedding = await self.embedding_func(
|
129 |
+
[query], _priority=5
|
130 |
+
) # higher priority for query
|
131 |
embedding = embedding[0]
|
132 |
|
133 |
client = await self._get_client()
|
lightrag/kg/postgres_impl.py
CHANGED
@@ -644,7 +644,9 @@ class PGVectorStorage(BaseVectorStorage):
|
|
644 |
async def query(
|
645 |
self, query: str, top_k: int, ids: list[str] | None = None
|
646 |
) -> list[dict[str, Any]]:
|
647 |
-
embeddings = await self.embedding_func(
|
|
|
|
|
648 |
embedding = embeddings[0]
|
649 |
embedding_string = ",".join(map(str, embedding))
|
650 |
# Use parameterized document IDs (None means search across all documents)
|
|
|
644 |
async def query(
|
645 |
self, query: str, top_k: int, ids: list[str] | None = None
|
646 |
) -> list[dict[str, Any]]:
|
647 |
+
embeddings = await self.embedding_func(
|
648 |
+
[query], _priority=5
|
649 |
+
) # higher priority for query
|
650 |
embedding = embeddings[0]
|
651 |
embedding_string = ",".join(map(str, embedding))
|
652 |
# Use parameterized document IDs (None means search across all documents)
|
lightrag/kg/qdrant_impl.py
CHANGED
@@ -124,7 +124,9 @@ class QdrantVectorDBStorage(BaseVectorStorage):
|
|
124 |
async def query(
|
125 |
self, query: str, top_k: int, ids: list[str] | None = None
|
126 |
) -> list[dict[str, Any]]:
|
127 |
-
embedding = await self.embedding_func(
|
|
|
|
|
128 |
results = self._client.search(
|
129 |
collection_name=self.namespace,
|
130 |
query_vector=embedding[0],
|
|
|
124 |
async def query(
|
125 |
self, query: str, top_k: int, ids: list[str] | None = None
|
126 |
) -> list[dict[str, Any]]:
|
127 |
+
embedding = await self.embedding_func(
|
128 |
+
[query], _priority=5
|
129 |
+
) # higher priority for query
|
130 |
results = self._client.search(
|
131 |
collection_name=self.namespace,
|
132 |
query_vector=embedding[0],
|
lightrag/kg/tidb_impl.py
CHANGED
@@ -390,7 +390,9 @@ class TiDBVectorDBStorage(BaseVectorStorage):
|
|
390 |
self, query: str, top_k: int, ids: list[str] | None = None
|
391 |
) -> list[dict[str, Any]]:
|
392 |
"""Search from tidb vector"""
|
393 |
-
embeddings = await self.embedding_func(
|
|
|
|
|
394 |
embedding = embeddings[0]
|
395 |
|
396 |
embedding_string = "[" + ", ".join(map(str, embedding.tolist())) + "]"
|
|
|
390 |
self, query: str, top_k: int, ids: list[str] | None = None
|
391 |
) -> list[dict[str, Any]]:
|
392 |
"""Search from tidb vector"""
|
393 |
+
embeddings = await self.embedding_func(
|
394 |
+
[query], _priority=5
|
395 |
+
) # higher priority for query
|
396 |
embedding = embeddings[0]
|
397 |
|
398 |
embedding_string = "[" + ", ".join(map(str, embedding.tolist())) + "]"
|