Merge pull request #1750 from danielaskdd/embedding-batch-size
Browse files- env.example +1 -1
- lightrag/lightrag.py +1 -1
env.example
CHANGED
@@ -96,7 +96,7 @@ EMBEDDING_BINDING_API_KEY=your_api_key
|
|
96 |
# If the embedding service is deployed within the same Docker stack, use host.docker.internal instead of localhost
|
97 |
EMBEDDING_BINDING_HOST=http://localhost:11434
|
98 |
### Num of chunks send to Embedding in single request
|
99 |
-
# EMBEDDING_BATCH_NUM=32
|
100 |
### Max concurrency requests for Embedding
|
101 |
# EMBEDDING_FUNC_MAX_ASYNC=16
|
102 |
### Maximum tokens sent to Embedding for each chunk (no longer in use?)
|
|
|
96 |
# If the embedding service is deployed within the same Docker stack, use host.docker.internal instead of localhost
|
97 |
EMBEDDING_BINDING_HOST=http://localhost:11434
|
98 |
### Num of chunks send to Embedding in single request
|
99 |
+
# EMBEDDING_BATCH_NUM=10
|
100 |
### Max concurrency requests for Embedding
|
101 |
# EMBEDDING_FUNC_MAX_ASYNC=16
|
102 |
### Maximum tokens sent to Embedding for each chunk (no longer in use?)
|
lightrag/lightrag.py
CHANGED
@@ -201,7 +201,7 @@ class LightRAG:
|
|
201 |
embedding_func: EmbeddingFunc | None = field(default=None)
|
202 |
"""Function for computing text embeddings. Must be set before use."""
|
203 |
|
204 |
-
embedding_batch_num: int = field(default=int(os.getenv("EMBEDDING_BATCH_NUM", 32)))
|
205 |
"""Batch size for embedding computations."""
|
206 |
|
207 |
embedding_func_max_async: int = field(
|
|
|
201 |
embedding_func: EmbeddingFunc | None = field(default=None)
|
202 |
"""Function for computing text embeddings. Must be set before use."""
|
203 |
|
204 |
+
embedding_batch_num: int = field(default=int(os.getenv("EMBEDDING_BATCH_NUM", 10)))
|
205 |
"""Batch size for embedding computations."""
|
206 |
|
207 |
embedding_func_max_async: int = field(
|