Update README.md
Changed files: README-zh.md (+13 -9), README.md (+13 -9)

Both READMEs receive the same update: the two installation snippets now copy env.example to .env and launch lightrag-server right after the pip install step. The remaining hunks only touch blank lines in the PostgreSQL/AGE index block and in the RAG-Anything integration example (whitespace-level changes with no visible content difference).
README-zh.md
````diff
@@ -90,6 +90,8 @@ The LightRAG Server is designed to provide Web UI and API support. The Web UI facilitates document indexing, …
 
 ```bash
 pip install "lightrag-hku[api]"
+cp env.example .env
+lightrag-server
 ```
 
 * Installation from Source
````
````diff
@@ -100,6 +102,8 @@ cd LightRAG
 # Create a Python virtual environment if necessary
 # Install in editable mode with API support
 pip install -e ".[api]"
+cp env.example .env
+lightrag-server
 ```
 
 * Launching the LightRAG Server with Docker Compose
````
The remaining README-zh.md hunks (@@ -820,7 +824,7 @@, @@ -1166,17 +1170,17 @@, @@ -1199,10 +1203,10 @@ and @@ -1231,20 +1235,20 @@) only touch blank lines inside the PostgreSQL/AGE index block and the RAG-Anything integration example. They are the same whitespace-only edits shown in full for README.md below.
README.md
````diff
@@ -89,6 +89,8 @@ The LightRAG Server is designed to provide Web UI and API support. The Web UI facilitates document indexing, …
 
 ```bash
 pip install "lightrag-hku[api]"
+cp env.example .env
+lightrag-server
 ```
 
 * Installation from Source
````
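The new quick-start implies a configure-then-launch flow: install, copy the sample configuration, edit it, then start the server. The sketch below spells that out; the variable names in the comments (LLM_BINDING, LLM_MODEL, EMBEDDING_BINDING, EMBEDDING_MODEL, OPENAI_API_KEY) and the port are assumptions to be checked against the env.example that ships with the LightRAG repository, not values confirmed by this diff. Note also that env.example comes from the repository, so a pure PyPI install needs that file fetched separately.

```bash
# Sketch only: the variable names below are illustrative -- confirm them in env.example.
pip install "lightrag-hku[api]"
cp env.example .env

# Edit .env before the first launch, e.g. for an OpenAI-compatible backend:
#   LLM_BINDING=openai
#   LLM_MODEL=gpt-4o-mini
#   EMBEDDING_BINDING=openai
#   EMBEDDING_MODEL=text-embedding-3-large
#   OPENAI_API_KEY=sk-...

lightrag-server
# The Web UI and API listen on the host/port configured in .env
# (often http://localhost:9621 -- verify against your own .env).
```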
````diff
@@ -99,6 +101,8 @@ cd LightRAG
 # create a Python virtual environment if necessary
 # Install in editable mode with API support
 pip install -e ".[api]"
+cp env.example .env
+lightrag-server
 ```
 
 * Launching the LightRAG Server with Docker Compose
````
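The "create a Python virtual environment if necessary" comment can be expanded into explicit commands. This is a generic sketch using the standard venv module and the HKUDS/LightRAG repository URL; neither is prescribed by the diff itself.

```bash
# Sketch: clone, isolate the editable install in a venv, then configure and launch.
git clone https://github.com/HKUDS/LightRAG.git
cd LightRAG

python -m venv .venv
source .venv/bin/activate        # on Windows: .venv\Scripts\activate

pip install -e ".[api]"
cp env.example .env
lightrag-server
```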
````diff
@@ -792,7 +796,7 @@ For production level scenarios you will most likely want to leverage an enterprise …
 create INDEX CONCURRENTLY entity_idx_node_id ON dickens."Entity" (ag_catalog.agtype_access_operator(properties, '"node_id"'::agtype));
 CREATE INDEX CONCURRENTLY entity_node_id_gin_idx ON dickens."Entity" using gin(properties);
 ALTER TABLE dickens."DIRECTED" CLUSTER ON directed_sid_idx;
-
+
 -- drop if necessary
 drop INDEX entity_p_idx;
 drop INDEX vertex_p_idx;
````
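This hunk leaves the Apache AGE index statements themselves untouched. If you want to apply them, one plausible route is psql; the host, user, database name and the indexes.sql file below are placeholders, not anything specified in the README. CREATE INDEX CONCURRENTLY cannot run inside a transaction block, so the statements should be executed with psql's default autocommit rather than with -1/--single-transaction.

```bash
# Placeholders throughout: save the CREATE INDEX / ALTER TABLE statements from the
# README into indexes.sql, then run them with autocommit (required for CONCURRENTLY).
psql -h localhost -U postgres -d lightrag -f indexes.sql

# Spot-check the resulting indexes on the graph schema ("dickens" in the README):
psql -h localhost -U postgres -d lightrag \
  -c "SELECT indexname FROM pg_indexes WHERE schemaname = 'dickens';"
```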
````diff
@@ -1180,17 +1184,17 @@ LightRAG now seamlessly integrates with [RAG-Anything](https://github.com/HKUDS/RAG-Anything) …
 from lightrag.llm.openai import openai_complete_if_cache, openai_embed
 from lightrag.utils import EmbeddingFunc
 import os
-
+
 async def load_existing_lightrag():
     # First, create or load an existing LightRAG instance
     lightrag_working_dir = "./existing_lightrag_storage"
-
+
     # Check if previous LightRAG instance exists
     if os.path.exists(lightrag_working_dir) and os.listdir(lightrag_working_dir):
         print("✅ Found existing LightRAG instance, loading...")
     else:
         print("❌ No existing LightRAG instance found, will create new one")
-
+
     # Create/Load LightRAG instance with your configurations
     lightrag_instance = LightRAG(
         working_dir=lightrag_working_dir,
@@ -1213,10 +1217,10 @@
             ),
         )
     )
-
+
     # Initialize storage (this will load existing data if available)
     await lightrag_instance.initialize_storages()
-
+
     # Now initialize RAGAnything with the existing LightRAG instance
     rag = RAGAnything(
         lightrag=lightrag_instance, # Pass the existing LightRAG instance
@@ -1245,20 +1249,20 @@
         )
         # Note: working_dir, llm_model_func, embedding_func, etc. are inherited from lightrag_instance
     )
-
+
     # Query the existing knowledge base
     result = await rag.query_with_multimodal(
         "What data has been processed in this LightRAG instance?",
         mode="hybrid"
     )
     print("Query result:", result)
-
+
     # Add new multimodal documents to the existing LightRAG instance
     await rag.process_document_complete(
         file_path="path/to/new/multimodal_document.pdf",
         output_dir="./output"
     )
-
+
 if __name__ == "__main__":
     asyncio.run(load_existing_lightrag())
 ```
````
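The three hunks above only reflow blank lines inside the RAG-Anything integration example; the code itself is unchanged. As a rough sketch of how that example could be run end to end, the following assumes the RAG-Anything package is published on PyPI as raganything and that the snippet is saved as load_existing_lightrag.py -- both are assumptions to verify against the RAG-Anything repository.

```bash
# Assumptions: package name "raganything", the example saved as load_existing_lightrag.py,
# and an OpenAI key, since the example uses openai_complete_if_cache / openai_embed.
pip install "lightrag-hku[api]" raganything
export OPENAI_API_KEY="sk-..."
python load_existing_lightrag.py
```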