Milin commited on
Commit
4a586b4
·
2 Parent(s): d95261a 91c5685

Merge branch 'main' of github.com:lcjqyml/LightRAG

Browse files
Files changed (42) hide show
  1. README.md +23 -1
  2. env.example +10 -12
  3. lightrag/__init__.py +1 -1
  4. lightrag/api/README.assets/image-20250323122538997.png +3 -0
  5. lightrag/api/README.assets/image-20250323122754387.png +3 -0
  6. lightrag/api/README.assets/image-20250323123011220.png +3 -0
  7. lightrag/api/README.assets/image-20250323194750379.png +3 -0
  8. lightrag/api/README.md +156 -146
  9. lightrag/api/__init__.py +1 -1
  10. lightrag/api/auth.py +3 -0
  11. lightrag/api/gunicorn_config.py +3 -1
  12. lightrag/api/lightrag_server.py +25 -4
  13. lightrag/api/routers/document_routes.py +31 -11
  14. lightrag/api/run_with_gunicorn.py +2 -2
  15. lightrag/api/utils_api.py +12 -7
  16. lightrag/api/webui/assets/index-BSOt8Nur.css +0 -0
  17. lightrag/api/webui/assets/index-Cq65VeVX.css +0 -0
  18. lightrag/api/webui/assets/{index-4I5HV9Fr.js → index-DlScqWrq.js} +0 -0
  19. lightrag/api/webui/index.html +0 -0
  20. lightrag/kg/shared_storage.py +93 -2
  21. lightrag/lightrag.py +3 -1
  22. lightrag/llm/anthropic.py +311 -0
  23. lightrag_webui/src/App.tsx +59 -12
  24. lightrag_webui/src/AppRouter.tsx +17 -118
  25. lightrag_webui/src/api/lightrag.ts +11 -2
  26. lightrag_webui/src/components/AppSettings.tsx +3 -1
  27. lightrag_webui/src/components/graph/GraphLabels.tsx +38 -71
  28. lightrag_webui/src/components/graph/LayoutsControl.tsx +2 -2
  29. lightrag_webui/src/features/LoginPage.tsx +27 -13
  30. lightrag_webui/src/features/SiteHeader.tsx +15 -4
  31. lightrag_webui/src/hooks/useLightragGraph.tsx +173 -44
  32. lightrag_webui/src/i18n.js +0 -35
  33. lightrag_webui/src/i18n.ts +28 -18
  34. lightrag_webui/src/locales/ar.json +263 -0
  35. lightrag_webui/src/locales/en.json +3 -2
  36. lightrag_webui/src/locales/fr.json +263 -0
  37. lightrag_webui/src/locales/zh.json +3 -2
  38. lightrag_webui/src/main.tsx +1 -1
  39. lightrag_webui/src/services/navigation.ts +3 -7
  40. lightrag_webui/src/stores/graph.ts +12 -1
  41. lightrag_webui/src/stores/settings.ts +1 -1
  42. lightrag_webui/src/stores/state.ts +62 -7
README.md CHANGED
@@ -77,7 +77,9 @@ This repository hosts the code of LightRAG. The structure of this code is based
77
 
78
  </details>
79
 
80
- ## Install
 
 
81
 
82
  * Install from source (Recommend)
83
 
@@ -92,6 +94,26 @@ pip install -e .
92
  pip install lightrag-hku
93
  ```
94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  ## Quick Start
96
 
97
  * [Video demo](https://www.youtube.com/watch?v=g21royNJ4fw) of running LightRAG locally.
 
77
 
78
  </details>
79
 
80
+ ## Installation
81
+
82
+ ### Install LightRAG Core
83
 
84
  * Install from source (Recommend)
85
 
 
94
  pip install lightrag-hku
95
  ```
96
 
97
+ ### Install LightRAG Server
98
+
99
+ The LightRAG Server is designed to provide Web UI and API support. The Web UI facilitates document indexing, knowledge graph exploration, and a simple RAG query interface. The LightRAG Server also provides an Ollama-compatible interface, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat bots, such as Open WebUI, to access LightRAG easily.
100
+
101
+ * Install from PyPI
102
+
103
+ ```bash
104
+ pip install "lightrag-hku[api]"
105
+ ```
106
+
107
+ * Installation from Source
108
+
109
+ ```bash
110
+ # create a Python virtual environment if necessary
111
+ # Install in editable mode with API support
112
+ pip install -e ".[api]"
113
+ ```
114
+
115
+ **For more information about LightRAG Server, please refer to [LightRAG Server](./lightrag/api/README.md).**
116
+
117
  ## Quick Start
118
 
119
  * [Video demo](https://www.youtube.com/watch?v=g21royNJ4fw) of running LightRAG locally.
env.example CHANGED
@@ -30,11 +30,6 @@
30
  # LOG_MAX_BYTES=10485760 # Log file max size in bytes, defaults to 10MB
31
  # LOG_BACKUP_COUNT=5 # Number of backup files to keep, defaults to 5
32
 
33
- ### Max async calls for LLM
34
- # MAX_ASYNC=4
35
- ### Optional Timeout for LLM
36
- # TIMEOUT=150 # Time out in seconds, None for infinite timeout
37
-
38
  ### Settings for RAG query
39
  # HISTORY_TURNS=3
40
  # COSINE_THRESHOLD=0.2
@@ -44,16 +39,21 @@
44
  # MAX_TOKEN_ENTITY_DESC=4000
45
 
46
  ### Settings for document indexing
 
47
  # CHUNK_SIZE=1200
48
  # CHUNK_OVERLAP_SIZE=100
49
- # MAX_TOKENS=32768 # Max tokens send to LLM for summarization
50
- # MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relations summary
51
- # SUMMARY_LANGUAGE=English
 
 
 
 
52
  # MAX_EMBED_TOKENS=8192
53
- # ENABLE_LLM_CACHE_FOR_EXTRACT=true # Enable LLM cache for entity extraction
54
- # MAX_PARALLEL_INSERT=2 # Maximum number of parallel processing documents in pipeline
55
 
56
  ### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
 
 
57
  LLM_BINDING=ollama
58
  LLM_MODEL=mistral-nemo:latest
59
  LLM_BINDING_API_KEY=your_api_key
@@ -73,8 +73,6 @@ LLM_BINDING_HOST=http://localhost:11434
73
  ### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
74
  EMBEDDING_MODEL=bge-m3:latest
75
  EMBEDDING_DIM=1024
76
- EMBEDDING_BATCH_NUM=32
77
- EMBEDDING_FUNC_MAX_ASYNC=16
78
  # EMBEDDING_BINDING_API_KEY=your_api_key
79
  ### ollama example
80
  EMBEDDING_BINDING=ollama
 
30
  # LOG_MAX_BYTES=10485760 # Log file max size in bytes, defaults to 10MB
31
  # LOG_BACKUP_COUNT=5 # Number of backup files to keep, defaults to 5
32
 
 
 
 
 
 
33
  ### Settings for RAG query
34
  # HISTORY_TURNS=3
35
  # COSINE_THRESHOLD=0.2
 
39
  # MAX_TOKEN_ENTITY_DESC=4000
40
 
41
  ### Settings for document indexing
42
+ # SUMMARY_LANGUAGE=English
43
  # CHUNK_SIZE=1200
44
  # CHUNK_OVERLAP_SIZE=100
45
+ # MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relations summary
46
+ # MAX_PARALLEL_INSERT=2 # Number of parallel processing documents in one patch
47
+ # MAX_ASYNC=4 # Max concurrency requests of LLM
48
+ # ENABLE_LLM_CACHE_FOR_EXTRACT=true # Enable LLM cache for entity extraction
49
+
50
+ # EMBEDDING_BATCH_NUM=32 # num of chunks send to Embedding in one request
51
+ # EMBEDDING_FUNC_MAX_ASYNC=16 # Max concurrency requests for Embedding
52
  # MAX_EMBED_TOKENS=8192
 
 
53
 
54
  ### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
55
+ # MAX_TOKENS=32768 # Max tokens send to LLM (less than context size of the model)
56
+ # TIMEOUT=150 # Time out in seconds for LLM, None for infinite timeout
57
  LLM_BINDING=ollama
58
  LLM_MODEL=mistral-nemo:latest
59
  LLM_BINDING_API_KEY=your_api_key
 
73
  ### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
74
  EMBEDDING_MODEL=bge-m3:latest
75
  EMBEDDING_DIM=1024
 
 
76
  # EMBEDDING_BINDING_API_KEY=your_api_key
77
  ### ollama example
78
  EMBEDDING_BINDING=ollama
lightrag/__init__.py CHANGED
@@ -1,5 +1,5 @@
1
  from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam
2
 
3
- __version__ = "1.2.7"
4
  __author__ = "Zirui Guo"
5
  __url__ = "https://github.com/HKUDS/LightRAG"
 
1
  from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam
2
 
3
+ __version__ = "1.2.8"
4
  __author__ = "Zirui Guo"
5
  __url__ = "https://github.com/HKUDS/LightRAG"
lightrag/api/README.assets/image-20250323122538997.png ADDED

Git LFS Details

  • SHA256: 240a42e392d71c55445585127afdb148a7ae6da720b05e0b74fa5349df98819b
  • Pointer size: 131 Bytes
  • Size of remote file: 383 kB
lightrag/api/README.assets/image-20250323122754387.png ADDED

Git LFS Details

  • SHA256: 27dc0dc1fc425c01b0d83eed33bc6e901281375b079cbeefd1a63d5f212f170a
  • Pointer size: 131 Bytes
  • Size of remote file: 365 kB
lightrag/api/README.assets/image-20250323123011220.png ADDED

Git LFS Details

  • SHA256: 575aba742f265e781c8b4e9d7c8e3c26a2a8797919a96a697576a53015ff3242
  • Pointer size: 131 Bytes
  • Size of remote file: 543 kB
lightrag/api/README.assets/image-20250323194750379.png ADDED

Git LFS Details

  • SHA256: 2f7a259dbc125235596dffa197b88caf17fafb2eda8564ecc6fd8fb108b4272c
  • Pointer size: 131 Bytes
  • Size of remote file: 390 kB
lightrag/api/README.md CHANGED
@@ -1,14 +1,24 @@
1
- ## Install LightRAG as an API Server
2
 
3
- LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG API Server in two ways:
4
 
5
- ### Installation from PyPI
 
 
 
 
 
 
 
 
 
 
6
 
7
  ```bash
8
  pip install "lightrag-hku[api]"
9
  ```
10
 
11
- ### Installation from Source (Development)
12
 
13
  ```bash
14
  # Clone the repository
@@ -22,116 +32,194 @@ cd lightrag
22
  pip install -e ".[api]"
23
  ```
24
 
25
- ### Starting API Server with Default Settings
26
-
27
- After installing LightRAG with API support, you can start LightRAG by this command: `lightrag-server`
28
 
29
- LightRAG requires both LLM and Embedding Model to work together to complete document indexing and querying tasks. LightRAG supports binding to various LLM/Embedding backends:
30
 
31
  * ollama
32
  * lollms
33
- * openai & openai compatible
34
  * azure_openai
35
 
36
- Before running any of the servers, ensure you have the corresponding backend service running for both llm and embedding.
37
- The LightRAG API Server provides default parameters for LLM and Embedding, allowing users to easily start the service through command line. These default configurations are:
38
-
39
- * Default endpoint of LLM/Embeding backend(LLM_BINDING_HOST or EMBEDDING_BINDING_HOST)
40
 
41
- ```
42
- # for lollms backend
43
- LLM_BINDING_HOST=http://localhost:11434
44
- EMBEDDING_BINDING_HOST=http://localhost:11434
45
 
46
- # for lollms backend
47
- LLM_BINDING_HOST=http://localhost:9600
48
- EMBEDDING_BINDING_HOST=http://localhost:9600
49
 
50
- # for openai, openai compatible or azure openai backend
 
 
51
  LLM_BINDING_HOST=https://api.openai.com/v1
52
- EMBEDDING_BINDING_HOST=http://localhost:9600
 
 
 
 
 
 
 
53
  ```
54
 
55
- * Default model config
56
 
57
  ```
 
58
  LLM_MODEL=mistral-nemo:latest
 
 
 
59
 
 
 
60
  EMBEDDING_MODEL=bge-m3:latest
61
  EMBEDDING_DIM=1024
62
- MAX_EMBED_TOKENS=8192
63
  ```
64
 
65
- * API keys for LLM/Embedding backend
66
 
67
- When connecting to backend require API KEY, corresponding environment variables must be provided:
 
68
 
69
  ```
70
- LLM_BINDING_API_KEY=your_api_key
71
- EMBEDDING_BINDING_API_KEY=your_api_key
72
  ```
 
73
 
74
- * Use command line arguments to choose LLM/Embeding backend
 
 
 
75
 
76
- Use `--llm-binding` to select LLM backend type, and use `--embedding-binding` to select the embedding backend type. All the supported backend types are:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
 
78
  ```
79
- openai: LLM default type
80
- ollama: Embedding defult type
81
- lollms:
82
- azure_openai:
83
- openai-ollama: select openai for LLM and ollama for embedding(only valid for --llm-binding)
84
  ```
85
 
86
- The LightRAG API Server allows you to mix different bindings for llm/embeddings. For example, you have the possibility to use ollama for the embedding and openai for the llm.With the above default parameters, you can start API Server with simple CLI arguments like these:
 
 
87
 
 
 
 
 
88
  ```
89
- # start with openai llm and ollama embedding
90
- LLM_BINDING_API_KEY=your_api_key Light_server
91
- LLM_BINDING_API_KEY=your_api_key Light_server --llm-binding openai-ollama
92
 
93
- # start with openai llm and openai embedding
94
- LLM_BINDING_API_KEY=your_api_key Light_server --llm-binding openai --embedding-binding openai
95
 
96
- # start with ollama llm and ollama embedding (no apikey is needed)
97
- light-server --llm-binding ollama --embedding-binding ollama
 
 
 
 
 
98
  ```
99
 
100
- ### Starting API Server with Gunicorn (Production)
101
 
102
- For production deployments, it's recommended to use Gunicorn as the WSGI server to handle concurrent requests efficiently. LightRAG provides a dedicated Gunicorn startup script that handles shared data initialization, process management, and other critical functionalities.
 
 
 
 
 
 
103
 
104
- ```bash
105
- # Start with lightrag-gunicorn command
106
- lightrag-gunicorn --workers 4
107
 
108
- # Alternatively, you can use the module directly
109
- python -m lightrag.api.run_with_gunicorn --workers 4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
  ```
111
 
112
- The `--workers` parameter is crucial for performance:
113
 
114
- - Determines how many worker processes Gunicorn will spawn to handle requests
115
- - Each worker can handle concurrent requests using asyncio
116
- - Recommended value is (2 x number_of_cores) + 1
117
- - For example, on a 4-core machine, use 9 workers: (2 x 4) + 1 = 9
118
- - Consider your server's memory when setting this value, as each worker consumes memory
119
 
120
- Other important startup parameters:
121
 
122
- - `--host`: Server listening address (default: 0.0.0.0)
123
- - `--port`: Server listening port (default: 9621)
124
- - `--timeout`: Request handling timeout (default: 150 seconds)
125
- - `--log-level`: Logging level (default: INFO)
126
- - `--ssl`: Enable HTTPS
127
- - `--ssl-certfile`: Path to SSL certificate file
128
- - `--ssl-keyfile`: Path to SSL private key file
129
 
130
- The command line parameters and enviroment variable run_with_gunicorn.py is exactly the same as `light-server`.
131
 
132
- ### For Azure OpenAI Backend
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
 
134
  Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)):
 
135
  ```bash
136
  # Change the resource group name, location and OpenAI resource name as needed
137
  RESOURCE_GROUP_NAME=LightRAG
@@ -147,6 +235,7 @@ az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOUR
147
  az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME
148
 
149
  ```
 
150
  The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file.
151
 
152
  ```
@@ -161,48 +250,9 @@ EMBEDDING_MODEL=your-embedding-deployment-name
161
 
162
  ```
163
 
164
- ### Install Lightrag as a Linux Service
165
 
166
- Create a your service file `lightrag.sevice` from the sample file : `lightrag.sevice.example`. Modified the WorkingDirectoryand EexecStart in the service file:
167
 
168
- ```text
169
- Description=LightRAG Ollama Service
170
- WorkingDirectory=<lightrag installed directory>
171
- ExecStart=<lightrag installed directory>/lightrag/api/lightrag-api
172
- ```
173
-
174
- Modify your service startup script: `lightrag-api`. Change you python virtual environment activation command as needed:
175
-
176
- ```shell
177
- #!/bin/bash
178
-
179
- # your python virtual environment activation
180
- source /home/netman/lightrag-xyj/venv/bin/activate
181
- # start lightrag api server
182
- lightrag-server
183
- ```
184
-
185
- Install LightRAG service. If your system is Ubuntu, the following commands will work:
186
-
187
- ```shell
188
- sudo cp lightrag.service /etc/systemd/system/
189
- sudo systemctl daemon-reload
190
- sudo systemctl start lightrag.service
191
- sudo systemctl status lightrag.service
192
- sudo systemctl enable lightrag.service
193
- ```
194
-
195
- ### Automatic Document Indexing
196
-
197
- When starting any of the servers with the `--auto-scan-at-startup` parameter, the system will automatically:
198
-
199
- 1. Scan for new files in the input directory
200
- 2. Indexing new documents that aren't already in the database
201
- 3. Make all content immediately available for RAG queries
202
-
203
- > The `--input-dir` parameter specify the input directory to scan for.
204
-
205
- ## API Server Configuration
206
 
207
  API Server can be config in three way (highest priority first):
208
 
@@ -392,19 +442,6 @@ Note: If you don't need the API functionality, you can install the base package
392
  pip install lightrag-hku
393
  ```
394
 
395
- ## Authentication Endpoints
396
-
397
- ### JWT Authentication Mechanism
398
- LightRAG API Server implements JWT-based authentication using HS256 algorithm. To enable secure access control, the following environment variables are required:
399
- ```bash
400
- # For jwt auth
401
- AUTH_USERNAME=admin # login name
402
- AUTH_PASSWORD=admin123 # password
403
- TOKEN_SECRET=your-key # JWT key
404
- TOKEN_EXPIRE_HOURS=4 # expire duration
405
- WHITELIST_PATHS=/api1,/api2 # white list. /login,/health,/docs,/redoc,/openapi.json are whitelisted by default.
406
- ```
407
-
408
  ## API Endpoints
409
 
410
  All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality. When API Server is running, visit:
@@ -528,30 +565,3 @@ Check server health and configuration.
528
  ```bash
529
  curl "http://localhost:9621/health"
530
  ```
531
-
532
- ## Ollama Emulation
533
-
534
- We provide an Ollama-compatible interfaces for LightRAG, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat frontends supporting Ollama, such as Open WebUI, to access LightRAG easily.
535
-
536
- ### Connect Open WebUI to LightRAG
537
-
538
- After starting the lightrag-server, you can add an Ollama-type connection in the Open WebUI admin pannel. And then a model named lightrag:latest will appear in Open WebUI's model management interface. Users can then send queries to LightRAG through the chat interface. You'd better install LightRAG as service for this use case.
539
-
540
- Open WebUI's use LLM to do the session title and session keyword generation task. So the Ollama chat chat completion API detects and forwards OpenWebUI session-related requests directly to underlying LLM.
541
-
542
- ### Choose Query mode in chat
543
-
544
- A query prefix in the query string can determines which LightRAG query mode is used to generate the respond for the query. The supported prefixes include:
545
-
546
- ```
547
- /local
548
- /global
549
- /hybrid
550
- /naive
551
- /mix
552
- /bypass
553
- ```
554
-
555
- For example, chat message "/mix 唐僧有几个徒弟" will trigger a mix mode query for LighRAG. A chat message without query prefix will trigger a hybrid mode query by default。
556
-
557
- "/bypass" is not a LightRAG query mode, it will tell API Server to pass the query directly to the underlying LLM with chat history. So user can use LLM to answer question base on the chat history. If you are using Open WebUI as front end, you can just switch the model to a normal LLM instead of using /bypass prefix.
 
1
+ # LightRAG Server and WebUI
2
 
3
+ The LightRAG Server is designed to provide Web UI and API support. The Web UI facilitates document indexing, knowledge graph exploration, and a simple RAG query interface. The LightRAG Server also provides an Ollama-compatible interface, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat bots, such as Open WebUI, to access LightRAG easily.
4
 
5
+ ![image-20250323122538997](./README.assets/image-20250323122538997.png)
6
+
7
+ ![image-20250323122754387](./README.assets/image-20250323122754387.png)
8
+
9
+ ![image-20250323123011220](./README.assets/image-20250323123011220.png)
10
+
11
+ ## Getting Started
12
+
13
+ ### Installation
14
+
15
+ * Install from PyPI
16
 
17
  ```bash
18
  pip install "lightrag-hku[api]"
19
  ```
20
 
21
+ * Installation from Source
22
 
23
  ```bash
24
  # Clone the repository
 
32
  pip install -e ".[api]"
33
  ```
34
 
35
+ ### Before Starting LightRAG Server
 
 
36
 
37
+ LightRAG necessitates the integration of both an LLM (Large Language Model) and an Embedding Model to effectively execute document indexing and querying operations. Prior to the initial deployment of the LightRAG server, it is essential to configure the settings for both the LLM and the Embedding Model. LightRAG supports binding to various LLM/Embedding backends:
38
 
39
  * ollama
40
  * lollms
41
+ * openai or openai compatible
42
  * azure_openai
43
 
44
+ It is recommended to use environment variables to configure the LightRAG Server. There is an example environment variable file named `env.example` in the root directory of the project. Please copy this file to the startup directory and rename it to `.env`. After that, you can modify the parameters related to the LLM and Embedding models in the `.env` file. It is important to note that the LightRAG Server will load the environment variables from `.env` into the system environment variables each time it starts. Since the LightRAG Server will prioritize the settings in the system environment variables, if you modify the `.env` file after starting the LightRAG Server via the command line, you need to execute `source .env` to make the new settings take effect.
 
 
 
45
 
46
+ Here are some examples of common settings for LLM and Embedding models:
 
 
 
47
 
48
+ * OpenAI LLM + Ollama Embedding
 
 
49
 
50
+ ```
51
+ LLM_BINDING=openai
52
+ LLM_MODEL=gpt-4o
53
  LLM_BINDING_HOST=https://api.openai.com/v1
54
+ LLM_BINDING_API_KEY=your_api_key
55
+ MAX_TOKENS=32768 # Max tokens send to LLM (less than model context size)
56
+
57
+ EMBEDDING_BINDING=ollama
58
+ EMBEDDING_BINDING_HOST=http://localhost:11434
59
+ EMBEDDING_MODEL=bge-m3:latest
60
+ EMBEDDING_DIM=1024
61
+ # EMBEDDING_BINDING_API_KEY=your_api_key
62
  ```
63
 
64
+ * Ollama LLM + Ollama Embedding
65
 
66
  ```
67
+ LLM_BINDING=ollama
68
  LLM_MODEL=mistral-nemo:latest
69
+ LLM_BINDING_HOST=http://localhost:11434
70
+ # LLM_BINDING_API_KEY=your_api_key
71
+ MAX_TOKENS=8192 # Max tokens send to LLM (base on your Ollama Server capacity)
72
 
73
+ EMBEDDING_BINDING=ollama
74
+ EMBEDDING_BINDING_HOST=http://localhost:11434
75
  EMBEDDING_MODEL=bge-m3:latest
76
  EMBEDDING_DIM=1024
77
+ # EMBEDDING_BINDING_API_KEY=your_api_key
78
  ```
79
 
80
+ ### Starting LightRAG Server
81
 
82
+ The LightRAG Server supports two operational modes:
83
+ * The simple and efficient Uvicorn mode
84
 
85
  ```
86
+ lightrag-server
 
87
  ```
88
+ * The multiprocess Gunicorn + Uvicorn mode (production mode, not supported on Windows environments)
89
 
90
+ ```
91
+ lightrag-gunicorn --workers 4
92
+ ```
93
+ The `.env` file must be placed in the startup directory. Upon launching, the LightRAG Server will create a documents directory (default is `./inputs`) and a data directory (default is `./rag_storage`). This allows you to initiate multiple instances of LightRAG Server from different directories, with each instance configured to listen on a distinct network port.
94
 
95
+ Here are some common used startup parameters:
96
+
97
+ - `--host`: Server listening address (default: 0.0.0.0)
98
+ - `--port`: Server listening port (default: 9621)
99
+ - `--timeout`: LLM request timeout (default: 150 seconds)
100
+ - `--log-level`: Logging level (default: INFO)
101
+ - `--input-dir`: specify the directory to scan for documents (default: ./input)
102
+
103
+ ### Auto scan on startup
104
+
105
+ When starting any of the servers with the `--auto-scan-at-startup` parameter, the system will automatically:
106
+
107
+ 1. Scan for new files in the input directory
108
+ 2. Indexing new documents that aren't already in the database
109
+ 3. Make all content immediately available for RAG queries
110
+
111
+ > The `--input-dir` parameter specifies the input directory to scan. You can also trigger an input directory scan from the web UI.
112
+
113
+ ### Multiple workers for Gunicorn + Uvicorn
114
+
115
+ The LightRAG Server can operate in the `Gunicorn + Uvicorn` preload mode. Gunicorn's Multiple Worker (multiprocess) capability prevents document indexing tasks from blocking RAG queries. Using CPU-exhaustive document extraction tools, such as docling, can lead to the entire system being blocked in pure Uvicorn mode.
116
+
117
+ Though the LightRAG Server uses one worker to process the document indexing pipeline, with the async task support of Uvicorn, multiple files can be processed in parallel. The bottleneck of document indexing speed mainly lies with the LLM. If your LLM supports high concurrency, you can accelerate document indexing by increasing the concurrency level of the LLM. Below are several environment variables related to concurrent processing, along with their default values:
118
 
119
  ```
120
+ WORKERS=2 # Num of worker processes, not greater than (2 x number_of_cores) + 1
121
+ MAX_PARALLEL_INSERT=2 # Num of parallel files to process in one batch
122
+ MAX_ASYNC=4 # Max concurrency requests of LLM
 
 
123
  ```
124
 
125
+ ### Install Lightrag as a Linux Service
126
+
127
+ Create your service file `lightrag.service` from the sample file `lightrag.service.example`. Modify the WorkingDirectory and ExecStart in the service file:
128
 
129
+ ```text
130
+ Description=LightRAG Ollama Service
131
+ WorkingDirectory=<lightrag installed directory>
132
+ ExecStart=<lightrag installed directory>/lightrag/api/lightrag-api
133
  ```
 
 
 
134
 
135
+ Modify your service startup script: `lightrag-api`. Change your Python virtual environment activation command as needed:
 
136
 
137
+ ```shell
138
+ #!/bin/bash
139
+
140
+ # your python virtual environment activation
141
+ source /home/netman/lightrag-xyj/venv/bin/activate
142
+ # start lightrag api server
143
+ lightrag-server
144
  ```
145
 
146
+ Install LightRAG service. If your system is Ubuntu, the following commands will work:
147
 
148
+ ```shell
149
+ sudo cp lightrag.service /etc/systemd/system/
150
+ sudo systemctl daemon-reload
151
+ sudo systemctl start lightrag.service
152
+ sudo systemctl status lightrag.service
153
+ sudo systemctl enable lightrag.service
154
+ ```
155
 
 
 
 
156
 
157
+
158
+
159
+
160
+ ## Ollama Emulation
161
+
162
+ We provide an Ollama-compatible interfaces for LightRAG, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat frontends supporting Ollama, such as Open WebUI, to access LightRAG easily.
163
+
164
+ ### Connect Open WebUI to LightRAG
165
+
166
+ After starting the lightrag-server, you can add an Ollama-type connection in the Open WebUI admin panel. A model named lightrag:latest will then appear in Open WebUI's model management interface. Users can then send queries to LightRAG through the chat interface. You had better install LightRAG as a service for this use case.
167
+
168
+ Open WebUI uses an LLM to perform the session title and session keyword generation tasks. Therefore, the Ollama chat completion API detects and forwards Open WebUI session-related requests directly to the underlying LLM. Screenshot from Open WebUI:
169
+
170
+ ![image-20250323194750379](./README.assets/image-20250323194750379.png)
171
+
172
+ ### Choose Query mode in chat
173
+
174
+ A query prefix in the query string determines which LightRAG query mode is used to generate the response for the query. The supported prefixes include:
175
+
176
+ ```
177
+ /local
178
+ /global
179
+ /hybrid
180
+ /naive
181
+ /mix
182
+ /bypass
183
  ```
184
 
185
+ For example, the chat message "/mix 唐僧有几个徒弟" will trigger a mix mode query for LightRAG. A chat message without a query prefix will trigger a hybrid mode query by default.
186
 
187
+ "/bypass" is not a LightRAG query mode, it will tell API Server to pass the query directly to the underlying LLM with chat history. So user can use LLM to answer question base on the chat history. If you are using Open WebUI as front end, you can just switch the model to a normal LLM instead of using /bypass prefix.
 
 
 
 
188
 
 
189
 
 
 
 
 
 
 
 
190
 
191
+ ## API-Key and Authentication
192
 
193
+ By default, the LightRAG Server can be accessed without any authentication. We can configure the server with an API-Key or account credentials to secure it.
194
+
195
+ * API-KEY
196
+
197
+ ```
198
+ LIGHTRAG_API_KEY=your-secure-api-key-here
199
+ ```
200
+
201
+ * Account credentials (the web UI requires login before access)
202
+
203
+ LightRAG API Server implements JWT-based authentication using HS256 algorithm. To enable secure access control, the following environment variables are required:
204
+
205
+ ```bash
206
+ # For jwt auth
207
+ AUTH_USERNAME=admin # login name
208
+ AUTH_PASSWORD=admin123 # password
209
+ TOKEN_SECRET=your-key # JWT key
210
+ TOKEN_EXPIRE_HOURS=4 # expire duration
211
+ ```
212
+
213
+ > Currently, only the configuration of an administrator account and password is supported. A comprehensive account system is yet to be developed and implemented.
214
+
215
+ If Account credentials are not configured, the web UI will access the system as a Guest. Therefore, even if only API-KEY is configured, all API can still be accessed through the Guest account, which remains insecure. Hence, to safeguard the API, it is necessary to configure both authentication methods simultaneously.
216
+
217
+
218
+
219
+ ## For Azure OpenAI Backend
220
 
221
  Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)):
222
+
223
  ```bash
224
  # Change the resource group name, location and OpenAI resource name as needed
225
  RESOURCE_GROUP_NAME=LightRAG
 
235
  az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME
236
 
237
  ```
238
+
239
  The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file.
240
 
241
  ```
 
250
 
251
  ```
252
 
 
253
 
 
254
 
255
+ ## LightRAG Server Configuration in Detail
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
256
 
257
  API Server can be config in three way (highest priority first):
258
 
 
442
  pip install lightrag-hku
443
  ```
444
 
 
 
 
 
 
 
 
 
 
 
 
 
 
445
  ## API Endpoints
446
 
447
  All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality. When API Server is running, visit:
 
565
  ```bash
566
  curl "http://localhost:9621/health"
567
  ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lightrag/api/__init__.py CHANGED
@@ -1 +1 @@
1
- __api_version__ = "1.0.5"
 
1
+ __api_version__ = "1.2.2"
lightrag/api/auth.py CHANGED
@@ -3,6 +3,9 @@ from datetime import datetime, timedelta
3
  import jwt
4
  from fastapi import HTTPException, status
5
  from pydantic import BaseModel
 
 
 
6
 
7
 
8
  class TokenPayload(BaseModel):
 
3
  import jwt
4
  from fastapi import HTTPException, status
5
  from pydantic import BaseModel
6
+ from dotenv import load_dotenv
7
+
8
+ load_dotenv()
9
 
10
 
11
  class TokenPayload(BaseModel):
lightrag/api/gunicorn_config.py CHANGED
@@ -29,7 +29,9 @@ preload_app = True
29
  worker_class = "uvicorn.workers.UvicornWorker"
30
 
31
  # Other Gunicorn configurations
32
- timeout = int(os.getenv("TIMEOUT", 150)) # Default 150s to match run_with_gunicorn.py
 
 
33
  keepalive = int(os.getenv("KEEPALIVE", 5)) # Default 5s
34
 
35
  # Logging configuration
 
29
  worker_class = "uvicorn.workers.UvicornWorker"
30
 
31
  # Other Gunicorn configurations
32
+ timeout = int(
33
+ os.getenv("TIMEOUT", 150 * 2)
34
+ ) # Default 150s *2 to match run_with_gunicorn.py
35
  keepalive = int(os.getenv("KEEPALIVE", 5)) # Default 5s
36
 
37
  # Logging configuration
lightrag/api/lightrag_server.py CHANGED
@@ -23,9 +23,9 @@ from lightrag.api.utils_api import (
23
  get_default_host,
24
  display_splash_screen,
25
  )
26
- from lightrag import LightRAG
27
- from lightrag.types import GPTKeywordExtractionFormat
28
  from lightrag.api import __api_version__
 
29
  from lightrag.utils import EmbeddingFunc
30
  from lightrag.api.routers.document_routes import (
31
  DocumentManager,
@@ -49,7 +49,7 @@ from .auth import auth_handler
49
  # Load environment variables
50
  # Updated to use the .env that is inside the current folder
51
  # This update allows the user to put a different.env file for each lightrag folder
52
- load_dotenv(".env", override=True)
53
 
54
  # Initialize config parser
55
  config = configparser.ConfigParser()
@@ -364,9 +364,16 @@ def create_app(args):
364
  "token_type": "bearer",
365
  "auth_mode": "disabled",
366
  "message": "Authentication is disabled. Using guest access.",
 
 
367
  }
368
 
369
- return {"auth_configured": True, "auth_mode": "enabled"}
 
 
 
 
 
370
 
371
  @app.post("/login", dependencies=[Depends(optional_api_key)])
372
  async def login(form_data: OAuth2PasswordRequestForm = Depends()):
@@ -383,6 +390,8 @@ def create_app(args):
383
  "token_type": "bearer",
384
  "auth_mode": "disabled",
385
  "message": "Authentication is disabled. Using guest access.",
 
 
386
  }
387
 
388
  if form_data.username != username or form_data.password != password:
@@ -398,6 +407,8 @@ def create_app(args):
398
  "access_token": user_token,
399
  "token_type": "bearer",
400
  "auth_mode": "enabled",
 
 
401
  }
402
 
403
  @app.get("/health", dependencies=[Depends(optional_api_key)])
@@ -406,6 +417,13 @@ def create_app(args):
406
  # Get update flags status for all namespaces
407
  update_status = await get_all_update_flags_status()
408
 
 
 
 
 
 
 
 
409
  return {
410
  "status": "healthy",
411
  "working_directory": str(args.working_dir),
@@ -427,6 +445,9 @@ def create_app(args):
427
  "enable_llm_cache_for_extract": args.enable_llm_cache_for_extract,
428
  },
429
  "update_status": update_status,
 
 
 
430
  }
431
 
432
  # Custom StaticFiles class to prevent caching of HTML files
 
23
  get_default_host,
24
  display_splash_screen,
25
  )
26
+ from lightrag import LightRAG, __version__ as core_version
 
27
  from lightrag.api import __api_version__
28
+ from lightrag.types import GPTKeywordExtractionFormat
29
  from lightrag.utils import EmbeddingFunc
30
  from lightrag.api.routers.document_routes import (
31
  DocumentManager,
 
49
  # Load environment variables
50
  # Updated to use the .env that is inside the current folder
51
  # This update allows the user to put a different.env file for each lightrag folder
52
+ load_dotenv()
53
 
54
  # Initialize config parser
55
  config = configparser.ConfigParser()
 
364
  "token_type": "bearer",
365
  "auth_mode": "disabled",
366
  "message": "Authentication is disabled. Using guest access.",
367
+ "core_version": core_version,
368
+ "api_version": __api_version__,
369
  }
370
 
371
+ return {
372
+ "auth_configured": True,
373
+ "auth_mode": "enabled",
374
+ "core_version": core_version,
375
+ "api_version": __api_version__,
376
+ }
377
 
378
  @app.post("/login", dependencies=[Depends(optional_api_key)])
379
  async def login(form_data: OAuth2PasswordRequestForm = Depends()):
 
390
  "token_type": "bearer",
391
  "auth_mode": "disabled",
392
  "message": "Authentication is disabled. Using guest access.",
393
+ "core_version": core_version,
394
+ "api_version": __api_version__,
395
  }
396
 
397
  if form_data.username != username or form_data.password != password:
 
407
  "access_token": user_token,
408
  "token_type": "bearer",
409
  "auth_mode": "enabled",
410
+ "core_version": core_version,
411
+ "api_version": __api_version__,
412
  }
413
 
414
  @app.get("/health", dependencies=[Depends(optional_api_key)])
 
417
  # Get update flags status for all namespaces
418
  update_status = await get_all_update_flags_status()
419
 
420
+ username = os.getenv("AUTH_USERNAME")
421
+ password = os.getenv("AUTH_PASSWORD")
422
+ if not (username and password):
423
+ auth_mode = "disabled"
424
+ else:
425
+ auth_mode = "enabled"
426
+
427
  return {
428
  "status": "healthy",
429
  "working_directory": str(args.working_dir),
 
445
  "enable_llm_cache_for_extract": args.enable_llm_cache_for_extract,
446
  },
447
  "update_status": update_status,
448
+ "core_version": core_version,
449
+ "api_version": __api_version__,
450
+ "auth_mode": auth_mode,
451
  }
452
 
453
  # Custom StaticFiles class to prevent caching of HTML files
lightrag/api/routers/document_routes.py CHANGED
@@ -405,7 +405,7 @@ async def pipeline_index_file(rag: LightRAG, file_path: Path):
405
 
406
 
407
  async def pipeline_index_files(rag: LightRAG, file_paths: List[Path]):
408
- """Index multiple files concurrently
409
 
410
  Args:
411
  rag: LightRAG instance
@@ -416,12 +416,12 @@ async def pipeline_index_files(rag: LightRAG, file_paths: List[Path]):
416
  try:
417
  enqueued = False
418
 
419
- if len(file_paths) == 1:
420
- enqueued = await pipeline_enqueue_file(rag, file_paths[0])
421
- else:
422
- tasks = [pipeline_enqueue_file(rag, path) for path in file_paths]
423
- enqueued = any(await asyncio.gather(*tasks))
424
 
 
425
  if enqueued:
426
  await rag.apipeline_process_enqueue_documents()
427
  except Exception as e:
@@ -472,14 +472,34 @@ async def run_scanning_process(rag: LightRAG, doc_manager: DocumentManager):
472
  total_files = len(new_files)
473
  logger.info(f"Found {total_files} new files to index.")
474
 
475
- for idx, file_path in enumerate(new_files):
476
- try:
477
- await pipeline_index_file(rag, file_path)
478
- except Exception as e:
479
- logger.error(f"Error indexing file {file_path}: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
480
 
481
  except Exception as e:
482
  logger.error(f"Error during scanning process: {str(e)}")
 
483
 
484
 
485
  def create_document_routes(
 
405
 
406
 
407
  async def pipeline_index_files(rag: LightRAG, file_paths: List[Path]):
408
+ """Index multiple files sequentially to avoid high CPU load
409
 
410
  Args:
411
  rag: LightRAG instance
 
416
  try:
417
  enqueued = False
418
 
419
+ # Process files sequentially
420
+ for file_path in file_paths:
421
+ if await pipeline_enqueue_file(rag, file_path):
422
+ enqueued = True
 
423
 
424
+ # Process the queue only if at least one file was successfully enqueued
425
  if enqueued:
426
  await rag.apipeline_process_enqueue_documents()
427
  except Exception as e:
 
472
  total_files = len(new_files)
473
  logger.info(f"Found {total_files} new files to index.")
474
 
475
+ if not new_files:
476
+ return
477
+
478
+ # Get MAX_PARALLEL_INSERT from global_args
479
+ max_parallel = global_args["max_parallel_insert"]
480
+ # Calculate batch size as 2 * MAX_PARALLEL_INSERT
481
+ batch_size = 2 * max_parallel
482
+
483
+ # Process files in batches
484
+ for i in range(0, total_files, batch_size):
485
+ batch_files = new_files[i : i + batch_size]
486
+ batch_num = i // batch_size + 1
487
+ total_batches = (total_files + batch_size - 1) // batch_size
488
+
489
+ logger.info(
490
+ f"Processing batch {batch_num}/{total_batches} with {len(batch_files)} files"
491
+ )
492
+ await pipeline_index_files(rag, batch_files)
493
+
494
+ # Log progress
495
+ processed = min(i + batch_size, total_files)
496
+ logger.info(
497
+ f"Processed {processed}/{total_files} files ({processed/total_files*100:.1f}%)"
498
+ )
499
 
500
  except Exception as e:
501
  logger.error(f"Error during scanning process: {str(e)}")
502
+ logger.error(traceback.format_exc())
503
 
504
 
505
  def create_document_routes(
lightrag/api/run_with_gunicorn.py CHANGED
@@ -13,7 +13,7 @@ from dotenv import load_dotenv
13
 
14
  # Updated to use the .env that is inside the current folder
15
  # This update allows the user to put a different.env file for each lightrag folder
16
- load_dotenv(".env")
17
 
18
 
19
  def check_and_install_dependencies():
@@ -140,7 +140,7 @@ def main():
140
 
141
  # Timeout configuration prioritizes command line arguments
142
  gunicorn_config.timeout = (
143
- args.timeout if args.timeout else int(os.getenv("TIMEOUT", 150))
144
  )
145
 
146
  # Keepalive configuration
 
13
 
14
  # Updated to use the .env that is inside the current folder
15
  # This update allows the user to put a different.env file for each lightrag folder
16
+ load_dotenv()
17
 
18
 
19
  def check_and_install_dependencies():
 
140
 
141
  # Timeout configuration prioritizes command line arguments
142
  gunicorn_config.timeout = (
143
+ args.timeout if args.timeout * 2 else int(os.getenv("TIMEOUT", 150 * 2))
144
  )
145
 
146
  # Keepalive configuration
lightrag/api/utils_api.py CHANGED
@@ -16,7 +16,7 @@ from starlette.status import HTTP_403_FORBIDDEN
16
  from .auth import auth_handler
17
 
18
  # Load environment variables
19
- load_dotenv(override=True)
20
 
21
  global_args = {"main_args": None}
22
 
@@ -365,6 +365,9 @@ def parse_args(is_uvicorn_mode: bool = False) -> argparse.Namespace:
365
  "LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE
366
  )
367
 
 
 
 
368
  # Handle openai-ollama special case
369
  if args.llm_binding == "openai-ollama":
370
  args.llm_binding = "openai"
@@ -441,8 +444,8 @@ def display_splash_screen(args: argparse.Namespace) -> None:
441
  ASCIIColors.yellow(f"{args.log_level}")
442
  ASCIIColors.white(" ├─ Verbose Debug: ", end="")
443
  ASCIIColors.yellow(f"{args.verbose}")
444
- ASCIIColors.white(" ├─ Timeout: ", end="")
445
- ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}")
446
  ASCIIColors.white(" └─ API Key: ", end="")
447
  ASCIIColors.yellow("Set" if args.key else "Not Set")
448
 
@@ -459,8 +462,10 @@ def display_splash_screen(args: argparse.Namespace) -> None:
459
  ASCIIColors.yellow(f"{args.llm_binding}")
460
  ASCIIColors.white(" ├─ Host: ", end="")
461
  ASCIIColors.yellow(f"{args.llm_binding_host}")
462
- ASCIIColors.white(" └─ Model: ", end="")
463
  ASCIIColors.yellow(f"{args.llm_model}")
 
 
464
 
465
  # Embedding Configuration
466
  ASCIIColors.magenta("\n📊 Embedding Configuration:")
@@ -475,8 +480,10 @@ def display_splash_screen(args: argparse.Namespace) -> None:
475
 
476
  # RAG Configuration
477
  ASCIIColors.magenta("\n⚙️ RAG Configuration:")
478
- ASCIIColors.white(" ├─ Max Async Operations: ", end="")
479
  ASCIIColors.yellow(f"{args.max_async}")
 
 
480
  ASCIIColors.white(" ├─ Max Tokens: ", end="")
481
  ASCIIColors.yellow(f"{args.max_tokens}")
482
  ASCIIColors.white(" ├─ Max Embed Tokens: ", end="")
@@ -485,8 +492,6 @@ def display_splash_screen(args: argparse.Namespace) -> None:
485
  ASCIIColors.yellow(f"{args.chunk_size}")
486
  ASCIIColors.white(" ├─ Chunk Overlap Size: ", end="")
487
  ASCIIColors.yellow(f"{args.chunk_overlap_size}")
488
- ASCIIColors.white(" ├─ History Turns: ", end="")
489
- ASCIIColors.yellow(f"{args.history_turns}")
490
  ASCIIColors.white(" ├─ Cosine Threshold: ", end="")
491
  ASCIIColors.yellow(f"{args.cosine_threshold}")
492
  ASCIIColors.white(" ├─ Top-K: ", end="")
 
16
  from .auth import auth_handler
17
 
18
  # Load environment variables
19
+ load_dotenv()
20
 
21
  global_args = {"main_args": None}
22
 
 
365
  "LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE
366
  )
367
 
368
+ # Get MAX_PARALLEL_INSERT from environment
369
+ global_args["max_parallel_insert"] = get_env_value("MAX_PARALLEL_INSERT", 2, int)
370
+
371
  # Handle openai-ollama special case
372
  if args.llm_binding == "openai-ollama":
373
  args.llm_binding = "openai"
 
444
  ASCIIColors.yellow(f"{args.log_level}")
445
  ASCIIColors.white(" ├─ Verbose Debug: ", end="")
446
  ASCIIColors.yellow(f"{args.verbose}")
447
+ ASCIIColors.white(" ├─ History Turns: ", end="")
448
+ ASCIIColors.yellow(f"{args.history_turns}")
449
  ASCIIColors.white(" └─ API Key: ", end="")
450
  ASCIIColors.yellow("Set" if args.key else "Not Set")
451
 
 
462
  ASCIIColors.yellow(f"{args.llm_binding}")
463
  ASCIIColors.white(" ├─ Host: ", end="")
464
  ASCIIColors.yellow(f"{args.llm_binding_host}")
465
+ ASCIIColors.white(" ├─ Model: ", end="")
466
  ASCIIColors.yellow(f"{args.llm_model}")
467
+ ASCIIColors.white(" └─ Timeout: ", end="")
468
+ ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}")
469
 
470
  # Embedding Configuration
471
  ASCIIColors.magenta("\n📊 Embedding Configuration:")
 
480
 
481
  # RAG Configuration
482
  ASCIIColors.magenta("\n⚙️ RAG Configuration:")
483
+ ASCIIColors.white(" ├─ Max Async for LLM: ", end="")
484
  ASCIIColors.yellow(f"{args.max_async}")
485
+ ASCIIColors.white(" ├─ Max Parallel Insert: ", end="")
486
+ ASCIIColors.yellow(f"{global_args['max_parallel_insert']}")
487
  ASCIIColors.white(" ├─ Max Tokens: ", end="")
488
  ASCIIColors.yellow(f"{args.max_tokens}")
489
  ASCIIColors.white(" ├─ Max Embed Tokens: ", end="")
 
492
  ASCIIColors.yellow(f"{args.chunk_size}")
493
  ASCIIColors.white(" ├─ Chunk Overlap Size: ", end="")
494
  ASCIIColors.yellow(f"{args.chunk_overlap_size}")
 
 
495
  ASCIIColors.white(" ├─ Cosine Threshold: ", end="")
496
  ASCIIColors.yellow(f"{args.cosine_threshold}")
497
  ASCIIColors.white(" ├─ Top-K: ", end="")
lightrag/api/webui/assets/index-BSOt8Nur.css DELETED
Binary file (52.9 kB)
 
lightrag/api/webui/assets/index-Cq65VeVX.css ADDED
Binary file (53.1 kB). View file
 
lightrag/api/webui/assets/{index-4I5HV9Fr.js → index-DlScqWrq.js} RENAMED
Binary files a/lightrag/api/webui/assets/index-4I5HV9Fr.js and b/lightrag/api/webui/assets/index-DlScqWrq.js differ
 
lightrag/api/webui/index.html CHANGED
Binary files a/lightrag/api/webui/index.html and b/lightrag/api/webui/index.html differ
 
lightrag/kg/shared_storage.py CHANGED
@@ -41,6 +41,9 @@ _pipeline_status_lock: Optional[LockType] = None
41
  _graph_db_lock: Optional[LockType] = None
42
  _data_init_lock: Optional[LockType] = None
43
 
 
 
 
44
 
45
  class UnifiedLock(Generic[T]):
46
  """Provide a unified lock interface type for asyncio.Lock and multiprocessing.Lock"""
@@ -51,12 +54,14 @@ class UnifiedLock(Generic[T]):
51
  is_async: bool,
52
  name: str = "unnamed",
53
  enable_logging: bool = True,
 
54
  ):
55
  self._lock = lock
56
  self._is_async = is_async
57
  self._pid = os.getpid() # for debug only
58
  self._name = name # for debug only
59
  self._enable_logging = enable_logging # for debug only
 
60
 
61
  async def __aenter__(self) -> "UnifiedLock[T]":
62
  try:
@@ -64,16 +69,39 @@ class UnifiedLock(Generic[T]):
64
  f"== Lock == Process {self._pid}: Acquiring lock '{self._name}' (async={self._is_async})",
65
  enable_output=self._enable_logging,
66
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  if self._is_async:
68
  await self._lock.acquire()
69
  else:
70
  self._lock.acquire()
 
71
  direct_log(
72
  f"== Lock == Process {self._pid}: Lock '{self._name}' acquired (async={self._is_async})",
73
  enable_output=self._enable_logging,
74
  )
75
  return self
76
  except Exception as e:
 
 
 
 
 
 
 
 
77
  direct_log(
78
  f"== Lock == Process {self._pid}: Failed to acquire lock '{self._name}': {e}",
79
  level="ERROR",
@@ -82,15 +110,29 @@ class UnifiedLock(Generic[T]):
82
  raise
83
 
84
  async def __aexit__(self, exc_type, exc_val, exc_tb):
 
85
  try:
86
  direct_log(
87
  f"== Lock == Process {self._pid}: Releasing lock '{self._name}' (async={self._is_async})",
88
  enable_output=self._enable_logging,
89
  )
 
 
90
  if self._is_async:
91
  self._lock.release()
92
  else:
93
  self._lock.release()
 
 
 
 
 
 
 
 
 
 
 
94
  direct_log(
95
  f"== Lock == Process {self._pid}: Lock '{self._name}' released (async={self._is_async})",
96
  enable_output=self._enable_logging,
@@ -101,6 +143,31 @@ class UnifiedLock(Generic[T]):
101
  level="ERROR",
102
  enable_output=self._enable_logging,
103
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  raise
105
 
106
  def __enter__(self) -> "UnifiedLock[T]":
@@ -151,51 +218,61 @@ class UnifiedLock(Generic[T]):
151
 
152
  def get_internal_lock(enable_logging: bool = False) -> UnifiedLock:
153
  """return unified storage lock for data consistency"""
 
154
  return UnifiedLock(
155
  lock=_internal_lock,
156
  is_async=not is_multiprocess,
157
  name="internal_lock",
158
  enable_logging=enable_logging,
 
159
  )
160
 
161
 
162
  def get_storage_lock(enable_logging: bool = False) -> UnifiedLock:
163
  """return unified storage lock for data consistency"""
 
164
  return UnifiedLock(
165
  lock=_storage_lock,
166
  is_async=not is_multiprocess,
167
  name="storage_lock",
168
  enable_logging=enable_logging,
 
169
  )
170
 
171
 
172
  def get_pipeline_status_lock(enable_logging: bool = False) -> UnifiedLock:
173
  """return unified storage lock for data consistency"""
 
174
  return UnifiedLock(
175
  lock=_pipeline_status_lock,
176
  is_async=not is_multiprocess,
177
  name="pipeline_status_lock",
178
  enable_logging=enable_logging,
 
179
  )
180
 
181
 
182
  def get_graph_db_lock(enable_logging: bool = False) -> UnifiedLock:
183
  """return unified graph database lock for ensuring atomic operations"""
 
184
  return UnifiedLock(
185
  lock=_graph_db_lock,
186
  is_async=not is_multiprocess,
187
  name="graph_db_lock",
188
  enable_logging=enable_logging,
 
189
  )
190
 
191
 
192
  def get_data_init_lock(enable_logging: bool = False) -> UnifiedLock:
193
  """return unified data initialization lock for ensuring atomic data initialization"""
 
194
  return UnifiedLock(
195
  lock=_data_init_lock,
196
  is_async=not is_multiprocess,
197
  name="data_init_lock",
198
  enable_logging=enable_logging,
 
199
  )
200
 
201
 
@@ -229,7 +306,8 @@ def initialize_share_data(workers: int = 1):
229
  _shared_dicts, \
230
  _init_flags, \
231
  _initialized, \
232
- _update_flags
 
233
 
234
  # Check if already initialized
235
  if _initialized:
@@ -251,6 +329,16 @@ def initialize_share_data(workers: int = 1):
251
  _shared_dicts = _manager.dict()
252
  _init_flags = _manager.dict()
253
  _update_flags = _manager.dict()
 
 
 
 
 
 
 
 
 
 
254
  direct_log(
255
  f"Process {os.getpid()} Shared-Data created for Multiple Process (workers={workers})"
256
  )
@@ -264,6 +352,7 @@ def initialize_share_data(workers: int = 1):
264
  _shared_dicts = {}
265
  _init_flags = {}
266
  _update_flags = {}
 
267
  direct_log(f"Process {os.getpid()} Shared-Data created for Single Process")
268
 
269
  # Mark as initialized
@@ -458,7 +547,8 @@ def finalize_share_data():
458
  _shared_dicts, \
459
  _init_flags, \
460
  _initialized, \
461
- _update_flags
 
462
 
463
  # Check if already initialized
464
  if not _initialized:
@@ -523,5 +613,6 @@ def finalize_share_data():
523
  _graph_db_lock = None
524
  _data_init_lock = None
525
  _update_flags = None
 
526
 
527
  direct_log(f"Process {os.getpid()} storage data finalization complete")
 
41
  _graph_db_lock: Optional[LockType] = None
42
  _data_init_lock: Optional[LockType] = None
43
 
44
+ # async locks for coroutine synchronization in multiprocess mode
45
+ _async_locks: Optional[Dict[str, asyncio.Lock]] = None
46
+
47
 
48
  class UnifiedLock(Generic[T]):
49
  """Provide a unified lock interface type for asyncio.Lock and multiprocessing.Lock"""
 
54
  is_async: bool,
55
  name: str = "unnamed",
56
  enable_logging: bool = True,
57
+ async_lock: Optional[asyncio.Lock] = None,
58
  ):
59
  self._lock = lock
60
  self._is_async = is_async
61
  self._pid = os.getpid() # for debug only
62
  self._name = name # for debug only
63
  self._enable_logging = enable_logging # for debug only
64
+ self._async_lock = async_lock # auxiliary lock for coroutine synchronization
65
 
66
  async def __aenter__(self) -> "UnifiedLock[T]":
67
  try:
 
69
  f"== Lock == Process {self._pid}: Acquiring lock '{self._name}' (async={self._is_async})",
70
  enable_output=self._enable_logging,
71
  )
72
+
73
+ # If in multiprocess mode and async lock exists, acquire it first
74
+ if not self._is_async and self._async_lock is not None:
75
+ direct_log(
76
+ f"== Lock == Process {self._pid}: Acquiring async lock for '{self._name}'",
77
+ enable_output=self._enable_logging,
78
+ )
79
+ await self._async_lock.acquire()
80
+ direct_log(
81
+ f"== Lock == Process {self._pid}: Async lock for '{self._name}' acquired",
82
+ enable_output=self._enable_logging,
83
+ )
84
+
85
+ # Then acquire the main lock
86
  if self._is_async:
87
  await self._lock.acquire()
88
  else:
89
  self._lock.acquire()
90
+
91
  direct_log(
92
  f"== Lock == Process {self._pid}: Lock '{self._name}' acquired (async={self._is_async})",
93
  enable_output=self._enable_logging,
94
  )
95
  return self
96
  except Exception as e:
97
+ # If main lock acquisition fails, release the async lock if it was acquired
98
+ if (
99
+ not self._is_async
100
+ and self._async_lock is not None
101
+ and self._async_lock.locked()
102
+ ):
103
+ self._async_lock.release()
104
+
105
  direct_log(
106
  f"== Lock == Process {self._pid}: Failed to acquire lock '{self._name}': {e}",
107
  level="ERROR",
 
110
  raise
111
 
112
  async def __aexit__(self, exc_type, exc_val, exc_tb):
113
+ main_lock_released = False
114
  try:
115
  direct_log(
116
  f"== Lock == Process {self._pid}: Releasing lock '{self._name}' (async={self._is_async})",
117
  enable_output=self._enable_logging,
118
  )
119
+
120
+ # Release main lock first
121
  if self._is_async:
122
  self._lock.release()
123
  else:
124
  self._lock.release()
125
+
126
+ main_lock_released = True
127
+
128
+ # Then release async lock if in multiprocess mode
129
+ if not self._is_async and self._async_lock is not None:
130
+ direct_log(
131
+ f"== Lock == Process {self._pid}: Releasing async lock for '{self._name}'",
132
+ enable_output=self._enable_logging,
133
+ )
134
+ self._async_lock.release()
135
+
136
  direct_log(
137
  f"== Lock == Process {self._pid}: Lock '{self._name}' released (async={self._is_async})",
138
  enable_output=self._enable_logging,
 
143
  level="ERROR",
144
  enable_output=self._enable_logging,
145
  )
146
+
147
+ # If main lock release failed but async lock hasn't been released, try to release it
148
+ if (
149
+ not main_lock_released
150
+ and not self._is_async
151
+ and self._async_lock is not None
152
+ ):
153
+ try:
154
+ direct_log(
155
+ f"== Lock == Process {self._pid}: Attempting to release async lock after main lock failure",
156
+ level="WARNING",
157
+ enable_output=self._enable_logging,
158
+ )
159
+ self._async_lock.release()
160
+ direct_log(
161
+ f"== Lock == Process {self._pid}: Successfully released async lock after main lock failure",
162
+ enable_output=self._enable_logging,
163
+ )
164
+ except Exception as inner_e:
165
+ direct_log(
166
+ f"== Lock == Process {self._pid}: Failed to release async lock after main lock failure: {inner_e}",
167
+ level="ERROR",
168
+ enable_output=self._enable_logging,
169
+ )
170
+
171
  raise
172
 
173
  def __enter__(self) -> "UnifiedLock[T]":
 
218
 
219
  def get_internal_lock(enable_logging: bool = False) -> UnifiedLock:
220
  """return unified storage lock for data consistency"""
221
+ async_lock = _async_locks.get("internal_lock") if is_multiprocess else None
222
  return UnifiedLock(
223
  lock=_internal_lock,
224
  is_async=not is_multiprocess,
225
  name="internal_lock",
226
  enable_logging=enable_logging,
227
+ async_lock=async_lock,
228
  )
229
 
230
 
231
  def get_storage_lock(enable_logging: bool = False) -> UnifiedLock:
232
  """return unified storage lock for data consistency"""
233
+ async_lock = _async_locks.get("storage_lock") if is_multiprocess else None
234
  return UnifiedLock(
235
  lock=_storage_lock,
236
  is_async=not is_multiprocess,
237
  name="storage_lock",
238
  enable_logging=enable_logging,
239
+ async_lock=async_lock,
240
  )
241
 
242
 
243
  def get_pipeline_status_lock(enable_logging: bool = False) -> UnifiedLock:
244
  """return unified storage lock for data consistency"""
245
+ async_lock = _async_locks.get("pipeline_status_lock") if is_multiprocess else None
246
  return UnifiedLock(
247
  lock=_pipeline_status_lock,
248
  is_async=not is_multiprocess,
249
  name="pipeline_status_lock",
250
  enable_logging=enable_logging,
251
+ async_lock=async_lock,
252
  )
253
 
254
 
255
  def get_graph_db_lock(enable_logging: bool = False) -> UnifiedLock:
256
  """return unified graph database lock for ensuring atomic operations"""
257
+ async_lock = _async_locks.get("graph_db_lock") if is_multiprocess else None
258
  return UnifiedLock(
259
  lock=_graph_db_lock,
260
  is_async=not is_multiprocess,
261
  name="graph_db_lock",
262
  enable_logging=enable_logging,
263
+ async_lock=async_lock,
264
  )
265
 
266
 
267
  def get_data_init_lock(enable_logging: bool = False) -> UnifiedLock:
268
  """return unified data initialization lock for ensuring atomic data initialization"""
269
+ async_lock = _async_locks.get("data_init_lock") if is_multiprocess else None
270
  return UnifiedLock(
271
  lock=_data_init_lock,
272
  is_async=not is_multiprocess,
273
  name="data_init_lock",
274
  enable_logging=enable_logging,
275
+ async_lock=async_lock,
276
  )
277
 
278
 
 
306
  _shared_dicts, \
307
  _init_flags, \
308
  _initialized, \
309
+ _update_flags, \
310
+ _async_locks
311
 
312
  # Check if already initialized
313
  if _initialized:
 
329
  _shared_dicts = _manager.dict()
330
  _init_flags = _manager.dict()
331
  _update_flags = _manager.dict()
332
+
333
+ # Initialize async locks for multiprocess mode
334
+ _async_locks = {
335
+ "internal_lock": asyncio.Lock(),
336
+ "storage_lock": asyncio.Lock(),
337
+ "pipeline_status_lock": asyncio.Lock(),
338
+ "graph_db_lock": asyncio.Lock(),
339
+ "data_init_lock": asyncio.Lock(),
340
+ }
341
+
342
  direct_log(
343
  f"Process {os.getpid()} Shared-Data created for Multiple Process (workers={workers})"
344
  )
 
352
  _shared_dicts = {}
353
  _init_flags = {}
354
  _update_flags = {}
355
+ _async_locks = None # No need for async locks in single process mode
356
  direct_log(f"Process {os.getpid()} Shared-Data created for Single Process")
357
 
358
  # Mark as initialized
 
547
  _shared_dicts, \
548
  _init_flags, \
549
  _initialized, \
550
+ _update_flags, \
551
+ _async_locks
552
 
553
  # Check if already initialized
554
  if not _initialized:
 
613
  _graph_db_lock = None
614
  _data_init_lock = None
615
  _update_flags = None
616
+ _async_locks = None
617
 
618
  direct_log(f"Process {os.getpid()} storage data finalization complete")
lightrag/lightrag.py CHANGED
@@ -186,7 +186,9 @@ class LightRAG:
186
  embedding_batch_num: int = field(default=int(os.getenv("EMBEDDING_BATCH_NUM", 32)))
187
  """Batch size for embedding computations."""
188
 
189
- embedding_func_max_async: int = field(default=int(os.getenv("EMBEDDING_FUNC_MAX_ASYNC", 16)))
 
 
190
  """Maximum number of concurrent embedding function calls."""
191
 
192
  embedding_cache_config: dict[str, Any] = field(
 
186
  embedding_batch_num: int = field(default=int(os.getenv("EMBEDDING_BATCH_NUM", 32)))
187
  """Batch size for embedding computations."""
188
 
189
+ embedding_func_max_async: int = field(
190
+ default=int(os.getenv("EMBEDDING_FUNC_MAX_ASYNC", 16))
191
+ )
192
  """Maximum number of concurrent embedding function calls."""
193
 
194
  embedding_cache_config: dict[str, Any] = field(
lightrag/llm/anthropic.py ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..utils import verbose_debug, VERBOSE_DEBUG
2
+ import sys
3
+ import os
4
+ import logging
5
+ import numpy as np
6
+ from typing import Any, Union, AsyncIterator
7
+ import pipmaster as pm # Pipmaster for dynamic library install
8
+
9
+ if sys.version_info < (3, 9):
10
+ from typing import AsyncIterator
11
+ else:
12
+ from collections.abc import AsyncIterator
13
+
14
+ # Install Anthropic SDK if not present
15
+ if not pm.is_installed("anthropic"):
16
+ pm.install("anthropic")
17
+
18
+ # Add Voyage AI import
19
+ if not pm.is_installed("voyageai"):
20
+ pm.install("voyageai")
21
+ import voyageai
22
+
23
+ from anthropic import (
24
+ AsyncAnthropic,
25
+ APIConnectionError,
26
+ RateLimitError,
27
+ APITimeoutError,
28
+ )
29
+ from tenacity import (
30
+ retry,
31
+ stop_after_attempt,
32
+ wait_exponential,
33
+ retry_if_exception_type,
34
+ )
35
+ from lightrag.utils import (
36
+ safe_unicode_decode,
37
+ logger,
38
+ )
39
+ from lightrag.api import __api_version__
40
+
41
+
42
+ # Custom exception for retry mechanism
43
+ class InvalidResponseError(Exception):
44
+ """Custom exception class for triggering retry mechanism"""
45
+
46
+ pass
47
+
48
+
49
+ # Core Anthropic completion function with retry
50
+ @retry(
51
+ stop=stop_after_attempt(3),
52
+ wait=wait_exponential(multiplier=1, min=4, max=10),
53
+ retry=retry_if_exception_type(
54
+ (RateLimitError, APIConnectionError, APITimeoutError, InvalidResponseError)
55
+ ),
56
+ )
57
+ async def anthropic_complete_if_cache(
58
+ model: str,
59
+ prompt: str,
60
+ system_prompt: str | None = None,
61
+ history_messages: list[dict[str, Any]] | None = None,
62
+ base_url: str | None = None,
63
+ api_key: str | None = None,
64
+ **kwargs: Any,
65
+ ) -> Union[str, AsyncIterator[str]]:
66
+ if history_messages is None:
67
+ history_messages = []
68
+ if not api_key:
69
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
70
+
71
+ default_headers = {
72
+ "User-Agent": f"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_8) LightRAG/{__api_version__}",
73
+ "Content-Type": "application/json",
74
+ }
75
+
76
+ # Set logger level to INFO when VERBOSE_DEBUG is off
77
+ if not VERBOSE_DEBUG and logger.level == logging.DEBUG:
78
+ logging.getLogger("anthropic").setLevel(logging.INFO)
79
+
80
+ anthropic_async_client = (
81
+ AsyncAnthropic(default_headers=default_headers, api_key=api_key)
82
+ if base_url is None
83
+ else AsyncAnthropic(
84
+ base_url=base_url, default_headers=default_headers, api_key=api_key
85
+ )
86
+ )
87
+ kwargs.pop("hashing_kv", None)
88
+ messages: list[dict[str, Any]] = []
89
+ if system_prompt:
90
+ messages.append({"role": "system", "content": system_prompt})
91
+ messages.extend(history_messages)
92
+ messages.append({"role": "user", "content": prompt})
93
+
94
+ logger.debug("===== Sending Query to Anthropic LLM =====")
95
+ logger.debug(f"Model: {model} Base URL: {base_url}")
96
+ logger.debug(f"Additional kwargs: {kwargs}")
97
+ verbose_debug(f"Query: {prompt}")
98
+ verbose_debug(f"System prompt: {system_prompt}")
99
+
100
+ try:
101
+ response = await anthropic_async_client.messages.create(
102
+ model=model, messages=messages, stream=True, **kwargs
103
+ )
104
+ except APIConnectionError as e:
105
+ logger.error(f"Anthropic API Connection Error: {e}")
106
+ raise
107
+ except RateLimitError as e:
108
+ logger.error(f"Anthropic API Rate Limit Error: {e}")
109
+ raise
110
+ except APITimeoutError as e:
111
+ logger.error(f"Anthropic API Timeout Error: {e}")
112
+ raise
113
+ except Exception as e:
114
+ logger.error(
115
+ f"Anthropic API Call Failed,\nModel: {model},\nParams: {kwargs}, Got: {e}"
116
+ )
117
+ raise
118
+
119
+ async def stream_response():
120
+ try:
121
+ async for event in response:
122
+ content = (
123
+ event.delta.text
124
+ if hasattr(event, "delta") and event.delta.text
125
+ else None
126
+ )
127
+ if content is None:
128
+ continue
129
+ if r"\u" in content:
130
+ content = safe_unicode_decode(content.encode("utf-8"))
131
+ yield content
132
+ except Exception as e:
133
+ logger.error(f"Error in stream response: {str(e)}")
134
+ raise
135
+
136
+ return stream_response()
137
+
138
+
139
+ # Generic Anthropic completion function
140
+ async def anthropic_complete(
141
+ prompt: str,
142
+ system_prompt: str | None = None,
143
+ history_messages: list[dict[str, Any]] | None = None,
144
+ **kwargs: Any,
145
+ ) -> Union[str, AsyncIterator[str]]:
146
+ if history_messages is None:
147
+ history_messages = []
148
+ model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
149
+ return await anthropic_complete_if_cache(
150
+ model_name,
151
+ prompt,
152
+ system_prompt=system_prompt,
153
+ history_messages=history_messages,
154
+ **kwargs,
155
+ )
156
+
157
+
158
+ # Claude 3 Opus specific completion
159
+ async def claude_3_opus_complete(
160
+ prompt: str,
161
+ system_prompt: str | None = None,
162
+ history_messages: list[dict[str, Any]] | None = None,
163
+ **kwargs: Any,
164
+ ) -> Union[str, AsyncIterator[str]]:
165
+ if history_messages is None:
166
+ history_messages = []
167
+ return await anthropic_complete_if_cache(
168
+ "claude-3-opus-20240229",
169
+ prompt,
170
+ system_prompt=system_prompt,
171
+ history_messages=history_messages,
172
+ **kwargs,
173
+ )
174
+
175
+
176
+ # Claude 3 Sonnet specific completion
177
+ async def claude_3_sonnet_complete(
178
+ prompt: str,
179
+ system_prompt: str | None = None,
180
+ history_messages: list[dict[str, Any]] | None = None,
181
+ **kwargs: Any,
182
+ ) -> Union[str, AsyncIterator[str]]:
183
+ if history_messages is None:
184
+ history_messages = []
185
+ return await anthropic_complete_if_cache(
186
+ "claude-3-sonnet-20240229",
187
+ prompt,
188
+ system_prompt=system_prompt,
189
+ history_messages=history_messages,
190
+ **kwargs,
191
+ )
192
+
193
+
194
+ # Claude 3 Haiku specific completion
195
+ async def claude_3_haiku_complete(
196
+ prompt: str,
197
+ system_prompt: str | None = None,
198
+ history_messages: list[dict[str, Any]] | None = None,
199
+ **kwargs: Any,
200
+ ) -> Union[str, AsyncIterator[str]]:
201
+ if history_messages is None:
202
+ history_messages = []
203
+ return await anthropic_complete_if_cache(
204
+ "claude-3-haiku-20240307",
205
+ prompt,
206
+ system_prompt=system_prompt,
207
+ history_messages=history_messages,
208
+ **kwargs,
209
+ )
210
+
211
+
212
+ # Embedding function (placeholder, as Anthropic does not provide embeddings)
213
+ @retry(
214
+ stop=stop_after_attempt(3),
215
+ wait=wait_exponential(multiplier=1, min=4, max=60),
216
+ retry=retry_if_exception_type(
217
+ (RateLimitError, APIConnectionError, APITimeoutError)
218
+ ),
219
+ )
220
+ async def anthropic_embed(
221
+ texts: list[str],
222
+ model: str = "voyage-3", # Default to voyage-3 as a good general-purpose model
223
+ base_url: str = None,
224
+ api_key: str = None,
225
+ ) -> np.ndarray:
226
+ """
227
+ Generate embeddings using Voyage AI since Anthropic doesn't provide native embedding support.
228
+
229
+ Args:
230
+ texts: List of text strings to embed
231
+ model: Voyage AI model name (e.g., "voyage-3", "voyage-3-large", "voyage-code-3")
232
+ base_url: Optional custom base URL (not used for Voyage AI)
233
+ api_key: API key for Voyage AI (defaults to VOYAGE_API_KEY environment variable)
234
+
235
+ Returns:
236
+ numpy array of shape (len(texts), embedding_dimension) containing the embeddings
237
+ """
238
+ if not api_key:
239
+ api_key = os.environ.get("VOYAGE_API_KEY")
240
+ if not api_key:
241
+ logger.error("VOYAGE_API_KEY environment variable not set")
242
+ raise ValueError(
243
+ "VOYAGE_API_KEY environment variable is required for embeddings"
244
+ )
245
+
246
+ try:
247
+ # Initialize Voyage AI client
248
+ voyage_client = voyageai.Client(api_key=api_key)
249
+
250
+ # Get embeddings
251
+ result = voyage_client.embed(
252
+ texts,
253
+ model=model,
254
+ input_type="document", # Assuming document context; could be made configurable
255
+ )
256
+
257
+ # Convert list of embeddings to numpy array
258
+ embeddings = np.array(result.embeddings, dtype=np.float32)
259
+
260
+ logger.debug(f"Generated embeddings for {len(texts)} texts using {model}")
261
+ verbose_debug(f"Embedding shape: {embeddings.shape}")
262
+
263
+ return embeddings
264
+
265
+ except Exception as e:
266
+ logger.error(f"Voyage AI embedding failed: {str(e)}")
267
+ raise
268
+
269
+
270
+ # Optional: a helper function to get available embedding models
271
+ def get_available_embedding_models() -> dict[str, dict]:
272
+ """
273
+ Returns a dictionary of available Voyage AI embedding models and their properties.
274
+ """
275
+ return {
276
+ "voyage-3-large": {
277
+ "context_length": 32000,
278
+ "dimension": 1024,
279
+ "description": "Best general-purpose and multilingual",
280
+ },
281
+ "voyage-3": {
282
+ "context_length": 32000,
283
+ "dimension": 1024,
284
+ "description": "General-purpose and multilingual",
285
+ },
286
+ "voyage-3-lite": {
287
+ "context_length": 32000,
288
+ "dimension": 512,
289
+ "description": "Optimized for latency and cost",
290
+ },
291
+ "voyage-code-3": {
292
+ "context_length": 32000,
293
+ "dimension": 1024,
294
+ "description": "Optimized for code",
295
+ },
296
+ "voyage-finance-2": {
297
+ "context_length": 32000,
298
+ "dimension": 1024,
299
+ "description": "Optimized for finance",
300
+ },
301
+ "voyage-law-2": {
302
+ "context_length": 16000,
303
+ "dimension": 1024,
304
+ "description": "Optimized for legal",
305
+ },
306
+ "voyage-multimodal-3": {
307
+ "context_length": 32000,
308
+ "dimension": 1024,
309
+ "description": "Multimodal text and images",
310
+ },
311
+ }
lightrag_webui/src/App.tsx CHANGED
@@ -1,13 +1,13 @@
1
- import { useState, useCallback } from 'react'
2
  import ThemeProvider from '@/components/ThemeProvider'
3
  import TabVisibilityProvider from '@/contexts/TabVisibilityProvider'
4
  import MessageAlert from '@/components/MessageAlert'
5
  import ApiKeyAlert from '@/components/ApiKeyAlert'
6
  import StatusIndicator from '@/components/graph/StatusIndicator'
7
  import { healthCheckInterval } from '@/lib/constants'
8
- import { useBackendState } from '@/stores/state'
9
  import { useSettingsStore } from '@/stores/settings'
10
- import { useEffect } from 'react'
11
  import SiteHeader from '@/features/SiteHeader'
12
  import { InvalidApiKeyError, RequireApiKeError } from '@/api/lightrag'
13
 
@@ -23,17 +23,64 @@ function App() {
23
  const enableHealthCheck = useSettingsStore.use.enableHealthCheck()
24
  const currentTab = useSettingsStore.use.currentTab()
25
  const [apiKeyInvalid, setApiKeyInvalid] = useState(false)
 
26
 
27
- // Health check
28
  useEffect(() => {
29
- // Check immediately
30
- useBackendState.getState().check()
31
-
32
- const interval = setInterval(async () => {
33
- await useBackendState.getState().check()
34
- }, healthCheckInterval * 1000)
35
- return () => clearInterval(interval)
36
- }, [enableHealthCheck])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
  const handleTabChange = useCallback(
39
  (tab: string) => useSettingsStore.getState().setCurrentTab(tab as any),
 
1
+ import { useState, useCallback, useEffect, useRef } from 'react'
2
  import ThemeProvider from '@/components/ThemeProvider'
3
  import TabVisibilityProvider from '@/contexts/TabVisibilityProvider'
4
  import MessageAlert from '@/components/MessageAlert'
5
  import ApiKeyAlert from '@/components/ApiKeyAlert'
6
  import StatusIndicator from '@/components/graph/StatusIndicator'
7
  import { healthCheckInterval } from '@/lib/constants'
8
+ import { useBackendState, useAuthStore } from '@/stores/state'
9
  import { useSettingsStore } from '@/stores/settings'
10
+ import { getAuthStatus } from '@/api/lightrag'
11
  import SiteHeader from '@/features/SiteHeader'
12
  import { InvalidApiKeyError, RequireApiKeError } from '@/api/lightrag'
13
 
 
23
  const enableHealthCheck = useSettingsStore.use.enableHealthCheck()
24
  const currentTab = useSettingsStore.use.currentTab()
25
  const [apiKeyInvalid, setApiKeyInvalid] = useState(false)
26
+ const versionCheckRef = useRef(false); // Prevent duplicate calls in Vite dev mode
27
 
28
+ // Health check - can be disabled
29
  useEffect(() => {
30
+ // Only execute if health check is enabled
31
+ if (!enableHealthCheck) return;
32
+
33
+ // Health check function
34
+ const performHealthCheck = async () => {
35
+ await useBackendState.getState().check();
36
+ };
37
+
38
+ // Execute immediately
39
+ performHealthCheck();
40
+
41
+ // Set interval for periodic execution
42
+ const interval = setInterval(performHealthCheck, healthCheckInterval * 1000);
43
+ return () => clearInterval(interval);
44
+ }, [enableHealthCheck]);
45
+
46
+ // Version check - independent and executed only once
47
+ useEffect(() => {
48
+ const checkVersion = async () => {
49
+ // Prevent duplicate calls in Vite dev mode
50
+ if (versionCheckRef.current) return;
51
+ versionCheckRef.current = true;
52
+
53
+ // Check if version info was already obtained in login page
54
+ const versionCheckedFromLogin = sessionStorage.getItem('VERSION_CHECKED_FROM_LOGIN') === 'true';
55
+ if (versionCheckedFromLogin) return;
56
+
57
+ // Get version info
58
+ const token = localStorage.getItem('LIGHTRAG-API-TOKEN');
59
+ if (!token) return;
60
+
61
+ try {
62
+ const status = await getAuthStatus();
63
+ if (status.core_version || status.api_version) {
64
+ const isGuestMode = status.auth_mode === 'disabled' || useAuthStore.getState().isGuestMode;
65
+ // Update version info while maintaining login state
66
+ useAuthStore.getState().login(
67
+ token,
68
+ isGuestMode,
69
+ status.core_version,
70
+ status.api_version
71
+ );
72
+
73
+ // Set flag to indicate version info has been checked
74
+ sessionStorage.setItem('VERSION_CHECKED_FROM_LOGIN', 'true');
75
+ }
76
+ } catch (error) {
77
+ console.error('Failed to get version info:', error);
78
+ }
79
+ };
80
+
81
+ // Execute version check
82
+ checkVersion();
83
+ }, []); // Empty dependency array ensures it only runs once on mount
84
 
85
  const handleTabChange = useCallback(
86
  (tab: string) => useSettingsStore.getState().setCurrentTab(tab as any),
lightrag_webui/src/AppRouter.tsx CHANGED
@@ -2,98 +2,11 @@ import { HashRouter as Router, Routes, Route, useNavigate } from 'react-router-d
2
  import { useEffect, useState } from 'react'
3
  import { useAuthStore } from '@/stores/state'
4
  import { navigationService } from '@/services/navigation'
5
- import { getAuthStatus } from '@/api/lightrag'
6
- import { toast } from 'sonner'
7
  import { Toaster } from 'sonner'
8
  import App from './App'
9
  import LoginPage from '@/features/LoginPage'
10
  import ThemeProvider from '@/components/ThemeProvider'
11
 
12
- interface ProtectedRouteProps {
13
- children: React.ReactNode
14
- }
15
-
16
- const ProtectedRoute = ({ children }: ProtectedRouteProps) => {
17
- const { isAuthenticated } = useAuthStore()
18
- const [isChecking, setIsChecking] = useState(true)
19
- const navigate = useNavigate()
20
-
21
- // Set navigate function for navigation service
22
- useEffect(() => {
23
- navigationService.setNavigate(navigate)
24
- }, [navigate])
25
-
26
- useEffect(() => {
27
- let isMounted = true; // Flag to prevent state updates after unmount
28
-
29
- // This effect will run when the component mounts
30
- // and will check if authentication is required
31
- const checkAuthStatus = async () => {
32
- try {
33
- // Skip check if already authenticated
34
- if (isAuthenticated) {
35
- if (isMounted) setIsChecking(false);
36
- return;
37
- }
38
-
39
- const status = await getAuthStatus()
40
-
41
- // Only proceed if component is still mounted
42
- if (!isMounted) return;
43
-
44
- if (!status.auth_configured && status.access_token) {
45
- // If auth is not configured, use the guest token
46
- useAuthStore.getState().login(status.access_token, true)
47
- if (status.message) {
48
- toast.info(status.message)
49
- }
50
- }
51
- } catch (error) {
52
- console.error('Failed to check auth status:', error)
53
- } finally {
54
- // Only update state if component is still mounted
55
- if (isMounted) {
56
- setIsChecking(false)
57
- }
58
- }
59
- }
60
-
61
- // Execute immediately
62
- checkAuthStatus()
63
-
64
- // Cleanup function to prevent state updates after unmount
65
- return () => {
66
- isMounted = false;
67
- }
68
- }, [isAuthenticated])
69
-
70
- // Handle navigation when authentication status changes
71
- useEffect(() => {
72
- if (!isChecking && !isAuthenticated) {
73
- const currentPath = window.location.hash.slice(1); // Remove the '#' from hash
74
- const isLoginPage = currentPath === '/login';
75
-
76
- if (!isLoginPage) {
77
- // Use navigation service for redirection
78
- console.log('Not authenticated, redirecting to login');
79
- navigationService.navigateToLogin();
80
- }
81
- }
82
- }, [isChecking, isAuthenticated]);
83
-
84
- // Show nothing while checking auth status or when not authenticated on login page
85
- if (isChecking || (!isAuthenticated && window.location.hash.slice(1) === '/login')) {
86
- return null;
87
- }
88
-
89
- // Show children only when authenticated
90
- if (!isAuthenticated) {
91
- return null;
92
- }
93
-
94
- return <>{children}</>;
95
- }
96
-
97
  const AppContent = () => {
98
  const [initializing, setInitializing] = useState(true)
99
  const { isAuthenticated } = useAuthStore()
@@ -104,58 +17,48 @@ const AppContent = () => {
104
  navigationService.setNavigate(navigate)
105
  }, [navigate])
106
 
107
- // Check token validity and auth configuration on app initialization
108
  useEffect(() => {
109
- let isMounted = true; // Flag to prevent state updates after unmount
110
 
111
  const checkAuth = async () => {
112
  try {
113
  const token = localStorage.getItem('LIGHTRAG-API-TOKEN')
114
 
115
- // If we have a token, we're already authenticated
116
  if (token && isAuthenticated) {
117
- if (isMounted) setInitializing(false);
118
  return;
119
  }
120
 
121
- // If no token or not authenticated, check if auth is configured
122
- const status = await getAuthStatus()
123
-
124
- // Only proceed if component is still mounted
125
- if (!isMounted) return;
126
-
127
- if (!status.auth_configured && status.access_token) {
128
- // If auth is not configured, use the guest token
129
- useAuthStore.getState().login(status.access_token, true)
130
- if (status.message) {
131
- toast.info(status.message)
132
- }
133
- } else if (!token) {
134
- // Only logout if we don't have a token
135
  useAuthStore.getState().logout()
136
  }
137
  } catch (error) {
138
  console.error('Auth initialization error:', error)
139
- if (isMounted && !isAuthenticated) {
140
  useAuthStore.getState().logout()
141
  }
142
  } finally {
143
- // Only update state if component is still mounted
144
- if (isMounted) {
145
- setInitializing(false)
146
- }
147
  }
148
  }
149
 
150
- // Execute immediately
151
  checkAuth()
152
 
153
- // Cleanup function to prevent state updates after unmount
154
  return () => {
155
- isMounted = false;
156
  }
157
  }, [isAuthenticated])
158
 
 
 
 
 
 
 
 
 
 
 
 
159
  // Show nothing while initializing
160
  if (initializing) {
161
  return null
@@ -166,11 +69,7 @@ const AppContent = () => {
166
  <Route path="/login" element={<LoginPage />} />
167
  <Route
168
  path="/*"
169
- element={
170
- <ProtectedRoute>
171
- <App />
172
- </ProtectedRoute>
173
- }
174
  />
175
  </Routes>
176
  )
 
2
  import { useEffect, useState } from 'react'
3
  import { useAuthStore } from '@/stores/state'
4
  import { navigationService } from '@/services/navigation'
 
 
5
  import { Toaster } from 'sonner'
6
  import App from './App'
7
  import LoginPage from '@/features/LoginPage'
8
  import ThemeProvider from '@/components/ThemeProvider'
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  const AppContent = () => {
11
  const [initializing, setInitializing] = useState(true)
12
  const { isAuthenticated } = useAuthStore()
 
17
  navigationService.setNavigate(navigate)
18
  }, [navigate])
19
 
20
+ // Token validity check
21
  useEffect(() => {
 
22
 
23
  const checkAuth = async () => {
24
  try {
25
  const token = localStorage.getItem('LIGHTRAG-API-TOKEN')
26
 
 
27
  if (token && isAuthenticated) {
28
+ setInitializing(false);
29
  return;
30
  }
31
 
32
+ if (!token) {
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  useAuthStore.getState().logout()
34
  }
35
  } catch (error) {
36
  console.error('Auth initialization error:', error)
37
+ if (!isAuthenticated) {
38
  useAuthStore.getState().logout()
39
  }
40
  } finally {
41
+ setInitializing(false)
 
 
 
42
  }
43
  }
44
 
 
45
  checkAuth()
46
 
 
47
  return () => {
 
48
  }
49
  }, [isAuthenticated])
50
 
51
+ // Redirect effect for protected routes
52
+ useEffect(() => {
53
+ if (!initializing && !isAuthenticated) {
54
+ const currentPath = window.location.hash.slice(1);
55
+ if (currentPath !== '/login') {
56
+ console.log('Not authenticated, redirecting to login');
57
+ navigate('/login');
58
+ }
59
+ }
60
+ }, [initializing, isAuthenticated, navigate]);
61
+
62
  // Show nothing while initializing
63
  if (initializing) {
64
  return null
 
69
  <Route path="/login" element={<LoginPage />} />
70
  <Route
71
  path="/*"
72
+ element={isAuthenticated ? <App /> : null}
 
 
 
 
73
  />
74
  </Routes>
75
  )
lightrag_webui/src/api/lightrag.ts CHANGED
@@ -41,6 +41,10 @@ export type LightragStatus = {
41
  graph_storage: string
42
  vector_storage: string
43
  }
 
 
 
 
44
  }
45
 
46
  export type LightragDocumentsScanProgress = {
@@ -132,6 +136,8 @@ export type AuthStatusResponse = {
132
  token_type?: string
133
  auth_mode?: 'enabled' | 'disabled'
134
  message?: string
 
 
135
  }
136
 
137
  export type LoginResponse = {
@@ -139,6 +145,8 @@ export type LoginResponse = {
139
  token_type: string
140
  auth_mode?: 'enabled' | 'disabled' // Authentication mode identifier
141
  message?: string // Optional message
 
 
142
  }
143
 
144
  export const InvalidApiKeyError = 'Invalid API Key'
@@ -179,8 +187,9 @@ axiosInstance.interceptors.response.use(
179
  }
180
  // For other APIs, navigate to login page
181
  navigationService.navigateToLogin();
182
- // Return a never-resolving promise to prevent further execution
183
- return new Promise(() => {});
 
184
  }
185
  throw new Error(
186
  `${error.response.status} ${error.response.statusText}\n${JSON.stringify(
 
41
  graph_storage: string
42
  vector_storage: string
43
  }
44
+ update_status?: Record<string, any>
45
+ core_version?: string
46
+ api_version?: string
47
+ auth_mode?: 'enabled' | 'disabled'
48
  }
49
 
50
  export type LightragDocumentsScanProgress = {
 
136
  token_type?: string
137
  auth_mode?: 'enabled' | 'disabled'
138
  message?: string
139
+ core_version?: string
140
+ api_version?: string
141
  }
142
 
143
  export type LoginResponse = {
 
145
  token_type: string
146
  auth_mode?: 'enabled' | 'disabled' // Authentication mode identifier
147
  message?: string // Optional message
148
+ core_version?: string
149
+ api_version?: string
150
  }
151
 
152
  export const InvalidApiKeyError = 'Invalid API Key'
 
187
  }
188
  // For other APIs, navigate to login page
189
  navigationService.navigateToLogin();
190
+
191
+ // return a reject Promise
192
+ return Promise.reject(new Error('Authentication required'));
193
  }
194
  throw new Error(
195
  `${error.response.status} ${error.response.statusText}\n${JSON.stringify(
lightrag_webui/src/components/AppSettings.tsx CHANGED
@@ -22,7 +22,7 @@ export default function AppSettings({ className }: AppSettingsProps) {
22
  const setTheme = useSettingsStore.use.setTheme()
23
 
24
  const handleLanguageChange = useCallback((value: string) => {
25
- setLanguage(value as 'en' | 'zh')
26
  }, [setLanguage])
27
 
28
  const handleThemeChange = useCallback((value: string) => {
@@ -47,6 +47,8 @@ export default function AppSettings({ className }: AppSettingsProps) {
47
  <SelectContent>
48
  <SelectItem value="en">English</SelectItem>
49
  <SelectItem value="zh">中文</SelectItem>
 
 
50
  </SelectContent>
51
  </Select>
52
  </div>
 
22
  const setTheme = useSettingsStore.use.setTheme()
23
 
24
  const handleLanguageChange = useCallback((value: string) => {
25
+ setLanguage(value as 'en' | 'zh' | 'fr' | 'ar')
26
  }, [setLanguage])
27
 
28
  const handleThemeChange = useCallback((value: string) => {
 
47
  <SelectContent>
48
  <SelectItem value="en">English</SelectItem>
49
  <SelectItem value="zh">中文</SelectItem>
50
+ <SelectItem value="fr">Français</SelectItem>
51
+ <SelectItem value="ar">العربية</SelectItem>
52
  </SelectContent>
53
  </Select>
54
  </div>
lightrag_webui/src/components/graph/GraphLabels.tsx CHANGED
@@ -1,4 +1,4 @@
1
- import { useCallback, useEffect, useRef } from 'react'
2
  import { AsyncSelect } from '@/components/ui/AsyncSelect'
3
  import { useSettingsStore } from '@/stores/settings'
4
  import { useGraphStore } from '@/stores/graph'
@@ -12,44 +12,8 @@ const GraphLabels = () => {
12
  const { t } = useTranslation()
13
  const label = useSettingsStore.use.queryLabel()
14
  const allDatabaseLabels = useGraphStore.use.allDatabaseLabels()
15
- const rawGraph = useGraphStore.use.rawGraph()
16
- const labelsLoadedRef = useRef(false)
17
-
18
- // Track if a fetch is in progress to prevent multiple simultaneous fetches
19
- const fetchInProgressRef = useRef(false)
20
-
21
- // Fetch labels and trigger initial data load
22
- useEffect(() => {
23
- // Check if we've already attempted to fetch labels in this session
24
- const labelsFetchAttempted = useGraphStore.getState().labelsFetchAttempted
25
-
26
- // Only fetch if we haven't attempted in this session and no fetch is in progress
27
- if (!labelsFetchAttempted && !fetchInProgressRef.current) {
28
- fetchInProgressRef.current = true
29
- // Set global flag to indicate we've attempted to fetch in this session
30
- useGraphStore.getState().setLabelsFetchAttempted(true)
31
-
32
- useGraphStore.getState().fetchAllDatabaseLabels()
33
- .then(() => {
34
- labelsLoadedRef.current = true
35
- fetchInProgressRef.current = false
36
- })
37
- .catch((error) => {
38
- console.error('Failed to fetch labels:', error)
39
- fetchInProgressRef.current = false
40
- // Reset global flag to allow retry
41
- useGraphStore.getState().setLabelsFetchAttempted(false)
42
- })
43
- }
44
- }, []) // Empty dependency array ensures this only runs once on mount
45
 
46
- // Trigger data load when labels are loaded
47
- useEffect(() => {
48
- if (labelsLoadedRef.current) {
49
- // Reset the fetch attempted flag to force a new data fetch
50
- useGraphStore.getState().setGraphDataFetchAttempted(false)
51
- }
52
- }, [label])
53
 
54
  const getSearchEngine = useCallback(() => {
55
  // Create search engine
@@ -93,40 +57,40 @@ const GraphLabels = () => {
93
  )
94
 
95
  const handleRefresh = useCallback(() => {
96
- // Reset labels fetch status to allow fetching labels again
97
  useGraphStore.getState().setLabelsFetchAttempted(false)
98
-
99
- // Reset graph data fetch status directly, not depending on allDatabaseLabels changes
100
  useGraphStore.getState().setGraphDataFetchAttempted(false)
101
 
102
- // Fetch all labels again
103
- useGraphStore.getState().fetchAllDatabaseLabels()
104
- .then(() => {
105
- // Trigger a graph data reload by changing the query label back and forth
106
- const currentLabel = useSettingsStore.getState().queryLabel
107
- useSettingsStore.getState().setQueryLabel('')
108
- setTimeout(() => {
109
- useSettingsStore.getState().setQueryLabel(currentLabel)
110
- }, 0)
111
- })
112
- .catch((error) => {
113
- console.error('Failed to refresh labels:', error)
114
- })
115
- }, [])
 
 
 
116
 
117
  return (
118
  <div className="flex items-center">
119
- {rawGraph && (
120
- <Button
121
- size="icon"
122
- variant={controlButtonVariant}
123
- onClick={handleRefresh}
124
- tooltip={t('graphPanel.graphLabels.refreshTooltip')}
125
- className="mr-1"
126
- >
127
- <RefreshCw className="h-4 w-4" />
128
- </Button>
129
- )}
130
  <AsyncSelect<string>
131
  className="ml-2"
132
  triggerClassName="max-h-8"
@@ -141,20 +105,23 @@ const GraphLabels = () => {
141
  placeholder={t('graphPanel.graphLabels.placeholder')}
142
  value={label !== null ? label : '*'}
143
  onChange={(newLabel) => {
144
- const currentLabel = useSettingsStore.getState().queryLabel
145
 
146
  // select the last item means query all
147
  if (newLabel === '...') {
148
- newLabel = '*'
149
  }
150
 
151
  // Handle reselecting the same label
152
  if (newLabel === currentLabel && newLabel !== '*') {
153
- newLabel = '*'
154
  }
155
 
156
- // Update the label, which will trigger the useEffect to handle data loading
157
- useSettingsStore.getState().setQueryLabel(newLabel)
 
 
 
158
  }}
159
  clearable={false} // Prevent clearing value on reselect
160
  />
 
1
+ import { useCallback } from 'react'
2
  import { AsyncSelect } from '@/components/ui/AsyncSelect'
3
  import { useSettingsStore } from '@/stores/settings'
4
  import { useGraphStore } from '@/stores/graph'
 
12
  const { t } = useTranslation()
13
  const label = useSettingsStore.use.queryLabel()
14
  const allDatabaseLabels = useGraphStore.use.allDatabaseLabels()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
+ // Remove initial label fetch effect as it's now handled by fetchGraph based on lastSuccessfulQueryLabel
 
 
 
 
 
 
17
 
18
  const getSearchEngine = useCallback(() => {
19
  // Create search engine
 
57
  )
58
 
59
  const handleRefresh = useCallback(() => {
60
+ // Reset fetch status flags
61
  useGraphStore.getState().setLabelsFetchAttempted(false)
 
 
62
  useGraphStore.getState().setGraphDataFetchAttempted(false)
63
 
64
+ // Clear last successful query label to ensure labels are fetched
65
+ useGraphStore.getState().setLastSuccessfulQueryLabel('')
66
+
67
+ // Get current label
68
+ const currentLabel = useSettingsStore.getState().queryLabel
69
+
70
+ // If current label is empty, use default label '*'
71
+ if (!currentLabel) {
72
+ useSettingsStore.getState().setQueryLabel('*')
73
+ } else {
74
+ // Trigger data reload
75
+ useSettingsStore.getState().setQueryLabel('')
76
+ setTimeout(() => {
77
+ useSettingsStore.getState().setQueryLabel(currentLabel)
78
+ }, 0)
79
+ }
80
+ }, []);
81
 
82
  return (
83
  <div className="flex items-center">
84
+ {/* Always show refresh button */}
85
+ <Button
86
+ size="icon"
87
+ variant={controlButtonVariant}
88
+ onClick={handleRefresh}
89
+ tooltip={t('graphPanel.graphLabels.refreshTooltip')}
90
+ className="mr-1"
91
+ >
92
+ <RefreshCw className="h-4 w-4" />
93
+ </Button>
 
94
  <AsyncSelect<string>
95
  className="ml-2"
96
  triggerClassName="max-h-8"
 
105
  placeholder={t('graphPanel.graphLabels.placeholder')}
106
  value={label !== null ? label : '*'}
107
  onChange={(newLabel) => {
108
+ const currentLabel = useSettingsStore.getState().queryLabel;
109
 
110
  // select the last item means query all
111
  if (newLabel === '...') {
112
+ newLabel = '*';
113
  }
114
 
115
  // Handle reselecting the same label
116
  if (newLabel === currentLabel && newLabel !== '*') {
117
+ newLabel = '*';
118
  }
119
 
120
+ // Reset graphDataFetchAttempted flag to ensure data fetch is triggered
121
+ useGraphStore.getState().setGraphDataFetchAttempted(false);
122
+
123
+ // Update the label to trigger data loading
124
+ useSettingsStore.getState().setQueryLabel(newLabel);
125
  }}
126
  clearable={false} // Prevent clearing value on reselect
127
  />
lightrag_webui/src/components/graph/LayoutsControl.tsx CHANGED
@@ -218,8 +218,8 @@ const LayoutsControl = () => {
218
  maxIterations: maxIterations,
219
  settings: {
220
  attraction: 0.0003, // Lower attraction force to reduce oscillation
221
- repulsion: 0.05, // Lower repulsion force to reduce oscillation
222
- gravity: 0.01, // Increase gravity to make nodes converge to center faster
223
  inertia: 0.4, // Lower inertia to add damping effect
224
  maxMove: 100 // Limit maximum movement per step to prevent large jumps
225
  }
 
218
  maxIterations: maxIterations,
219
  settings: {
220
  attraction: 0.0003, // Lower attraction force to reduce oscillation
221
+ repulsion: 0.02, // Lower repulsion force to reduce oscillation
222
+ gravity: 0.02, // Increase gravity to make nodes converge to center faster
223
  inertia: 0.4, // Lower inertia to add damping effect
224
  maxMove: 100 // Limit maximum movement per step to prevent large jumps
225
  }
lightrag_webui/src/features/LoginPage.tsx CHANGED
@@ -1,4 +1,4 @@
1
- import { useState, useEffect } from 'react'
2
  import { useNavigate } from 'react-router-dom'
3
  import { useAuthStore } from '@/stores/state'
4
  import { loginToServer, getAuthStatus } from '@/api/lightrag'
@@ -18,6 +18,7 @@ const LoginPage = () => {
18
  const [username, setUsername] = useState('')
19
  const [password, setPassword] = useState('')
20
  const [checkingAuth, setCheckingAuth] = useState(true)
 
21
 
22
  useEffect(() => {
23
  console.log('LoginPage mounted')
@@ -25,9 +26,14 @@ const LoginPage = () => {
25
 
26
  // Check if authentication is configured, skip login if not
27
  useEffect(() => {
28
- let isMounted = true; // Flag to prevent state updates after unmount
29
 
30
  const checkAuthConfig = async () => {
 
 
 
 
 
 
31
  try {
32
  // If already authenticated, redirect to home
33
  if (isAuthenticated) {
@@ -38,26 +44,30 @@ const LoginPage = () => {
38
  // Check auth status
39
  const status = await getAuthStatus()
40
 
41
- // Only proceed if component is still mounted
42
- if (!isMounted) return;
 
 
43
 
44
  if (!status.auth_configured && status.access_token) {
45
  // If auth is not configured, use the guest token and redirect
46
- login(status.access_token, true)
47
  if (status.message) {
48
  toast.info(status.message)
49
  }
50
  navigate('/')
51
- return // Exit early, no need to set checkingAuth to false
52
  }
 
 
 
 
53
  } catch (error) {
54
  console.error('Failed to check auth configuration:', error)
55
- } finally {
56
- // Only update state if component is still mounted
57
- if (isMounted) {
58
- setCheckingAuth(false)
59
- }
60
  }
 
61
  }
62
 
63
  // Execute immediately
@@ -65,7 +75,6 @@ const LoginPage = () => {
65
 
66
  // Cleanup function to prevent state updates after unmount
67
  return () => {
68
- isMounted = false;
69
  }
70
  }, [isAuthenticated, login, navigate])
71
 
@@ -87,7 +96,12 @@ const LoginPage = () => {
87
 
88
  // Check authentication mode
89
  const isGuestMode = response.auth_mode === 'disabled'
90
- login(response.access_token, isGuestMode)
 
 
 
 
 
91
 
92
  if (isGuestMode) {
93
  // Show authentication disabled notification
 
1
+ import { useState, useEffect, useRef } from 'react'
2
  import { useNavigate } from 'react-router-dom'
3
  import { useAuthStore } from '@/stores/state'
4
  import { loginToServer, getAuthStatus } from '@/api/lightrag'
 
18
  const [username, setUsername] = useState('')
19
  const [password, setPassword] = useState('')
20
  const [checkingAuth, setCheckingAuth] = useState(true)
21
+ const authCheckRef = useRef(false); // Prevent duplicate calls in Vite dev mode
22
 
23
  useEffect(() => {
24
  console.log('LoginPage mounted')
 
26
 
27
  // Check if authentication is configured, skip login if not
28
  useEffect(() => {
 
29
 
30
  const checkAuthConfig = async () => {
31
+ // Prevent duplicate calls in Vite dev mode
32
+ if (authCheckRef.current) {
33
+ return;
34
+ }
35
+ authCheckRef.current = true;
36
+
37
  try {
38
  // If already authenticated, redirect to home
39
  if (isAuthenticated) {
 
44
  // Check auth status
45
  const status = await getAuthStatus()
46
 
47
+ // Set session flag for version check to avoid duplicate checks in App component
48
+ if (status.core_version || status.api_version) {
49
+ sessionStorage.setItem('VERSION_CHECKED_FROM_LOGIN', 'true');
50
+ }
51
 
52
  if (!status.auth_configured && status.access_token) {
53
  // If auth is not configured, use the guest token and redirect
54
+ login(status.access_token, true, status.core_version, status.api_version)
55
  if (status.message) {
56
  toast.info(status.message)
57
  }
58
  navigate('/')
59
+ return
60
  }
61
+
62
+ // Only set checkingAuth to false if we need to show the login page
63
+ setCheckingAuth(false);
64
+
65
  } catch (error) {
66
  console.error('Failed to check auth configuration:', error)
67
+ // Also set checkingAuth to false in case of error
68
+ setCheckingAuth(false);
 
 
 
69
  }
70
+ // Removed finally block as we're setting checkingAuth earlier
71
  }
72
 
73
  // Execute immediately
 
75
 
76
  // Cleanup function to prevent state updates after unmount
77
  return () => {
 
78
  }
79
  }, [isAuthenticated, login, navigate])
80
 
 
96
 
97
  // Check authentication mode
98
  const isGuestMode = response.auth_mode === 'disabled'
99
+ login(response.access_token, isGuestMode, response.core_version, response.api_version)
100
+
101
+ // Set session flag for version check
102
+ if (response.core_version || response.api_version) {
103
+ sessionStorage.setItem('VERSION_CHECKED_FROM_LOGIN', 'true');
104
+ }
105
 
106
  if (isGuestMode) {
107
  // Show authentication disabled notification
lightrag_webui/src/features/SiteHeader.tsx CHANGED
@@ -55,7 +55,11 @@ function TabsNavigation() {
55
 
56
  export default function SiteHeader() {
57
  const { t } = useTranslation()
58
- const { isGuestMode } = useAuthStore()
 
 
 
 
59
 
60
  const handleLogout = () => {
61
  navigationService.navigateToLogin();
@@ -67,6 +71,11 @@ export default function SiteHeader() {
67
  <ZapIcon className="size-4 text-emerald-400" aria-hidden="true" />
68
  {/* <img src='/logo.png' className="size-4" /> */}
69
  <span className="font-bold md:inline-block">{SiteInfo.name}</span>
 
 
 
 
 
70
  </a>
71
 
72
  <div className="flex h-10 flex-1 justify-center">
@@ -86,9 +95,11 @@ export default function SiteHeader() {
86
  </a>
87
  </Button>
88
  <AppSettings />
89
- <Button variant="ghost" size="icon" side="bottom" tooltip={t('header.logout')} onClick={handleLogout}>
90
- <LogOutIcon className="size-4" aria-hidden="true" />
91
- </Button>
 
 
92
  </div>
93
  </nav>
94
  </header>
 
55
 
56
  export default function SiteHeader() {
57
  const { t } = useTranslation()
58
+ const { isGuestMode, coreVersion, apiVersion } = useAuthStore()
59
+
60
+ const versionDisplay = (coreVersion && apiVersion)
61
+ ? `${coreVersion}/${apiVersion}`
62
+ : null;
63
 
64
  const handleLogout = () => {
65
  navigationService.navigateToLogin();
 
71
  <ZapIcon className="size-4 text-emerald-400" aria-hidden="true" />
72
  {/* <img src='/logo.png' className="size-4" /> */}
73
  <span className="font-bold md:inline-block">{SiteInfo.name}</span>
74
+ {versionDisplay && (
75
+ <span className="ml-2 text-xs text-gray-500 dark:text-gray-400">
76
+ v{versionDisplay}
77
+ </span>
78
+ )}
79
  </a>
80
 
81
  <div className="flex h-10 flex-1 justify-center">
 
95
  </a>
96
  </Button>
97
  <AppSettings />
98
+ {!isGuestMode && (
99
+ <Button variant="ghost" size="icon" side="bottom" tooltip={t('header.logout')} onClick={handleLogout}>
100
+ <LogOutIcon className="size-4" aria-hidden="true" />
101
+ </Button>
102
+ )}
103
  </div>
104
  </nav>
105
  </header>
lightrag_webui/src/hooks/useLightragGraph.tsx CHANGED
@@ -12,34 +12,52 @@ import { useSettingsStore } from '@/stores/settings'
12
  import seedrandom from 'seedrandom'
13
 
14
  const validateGraph = (graph: RawGraph) => {
 
15
  if (!graph) {
16
- return false
 
17
  }
 
 
18
  if (!Array.isArray(graph.nodes) || !Array.isArray(graph.edges)) {
19
- return false
 
 
 
 
 
 
 
20
  }
21
 
 
22
  for (const node of graph.nodes) {
23
  if (!node.id || !node.labels || !node.properties) {
24
- return false
 
25
  }
26
  }
27
 
 
28
  for (const edge of graph.edges) {
29
  if (!edge.id || !edge.source || !edge.target) {
30
- return false
 
31
  }
32
  }
33
 
 
34
  for (const edge of graph.edges) {
35
- const source = graph.getNode(edge.source)
36
- const target = graph.getNode(edge.target)
37
  if (source == undefined || target == undefined) {
38
- return false
 
39
  }
40
  }
41
 
42
- return true
 
43
  }
44
 
45
  export type NodeType = {
@@ -53,16 +71,32 @@ export type NodeType = {
53
  export type EdgeType = { label: string }
54
 
55
  const fetchGraph = async (label: string, maxDepth: number, minDegree: number) => {
56
- let rawData: any = null
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
  try {
59
- rawData = await queryGraphs(label, maxDepth, minDegree)
 
60
  } catch (e) {
61
- useBackendState.getState().setErrorMessage(errorMessage(e), 'Query Graphs Error!')
62
- return null
63
  }
64
 
65
- let rawGraph = null
66
 
67
  if (rawData) {
68
  const nodeIdMap: Record<string, number> = {}
@@ -129,7 +163,7 @@ const fetchGraph = async (label: string, maxDepth: number, minDegree: number) =>
129
 
130
  if (!validateGraph(rawGraph)) {
131
  rawGraph = null
132
- console.error('Invalid graph data')
133
  }
134
  console.log('Graph data loaded')
135
  }
@@ -192,6 +226,8 @@ const useLightrangeGraph = () => {
192
  // Use ref to track if data has been loaded and initial load
193
  const dataLoadedRef = useRef(false)
194
  const initialLoadRef = useRef(false)
 
 
195
 
196
  const getNode = useCallback(
197
  (nodeId: string) => {
@@ -224,11 +260,16 @@ const useLightrangeGraph = () => {
224
 
225
  // Data fetching logic
226
  useEffect(() => {
227
- // Skip if fetch is already in progress or no query label
228
- if (fetchInProgressRef.current || !queryLabel) {
229
  return
230
  }
231
 
 
 
 
 
 
232
  // Only fetch data when graphDataFetchAttempted is false (avoids re-fetching on vite dev mode)
233
  if (!isFetching && !useGraphStore.getState().graphDataFetchAttempted) {
234
  // Set flags
@@ -246,49 +287,104 @@ const useLightrangeGraph = () => {
246
  })
247
  }
248
 
249
- console.log('Fetching graph data...')
250
 
251
  // Use a local copy of the parameters
252
  const currentQueryLabel = queryLabel
253
  const currentMaxQueryDepth = maxQueryDepth
254
  const currentMinDegree = minDegree
255
 
256
- // Fetch graph data
257
- fetchGraph(currentQueryLabel, currentMaxQueryDepth, currentMinDegree).then((data) => {
 
 
 
 
 
 
 
 
 
 
 
 
258
  const state = useGraphStore.getState()
259
 
260
  // Reset state
261
  state.reset()
262
 
263
- // Create and set new graph directly
264
- const newSigmaGraph = createSigmaGraph(data)
265
- data?.buildDynamicMap()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
266
 
267
- // Set new graph data
268
- state.setSigmaGraph(newSigmaGraph)
269
- state.setRawGraph(data)
 
 
 
 
 
 
 
 
270
 
271
  // Update flags
272
  dataLoadedRef.current = true
273
  initialLoadRef.current = true
274
  fetchInProgressRef.current = false
275
-
276
- // Reset camera view
277
- state.setMoveToSelectedNode(true)
278
-
279
  state.setIsFetching(false)
 
 
 
 
 
280
  }).catch((error) => {
281
  console.error('Error fetching graph data:', error)
282
 
283
  // Reset state on error
284
  const state = useGraphStore.getState()
285
  state.setIsFetching(false)
286
- dataLoadedRef.current = false
287
  fetchInProgressRef.current = false
288
  state.setGraphDataFetchAttempted(false)
 
289
  })
290
  }
291
- }, [queryLabel, maxQueryDepth, minDegree, isFetching])
292
 
293
  // Handle node expansion
294
  useEffect(() => {
@@ -368,7 +464,7 @@ const useLightrangeGraph = () => {
368
  const nodesToAdd = new Set<string>();
369
  const edgesToAdd = new Set<string>();
370
 
371
- // Get degree range from existing graph for size calculations
372
  const minDegree = 1;
373
  let maxDegree = 0;
374
  sigmaGraph.forEachNode(node => {
@@ -376,10 +472,6 @@ const useLightrangeGraph = () => {
376
  maxDegree = Math.max(maxDegree, degree);
377
  });
378
 
379
- // Calculate size formula parameters
380
- const range = maxDegree - minDegree || 1; // Avoid division by zero
381
- const scale = Constants.maxNodeSize - Constants.minNodeSize;
382
-
383
  // First identify connectable nodes (nodes connected to the expanded node)
384
  for (const node of processedNodes) {
385
  // Skip if node already exists
@@ -400,6 +492,7 @@ const useLightrangeGraph = () => {
400
 
401
  // Calculate node degrees and track discarded edges in one pass
402
  const nodeDegrees = new Map<string, number>();
 
403
  const nodesWithDiscardedEdges = new Set<string>();
404
 
405
  for (const edge of processedEdges) {
@@ -408,12 +501,19 @@ const useLightrangeGraph = () => {
408
 
409
  if (sourceExists && targetExists) {
410
  edgesToAdd.add(edge.id);
411
- // Add degrees for valid edges
412
  if (nodesToAdd.has(edge.source)) {
413
  nodeDegrees.set(edge.source, (nodeDegrees.get(edge.source) || 0) + 1);
 
 
 
414
  }
 
415
  if (nodesToAdd.has(edge.target)) {
416
  nodeDegrees.set(edge.target, (nodeDegrees.get(edge.target) || 0) + 1);
 
 
 
417
  }
418
  } else {
419
  // Track discarded edges for both new and existing nodes
@@ -437,16 +537,21 @@ const useLightrangeGraph = () => {
437
  sigmaGraph: DirectedGraph,
438
  nodesWithDiscardedEdges: Set<string>,
439
  minDegree: number,
440
- range: number,
441
- scale: number
442
  ) => {
 
 
 
 
443
  for (const nodeId of nodesWithDiscardedEdges) {
444
  if (sigmaGraph.hasNode(nodeId)) {
445
  let newDegree = sigmaGraph.degree(nodeId);
446
  newDegree += 1; // Add +1 for discarded edges
 
 
447
 
448
  const newSize = Math.round(
449
- Constants.minNodeSize + scale * Math.pow((newDegree - minDegree) / range, 0.5)
450
  );
451
 
452
  const currentSize = sigmaGraph.getNodeAttribute(nodeId, 'size');
@@ -460,16 +565,27 @@ const useLightrangeGraph = () => {
460
 
461
  // If no new connectable nodes found, show toast and return
462
  if (nodesToAdd.size === 0) {
463
- updateNodeSizes(sigmaGraph, nodesWithDiscardedEdges, minDegree, range, scale);
464
  toast.info(t('graphPanel.propertiesView.node.noNewNodes'));
465
  return;
466
  }
467
 
468
- // Update maxDegree with new node degrees
 
469
  for (const [, degree] of nodeDegrees.entries()) {
470
  maxDegree = Math.max(maxDegree, degree);
471
  }
472
 
 
 
 
 
 
 
 
 
 
 
473
  // SAdd nodes and edges to the graph
474
  // Calculate camera ratio and spread factor once before the loop
475
  const cameraRatio = useGraphStore.getState().sigmaInstance?.getCamera().ratio || 1;
@@ -489,8 +605,10 @@ const useLightrangeGraph = () => {
489
  const nodeDegree = nodeDegrees.get(nodeId) || 0;
490
 
491
  // Calculate node size
 
 
492
  const nodeSize = Math.round(
493
- Constants.minNodeSize + scale * Math.pow((nodeDegree - minDegree) / range, 0.5)
494
  );
495
 
496
  // Calculate angle for polar coordinates
@@ -565,7 +683,18 @@ const useLightrangeGraph = () => {
565
  useGraphStore.getState().resetSearchEngine();
566
 
567
  // Update sizes for all nodes with discarded edges
568
- updateNodeSizes(sigmaGraph, nodesWithDiscardedEdges, minDegree, range, scale);
 
 
 
 
 
 
 
 
 
 
 
569
 
570
  } catch (error) {
571
  console.error('Error expanding node:', error);
 
12
  import seedrandom from 'seedrandom'
13
 
14
  const validateGraph = (graph: RawGraph) => {
15
+ // Check if graph exists
16
  if (!graph) {
17
+ console.log('Graph validation failed: graph is null');
18
+ return false;
19
  }
20
+
21
+ // Check if nodes and edges are arrays
22
  if (!Array.isArray(graph.nodes) || !Array.isArray(graph.edges)) {
23
+ console.log('Graph validation failed: nodes or edges is not an array');
24
+ return false;
25
+ }
26
+
27
+ // Check if nodes array is empty
28
+ if (graph.nodes.length === 0) {
29
+ console.log('Graph validation failed: nodes array is empty');
30
+ return false;
31
  }
32
 
33
+ // Validate each node
34
  for (const node of graph.nodes) {
35
  if (!node.id || !node.labels || !node.properties) {
36
+ console.log('Graph validation failed: invalid node structure');
37
+ return false;
38
  }
39
  }
40
 
41
+ // Validate each edge
42
  for (const edge of graph.edges) {
43
  if (!edge.id || !edge.source || !edge.target) {
44
+ console.log('Graph validation failed: invalid edge structure');
45
+ return false;
46
  }
47
  }
48
 
49
+ // Validate edge connections
50
  for (const edge of graph.edges) {
51
+ const source = graph.getNode(edge.source);
52
+ const target = graph.getNode(edge.target);
53
  if (source == undefined || target == undefined) {
54
+ console.log('Graph validation failed: edge references non-existent node');
55
+ return false;
56
  }
57
  }
58
 
59
+ console.log('Graph validation passed');
60
+ return true;
61
  }
62
 
63
  export type NodeType = {
 
71
  export type EdgeType = { label: string }
72
 
73
  const fetchGraph = async (label: string, maxDepth: number, minDegree: number) => {
74
+ let rawData: any = null;
75
+
76
+ // Check if we need to fetch all database labels first
77
+ const lastSuccessfulQueryLabel = useGraphStore.getState().lastSuccessfulQueryLabel;
78
+ if (!lastSuccessfulQueryLabel) {
79
+ console.log('Last successful queryLabel is empty');
80
+ try {
81
+ await useGraphStore.getState().fetchAllDatabaseLabels();
82
+ } catch (e) {
83
+ console.error('Failed to fetch all database labels:', e);
84
+ // Continue with graph fetch even if labels fetch fails
85
+ }
86
+ }
87
+
88
+ // If label is empty, use default label '*'
89
+ const queryLabel = label || '*';
90
 
91
  try {
92
+ console.log(`Fetching graph label: ${queryLabel}, depth: ${maxDepth}, deg: ${minDegree}`);
93
+ rawData = await queryGraphs(queryLabel, maxDepth, minDegree);
94
  } catch (e) {
95
+ useBackendState.getState().setErrorMessage(errorMessage(e), 'Query Graphs Error!');
96
+ return null;
97
  }
98
 
99
+ let rawGraph = null;
100
 
101
  if (rawData) {
102
  const nodeIdMap: Record<string, number> = {}
 
163
 
164
  if (!validateGraph(rawGraph)) {
165
  rawGraph = null
166
+ console.warn('Invalid graph data')
167
  }
168
  console.log('Graph data loaded')
169
  }
 
226
  // Use ref to track if data has been loaded and initial load
227
  const dataLoadedRef = useRef(false)
228
  const initialLoadRef = useRef(false)
229
+ // Use ref to track if empty data has been handled
230
+ const emptyDataHandledRef = useRef(false)
231
 
232
  const getNode = useCallback(
233
  (nodeId: string) => {
 
260
 
261
  // Data fetching logic
262
  useEffect(() => {
263
+ // Skip if fetch is already in progress
264
+ if (fetchInProgressRef.current) {
265
  return
266
  }
267
 
268
+ // Empty queryLabel should be only handle once(avoid infinite loop)
269
+ if (!queryLabel && emptyDataHandledRef.current) {
270
+ return;
271
+ }
272
+
273
  // Only fetch data when graphDataFetchAttempted is false (avoids re-fetching on vite dev mode)
274
  if (!isFetching && !useGraphStore.getState().graphDataFetchAttempted) {
275
  // Set flags
 
287
  })
288
  }
289
 
290
+ console.log('Preparing graph data...')
291
 
292
  // Use a local copy of the parameters
293
  const currentQueryLabel = queryLabel
294
  const currentMaxQueryDepth = maxQueryDepth
295
  const currentMinDegree = minDegree
296
 
297
+ // Declare a variable to store data promise
298
+ let dataPromise;
299
+
300
+ // 1. If query label is not empty, use fetchGraph
301
+ if (currentQueryLabel) {
302
+ dataPromise = fetchGraph(currentQueryLabel, currentMaxQueryDepth, currentMinDegree);
303
+ } else {
304
+ // 2. If query label is empty, set data to null
305
+ console.log('Query label is empty, show empty graph')
306
+ dataPromise = Promise.resolve(null);
307
+ }
308
+
309
+ // 3. Process data
310
+ dataPromise.then((data) => {
311
  const state = useGraphStore.getState()
312
 
313
  // Reset state
314
  state.reset()
315
 
316
+ // Check if data is empty or invalid
317
+ if (!data || !data.nodes || data.nodes.length === 0) {
318
+ // Create a graph with a single "Graph Is Empty" node
319
+ const emptyGraph = new DirectedGraph();
320
+
321
+ // Add a single node with "Graph Is Empty" label
322
+ emptyGraph.addNode('empty-graph-node', {
323
+ label: t('graphPanel.emptyGraph'),
324
+ color: '#cccccc', // gray color
325
+ x: 0.5,
326
+ y: 0.5,
327
+ size: 15,
328
+ borderColor: Constants.nodeBorderColor,
329
+ borderSize: 0.2
330
+ });
331
+
332
+ // Set graph to store
333
+ state.setSigmaGraph(emptyGraph);
334
+ state.setRawGraph(null);
335
+
336
+ // Still mark graph as empty for other logic
337
+ state.setGraphIsEmpty(true);
338
+
339
+ // Only clear current label if it's not already empty
340
+ if (currentQueryLabel) {
341
+ useSettingsStore.getState().setQueryLabel('');
342
+ }
343
+
344
+ // Clear last successful query label to ensure labels are fetched next time
345
+ state.setLastSuccessfulQueryLabel('');
346
+
347
+ console.log('Graph data is empty, created graph with empty graph node');
348
+ } else {
349
+ // Create and set new graph
350
+ const newSigmaGraph = createSigmaGraph(data);
351
+ data.buildDynamicMap();
352
 
353
+ // Set new graph data
354
+ state.setSigmaGraph(newSigmaGraph);
355
+ state.setRawGraph(data);
356
+ state.setGraphIsEmpty(false);
357
+
358
+ // Update last successful query label
359
+ state.setLastSuccessfulQueryLabel(currentQueryLabel);
360
+
361
+ // Reset camera view
362
+ state.setMoveToSelectedNode(true);
363
+ }
364
 
365
  // Update flags
366
  dataLoadedRef.current = true
367
  initialLoadRef.current = true
368
  fetchInProgressRef.current = false
 
 
 
 
369
  state.setIsFetching(false)
370
+
371
+ // Mark empty data as handled if data is empty and query label is empty
372
+ if ((!data || !data.nodes || data.nodes.length === 0) && !currentQueryLabel) {
373
+ emptyDataHandledRef.current = true;
374
+ }
375
  }).catch((error) => {
376
  console.error('Error fetching graph data:', error)
377
 
378
  // Reset state on error
379
  const state = useGraphStore.getState()
380
  state.setIsFetching(false)
381
+ dataLoadedRef.current = false;
382
  fetchInProgressRef.current = false
383
  state.setGraphDataFetchAttempted(false)
384
+ state.setLastSuccessfulQueryLabel('') // Clear last successful query label on error
385
  })
386
  }
387
+ }, [queryLabel, maxQueryDepth, minDegree, isFetching, t])
388
 
389
  // Handle node expansion
390
  useEffect(() => {
 
464
  const nodesToAdd = new Set<string>();
465
  const edgesToAdd = new Set<string>();
466
 
467
+ // Get degree maxDegree from existing graph for size calculations
468
  const minDegree = 1;
469
  let maxDegree = 0;
470
  sigmaGraph.forEachNode(node => {
 
472
  maxDegree = Math.max(maxDegree, degree);
473
  });
474
 
 
 
 
 
475
  // First identify connectable nodes (nodes connected to the expanded node)
476
  for (const node of processedNodes) {
477
  // Skip if node already exists
 
492
 
493
  // Calculate node degrees and track discarded edges in one pass
494
  const nodeDegrees = new Map<string, number>();
495
+ const existingNodeDegreeIncrements = new Map<string, number>(); // Track degree increments for existing nodes
496
  const nodesWithDiscardedEdges = new Set<string>();
497
 
498
  for (const edge of processedEdges) {
 
501
 
502
  if (sourceExists && targetExists) {
503
  edgesToAdd.add(edge.id);
504
+ // Add degrees for both new and existing nodes
505
  if (nodesToAdd.has(edge.source)) {
506
  nodeDegrees.set(edge.source, (nodeDegrees.get(edge.source) || 0) + 1);
507
+ } else if (existingNodeIds.has(edge.source)) {
508
+ // Track degree increments for existing nodes
509
+ existingNodeDegreeIncrements.set(edge.source, (existingNodeDegreeIncrements.get(edge.source) || 0) + 1);
510
  }
511
+
512
  if (nodesToAdd.has(edge.target)) {
513
  nodeDegrees.set(edge.target, (nodeDegrees.get(edge.target) || 0) + 1);
514
+ } else if (existingNodeIds.has(edge.target)) {
515
+ // Track degree increments for existing nodes
516
+ existingNodeDegreeIncrements.set(edge.target, (existingNodeDegreeIncrements.get(edge.target) || 0) + 1);
517
  }
518
  } else {
519
  // Track discarded edges for both new and existing nodes
 
537
  sigmaGraph: DirectedGraph,
538
  nodesWithDiscardedEdges: Set<string>,
539
  minDegree: number,
540
+ maxDegree: number
 
541
  ) => {
542
+ // Calculate derived values inside the function
543
+ const range = maxDegree - minDegree || 1; // Avoid division by zero
544
+ const scale = Constants.maxNodeSize - Constants.minNodeSize;
545
+
546
  for (const nodeId of nodesWithDiscardedEdges) {
547
  if (sigmaGraph.hasNode(nodeId)) {
548
  let newDegree = sigmaGraph.degree(nodeId);
549
  newDegree += 1; // Add +1 for discarded edges
550
+ // Limit newDegree to maxDegree + 1 to prevent nodes from being too large
551
+ const limitedDegree = Math.min(newDegree, maxDegree + 1);
552
 
553
  const newSize = Math.round(
554
+ Constants.minNodeSize + scale * Math.pow((limitedDegree - minDegree) / range, 0.5)
555
  );
556
 
557
  const currentSize = sigmaGraph.getNodeAttribute(nodeId, 'size');
 
565
 
566
  // If no new connectable nodes found, show toast and return
567
  if (nodesToAdd.size === 0) {
568
+ updateNodeSizes(sigmaGraph, nodesWithDiscardedEdges, minDegree, maxDegree);
569
  toast.info(t('graphPanel.propertiesView.node.noNewNodes'));
570
  return;
571
  }
572
 
573
+ // Update maxDegree considering all nodes (both new and existing)
574
+ // 1. Consider degrees of new nodes
575
  for (const [, degree] of nodeDegrees.entries()) {
576
  maxDegree = Math.max(maxDegree, degree);
577
  }
578
 
579
+ // 2. Consider degree increments for existing nodes
580
+ for (const [nodeId, increment] of existingNodeDegreeIncrements.entries()) {
581
+ const currentDegree = sigmaGraph.degree(nodeId);
582
+ const projectedDegree = currentDegree + increment;
583
+ maxDegree = Math.max(maxDegree, projectedDegree);
584
+ }
585
+
586
+ const range = maxDegree - minDegree || 1; // Avoid division by zero
587
+ const scale = Constants.maxNodeSize - Constants.minNodeSize;
588
+
589
  // SAdd nodes and edges to the graph
590
  // Calculate camera ratio and spread factor once before the loop
591
  const cameraRatio = useGraphStore.getState().sigmaInstance?.getCamera().ratio || 1;
 
605
  const nodeDegree = nodeDegrees.get(nodeId) || 0;
606
 
607
  // Calculate node size
608
+ // Limit nodeDegree to maxDegree + 1 to prevent new nodes from being too large
609
+ const limitedDegree = Math.min(nodeDegree, maxDegree + 1);
610
  const nodeSize = Math.round(
611
+ Constants.minNodeSize + scale * Math.pow((limitedDegree - minDegree) / range, 0.5)
612
  );
613
 
614
  // Calculate angle for polar coordinates
 
683
  useGraphStore.getState().resetSearchEngine();
684
 
685
  // Update sizes for all nodes with discarded edges
686
+ updateNodeSizes(sigmaGraph, nodesWithDiscardedEdges, minDegree, maxDegree);
687
+
688
+ if (sigmaGraph.hasNode(nodeId)) {
689
+ const finalDegree = sigmaGraph.degree(nodeId);
690
+ const limitedDegree = Math.min(finalDegree, maxDegree + 1);
691
+ const newSize = Math.round(
692
+ Constants.minNodeSize + scale * Math.pow((limitedDegree - minDegree) / range, 0.5)
693
+ );
694
+ sigmaGraph.setNodeAttribute(nodeId, 'size', newSize);
695
+ nodeToExpand.size = newSize;
696
+ nodeToExpand.degree = finalDegree;
697
+ }
698
 
699
  } catch (error) {
700
  console.error('Error expanding node:', error);
lightrag_webui/src/i18n.js DELETED
@@ -1,35 +0,0 @@
1
- import i18n from "i18next";
2
- import { initReactI18next } from "react-i18next";
3
- import { useSettingsStore } from "./stores/settings";
4
-
5
- import en from "./locales/en.json";
6
- import zh from "./locales/zh.json";
7
-
8
- const getStoredLanguage = () => {
9
- try {
10
- const settingsString = localStorage.getItem('settings-storage');
11
- if (settingsString) {
12
- const settings = JSON.parse(settingsString);
13
- return settings.state?.language || 'en';
14
- }
15
- } catch (e) {
16
- console.error('Failed to get stored language:', e);
17
- }
18
- return 'en';
19
- };
20
-
21
- i18n
22
- .use(initReactI18next)
23
- .init({
24
- resources: {
25
- en: { translation: en },
26
- zh: { translation: zh }
27
- },
28
- lng: getStoredLanguage(), // 使用存储的语言设置
29
- fallbackLng: "en",
30
- interpolation: {
31
- escapeValue: false
32
- }
33
- });
34
-
35
- export default i18n;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lightrag_webui/src/i18n.ts CHANGED
@@ -4,34 +4,44 @@ import { useSettingsStore } from '@/stores/settings'
4
 
5
  import en from './locales/en.json'
6
  import zh from './locales/zh.json'
 
 
7
 
8
- // Function to sync i18n with store state
9
- export const initializeI18n = async (): Promise<typeof i18n> => {
10
- // Get initial language from store
11
- const initialLanguage = useSettingsStore.getState().language
 
 
 
 
 
 
 
 
12
 
13
- // Initialize with store language
14
- await i18n.use(initReactI18next).init({
 
15
  resources: {
16
  en: { translation: en },
17
- zh: { translation: zh }
 
 
18
  },
19
- lng: initialLanguage,
20
  fallbackLng: 'en',
21
  interpolation: {
22
  escapeValue: false
23
  }
24
  })
25
 
26
- // Subscribe to language changes
27
- useSettingsStore.subscribe((state) => {
28
- const currentLanguage = state.language
29
- if (i18n.language !== currentLanguage) {
30
- i18n.changeLanguage(currentLanguage)
31
- }
32
- })
33
-
34
- return i18n
35
- }
36
 
37
  export default i18n
 
4
 
5
  import en from './locales/en.json'
6
  import zh from './locales/zh.json'
7
+ import fr from './locales/fr.json'
8
+ import ar from './locales/ar.json'
9
 
10
+ const getStoredLanguage = () => {
11
+ try {
12
+ const settingsString = localStorage.getItem('settings-storage')
13
+ if (settingsString) {
14
+ const settings = JSON.parse(settingsString)
15
+ return settings.state?.language || 'en'
16
+ }
17
+ } catch (e) {
18
+ console.error('Failed to get stored language:', e)
19
+ }
20
+ return 'en'
21
+ }
22
 
23
+ i18n
24
+ .use(initReactI18next)
25
+ .init({
26
  resources: {
27
  en: { translation: en },
28
+ zh: { translation: zh },
29
+ fr: { translation: fr },
30
+ ar: { translation: ar }
31
  },
32
+ lng: getStoredLanguage(), // 使用存储的语言设置
33
  fallbackLng: 'en',
34
  interpolation: {
35
  escapeValue: false
36
  }
37
  })
38
 
39
+ // Subscribe to language changes
40
+ useSettingsStore.subscribe((state) => {
41
+ const currentLanguage = state.language
42
+ if (i18n.language !== currentLanguage) {
43
+ i18n.changeLanguage(currentLanguage)
44
+ }
45
+ })
 
 
 
46
 
47
  export default i18n
lightrag_webui/src/locales/ar.json ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "settings": {
3
+ "language": "اللغة",
4
+ "theme": "السمة",
5
+ "light": "فاتح",
6
+ "dark": "داكن",
7
+ "system": "النظام"
8
+ },
9
+ "header": {
10
+ "documents": "المستندات",
11
+ "knowledgeGraph": "شبكة المعرفة",
12
+ "retrieval": "الاسترجاع",
13
+ "api": "واجهة برمجة التطبيقات",
14
+ "projectRepository": "مستودع المشروع",
15
+ "logout": "تسجيل الخروج",
16
+ "themeToggle": {
17
+ "switchToLight": "التحويل إلى السمة الفاتحة",
18
+ "switchToDark": "التحويل إلى السمة الداكنة"
19
+ }
20
+ },
21
+ "login": {
22
+ "description": "الرجاء إدخال حسابك وكلمة المرور لتسجيل الدخول إلى النظام",
23
+ "username": "اسم المستخدم",
24
+ "usernamePlaceholder": "الرجاء إدخال اسم المستخدم",
25
+ "password": "كلمة المرور",
26
+ "passwordPlaceholder": "الرجاء إدخال كلمة المرور",
27
+ "loginButton": "تسجيل الدخول",
28
+ "loggingIn": "جاري تسجيل الدخول...",
29
+ "successMessage": "تم تسجيل الدخول بنجاح",
30
+ "errorEmptyFields": "الرجاء إدخال اسم المستخدم وكلمة المرور",
31
+ "errorInvalidCredentials": "فشل تسجيل الدخول، يرجى التحقق من اسم المستخدم وكلمة المرور",
32
+ "authDisabled": "تم تعطيل المصادقة. استخدام وضع بدون تسجيل دخول.",
33
+ "guestMode": "وضع بدون تسجيل دخول"
34
+ },
35
+ "documentPanel": {
36
+ "clearDocuments": {
37
+ "button": "مسح",
38
+ "tooltip": "مسح المستندات",
39
+ "title": "مسح المستندات",
40
+ "confirm": "هل تريد حقًا مسح جميع المستندات؟",
41
+ "confirmButton": "نعم",
42
+ "success": "تم مسح المستندات بنجاح",
43
+ "failed": "فشل مسح المستندات:\n{{message}}",
44
+ "error": "فشل مسح المستندات:\n{{error}}"
45
+ },
46
+ "uploadDocuments": {
47
+ "button": "رفع",
48
+ "tooltip": "رفع المستندات",
49
+ "title": "رفع المستندات",
50
+ "description": "اسحب وأفلت مستنداتك هنا أو انقر للتصفح.",
51
+ "uploading": "جارٍ الرفع {{name}}: {{percent}}%",
52
+ "success": "نجاح الرفع:\nتم رفع {{name}} بنجاح",
53
+ "failed": "فشل الرفع:\n{{name}}\n{{message}}",
54
+ "error": "فشل الرفع:\n{{name}}\n{{error}}",
55
+ "generalError": "فشل الرفع\n{{error}}",
56
+ "fileTypes": "الأنواع المدعومة: TXT، MD، DOCX، PDF، PPTX، RTF، ODT، EPUB، HTML، HTM، TEX، JSON، XML، YAML، YML، CSV، LOG، CONF، INI، PROPERTIES، SQL، BAT، SH، C، CPP، PY، JAVA، JS، TS، SWIFT، GO، RB، PHP، CSS، SCSS، LESS"
57
+ },
58
+ "documentManager": {
59
+ "title": "إدارة المستندات",
60
+ "scanButton": "مسح ضوئي",
61
+ "scanTooltip": "مسح المستندات ضوئيًا",
62
+ "uploadedTitle": "المستندات المرفوعة",
63
+ "uploadedDescription": "قائمة المستندات المرفوعة وحالاتها.",
64
+ "emptyTitle": "لا توجد مستندات",
65
+ "emptyDescription": "لا توجد مستندات مرفوعة بعد.",
66
+ "columns": {
67
+ "id": "المعرف",
68
+ "summary": "الملخص",
69
+ "status": "الحالة",
70
+ "length": "الطول",
71
+ "chunks": "الأجزاء",
72
+ "created": "تم الإنشاء",
73
+ "updated": "تم التحديث",
74
+ "metadata": "البيانات الوصفية"
75
+ },
76
+ "status": {
77
+ "completed": "مكتمل",
78
+ "processing": "قيد المعالجة",
79
+ "pending": "معلق",
80
+ "failed": "فشل"
81
+ },
82
+ "errors": {
83
+ "loadFailed": "فشل تحميل المستندات\n{{error}}",
84
+ "scanFailed": "فشل المسح الضوئي للمستندات\n{{error}}",
85
+ "scanProgressFailed": "فشل الحصول على تقدم المسح الضوئي\n{{error}}"
86
+ }
87
+ }
88
+ },
89
+ "graphPanel": {
90
+ "sideBar": {
91
+ "settings": {
92
+ "settings": "الإعدادات",
93
+ "healthCheck": "فحص الحالة",
94
+ "showPropertyPanel": "إظهار لوحة الخصائص",
95
+ "showSearchBar": "إظهار شريط البحث",
96
+ "showNodeLabel": "إظهار تسمية العقدة",
97
+ "nodeDraggable": "العقدة قابلة للسحب",
98
+ "showEdgeLabel": "إظهار تسمية الحافة",
99
+ "hideUnselectedEdges": "إخفاء الحواف غير المحددة",
100
+ "edgeEvents": "أحداث الحافة",
101
+ "maxQueryDepth": "أقصى عمق للاستعلام",
102
+ "minDegree": "الدرجة الدنيا",
103
+ "maxLayoutIterations": "أقصى تكرارات التخطيط",
104
+ "depth": "العمق",
105
+ "degree": "الدرجة",
106
+ "apiKey": "مفتاح واجهة برمجة التطبيقات",
107
+ "enterYourAPIkey": "أدخل مفتاح واجهة برمجة التطبيقات الخاص بك",
108
+ "save": "حفظ",
109
+ "refreshLayout": "تحديث التخطيط"
110
+ },
111
+ "zoomControl": {
112
+ "zoomIn": "تكبير",
113
+ "zoomOut": "تصغير",
114
+ "resetZoom": "إعادة تعيين التكبير",
115
+ "rotateCamera": "تدوير في اتجاه عقارب الساعة",
116
+ "rotateCameraCounterClockwise": "تدوير عكس اتجاه عقارب الساعة"
117
+ },
118
+ "layoutsControl": {
119
+ "startAnimation": "بدء حركة التخطيط",
120
+ "stopAnimation": "إيقاف حركة التخطيط",
121
+ "layoutGraph": "تخطيط الرسم البياني",
122
+ "layouts": {
123
+ "Circular": "دائري",
124
+ "Circlepack": "حزمة دائرية",
125
+ "Random": "عشوائي",
126
+ "Noverlaps": "بدون تداخل",
127
+ "Force Directed": "موجه بالقوة",
128
+ "Force Atlas": "أطلس القوة"
129
+ }
130
+ },
131
+ "fullScreenControl": {
132
+ "fullScreen": "شاشة كاملة",
133
+ "windowed": "نوافذ"
134
+ }
135
+ },
136
+ "statusIndicator": {
137
+ "connected": "متصل",
138
+ "disconnected": "غير متصل"
139
+ },
140
+ "statusCard": {
141
+ "unavailable": "معلومات الحالة غير متوفرة",
142
+ "storageInfo": "معلومات التخزين",
143
+ "workingDirectory": "دليل العمل",
144
+ "inputDirectory": "دليل الإدخال",
145
+ "llmConfig": "تكوين نموذج اللغة الكبير",
146
+ "llmBinding": "ربط نموذج اللغة الكبير",
147
+ "llmBindingHost": "مضيف ربط نموذج اللغة الكبير",
148
+ "llmModel": "نموذج اللغة الكبير",
149
+ "maxTokens": "أقصى عدد من الرموز",
150
+ "embeddingConfig": "تكوين التضمين",
151
+ "embeddingBinding": "ربط التضمين",
152
+ "embeddingBindingHost": "مضيف ربط التضمين",
153
+ "embeddingModel": "نموذج التضمين",
154
+ "storageConfig": "تكوين التخزين",
155
+ "kvStorage": "تخزين المفتاح-القيمة",
156
+ "docStatusStorage": "تخزين حالة المستند",
157
+ "graphStorage": "تخزين الرسم البياني",
158
+ "vectorStorage": "تخزين المتجهات"
159
+ },
160
+ "propertiesView": {
161
+ "node": {
162
+ "title": "عقدة",
163
+ "id": "المعرف",
164
+ "labels": "التسميات",
165
+ "degree": "الدرجة",
166
+ "properties": "الخصائص",
167
+ "relationships": "العلاقات (داخل الرسم الفرعي)",
168
+ "expandNode": "توسيع العقدة",
169
+ "pruneNode": "تقليم العقدة",
170
+ "deleteAllNodesError": "رفض حذف جميع العقد في الرسم البياني",
171
+ "nodesRemoved": "تم إزالة {{count}} عقدة، بما في ذلك العقد اليتيمة",
172
+ "noNewNodes": "لم يتم العثور على عقد قابلة للتوسيع",
173
+ "propertyNames": {
174
+ "description": "الوصف",
175
+ "entity_id": "الاسم",
176
+ "entity_type": "النوع",
177
+ "source_id": "معرف المصدر",
178
+ "Neighbour": "الجار"
179
+ }
180
+ },
181
+ "edge": {
182
+ "title": "علاقة",
183
+ "id": "المعرف",
184
+ "type": "النوع",
185
+ "source": "المصدر",
186
+ "target": "الهدف",
187
+ "properties": "الخصائص"
188
+ }
189
+ },
190
+ "search": {
191
+ "placeholder": "ابحث في العقد...",
192
+ "message": "و {{count}} آخرون"
193
+ },
194
+ "graphLabels": {
195
+ "selectTooltip": "حدد تسمية الاستعلام",
196
+ "noLabels": "لم يتم العثور على تسميات",
197
+ "label": "التسمية",
198
+ "placeholder": "ابحث في التسميات...",
199
+ "andOthers": "و {{count}} آخرون",
200
+ "refreshTooltip": "إعادة تحميل بيانات الرسم البياني"
201
+ },
202
+ "emptyGraph": "الرسم البياني فارغ"
203
+ },
204
+ "retrievePanel": {
205
+ "chatMessage": {
206
+ "copyTooltip": "نسخ إلى الحافظة",
207
+ "copyError": "فشل نسخ النص إلى الحافظة"
208
+ },
209
+ "retrieval": {
210
+ "startPrompt": "ابدأ الاسترجاع بكتابة استفسارك أدناه",
211
+ "clear": "مسح",
212
+ "send": "إرسال",
213
+ "placeholder": "اكتب استفسارك...",
214
+ "error": "خطأ: فشل الحصول على الرد"
215
+ },
216
+ "querySettings": {
217
+ "parametersTitle": "المعلمات",
218
+ "parametersDescription": "تكوين معلمات الاستعلام الخاص بك",
219
+ "queryMode": "وضع الاستعلام",
220
+ "queryModeTooltip": "حدد استراتيجية الاسترجاع:\n• ساذج: بحث أساسي بدون تقنيات متقدمة\n• محلي: استرجاع معلومات يعتمد على السياق\n• عالمي: يستخدم قاعدة المعرفة العالمية\n• مختلط: يجمع بين الاسترجاع المحلي والعالمي\n• مزيج: يدمج شبكة المعرفة مع الاسترجاع المتجهي",
221
+ "queryModeOptions": {
222
+ "naive": "ساذج",
223
+ "local": "محلي",
224
+ "global": "عالمي",
225
+ "hybrid": "مختلط",
226
+ "mix": "مزيج"
227
+ },
228
+ "responseFormat": "تنسيق الرد",
229
+ "responseFormatTooltip": "يحدد تنسيق الرد. أمثلة:\n• فقرات متعددة\n• فقرة واحدة\n• نقاط نقطية",
230
+ "responseFormatOptions": {
231
+ "multipleParagraphs": "فقرات متعددة",
232
+ "singleParagraph": "فقرة واحدة",
233
+ "bulletPoints": "نقاط نقطية"
234
+ },
235
+ "topK": "أعلى K نتائج",
236
+ "topKTooltip": "عدد العناصر العلوية للاسترجاع. يمثل الكيانات في وضع 'محلي' والعلاقات في وضع 'عالمي'",
237
+ "topKPlaceholder": "عدد النتائج",
238
+ "maxTokensTextUnit": "أقصى عدد من الرموز لوحدة النص",
239
+ "maxTokensTextUnitTooltip": "الحد الأقصى لعدد الرموز المسموح به لكل جزء نصي مسترجع",
240
+ "maxTokensGlobalContext": "أقصى عدد من الرموز للسياق العالمي",
241
+ "maxTokensGlobalContextTooltip": "الحد الأقصى لعدد الرموز المخصص لأوصاف العلاقات في الاسترجاع العالمي",
242
+ "maxTokensLocalContext": "أقصى عدد من الرموز للسياق المحلي",
243
+ "maxTokensLocalContextTooltip": "الحد الأقصى لعدد الرموز المخصص لأوصاف الكيانات في الاسترجاع المحلي",
244
+ "historyTurns": "دورات التاريخ",
245
+ "historyTurnsTooltip": "عدد الدورات الكاملة للمحادثة (أزواج المستخدم-المساعد) التي يجب مراعاتها في سياق الرد",
246
+ "historyTurnsPlaceholder": "عدد دورات التاريخ",
247
+ "hlKeywords": "الكلمات المفتاحية عالية المستوى",
248
+ "hlKeywordsTooltip": "قائمة الكلمات المفتاحية عالية المستوى لإعطائها الأولوية في الاسترجاع. افصل بينها بفواصل",
249
+ "hlkeywordsPlaceHolder": "أدخل الكلمات المفتاحية",
250
+ "llKeywords": "الكلمات المفتاحية منخفضة المستوى",
251
+ "llKeywordsTooltip": "قائمة الكلمات المفتاحية منخفضة المستوى لتحسين تركيز الاسترجاع. افصل بينها بفواصل",
252
+ "onlyNeedContext": "تحتاج فقط إلى السياق",
253
+ "onlyNeedContextTooltip": "إذا كان صحيحًا، يتم إرجاع السياق المسترجع فقط دون إنشاء رد",
254
+ "onlyNeedPrompt": "تحتاج فقط إلى المطالبة",
255
+ "onlyNeedPromptTooltip": "إذا كان صحيحًا، يتم إرجاع المطالبة المولدة فقط دون إنتاج رد",
256
+ "streamResponse": "تدفق الرد",
257
+ "streamResponseTooltip": "إذا كان صحيحًا، يتيح إخراج التدفق للردود في الوقت الفعلي"
258
+ }
259
+ },
260
+ "apiSite": {
261
+ "loading": "جارٍ تحميل وثائق واجهة برمجة التطبيقات..."
262
+ }
263
+ }
lightrag_webui/src/locales/en.json CHANGED
@@ -167,7 +167,7 @@
167
  "labels": "Labels",
168
  "degree": "Degree",
169
  "properties": "Properties",
170
- "relationships": "Relationships",
171
  "expandNode": "Expand Node",
172
  "pruneNode": "Prune Node",
173
  "deleteAllNodesError": "Refuse to delete all nodes in the graph",
@@ -201,7 +201,8 @@
201
  "placeholder": "Search labels...",
202
  "andOthers": "And {count} others",
203
  "refreshTooltip": "Reload graph data"
204
- }
 
205
  },
206
  "retrievePanel": {
207
  "chatMessage": {
 
167
  "labels": "Labels",
168
  "degree": "Degree",
169
  "properties": "Properties",
170
+ "relationships": "Relations(within subgraph)",
171
  "expandNode": "Expand Node",
172
  "pruneNode": "Prune Node",
173
  "deleteAllNodesError": "Refuse to delete all nodes in the graph",
 
201
  "placeholder": "Search labels...",
202
  "andOthers": "And {count} others",
203
  "refreshTooltip": "Reload graph data"
204
+ },
205
+ "emptyGraph": "Graph Is Empty"
206
  },
207
  "retrievePanel": {
208
  "chatMessage": {
lightrag_webui/src/locales/fr.json ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "settings": {
3
+ "language": "Langue",
4
+ "theme": "Thème",
5
+ "light": "Clair",
6
+ "dark": "Sombre",
7
+ "system": "Système"
8
+ },
9
+ "header": {
10
+ "documents": "Documents",
11
+ "knowledgeGraph": "Graphe de connaissances",
12
+ "retrieval": "Récupération",
13
+ "api": "API",
14
+ "projectRepository": "Référentiel du projet",
15
+ "logout": "Déconnexion",
16
+ "themeToggle": {
17
+ "switchToLight": "Passer au thème clair",
18
+ "switchToDark": "Passer au thème sombre"
19
+ }
20
+ },
21
+ "login": {
22
+ "description": "Veuillez entrer votre compte et mot de passe pour vous connecter au système",
23
+ "username": "Nom d'utilisateur",
24
+ "usernamePlaceholder": "Veuillez saisir un nom d'utilisateur",
25
+ "password": "Mot de passe",
26
+ "passwordPlaceholder": "Veuillez saisir un mot de passe",
27
+ "loginButton": "Connexion",
28
+ "loggingIn": "Connexion en cours...",
29
+ "successMessage": "Connexion réussie",
30
+ "errorEmptyFields": "Veuillez saisir votre nom d'utilisateur et mot de passe",
31
+ "errorInvalidCredentials": "Échec de la connexion, veuillez vérifier le nom d'utilisateur et le mot de passe",
32
+ "authDisabled": "L'authentification est désactivée. Utilisation du mode sans connexion.",
33
+ "guestMode": "Mode sans connexion"
34
+ },
35
+ "documentPanel": {
36
+ "clearDocuments": {
37
+ "button": "Effacer",
38
+ "tooltip": "Effacer les documents",
39
+ "title": "Effacer les documents",
40
+ "confirm": "Voulez-vous vraiment effacer tous les documents ?",
41
+ "confirmButton": "OUI",
42
+ "success": "Documents effacés avec succès",
43
+ "failed": "Échec de l'effacement des documents :\n{{message}}",
44
+ "error": "Échec de l'effacement des documents :\n{{error}}"
45
+ },
46
+ "uploadDocuments": {
47
+ "button": "Télécharger",
48
+ "tooltip": "Télécharger des documents",
49
+ "title": "Télécharger des documents",
50
+ "description": "Glissez-déposez vos documents ici ou cliquez pour parcourir.",
51
+ "uploading": "Téléchargement de {{name}} : {{percent}}%",
52
+ "success": "Succès du téléchargement :\n{{name}} téléchargé avec succès",
53
+ "failed": "Échec du téléchargement :\n{{name}}\n{{message}}",
54
+ "error": "Échec du téléchargement :\n{{name}}\n{{error}}",
55
+ "generalError": "Échec du téléchargement\n{{error}}",
56
+ "fileTypes": "Types pris en charge : TXT, MD, DOCX, PDF, PPTX, RTF, ODT, EPUB, HTML, HTM, TEX, JSON, XML, YAML, YML, CSV, LOG, CONF, INI, PROPERTIES, SQL, BAT, SH, C, CPP, PY, JAVA, JS, TS, SWIFT, GO, RB, PHP, CSS, SCSS, LESS"
57
+ },
58
+ "documentManager": {
59
+ "title": "Gestion des documents",
60
+ "scanButton": "Scanner",
61
+ "scanTooltip": "Scanner les documents",
62
+ "uploadedTitle": "Documents téléchargés",
63
+ "uploadedDescription": "Liste des documents téléchargés et leurs statuts.",
64
+ "emptyTitle": "Aucun document",
65
+ "emptyDescription": "Il n'y a pas encore de documents téléchargés.",
66
+ "columns": {
67
+ "id": "ID",
68
+ "summary": "Résumé",
69
+ "status": "Statut",
70
+ "length": "Longueur",
71
+ "chunks": "Fragments",
72
+ "created": "Créé",
73
+ "updated": "Mis à jour",
74
+ "metadata": "Métadonnées"
75
+ },
76
+ "status": {
77
+ "completed": "Terminé",
78
+ "processing": "En traitement",
79
+ "pending": "En attente",
80
+ "failed": "Échoué"
81
+ },
82
+ "errors": {
83
+ "loadFailed": "Échec du chargement des documents\n{{error}}",
84
+ "scanFailed": "Échec de la numérisation des documents\n{{error}}",
85
+ "scanProgressFailed": "Échec de l'obtention de la progression de la numérisation\n{{error}}"
86
+ }
87
+ }
88
+ },
89
+ "graphPanel": {
90
+ "sideBar": {
91
+ "settings": {
92
+ "settings": "Paramètres",
93
+ "healthCheck": "Vérification de l'état",
94
+ "showPropertyPanel": "Afficher le panneau des propriétés",
95
+ "showSearchBar": "Afficher la barre de recherche",
96
+ "showNodeLabel": "Afficher l'étiquette du nœud",
97
+ "nodeDraggable": "Nœud déplaçable",
98
+ "showEdgeLabel": "Afficher l'étiquette de l'arête",
99
+ "hideUnselectedEdges": "Masquer les arêtes non sélectionnées",
100
+ "edgeEvents": "Événements des arêtes",
101
+ "maxQueryDepth": "Profondeur maximale de la requête",
102
+ "minDegree": "Degré minimum",
103
+ "maxLayoutIterations": "Itérations maximales de mise en page",
104
+ "depth": "Profondeur",
105
+ "degree": "Degré",
106
+ "apiKey": "Clé API",
107
+ "enterYourAPIkey": "Entrez votre clé API",
108
+ "save": "Sauvegarder",
109
+ "refreshLayout": "Actualiser la mise en page"
110
+ },
111
+ "zoomControl": {
112
+ "zoomIn": "Zoom avant",
113
+ "zoomOut": "Zoom arrière",
114
+ "resetZoom": "Réinitialiser le zoom",
115
+ "rotateCamera": "Rotation horaire",
116
+ "rotateCameraCounterClockwise": "Rotation antihoraire"
117
+ },
118
+ "layoutsControl": {
119
+ "startAnimation": "Démarrer l'animation de mise en page",
120
+ "stopAnimation": "Arrêter l'animation de mise en page",
121
+ "layoutGraph": "Mettre en page le graphe",
122
+ "layouts": {
123
+ "Circular": "Circulaire",
124
+ "Circlepack": "Paquet circulaire",
125
+ "Random": "Aléatoire",
126
+ "Noverlaps": "Sans chevauchement",
127
+ "Force Directed": "Dirigé par la force",
128
+ "Force Atlas": "Atlas de force"
129
+ }
130
+ },
131
+ "fullScreenControl": {
132
+ "fullScreen": "Plein écran",
133
+ "windowed": "Fenêtré"
134
+ }
135
+ },
136
+ "statusIndicator": {
137
+ "connected": "Connecté",
138
+ "disconnected": "Déconnecté"
139
+ },
140
+ "statusCard": {
141
+ "unavailable": "Informations sur l'état indisponibles",
142
+ "storageInfo": "Informations de stockage",
143
+ "workingDirectory": "Répertoire de travail",
144
+ "inputDirectory": "Répertoire d'entrée",
145
+ "llmConfig": "Configuration du modèle de langage",
146
+ "llmBinding": "Liaison du modèle de langage",
147
+ "llmBindingHost": "Hôte de liaison du modèle de langage",
148
+ "llmModel": "Modèle de langage",
149
+ "maxTokens": "Nombre maximum de jetons",
150
+ "embeddingConfig": "Configuration d'incorporation",
151
+ "embeddingBinding": "Liaison d'incorporation",
152
+ "embeddingBindingHost": "Hôte de liaison d'incorporation",
153
+ "embeddingModel": "Modèle d'incorporation",
154
+ "storageConfig": "Configuration de stockage",
155
+ "kvStorage": "Stockage clé-valeur",
156
+ "docStatusStorage": "Stockage de l'état des documents",
157
+ "graphStorage": "Stockage du graphe",
158
+ "vectorStorage": "Stockage vectoriel"
159
+ },
160
+ "propertiesView": {
161
+ "node": {
162
+ "title": "Nœud",
163
+ "id": "ID",
164
+ "labels": "Étiquettes",
165
+ "degree": "Degré",
166
+ "properties": "Propriétés",
167
+ "relationships": "Relations(dans le sous-graphe)",
168
+ "expandNode": "Développer le nœud",
169
+ "pruneNode": "Élaguer le nœud",
170
+ "deleteAllNodesError": "Refus de supprimer tous les nœuds du graphe",
171
+ "nodesRemoved": "{{count}} nœuds supprimés, y compris les nœuds orphelins",
172
+ "noNewNodes": "Aucun nœud développable trouvé",
173
+ "propertyNames": {
174
+ "description": "Description",
175
+ "entity_id": "Nom",
176
+ "entity_type": "Type",
177
+ "source_id": "ID source",
178
+ "Neighbour": "Voisin"
179
+ }
180
+ },
181
+ "edge": {
182
+ "title": "Relation",
183
+ "id": "ID",
184
+ "type": "Type",
185
+ "source": "Source",
186
+ "target": "Cible",
187
+ "properties": "Propriétés"
188
+ }
189
+ },
190
+ "search": {
191
+ "placeholder": "Rechercher des nœuds...",
192
+ "message": "Et {{count}} autres"
193
+ },
194
+ "graphLabels": {
195
+ "selectTooltip": "Sélectionner l'étiquette de la requête",
196
+ "noLabels": "Aucune étiquette trouvée",
197
+ "label": "Étiquette",
198
+ "placeholder": "Rechercher des étiquettes...",
199
+ "andOthers": "Et {{count}} autres",
200
+ "refreshTooltip": "Recharger les données du graphe"
201
+ },
202
+ "emptyGraph": "Le graphe est vide"
203
+ },
204
+ "retrievePanel": {
205
+ "chatMessage": {
206
+ "copyTooltip": "Copier dans le presse-papiers",
207
+ "copyError": "Échec de la copie du texte dans le presse-papiers"
208
+ },
209
+ "retrieval": {
210
+ "startPrompt": "Démarrez une récupération en tapant votre requête ci-dessous",
211
+ "clear": "Effacer",
212
+ "send": "Envoyer",
213
+ "placeholder": "Tapez votre requête...",
214
+ "error": "Erreur : Échec de l'obtention de la réponse"
215
+ },
216
+ "querySettings": {
217
+ "parametersTitle": "Paramètres",
218
+ "parametersDescription": "Configurez vos paramètres de requête",
219
+ "queryMode": "Mode de requête",
220
+ "queryModeTooltip": "Sélectionnez la stratégie de récupération :\n• Naïf : Recherche de base sans techniques avancées\n• Local : Récupération d'informations dépendante du contexte\n• Global : Utilise une base de connaissances globale\n• Hybride : Combine récupération locale et globale\n• Mixte : Intègre le graphe de connaissances avec la récupération vectorielle",
221
+ "queryModeOptions": {
222
+ "naive": "Naïf",
223
+ "local": "Local",
224
+ "global": "Global",
225
+ "hybrid": "Hybride",
226
+ "mix": "Mixte"
227
+ },
228
+ "responseFormat": "Format de réponse",
229
+ "responseFormatTooltip": "Définit le format de la réponse. Exemples :\n• Plusieurs paragraphes\n• Paragraphe unique\n• Points à puces",
230
+ "responseFormatOptions": {
231
+ "multipleParagraphs": "Plusieurs paragraphes",
232
+ "singleParagraph": "Paragraphe unique",
233
+ "bulletPoints": "Points à puces"
234
+ },
235
+ "topK": "Top K résultats",
236
+ "topKTooltip": "Nombre d'éléments supérieurs à récupérer. Représente les entités en mode 'local' et les relations en mode 'global'",
237
+ "topKPlaceholder": "Nombre de résultats",
238
+ "maxTokensTextUnit": "Nombre maximum de jetons pour l'unité de texte",
239
+ "maxTokensTextUnitTooltip": "Nombre maximum de jetons autorisés pour chaque fragment de texte récupéré",
240
+ "maxTokensGlobalContext": "Nombre maximum de jetons pour le contexte global",
241
+ "maxTokensGlobalContextTooltip": "Nombre maximum de jetons alloués pour les descriptions des relations dans la récupération globale",
242
+ "maxTokensLocalContext": "Nombre maximum de jetons pour le contexte local",
243
+ "maxTokensLocalContextTooltip": "Nombre maximum de jetons alloués pour les descriptions des entités dans la récupération locale",
244
+ "historyTurns": "Tours d'historique",
245
+ "historyTurnsTooltip": "Nombre de tours complets de conversation (paires utilisateur-assistant) à prendre en compte dans le contexte de la réponse",
246
+ "historyTurnsPlaceholder": "Nombre de tours d'historique",
247
+ "hlKeywords": "Mots-clés de haut niveau",
248
+ "hlKeywordsTooltip": "Liste de mots-clés de haut niveau à prioriser dans la récupération. Séparez par des virgules",
249
+ "hlkeywordsPlaceHolder": "Entrez les mots-clés",
250
+ "llKeywords": "Mots-clés de bas niveau",
251
+ "llKeywordsTooltip": "Liste de mots-clés de bas niveau pour affiner la focalisation de la récupération. Séparez par des virgules",
252
+ "onlyNeedContext": "Besoin uniquement du contexte",
253
+ "onlyNeedContextTooltip": "Si vrai, ne renvoie que le contexte récupéré sans générer de réponse",
254
+ "onlyNeedPrompt": "Besoin uniquement de l'invite",
255
+ "onlyNeedPromptTooltip": "Si vrai, ne renvoie que l'invite générée sans produire de réponse",
256
+ "streamResponse": "Réponse en flux",
257
+ "streamResponseTooltip": "Si vrai, active la sortie en flux pour des réponses en temps réel"
258
+ }
259
+ },
260
+ "apiSite": {
261
+ "loading": "Chargement de la documentation de l'API..."
262
+ }
263
+ }
lightrag_webui/src/locales/zh.json CHANGED
@@ -164,7 +164,7 @@
164
  "labels": "标签",
165
  "degree": "度数",
166
  "properties": "属性",
167
- "relationships": "关系",
168
  "expandNode": "扩展节点",
169
  "pruneNode": "修剪节点",
170
  "deleteAllNodesError": "拒绝删除图中的所有节点",
@@ -198,7 +198,8 @@
198
  "placeholder": "搜索标签...",
199
  "andOthers": "还有 {count} 个",
200
  "refreshTooltip": "重新加载图形数据"
201
- }
 
202
  },
203
  "retrievePanel": {
204
  "chatMessage": {
 
164
  "labels": "标签",
165
  "degree": "度数",
166
  "properties": "属性",
167
+ "relationships": "关系(子图内)",
168
  "expandNode": "扩展节点",
169
  "pruneNode": "修剪节点",
170
  "deleteAllNodesError": "拒绝删除图中的所有节点",
 
198
  "placeholder": "搜索标签...",
199
  "andOthers": "还有 {count} 个",
200
  "refreshTooltip": "重新加载图形数据"
201
+ },
202
+ "emptyGraph": "图谱数据为空"
203
  },
204
  "retrievePanel": {
205
  "chatMessage": {
lightrag_webui/src/main.tsx CHANGED
@@ -2,7 +2,7 @@ import { StrictMode } from 'react'
2
  import { createRoot } from 'react-dom/client'
3
  import './index.css'
4
  import AppRouter from './AppRouter'
5
- import './i18n';
6
 
7
 
8
 
 
2
  import { createRoot } from 'react-dom/client'
3
  import './index.css'
4
  import AppRouter from './AppRouter'
5
+ import './i18n.ts';
6
 
7
 
8
 
lightrag_webui/src/services/navigation.ts CHANGED
@@ -67,14 +67,10 @@ class NavigationService {
67
  return;
68
  }
69
 
70
- // First navigate to login page
71
- this.navigate('/login');
72
 
73
- // Then reset state after navigation
74
- setTimeout(() => {
75
- this.resetAllApplicationState();
76
- useAuthStore.getState().logout();
77
- }, 0);
78
  }
79
 
80
  navigateToHome() {
 
67
  return;
68
  }
69
 
70
+ this.resetAllApplicationState();
71
+ useAuthStore.getState().logout();
72
 
73
+ this.navigate('/login');
 
 
 
 
74
  }
75
 
76
  navigateToHome() {
lightrag_webui/src/stores/graph.ts CHANGED
@@ -74,6 +74,8 @@ interface GraphState {
74
 
75
  moveToSelectedNode: boolean
76
  isFetching: boolean
 
 
77
 
78
  // Global flags to track data fetching attempts
79
  graphDataFetchAttempted: boolean
@@ -88,6 +90,8 @@ interface GraphState {
88
  reset: () => void
89
 
90
  setMoveToSelectedNode: (moveToSelectedNode: boolean) => void
 
 
91
 
92
  setRawGraph: (rawGraph: RawGraph | null) => void
93
  setSigmaGraph: (sigmaGraph: DirectedGraph | null) => void
@@ -120,6 +124,8 @@ const useGraphStoreBase = create<GraphState>()((set) => ({
120
 
121
  moveToSelectedNode: false,
122
  isFetching: false,
 
 
123
 
124
  // Initialize global flags
125
  graphDataFetchAttempted: false,
@@ -132,6 +138,9 @@ const useGraphStoreBase = create<GraphState>()((set) => ({
132
 
133
  searchEngine: null,
134
 
 
 
 
135
 
136
  setIsFetching: (isFetching: boolean) => set({ isFetching }),
137
  setSelectedNode: (nodeId: string | null, moveToSelectedNode?: boolean) =>
@@ -155,7 +164,9 @@ const useGraphStoreBase = create<GraphState>()((set) => ({
155
  rawGraph: null,
156
  sigmaGraph: null, // to avoid other components from acccessing graph objects
157
  searchEngine: null,
158
- moveToSelectedNode: false
 
 
159
  });
160
  },
161
 
 
74
 
75
  moveToSelectedNode: boolean
76
  isFetching: boolean
77
+ graphIsEmpty: boolean
78
+ lastSuccessfulQueryLabel: string
79
 
80
  // Global flags to track data fetching attempts
81
  graphDataFetchAttempted: boolean
 
90
  reset: () => void
91
 
92
  setMoveToSelectedNode: (moveToSelectedNode: boolean) => void
93
+ setGraphIsEmpty: (isEmpty: boolean) => void
94
+ setLastSuccessfulQueryLabel: (label: string) => void
95
 
96
  setRawGraph: (rawGraph: RawGraph | null) => void
97
  setSigmaGraph: (sigmaGraph: DirectedGraph | null) => void
 
124
 
125
  moveToSelectedNode: false,
126
  isFetching: false,
127
+ graphIsEmpty: false,
128
+ lastSuccessfulQueryLabel: '', // Initialize as empty to ensure fetchAllDatabaseLabels runs on first query
129
 
130
  // Initialize global flags
131
  graphDataFetchAttempted: false,
 
138
 
139
  searchEngine: null,
140
 
141
+ setGraphIsEmpty: (isEmpty: boolean) => set({ graphIsEmpty: isEmpty }),
142
+ setLastSuccessfulQueryLabel: (label: string) => set({ lastSuccessfulQueryLabel: label }),
143
+
144
 
145
  setIsFetching: (isFetching: boolean) => set({ isFetching }),
146
  setSelectedNode: (nodeId: string | null, moveToSelectedNode?: boolean) =>
 
164
  rawGraph: null,
165
  sigmaGraph: null, // to avoid other components from acccessing graph objects
166
  searchEngine: null,
167
+ moveToSelectedNode: false,
168
+ graphIsEmpty: false
169
+ // Do not reset lastSuccessfulQueryLabel here as it's used to track query history
170
  });
171
  },
172
 
lightrag_webui/src/stores/settings.ts CHANGED
@@ -5,7 +5,7 @@ import { defaultQueryLabel } from '@/lib/constants'
5
  import { Message, QueryRequest } from '@/api/lightrag'
6
 
7
  type Theme = 'dark' | 'light' | 'system'
8
- type Language = 'en' | 'zh'
9
  type Tab = 'documents' | 'knowledge-graph' | 'retrieval' | 'api'
10
 
11
  interface SettingsState {
 
5
  import { Message, QueryRequest } from '@/api/lightrag'
6
 
7
  type Theme = 'dark' | 'light' | 'system'
8
+ type Language = 'en' | 'zh' | 'fr' | 'ar'
9
  type Tab = 'documents' | 'knowledge-graph' | 'retrieval' | 'api'
10
 
11
  interface SettingsState {
lightrag_webui/src/stores/state.ts CHANGED
@@ -19,8 +19,11 @@ interface BackendState {
19
  interface AuthState {
20
  isAuthenticated: boolean;
21
  isGuestMode: boolean; // Add guest mode flag
22
- login: (token: string, isGuest?: boolean) => void;
 
 
23
  logout: () => void;
 
24
  }
25
 
26
  const useBackendStateStoreBase = create<BackendState>()((set) => ({
@@ -33,6 +36,14 @@ const useBackendStateStoreBase = create<BackendState>()((set) => ({
33
  check: async () => {
34
  const health = await checkHealth()
35
  if (health.status === 'healthy') {
 
 
 
 
 
 
 
 
36
  set({
37
  health: true,
38
  message: null,
@@ -84,15 +95,25 @@ const isGuestToken = (token: string): boolean => {
84
  };
85
 
86
  // Initialize auth state from localStorage
87
- const initAuthState = (): { isAuthenticated: boolean; isGuestMode: boolean } => {
88
  const token = localStorage.getItem('LIGHTRAG-API-TOKEN');
 
 
 
89
  if (!token) {
90
- return { isAuthenticated: false, isGuestMode: false };
 
 
 
 
 
91
  }
92
 
93
  return {
94
  isAuthenticated: true,
95
- isGuestMode: isGuestToken(token)
 
 
96
  };
97
  };
98
 
@@ -103,20 +124,54 @@ export const useAuthStore = create<AuthState>(set => {
103
  return {
104
  isAuthenticated: initialState.isAuthenticated,
105
  isGuestMode: initialState.isGuestMode,
 
 
106
 
107
- login: (token, isGuest = false) => {
108
  localStorage.setItem('LIGHTRAG-API-TOKEN', token);
 
 
 
 
 
 
 
 
109
  set({
110
  isAuthenticated: true,
111
- isGuestMode: isGuest
 
 
112
  });
113
  },
114
 
115
  logout: () => {
116
  localStorage.removeItem('LIGHTRAG-API-TOKEN');
 
 
 
 
117
  set({
118
  isAuthenticated: false,
119
- isGuestMode: false
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
  });
121
  }
122
  };
 
19
  interface AuthState {
20
  isAuthenticated: boolean;
21
  isGuestMode: boolean; // Add guest mode flag
22
+ coreVersion: string | null;
23
+ apiVersion: string | null;
24
+ login: (token: string, isGuest?: boolean, coreVersion?: string | null, apiVersion?: string | null) => void;
25
  logout: () => void;
26
+ setVersion: (coreVersion: string | null, apiVersion: string | null) => void;
27
  }
28
 
29
  const useBackendStateStoreBase = create<BackendState>()((set) => ({
 
36
  check: async () => {
37
  const health = await checkHealth()
38
  if (health.status === 'healthy') {
39
+ // Update version information if health check returns it
40
+ if (health.core_version || health.api_version) {
41
+ useAuthStore.getState().setVersion(
42
+ health.core_version || null,
43
+ health.api_version || null
44
+ );
45
+ }
46
+
47
  set({
48
  health: true,
49
  message: null,
 
95
  };
96
 
97
  // Initialize auth state from localStorage
98
+ const initAuthState = (): { isAuthenticated: boolean; isGuestMode: boolean; coreVersion: string | null; apiVersion: string | null } => {
99
  const token = localStorage.getItem('LIGHTRAG-API-TOKEN');
100
+ const coreVersion = localStorage.getItem('LIGHTRAG-CORE-VERSION');
101
+ const apiVersion = localStorage.getItem('LIGHTRAG-API-VERSION');
102
+
103
  if (!token) {
104
+ return {
105
+ isAuthenticated: false,
106
+ isGuestMode: false,
107
+ coreVersion: coreVersion,
108
+ apiVersion: apiVersion
109
+ };
110
  }
111
 
112
  return {
113
  isAuthenticated: true,
114
+ isGuestMode: isGuestToken(token),
115
+ coreVersion: coreVersion,
116
+ apiVersion: apiVersion
117
  };
118
  };
119
 
 
124
  return {
125
  isAuthenticated: initialState.isAuthenticated,
126
  isGuestMode: initialState.isGuestMode,
127
+ coreVersion: initialState.coreVersion,
128
+ apiVersion: initialState.apiVersion,
129
 
130
+ login: (token, isGuest = false, coreVersion = null, apiVersion = null) => {
131
  localStorage.setItem('LIGHTRAG-API-TOKEN', token);
132
+
133
+ if (coreVersion) {
134
+ localStorage.setItem('LIGHTRAG-CORE-VERSION', coreVersion);
135
+ }
136
+ if (apiVersion) {
137
+ localStorage.setItem('LIGHTRAG-API-VERSION', apiVersion);
138
+ }
139
+
140
  set({
141
  isAuthenticated: true,
142
+ isGuestMode: isGuest,
143
+ coreVersion: coreVersion,
144
+ apiVersion: apiVersion
145
  });
146
  },
147
 
148
  logout: () => {
149
  localStorage.removeItem('LIGHTRAG-API-TOKEN');
150
+
151
+ const coreVersion = localStorage.getItem('LIGHTRAG-CORE-VERSION');
152
+ const apiVersion = localStorage.getItem('LIGHTRAG-API-VERSION');
153
+
154
  set({
155
  isAuthenticated: false,
156
+ isGuestMode: false,
157
+ coreVersion: coreVersion,
158
+ apiVersion: apiVersion
159
+ });
160
+ },
161
+
162
+ setVersion: (coreVersion, apiVersion) => {
163
+ // Update localStorage
164
+ if (coreVersion) {
165
+ localStorage.setItem('LIGHTRAG-CORE-VERSION', coreVersion);
166
+ }
167
+ if (apiVersion) {
168
+ localStorage.setItem('LIGHTRAG-API-VERSION', apiVersion);
169
+ }
170
+
171
+ // Update state
172
+ set({
173
+ coreVersion: coreVersion,
174
+ apiVersion: apiVersion
175
  });
176
  }
177
  };