gzdaniel commited on
Commit
8fa793d
·
1 Parent(s): 48fcb22

Update env sample file

Browse files
Files changed (1) hide show
  1. env.example +63 -87
env.example CHANGED
@@ -1,12 +1,25 @@
1
  ### This is a sample file of .env
2
 
 
3
  ### Server Configuration
4
- # HOST=0.0.0.0
5
- # PORT=9621
 
 
 
6
  # WORKERS=2
7
  # CORS_ORIGINS=http://localhost:3000,http://localhost:8080
8
- WEBUI_TITLE='Graph RAG Engine'
9
- WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
 
 
 
 
 
 
 
 
 
10
 
11
  ### Optional SSL Configuration
12
  # SSL=true
@@ -14,11 +27,10 @@ WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
14
  # SSL_KEYFILE=/path/to/key.pem
15
 
16
  ### Directory Configuration (defaults to current working directory)
17
- # WORKING_DIR=<absolute_path_for_working_dir>
 
18
  # INPUT_DIR=<absolute_path_for_doc_input_dir>
19
-
20
- ### Ollama Emulating Model Tag
21
- # OLLAMA_EMULATING_MODEL_TAG=latest
22
 
23
  ### Max nodes returned from graph retrieval
24
  # MAX_GRAPH_NODES=1000
@@ -39,82 +51,57 @@ WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
39
  # MAX_TOKEN_RELATION_DESC=4000
40
  # MAX_TOKEN_ENTITY_DESC=4000
41
 
42
- ### Settings for document indexing
 
43
  SUMMARY_LANGUAGE=English
44
- # CHUNK_SIZE=1200
45
- # CHUNK_OVERLAP_SIZE=100
46
-
47
- ### Number of parallel processing documents in one batch
48
- # MAX_PARALLEL_INSERT=2
49
-
50
  ### Max tokens for entity/relations description after merge
51
  # MAX_TOKEN_SUMMARY=500
52
- ### Number of entities/edges to trigger LLM re-summary on merge (at least 3 is recommended)
53
- # FORCE_LLM_SUMMARY_ON_MERGE=6
54
 
55
- ### Number of chunks sent to Embedding in a single request
56
- # EMBEDDING_BATCH_NUM=32
57
- ### Max concurrency requests for Embedding
58
- # EMBEDDING_FUNC_MAX_ASYNC=16
59
- # MAX_EMBED_TOKENS=8192
60
 
61
  ### LLM Configuration
 
 
62
  ### Time out in seconds for LLM, None for infinite timeout
63
- TIMEOUT=150
64
  ### Some models like o1-mini require temperature to be set to 1
65
- TEMPERATURE=0.5
66
  ### Max concurrency requests of LLM
67
  MAX_ASYNC=4
68
- ### Max tokens sent to LLM (less than the context size of the model)
69
  MAX_TOKENS=32768
70
- ENABLE_LLM_CACHE=true
71
- ENABLE_LLM_CACHE_FOR_EXTRACT=true
72
-
73
- ### Ollama example (For local services installed with docker, you can use host.docker.internal as host)
74
- LLM_BINDING=ollama
75
- LLM_MODEL=mistral-nemo:latest
76
  LLM_BINDING_API_KEY=your_api_key
77
- LLM_BINDING_HOST=http://localhost:11434
78
-
79
- ### OpenAI alike example
80
- # LLM_BINDING=openai
81
- # LLM_MODEL=gpt-4o
82
- # LLM_BINDING_HOST=https://api.openai.com/v1
83
- # LLM_BINDING_API_KEY=your_api_key
84
- ### lollms example
85
- # LLM_BINDING=lollms
86
- # LLM_MODEL=mistral-nemo:latest
87
- # LLM_BINDING_HOST=http://localhost:9600
88
- # LLM_BINDING_API_KEY=your_api_key
89
-
90
- ### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
91
  EMBEDDING_MODEL=bge-m3:latest
92
  EMBEDDING_DIM=1024
93
- # EMBEDDING_BINDING_API_KEY=your_api_key
94
- ### ollama example
95
- EMBEDDING_BINDING=ollama
96
  EMBEDDING_BINDING_HOST=http://localhost:11434
97
- ### OpenAI alike example
98
- # EMBEDDING_BINDING=openai
99
- # EMBEDDING_BINDING_HOST=https://api.openai.com/v1
100
- ### Lollms example
101
- # EMBEDDING_BINDING=lollms
102
- # EMBEDDING_BINDING_HOST=http://localhost:9600
103
-
104
- ### Optional for Azure (LLM_BINDING_HOST, LLM_BINDING_API_KEY take priority)
105
- # AZURE_OPENAI_API_VERSION=2024-08-01-preview
106
- # AZURE_OPENAI_DEPLOYMENT=gpt-4o
107
- # AZURE_OPENAI_API_KEY=your_api_key
108
- # AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
109
-
110
- # AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
111
- # AZURE_EMBEDDING_API_VERSION=2023-05-15
112
 
113
  ### Data storage selection
114
- LIGHTRAG_KV_STORAGE=JsonKVStorage
115
- LIGHTRAG_VECTOR_STORAGE=NanoVectorDBStorage
116
- LIGHTRAG_GRAPH_STORAGE=NetworkXStorage
117
- LIGHTRAG_DOC_STATUS_STORAGE=JsonDocStatusStorage
118
 
119
  ### TiDB Configuration (Deprecated)
120
  # TIDB_HOST=localhost
@@ -135,22 +122,22 @@ POSTGRES_MAX_CONNECTIONS=12
135
  ### Separates all data from different LightRAG instances (deprecated)
136
  # POSTGRES_WORKSPACE=default
137
 
 
 
 
 
 
138
  ### Independent AGM Configuration (not for AGM embedded in PostgreSQL)
139
- AGE_POSTGRES_DB=
140
- AGE_POSTGRES_USER=
141
- AGE_POSTGRES_PASSWORD=
142
- AGE_POSTGRES_HOST=
143
  # AGE_POSTGRES_PORT=8529
144
 
145
  # AGE Graph Name (applies to PostgreSQL and independent AGM)
146
  ### AGE_GRAPH_NAME is deprecated
147
  # AGE_GRAPH_NAME=lightrag
148
 
149
- ### Neo4j Configuration
150
- NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
151
- NEO4J_USERNAME=neo4j
152
- NEO4J_PASSWORD='your_password'
153
-
154
  ### MongoDB Configuration
155
  MONGO_URI=mongodb://root:root@localhost:27017/
156
  MONGO_DATABASE=LightRAG
@@ -170,14 +157,3 @@ QDRANT_URL=http://localhost:16333
170
 
171
  ### Redis
172
  REDIS_URI=redis://localhost:6379
173
-
174
- ### For JWT Auth
175
- # AUTH_ACCOUNTS='admin:admin123,user1:pass456'
176
- # TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
177
- # TOKEN_EXPIRE_HOURS=48
178
- # GUEST_TOKEN_EXPIRE_HOURS=24
179
- # JWT_ALGORITHM=HS256
180
-
181
- ### API-Key to access LightRAG Server API
182
- # LIGHTRAG_API_KEY=your-secure-api-key-here
183
- # WHITELIST_PATHS=/health,/api/*
 
1
  ### This is a sample file of .env
2
 
3
+
4
  ### Server Configuration
5
+ HOST=0.0.0.0
6
+ PORT=9621
7
+ WEBUI_TITLE='My Graph KB'
8
+ WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
9
+ OLLAMA_EMULATING_MODEL_TAG=latest
10
  # WORKERS=2
11
  # CORS_ORIGINS=http://localhost:3000,http://localhost:8080
12
+
13
+ ### Login Configuration
14
+ # AUTH_ACCOUNTS='admin:admin123,user1:pass456'
15
+ # TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
16
+ # TOKEN_EXPIRE_HOURS=48
17
+ # GUEST_TOKEN_EXPIRE_HOURS=24
18
+ # JWT_ALGORITHM=HS256
19
+
20
+ ### API-Key to access LightRAG Server API
21
+ # LIGHTRAG_API_KEY=your-secure-api-key-here
22
+ # WHITELIST_PATHS=/health,/api/*
23
 
24
  ### Optional SSL Configuration
25
  # SSL=true
 
27
  # SSL_KEYFILE=/path/to/key.pem
28
 
29
  ### Directory Configuration (defaults to current working directory)
30
+ ### Should be set if deploy by docker (Set by Dockerfile instead of .env)
31
+ ### Default value is ./inputs and ./rag_storage
32
  # INPUT_DIR=<absolute_path_for_doc_input_dir>
33
+ # WORKING_DIR=<absolute_path_for_working_dir>
 
 
34
 
35
  ### Max nodes returned from graph retrieval
36
  # MAX_GRAPH_NODES=1000
 
51
  # MAX_TOKEN_RELATION_DESC=4000
52
  # MAX_TOKEN_ENTITY_DESC=4000
53
 
54
+ ### Entity and relation summarization configuration
55
+ ### Language: English, Chinese, French, German ...
56
  SUMMARY_LANGUAGE=English
57
+ ### Number of duplicated entities/edges to trigger LLM re-summary on merge (at least 3 is recommended)
58
+ # FORCE_LLM_SUMMARY_ON_MERGE=6
 
 
 
 
59
  ### Max tokens for entity/relations description after merge
60
  # MAX_TOKEN_SUMMARY=500
 
 
61
 
62
+ ### Number of parallel processing documents (less than MAX_ASYNC/2 is recommended)
63
+ # MAX_PARALLEL_INSERT=2
64
+ ### Chunk size for document splitting, 500~1500 is recommended
65
+ # CHUNK_SIZE=1200
66
+ # CHUNK_OVERLAP_SIZE=100
67
 
68
  ### LLM Configuration
69
+ ENABLE_LLM_CACHE=true
70
+ ENABLE_LLM_CACHE_FOR_EXTRACT=true
71
  ### Time out in seconds for LLM, None for infinite timeout
72
+ TIMEOUT=240
73
  ### Some models like o1-mini require temperature to be set to 1
74
+ TEMPERATURE=0
75
  ### Max concurrency requests of LLM
76
  MAX_ASYNC=4
77
+ ### Max tokens sent to LLM for entity/relation summaries (less than the context size of the model)
78
  MAX_TOKENS=32768
79
+ ### LLM Binding type: openai, ollama, lollms
80
+ LLM_BINDING=openai
81
+ LLM_MODEL=gpt-4o
82
+ LLM_BINDING_HOST=https://api.openai.com/v1
 
 
83
  LLM_BINDING_API_KEY=your_api_key
84
+
85
+ ### Embedding Configuration
86
+ ### Embedding Binding type: openai, ollama, lollms
87
+ EMBEDDING_BINDING=ollama
 
 
 
 
 
 
 
 
 
 
88
  EMBEDDING_MODEL=bge-m3:latest
89
  EMBEDDING_DIM=1024
90
+ EMBEDDING_BINDING_API_KEY=your_api_key
91
+ # If the embedding service is deployed within the same Docker stack, use host.docker.internal instead of localhost
 
92
  EMBEDDING_BINDING_HOST=http://localhost:11434
93
+ ### Number of chunks sent to Embedding in a single request
94
+ # EMBEDDING_BATCH_NUM=32
95
+ ### Max concurrency requests for Embedding
96
+ # EMBEDDING_FUNC_MAX_ASYNC=16
97
+ ### Maximum tokens sent to Embedding for each chunk (no longer in use?)
98
+ # MAX_EMBED_TOKENS=8192
 
 
 
 
 
 
 
 
 
99
 
100
  ### Data storage selection
101
+ # LIGHTRAG_KV_STORAGE=PGKVStorage
102
+ # LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
103
+ # LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
104
+ # LIGHTRAG_GRAPH_STORAGE=Neo4JStorage
105
 
106
  ### TiDB Configuration (Deprecated)
107
  # TIDB_HOST=localhost
 
122
  ### Separates all data from different LightRAG instances (deprecated)
123
  # POSTGRES_WORKSPACE=default
124
 
125
+ ### Neo4j Configuration
126
+ NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
127
+ NEO4J_USERNAME=neo4j
128
+ NEO4J_PASSWORD='your_password'
129
+
130
  ### Independent AGM Configuration (not for AGM embedded in PostgreSQL)
131
+ # AGE_POSTGRES_DB=
132
+ # AGE_POSTGRES_USER=
133
+ # AGE_POSTGRES_PASSWORD=
134
+ # AGE_POSTGRES_HOST=
135
  # AGE_POSTGRES_PORT=8529
136
 
137
  # AGE Graph Name (applies to PostgreSQL and independent AGM)
138
  ### AGE_GRAPH_NAME is deprecated
139
  # AGE_GRAPH_NAME=lightrag
140
 
 
 
 
 
 
141
  ### MongoDB Configuration
142
  MONGO_URI=mongodb://root:root@localhost:27017/
143
  MONGO_DATABASE=LightRAG
 
157
 
158
  ### Redis
159
  REDIS_URI=redis://localhost:6379