support lmdeploy backend
Files changed:
- examples/lightrag_lmdeploy_demo.py  +74 -0
- lightrag/llm.py  +100 -0
- requirements.txt  +1 -0
examples/lightrag_lmdeploy_demo.py
ADDED
@@ -0,0 +1,74 @@
import os

from lightrag import LightRAG, QueryParam
from lightrag.llm import lmdeploy_model_if_cache, hf_embedding
from lightrag.utils import EmbeddingFunc
from transformers import AutoModel, AutoTokenizer

WORKING_DIR = "./dickens"

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)


async def lmdeploy_model_complete(
    prompt=None, system_prompt=None, history_messages=[], **kwargs
) -> str:
    model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
    return await lmdeploy_model_if_cache(
        model_name,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        # Specify chat_template if your local path does not follow the original HF
        # naming, or if model_name is a PyTorch model on huggingface.co.
        # See https://github.com/InternLM/lmdeploy/blob/main/lmdeploy/model.py
        # for the list of chat templates available in lmdeploy.
        chat_template="llama3",
        # model_format="awq",  # if you are using an AWQ-quantized model
        # quant_policy=8,  # online kv-cache quantization: 4 = kv int4, 8 = kv int8
        **kwargs,
    )


rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=lmdeploy_model_complete,
    llm_model_name="meta-llama/Llama-3.1-8B-Instruct",  # use an absolute path for a local model
    embedding_func=EmbeddingFunc(
        embedding_dim=384,
        max_token_size=5000,
        func=lambda texts: hf_embedding(
            texts,
            tokenizer=AutoTokenizer.from_pretrained(
                "sentence-transformers/all-MiniLM-L6-v2"
            ),
            embed_model=AutoModel.from_pretrained(
                "sentence-transformers/all-MiniLM-L6-v2"
            ),
        ),
    ),
)


with open("./book.txt", "r", encoding="utf-8") as f:
    rag.insert(f.read())

# Perform naive search
print(
    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)

# Perform local search
print(
    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)

# Perform global search
print(
    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)

# Perform hybrid search
print(
    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
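The commented-out model_format and quant_policy arguments above are the hooks for quantized deployments. Below is a minimal sketch, not part of this commit, of the same wrapper pointed at a hypothetical local AWQ checkpoint; the path and template name are illustrative assumptions:

async def lmdeploy_awq_model_complete(
    prompt=None, system_prompt=None, history_messages=[], **kwargs
) -> str:
    # Assumption: "/models/llama-3.1-8b-instruct-awq" is a locally converted AWQ
    # checkpoint. Because the folder name no longer matches the original HF model
    # name, chat_template has to be given explicitly.
    return await lmdeploy_model_if_cache(
        "/models/llama-3.1-8b-instruct-awq",  # hypothetical local path
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        chat_template="llama3",
        model_format="awq",  # AWQ-quantized weights
        quant_policy=8,  # int8 online kv cache
        **kwargs,
    )
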
lightrag/llm.py
CHANGED
@@ -322,6 +322,106 @@ async def ollama_model_if_cache(
     return result


+@lru_cache(maxsize=1)
+def initialize_lmdeploy_pipeline(
+    model,
+    tp=1,
+    chat_template=None,
+    log_level="WARNING",
+    model_format="hf",
+    quant_policy=0,
+):
+    from lmdeploy import pipeline, ChatTemplateConfig, TurbomindEngineConfig
+
+    lmdeploy_pipe = pipeline(
+        model_path=model,
+        backend_config=TurbomindEngineConfig(
+            tp=tp, model_format=model_format, quant_policy=quant_policy
+        ),
+        chat_template_config=ChatTemplateConfig(model_name=chat_template)
+        if chat_template
+        else None,
+        log_level=log_level,
+    )
+    return lmdeploy_pipe
+
+
+async def lmdeploy_model_if_cache(
+    model,
+    prompt,
+    system_prompt=None,
+    history_messages=[],
+    chat_template=None,
+    model_format="hf",
+    quant_policy=0,
+    **kwargs,
+) -> str:
+    """
+    Args:
+        model (str): The path to the model. It can be one of the following:
+            - i) a local directory containing a turbomind model converted by
+                the `lmdeploy convert` command or downloaded from ii) or iii);
+            - ii) the model_id of an lmdeploy-quantized model hosted on
+                huggingface.co, such as "InternLM/internlm-chat-20b-4bit" or
+                "lmdeploy/llama2-chat-70b-4bit";
+            - iii) the model_id of a model hosted on huggingface.co, such as
+                "internlm/internlm-chat-7b", "Qwen/Qwen-7B-Chat" or
+                "baichuan-inc/Baichuan2-7B-Chat".
+        chat_template (str): required when the model is a PyTorch model on
+            huggingface.co (e.g. "internlm-chat-7b", "Qwen-7B-Chat",
+            "Baichuan2-7B-Chat") or when the name of a local path does not
+            match the original model name on HF.
+        tp (int): tensor parallelism degree.
+        prompt (Union[str, List[str]]): input texts to be completed.
+        do_preprocess (bool): whether to pre-process the messages. Defaults to
+            True, which means the chat_template is applied.
+        skip_special_tokens (bool): whether to remove special tokens during
+            decoding. Defaults to False.
+        do_sample (bool): whether to use sampling instead of greedy decoding.
+            Defaults to False, i.e. greedy decoding.
+    """
+    try:
+        import lmdeploy
+        from lmdeploy import version_info, GenerationConfig
+    except Exception:
+        raise ImportError(
+            "Please install lmdeploy before initializing the lmdeploy backend."
+        )
+
+    kwargs.pop("response_format", None)
+    max_new_tokens = kwargs.pop("max_tokens", 512)
+    tp = kwargs.pop("tp", 1)
+    skip_special_tokens = kwargs.pop("skip_special_tokens", False)
+    do_preprocess = kwargs.pop("do_preprocess", True)
+    do_sample = kwargs.pop("do_sample", False)
+    gen_params = kwargs
+
+    version = version_info
+    if do_sample is not None and version < (0, 6, 0):
+        raise RuntimeError(
+            "`do_sample` parameter is not supported by lmdeploy until "
+            f"v0.6.0, but currently using lmdeploy {lmdeploy.__version__}"
+        )
+    else:
+        do_sample = True
+        gen_params.update(do_sample=do_sample)
+
+    lmdeploy_pipe = initialize_lmdeploy_pipeline(
+        model=model,
+        tp=tp,
+        chat_template=chat_template,
+        model_format=model_format,
+        quant_policy=quant_policy,
+        log_level="WARNING",
+    )
+
+    messages = []
+    if system_prompt:
+        messages.append({"role": "system", "content": system_prompt})
+
+    hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
+    messages.extend(history_messages)
+    messages.append({"role": "user", "content": prompt})
+    if hashing_kv is not None:
+        args_hash = compute_args_hash(model, messages)
+        if_cache_return = await hashing_kv.get_by_id(args_hash)
+        if if_cache_return is not None:
+            return if_cache_return["return"]
+
+    gen_config = GenerationConfig(
+        skip_special_tokens=skip_special_tokens,
+        max_new_tokens=max_new_tokens,
+        **gen_params,
+    )
+
+    response = ""
+    async for res in lmdeploy_pipe.generate(
+        messages,
+        gen_config=gen_config,
+        do_preprocess=do_preprocess,
+        stream_response=False,
+        session_id=1,
+    ):
+        response += res.response
+
+    if hashing_kv is not None:
+        await hashing_kv.upsert({args_hash: {"return": response, "model": model}})
+    return response
+
+
 async def gpt_4o_complete(
     prompt, system_prompt=None, history_messages=[], **kwargs
 ) -> str:
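The helper can also be exercised outside of LightRAG. A minimal sketch, not part of this commit, of calling lmdeploy_model_if_cache directly; it assumes lmdeploy is installed and a GPU is available, and relies on the chat template being auto-detected because the HF model id matches a known name:

import asyncio

from lightrag.llm import lmdeploy_model_if_cache


async def main():
    # Illustrative model id; any chat model supported by lmdeploy should work.
    answer = await lmdeploy_model_if_cache(
        "meta-llama/Llama-3.1-8B-Instruct",
        "What is retrieval-augmented generation?",
        system_prompt="You are a concise assistant.",
        max_tokens=256,  # mapped to max_new_tokens inside the helper
    )
    print(answer)


asyncio.run(main())
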
requirements.txt
CHANGED
@@ -13,3 +13,4 @@ tiktoken
 torch
 transformers
 xxhash
+# lmdeploy[all]
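The dependency is added as a comment on purpose: lmdeploy stays an optional extra, and the ImportError raised in lightrag/llm.py asks users to install it themselves, for example with pip install lmdeploy[all] as the comment suggests, before selecting the lmdeploy backend.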