tackhwa committed on
Commit 788e80c · 1 Parent(s): af81714
Files changed (1)
  lightrag/llm.py  +2 −3
lightrag/llm.py CHANGED
@@ -207,6 +207,8 @@ async def bedrock_complete_if_cache(
 def initialize_hf_model(model_name):
     hf_tokenizer = AutoTokenizer.from_pretrained(model_name, device_map="auto", trust_remote_code=True)
     hf_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True)
+    if hf_tokenizer.pad_token is None:
+        hf_tokenizer.pad_token = hf_tokenizer.eos_token
 
     return hf_model, hf_tokenizer
 
@@ -216,9 +218,6 @@ async def hf_model_if_cache(
 ) -> str:
     model_name = model
     hf_model, hf_tokenizer = initialize_hf_model(model_name)
-    if hf_tokenizer.pad_token is None:
-        # print("use eos token")
-        hf_tokenizer.pad_token = hf_tokenizer.eos_token
     hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
     messages = []
     if system_prompt:
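
The commit moves the pad_token fallback out of hf_model_if_cache and into initialize_hf_model, so any caller of the loader gets a usable padding token even when the checkpoint ships without one. A minimal sketch of exercising the patched loader, assuming lightrag is importable and picking "gpt2" purely as an illustrative checkpoint that has an eos_token but no pad_token:

# Minimal sketch, assuming the patched lightrag/llm.py is on the path and a
# small checkpoint such as "gpt2" (eos_token "<|endoftext|>", no pad_token)
# is available locally or via the Hub.
from lightrag.llm import initialize_hf_model

hf_model, hf_tokenizer = initialize_hf_model("gpt2")

# Before this commit, only hf_model_if_cache applied the fallback, so other
# callers could hit "Asking to pad but the tokenizer does not have a padding
# token" when batching; with the fallback inside the loader, padding works.
batch = hf_tokenizer(["hello", "a longer prompt"], padding=True, return_tensors="pt")
print(hf_tokenizer.pad_token)  # falls back to eos_token, e.g. "<|endoftext|>"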