from typing import Any, Dict, List
from datetime import datetime

from langchain_core.callbacks import BaseCallbackHandler

import crud
import schemas


class LogResponseCallback(BaseCallbackHandler):
    """LangChain callback that logs outgoing prompts and persists LLM
    responses to the database via the `crud` layer."""

    def __init__(self, user_request: schemas.UserRequest, db):
        """Keep the originating user request and an open DB session.

        Args:
            user_request: request carrying the username the message is
                attributed to.
            db: database session/handle passed through to `crud` calls.
        """
        super().__init__()
        self.user_request = user_request
        self.db = db

    def on_llm_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
        """Run when the LLM finishes; store its response as an 'AI' message.

        NOTE(review): LangChain actually passes an ``LLMResult`` here despite
        the ``Dict[str, Any]`` annotation; ``str(outputs)`` captures it either
        way. Confirm whether extracting ``outputs.generations[0][0].text``
        is the intended payload instead of the full stringified result.
        """
        # Resolve (or lazily create) the DB user the message belongs to.
        user = crud.get_or_create_user(self.db, self.user_request.username)
        message_to_add = schemas.MessageBase(
            user_id=user.id,
            message=str(outputs),       # raw string form of the LLM output
            user=user.username,
            type="AI",                  # row is a model message, not a human one
            timestamp=datetime.now(),   # NOTE(review): naive local time — confirm UTC isn't expected
        )
        return crud.add_message(self.db, message_to_add, self.user_request.username)

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> Any:
        """Run when the LLM starts; print each outgoing prompt (debug aid)."""
        for prompt in prompts:
            print(prompt)