# backend/prompts.py
from typing import List

from langchain_core.prompts import PromptTemplate

import models

def format_prompt(prompt: str) -> PromptTemplate:
    """Wrap a prompt body in the Llama 3 instruction template and return it
    as a LangChain PromptTemplate.

    The f-string interpolates `prompt` directly, so any {placeholders} inside
    it survive and become the template's input variables.
    """
    template = f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|>

You are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>

{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

"""
    # Alternative: derive the same template from the model's own chat template
    # instead of hardcoding it (requires `from transformers import AutoTokenizer`):
    #
    # raw_template = [
    #     {"role": "system", "content": "You are a helpful assistant."},
    #     {"role": "user", "content": prompt},
    # ]
    # tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
    # template = tokenizer.apply_chat_template(
    #     raw_template,
    #     tokenize=False,
    #     add_generation_prompt=True,
    # )
    # Input variables such as {question} are auto-detected by LangChain,
    # so input_variables does not need to be passed explicitly.
    return PromptTemplate.from_template(template)
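
# Quick doctest-style sanity check for format_prompt (output abbreviated):
#
#   >>> tmpl = format_prompt("{question}")
#   >>> tmpl.input_variables
#   ['question']
#   >>> "<|start_header_id|>assistant<|end_header_id|>" in tmpl.format(question="hi")
#   True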


def format_chat_history(messages: List[models.Message]) -> str:
    """Format a list of Message objects into a single chat-history string,
    with a "---" divider between consecutive messages."""
    return "\n---\n".join(f"{msg.type}: {msg.message}" for msg in messages)
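
# Hedged example, assuming models.Message exposes `type` and `message`
# fields and accepts them as keyword arguments (the constructor shape is an
# assumption, not confirmed by this module):
#
#   >>> msgs = [models.Message(type="user", message="hi"),
#   ...         models.Message(type="ai", message="hello")]
#   >>> print(format_chat_history(msgs))
#   user: hi
#   ---
#   ai: hello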


def format_context(docs: List[str]) -> str:
    """Concatenate the list of texts returned by DataIndexer.search into one
    context string that fits into rag_prompt_formatted."""
    if not docs:
        return ""
    return "\n\n---\n\n".join(doc.strip() for doc in docs if doc.strip())
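
# Divider behaviour, checkable without any model (empty or blank chunks are
# dropped, surrounding whitespace is trimmed):
#
#   >>> format_context(["first doc ", "", "second doc"])
#   'first doc\n\n---\n\nsecond doc'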
raw_prompt = "{question}"

# The history prompt captures the question and the conversation history.
# It needs a {chat_history} placeholder and a {question} placeholder.
history_prompt: str = """
Given the following conversation, provide a helpful answer to the follow-up
question. If the user asks about earlier questions, answer from the chat history.

Chat History:
{chat_history}

Follow-Up Question: {question}

Helpful answer:
"""

# The standalone prompt captures the question and the chat history to
# generate a standalone question. It needs a {chat_history} placeholder
# and a {question} placeholder.
standalone_prompt: str = """
Given the following conversation and a follow-up question, rephrase the
follow-up question to be a standalone question, in its original language.

Chat History:
{chat_history}

Follow-Up Input: {question}

Standalone question:
"""

# The RAG prompt captures the retrieved context and the standalone question
# to generate the final answer. It needs a {context} placeholder and a
# {standalone_question} placeholder.
rag_prompt: str = """
Answer the question based only on the following context:
{context}

Question: {standalone_question}
"""

# Wrap each prompt in the Llama 3 instruction template. raw_prompt is
# formatted first and only then rebound to a plain PromptTemplate, so the
# order of these two lines matters.
raw_prompt_formatted = format_prompt(raw_prompt)
raw_prompt = PromptTemplate.from_template(raw_prompt)
history_prompt_formatted: PromptTemplate = format_prompt(history_prompt)
standalone_prompt_formatted: PromptTemplate = format_prompt(standalone_prompt)
rag_prompt_formatted: PromptTemplate = format_prompt(rag_prompt)
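
# A minimal sketch (not part of this module) of how these templates are
# typically wired together with LangChain's LCEL; the `llm` object, e.g. a
# HuggingFaceEndpoint, is an assumption here:
#
#   from langchain_core.output_parsers import StrOutputParser
#
#   standalone_chain = standalone_prompt_formatted | llm | StrOutputParser()
#   question = standalone_chain.invoke(
#       {"chat_history": format_chat_history(messages), "question": "and why?"}
#   )
#   rag_chain = rag_prompt_formatted | llm | StrOutputParser()
#   answer = rag_chain.invoke(
#       {"context": format_context(docs), "standalone_question": question}
#   )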