Source code for langchain.chains.loading (excerpt)
        raise ValueError("File type must be json or yaml")
    # Override default 'verbose' and 'memory' for the chain
    if "verbose" in kwargs:
        config["verbose"] = kwargs.pop("verbose")
    if "memory" in kwargs:
        config["memory"] = kwargs.pop("memory")
    # Load the chain from the config now.
    return load_chain_from_config(config, **kwargs)
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/loading.html
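
The excerpt above is the tail of the config-loading helper behind load_chain. A minimal usage sketch, assuming load_chain is importable from langchain.chains and that a local chain config exists (the file name below is hypothetical):

# Hypothetical local config file; the 'verbose' kwarg is popped and applied
# by the override logic shown in the excerpt above.
from langchain.chains import load_chain

chain = load_chain("my_chain.json", verbose=True)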
Source code for langchain.chains.hyde.base
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Dict, List
import numpy as np
from pydantic import Extra
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
from langchain.embeddings.base import Embeddings
from langchain.llms.base import BaseLLM
class HypotheticalDocumentEmbedder(Chain, Embeddings):
    """Generate hypothetical document for query, and then embed that.

    Based on https://arxiv.org/abs/2212.10496
    """

    base_embeddings: Embeddings
    llm_chain: LLMChain

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Input keys for Hyde's LLM chain."""
        return self.llm_chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Output keys for Hyde's LLM chain."""
        return self.llm_chain.output_keys

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call the base embeddings."""
        return self.base_embeddings.embed_documents(texts)
    def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
        """Combine embeddings into final embeddings."""
        return list(np.array(embeddings).mean(axis=0))

    def embed_query(self, text: str) -> List[float]:
        """Generate a hypothetical document and embed it."""
        var_name = self.llm_chain.input_keys[0]
        result = self.llm_chain.generate([{var_name: text}])
        documents = [generation.text for generation in result.generations[0]]
        embeddings = self.embed_documents(documents)
        return self.combine_embeddings(embeddings)

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        """Call the internal llm chain."""
        return self.llm_chain._call(inputs)
    @classmethod
    def from_llm(
        cls, llm: BaseLLM, base_embeddings: Embeddings, prompt_key: str
    ) -> HypotheticalDocumentEmbedder:
        """Load and use LLMChain for a specific prompt key."""
        prompt = PROMPT_MAP[prompt_key]
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(base_embeddings=base_embeddings, llm_chain=llm_chain)
    @property
    def _chain_type(self) -> str:
        return "hyde_chain"
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/hyde/base.html
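
A minimal usage sketch for the class above, assuming an OpenAI API key is configured and that "web_search" is one of the keys in PROMPT_MAP:

# Sketch: wire HypotheticalDocumentEmbedder to OpenAI models via from_llm().
# Assumes OPENAI_API_KEY is set; "web_search" is assumed to be a PROMPT_MAP key.
from langchain.chains import HypotheticalDocumentEmbedder
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI

embedder = HypotheticalDocumentEmbedder.from_llm(
    llm=OpenAI(), base_embeddings=OpenAIEmbeddings(), prompt_key="web_search"
)
# embed_query() drafts a hypothetical answer document with the LLM, embeds it
# with the base embeddings, and mean-pools the vectors (combine_embeddings).
vector = embedder.embed_query("What are the health benefits of green tea?")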
Source code for langchain.chains.sql_database.base
"""Chain for interacting with SQL Database."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.sql_database import SQLDatabase
class SQLDatabaseChain(Chain):
    """Chain for interacting with SQL Database.

    Example:
        .. code-block:: python

            from langchain import SQLDatabaseChain, OpenAI, SQLDatabase
            db = SQLDatabase(...)
            db_chain = SQLDatabaseChain(llm=OpenAI(), database=db)
    """

    llm: BaseLanguageModel
    """LLM wrapper to use."""
    database: SQLDatabase = Field(exclude=True)
    """SQL Database to connect to."""
    prompt: Optional[BasePromptTemplate] = None
    """Prompt to use to translate natural language to SQL."""
    top_k: int = 5
    """Number of results to return from the query"""
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    return_intermediate_steps: bool = False
    """Whether or not to return the intermediate steps along with the final answer."""
    return_direct: bool = False
"""Whether or not to return the result of querying the SQL table directly."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, "intermediate_steps"]
def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
try:
prompt = self.prompt or SQL_PROMPTS[self.database.dialect]
except KeyError:
# fallback to generic prompt if dialect-specific prompt doesn't exist yet
prompt = PROMPT
llm_chain = LLMChain(llm=self.llm, prompt=prompt)
        input_text = f"{inputs[self.input_key]}\nSQLQuery:"
        self.callback_manager.on_text(input_text, verbose=self.verbose)
        # If not present, then defaults to None which is all tables.
        table_names_to_use = inputs.get("table_names_to_use")
        table_info = self.database.get_table_info(table_names=table_names_to_use)
        llm_inputs = {
            "input": input_text,
            "top_k": self.top_k,
            "dialect": self.database.dialect,
            "table_info": table_info,
            "stop": ["\nSQLResult:"],
        }
        intermediate_steps = []
        sql_cmd = llm_chain.predict(**llm_inputs)
        intermediate_steps.append(sql_cmd)
        self.callback_manager.on_text(sql_cmd, color="green", verbose=self.verbose)
        result = self.database.run(sql_cmd)
        intermediate_steps.append(result)
        self.callback_manager.on_text("\nSQLResult: ", verbose=self.verbose)
        self.callback_manager.on_text(result, color="yellow", verbose=self.verbose)
        # If return direct, we just set the final result equal to the sql query
        if self.return_direct:
            final_result = result
        else:
            self.callback_manager.on_text("\nAnswer:", verbose=self.verbose)
            input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:"
            llm_inputs["input"] = input_text
            final_result = llm_chain.predict(**llm_inputs)
            self.callback_manager.on_text(
                final_result, color="green", verbose=self.verbose
            )
        chain_result: Dict[str, Any] = {self.output_key: final_result}
        if self.return_intermediate_steps:
            chain_result["intermediate_steps"] = intermediate_steps
        return chain_result

    @property
    def _chain_type(self) -> str:
        return "sql_database_chain"


class SQLDatabaseSequentialChain(Chain):
    """Chain for querying SQL database that is a sequential chain.

    The chain is as follows:
    1. Based on the query, determine which tables to use.
    2. Based on those tables, call the normal SQL database chain.

    This is useful in cases where the number of tables in the database is large.
    """
    return_intermediate_steps: bool = False

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        database: SQLDatabase,
        query_prompt: BasePromptTemplate = PROMPT,
        decider_prompt: BasePromptTemplate = DECIDER_PROMPT,
        **kwargs: Any,
    ) -> SQLDatabaseSequentialChain:
        """Load the necessary chains."""
        sql_chain = SQLDatabaseChain(
            llm=llm, database=database, prompt=query_prompt, **kwargs
        )
        decider_chain = LLMChain(
            llm=llm, prompt=decider_prompt, output_key="table_names"
        )
        return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)

    decider_chain: LLMChain
    sql_chain: SQLDatabaseChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Return the singular input key.

        :meta private:
        """
        return [self.input_key]
    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        if not self.return_intermediate_steps:
            return [self.output_key]
        else:
            return [self.output_key, "intermediate_steps"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        _table_names = self.sql_chain.database.get_usable_table_names()
        table_names = ", ".join(_table_names)
        llm_inputs = {
            "query": inputs[self.input_key],
            "table_names": table_names,
        }
        table_names_to_use = self.decider_chain.predict_and_parse(**llm_inputs)
        self.callback_manager.on_text(
            "Table names to use:", end="\n", verbose=self.verbose
        )
        self.callback_manager.on_text(
            str(table_names_to_use), color="yellow", verbose=self.verbose
        )
        new_inputs = {
            self.sql_chain.input_key: inputs[self.input_key],
"table_names_to_use": table_names_to_use,
}
return self.sql_chain(new_inputs, return_only_outputs=True)
@property
def _chain_type(self) -> str:
return "sql_database_sequential_chain"
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/sql_database/base.html
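
A usage sketch expanding the docstring example above. The SQLite file and its employees table are assumptions, as is SQLDatabase.from_uri as the constructor:

# Sketch: natural-language queries against a hypothetical local SQLite file.
# Assumes an OpenAI key and an "example.db" containing an employees table.
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///example.db")
db_chain = SQLDatabaseChain(llm=OpenAI(temperature=0), database=db, verbose=True)
db_chain.run("How many employees are there?")

For databases with many tables, SQLDatabaseSequentialChain.from_llm(llm, db) adds the decider step shown below, which first picks the relevant tables.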
Source code for langchain.chains.retrieval_qa.base
"""Chain for question-answering against a vector database."""
from __future__ import annotations
import warnings
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field, root_validator
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel, BaseRetriever, Document
from langchain.vectorstores.base import VectorStore
class BaseRetrievalQA(Chain):
    combine_documents_chain: BaseCombineDocumentsChain
    """Chain to use to combine the documents."""
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    return_source_documents: bool = False
    """Return the source documents."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True
        allow_population_by_field_name = True

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.

        :meta private:
        """
        return [self.input_key]
    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        if self.return_source_documents:
            _output_keys = _output_keys + ["source_documents"]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[PromptTemplate] = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Initialize from LLM."""
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        llm_chain = LLMChain(llm=llm, prompt=_prompt)
        document_prompt = PromptTemplate(
            input_variables=["page_content"], template="Context:\n{page_content}"
        )
        combine_documents_chain = StuffDocumentsChain(
            llm_chain=llm_chain,
            document_variable_name="context",
            document_prompt=document_prompt,
        )
        return cls(combine_documents_chain=combine_documents_chain, **kwargs)

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        chain_type_kwargs: Optional[dict] = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Load chain from chain type."""
        _chain_type_kwargs = chain_type_kwargs or {}
        combine_documents_chain = load_qa_chain(
            llm, chain_type=chain_type, **_chain_type_kwargs
        )
        return cls(combine_documents_chain=combine_documents_chain, **kwargs)

    @abstractmethod
    def _get_docs(self, question: str) -> List[Document]:
        """Get documents to do question answering over."""

    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """Run get_relevant_text and llm on input query.

        If chain has 'return_source_documents' as 'True', returns
        the retrieved documents as well under the key 'source_documents'.

        Example:
        .. code-block:: python

            res = indexqa({'query': 'This is my query'})
            answer, docs = res['result'], res['source_documents']
        """
        question = inputs[self.input_key]
        docs = self._get_docs(question)
        answer = self.combine_documents_chain.run(
            input_documents=docs, question=question
        )
        if self.return_source_documents:
            return {self.output_key: answer, "source_documents": docs}
        else:
            return {self.output_key: answer}

    @abstractmethod
    async def _aget_docs(self, question: str) -> List[Document]:
        """Get documents to do question answering over."""

    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """Run get_relevant_text and llm on input query.

        If chain has 'return_source_documents' as 'True', returns
        the retrieved documents as well under the key 'source_documents'.

        Example:
        .. code-block:: python

            res = indexqa({'query': 'This is my query'})
            answer, docs = res['result'], res['source_documents']
        """
        question = inputs[self.input_key]
        docs = await self._aget_docs(question)
        answer = await self.combine_documents_chain.arun(
            input_documents=docs, question=question
        )
        if self.return_source_documents:
            return {self.output_key: answer, "source_documents": docs}
        else:
            return {self.output_key: answer}


class RetrievalQA(BaseRetrievalQA):
    """Chain for question-answering against an index.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            from langchain.chains import RetrievalQA
            from langchain.faiss import FAISS
            vectordb = FAISS(...)
            retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=vectordb)
    """

    retriever: BaseRetriever = Field(exclude=True)

    def _get_docs(self, question: str) -> List[Document]:
        return self.retriever.get_relevant_documents(question)

    async def _aget_docs(self, question: str) -> List[Document]:
        return await self.retriever.aget_relevant_documents(question)


class VectorDBQA(BaseRetrievalQA):
"""Chain for
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/retrieval_qa/base.html
|
8759cac1bfde-5
|
VectorDBQA(BaseRetrievalQA):
"""Chain for question-answering against a vector database."""
vectorstore: VectorStore = Field(exclude=True, alias="vectorstore")
"""Vector Database to connect to."""
k: int = 4
"""Number of documents to query for."""
search_type: str = "similarity"
"""Search type to use over vectorstore. `similarity` or `mmr`."""
search_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
warnings.warn(
"`VectorDBQA` is deprecated - "
"please use `from langchain.chains import RetrievalQA`"
)
return values
@root_validator()
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "mmr"):
raise ValueError(f"search_type of {search_type} not allowed.")
return values
def _get_docs(self, question: str) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(
question, k=self.k, **self.search_kwargs
)
elif self.search_type == "mmr":
docs = self.vectorstore.max_marginal_relevance_search(
question, k=self.k, **self.search_kwargs
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def _aget_docs(self, question: str) -> List[Document]:
raise NotImplementedError("VectorDBQA does not support async")
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "vector_db_qa"
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/retrieval_qa/base.html
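
A usage sketch for RetrievalQA, assuming an OpenAI key and the faiss package; the sample text is made up:

# Sketch: build a tiny FAISS index and answer over it with RetrievalQA.
from langchain.chains import RetrievalQA
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

vectordb = FAISS.from_texts(
    ["LangChain chains compose LLM calls with other components."],
    OpenAIEmbeddings(),
)
qa = RetrievalQA.from_chain_type(
    llm=OpenAI(temperature=0),
    chain_type="stuff",
    retriever=vectordb.as_retriever(),
    return_source_documents=True,
)
res = qa({"query": "What do chains compose?"})
answer, docs = res["result"], res["source_documents"]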
Source code for langchain.chains.pal.base
"""Implements Program-Aided Language Models.
As in https://arxiv.org/pdf/2211.10435.pdf.
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from pydantic import Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.python import PythonREPL
from langchain.schema import BaseLanguageModel
class PALChain(Chain):
    """Implements Program-Aided Language Models."""

    llm: BaseLanguageModel
    prompt: BasePromptTemplate
    stop: str = "\n\n"
    get_answer_expr: str = "print(solution())"
    python_globals: Optional[Dict[str, Any]] = None
    python_locals: Optional[Dict[str, Any]] = None
    output_key: str = "result"  #: :meta private:
    return_intermediate_steps: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Return the singular input key.

        :meta private:
        """
        return self.prompt.input_variables
    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        if not self.return_intermediate_steps:
            return [self.output_key]
        else:
            return [self.output_key, "intermediate_steps"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)
        code = llm_chain.predict(stop=[self.stop], **inputs)
        self.callback_manager.on_text(
            code, color="green", end="\n", verbose=self.verbose
        )
        repl = PythonREPL(_globals=self.python_globals, _locals=self.python_locals)
        res = repl.run(code + f"\n{self.get_answer_expr}")
        output = {self.output_key: res.strip()}
        if self.return_intermediate_steps:
            output["intermediate_steps"] = code
        return output

    @classmethod
    def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain:
        """Load PAL from math prompt."""
        return cls(
            llm=llm,
            prompt=MATH_PROMPT,
            stop="\n\n",
            get_answer_expr="print(solution())",
            **kwargs,
        )

    @classmethod
    def from_colored_object_prompt(
        cls, llm: BaseLanguageModel, **kwargs: Any
    ) -> PALChain:
        """Load PAL from colored object prompt."""
        return cls(
            llm=llm,
            prompt=COLORED_OBJECT_PROMPT,
            stop="\n\n\n",
            get_answer_expr="print(answer)",
            **kwargs,
        )

    @property
    def _chain_type(self) -> str:
        return "pal_chain"
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/pal/base.html
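
A usage sketch for from_math_prompt above, assuming an OpenAI key; temperature=0 keeps the generated program stable:

# Sketch: PALChain asks the LLM for a Python program and evaluates it in the
# PythonREPL shown in _call().
from langchain.chains import PALChain
from langchain.llms import OpenAI

pal_chain = PALChain.from_math_prompt(OpenAI(temperature=0), verbose=True)
pal_chain.run(
    "Jan has three times the number of pets as Marcia. Marcia has two more "
    "pets than Cindy. If Cindy has four pets, how many pets do the three have?"
)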
Source code for langchain.chains.qa_with_sources.retrieval
"""Question-answering with sources over an index."""
from typing import Any, Dict, List
from pydantic import Field
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
    """Question-answering with sources over an index."""

    retriever: BaseRetriever = Field(exclude=True)
    """Index to connect to."""
    reduce_k_below_max_tokens: bool = False
    """Reduce the number of results to return from store based on tokens limit"""
    max_tokens_limit: int = 3375
    """Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""

    def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
        num_docs = len(docs)
        if self.reduce_k_below_max_tokens and isinstance(
            self.combine_documents_chain, StuffDocumentsChain
        ):
            tokens = [
                self.combine_documents_chain.llm_chain.llm.get_num_tokens(
                    doc.page_content
                )
                for doc in docs
            ]
            token_count = sum(tokens[:num_docs])
            while token_count > self.max_tokens_limit:
                num_docs -= 1
                token_count -= tokens[num_docs]
        return docs[:num_docs]

    def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        question = inputs[self.question_key]
        docs = self.retriever.get_relevant_documents(question)
        return self._reduce_tokens_below_limit(docs)

    async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        question = inputs[self.question_key]
        docs = await self.retriever.aget_relevant_documents(question)
        return self._reduce_tokens_below_limit(docs)
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/qa_with_sources/retrieval.html
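
A usage sketch for sources-aware retrieval QA, assuming an OpenAI key and faiss; the document text and its "source" metadata are made up. The answer/sources split on "SOURCES:" happens in BaseQAWithSourcesChain, shown in the next module:

# Sketch: RetrievalQAWithSourcesChain over a tiny FAISS retriever.
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

vectordb = FAISS.from_texts(
    ["LangChain chains compose LLM calls."],
    OpenAIEmbeddings(),
    metadatas=[{"source": "https://example.com/doc1"}],
)
chain = RetrievalQAWithSourcesChain.from_chain_type(
    OpenAI(temperature=0),
    chain_type="stuff",
    retriever=vectordb.as_retriever(),
    reduce_k_below_max_tokens=True,
)
result = chain({"question": "What do chains compose?"})
answer, sources = result["answer"], result["sources"]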
Source code for langchain.chains.qa_with_sources.base
"""Question answering with sources over documents."""
from __future__ import annotations
import re
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from pydantic import Extra, root_validator
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.chains.qa_with_sources.map_reduce_prompt import (
    COMBINE_PROMPT,
    EXAMPLE_PROMPT,
    QUESTION_PROMPT,
)
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
class BaseQAWithSourcesChain(Chain, ABC):
    """Question answering with sources over documents."""

    combine_documents_chain: BaseCombineDocumentsChain
    """Chain to use to combine documents."""
    question_key: str = "question"  #: :meta private:
    input_docs_key: str = "docs"  #: :meta private:
    answer_key: str = "answer"  #: :meta private:
    sources_answer_key: str = "sources"  #: :meta private:
    return_source_documents: bool = False
    """Return the source documents."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        document_prompt: BasePromptTemplate = EXAMPLE_PROMPT,
        question_prompt: BasePromptTemplate = QUESTION_PROMPT,
        combine_prompt: BasePromptTemplate = COMBINE_PROMPT,
        **kwargs: Any,
    ) -> BaseQAWithSourcesChain:
        """Construct the chain from an LLM."""
        llm_question_chain = LLMChain(llm=llm, prompt=question_prompt)
        llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt)
        combine_results_chain = StuffDocumentsChain(
            llm_chain=llm_combine_chain,
            document_prompt=document_prompt,
            document_variable_name="summaries",
        )
        combine_document_chain = MapReduceDocumentsChain(
            llm_chain=llm_question_chain,
            combine_document_chain=combine_results_chain,
            document_variable_name="context",
        )
        return cls(
            combine_documents_chain=combine_document_chain,
            **kwargs,
        )

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        chain_type_kwargs: Optional[dict] = None,
        **kwargs: Any,
    ) -> BaseQAWithSourcesChain:
        """Load chain from chain type."""
        _chain_kwargs = chain_type_kwargs or {}
        combine_document_chain = load_qa_with_sources_chain(
            llm, chain_type=chain_type, **_chain_kwargs
        )
        return cls(combine_documents_chain=combine_document_chain, **kwargs)

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.question_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        _output_keys = [self.answer_key, self.sources_answer_key]
        if self.return_source_documents:
            _output_keys = _output_keys + ["source_documents"]
        return _output_keys
    @root_validator(pre=True)
    def validate_naming(cls, values: Dict) -> Dict:
        """Fix backwards compatibility in naming."""
        if "combine_document_chain" in values:
            values["combine_documents_chain"] = values.pop("combine_document_chain")
        return values

    @abstractmethod
    def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        """Get docs to run questioning over."""

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        docs = self._get_docs(inputs)
        answer = self.combine_documents_chain.run(input_documents=docs, **inputs)
        if re.search(r"SOURCES:\s", answer):
            answer, sources = re.split(r"SOURCES:\s", answer)
        else:
            sources = ""
        result: Dict[str, Any] = {
            self.answer_key: answer,
            self.sources_answer_key: sources,
        }
        if self.return_source_documents:
            result["source_documents"] = docs
        return result

    @abstractmethod
    async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        """Get docs to run questioning over."""

    async def _acall(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        docs = await self._aget_docs(inputs)
        answer = await self.combine_documents_chain.arun(input_documents=docs, **inputs)
        if re.search(r"SOURCES:\s", answer):
            answer, sources = re.split(r"SOURCES:\s", answer)
        else:
            sources = ""
        result: Dict[str, Any] = {
            self.answer_key: answer,
            self.sources_answer_key: sources,
        }
        if self.return_source_documents:
            result["source_documents"] = docs
        return result


class QAWithSourcesChain(BaseQAWithSourcesChain):
    """Question answering with sources over documents."""

    input_docs_key: str = "docs"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_docs_key, self.question_key]
    def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        return inputs.pop(self.input_docs_key)

    async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        return inputs.pop(self.input_docs_key)

    @property
    def _chain_type(self) -> str:
        return "qa_with_sources_chain"
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/qa_with_sources/base.html
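
A usage sketch for QAWithSourcesChain, which takes documents directly under the "docs" input key rather than retrieving them. Assumes an OpenAI key; the document content and source URL are made up:

# Sketch: question answering with sources over explicitly supplied documents.
from langchain.chains import QAWithSourcesChain
from langchain.docstore.document import Document
from langchain.llms import OpenAI

chain = QAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type="stuff")
docs = [
    Document(
        page_content="LangChain was created by Harrison Chase.",
        metadata={"source": "https://example.com/about"},
    )
]
result = chain({"docs": docs, "question": "Who created LangChain?"})
answer, sources = result["answer"], result["sources"]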
Source code for langchain.chains.qa_with_sources.vector_db
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any, Dict, List
from pydantic import Field, root_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
from langchain.docstore.document import Document
from langchain.vectorstores.base import VectorStore
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
    """Question-answering with sources over a vector database."""

    vectorstore: VectorStore = Field(exclude=True)
    """Vector Database to connect to."""
    k: int = 4
    """Number of results to return from store"""
    reduce_k_below_max_tokens: bool = False
    """Reduce the number of results to return from store based on tokens limit"""
    max_tokens_limit: int = 3375
    """Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
    search_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Extra search args."""

    def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
        num_docs = len(docs)
        if self.reduce_k_below_max_tokens and isinstance(
            self.combine_documents_chain, StuffDocumentsChain
        ):
            tokens = [
                self.combine_documents_chain.llm_chain.llm.get_num_tokens(
                    doc.page_content
                )
                for doc in docs
            ]
            token_count = sum(tokens[:num_docs])
            while token_count > self.max_tokens_limit:
                num_docs -= 1
                token_count -= tokens[num_docs]
        return docs[:num_docs]

    def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        question = inputs[self.question_key]
        docs = self.vectorstore.similarity_search(
            question, k=self.k, **self.search_kwargs
        )
        return self._reduce_tokens_below_limit(docs)

    async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        raise NotImplementedError("VectorDBQAWithSourcesChain does not support async")

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/qa_with_sources/vector_db.html
|
1eabbe448027-2
|
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`"
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/qa_with_sources/vector_db.html
Source code for langchain.chains.graph_qa.base
"""Question answering over a graph."""
from __future__ import annotations
from typing import Any, Dict, List
from pydantic import Field
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import ENTITY_EXTRACTION_PROMPT, PROMPT
from langchain.chains.llm import LLMChain
from langchain.graphs.networkx_graph import NetworkxEntityGraph, get_entities
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
class GraphQAChain(Chain):
    """Chain for question-answering against a graph."""

    graph: NetworkxEntityGraph = Field(exclude=True)
    entity_extraction_chain: LLMChain
    qa_chain: LLMChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLLM,
        qa_prompt: BasePromptTemplate = PROMPT,
        entity_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT,
        **kwargs: Any,
    ) -> GraphQAChain:
        """Initialize from LLM."""
        qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
        entity_chain = LLMChain(llm=llm, prompt=entity_prompt)
        return cls(qa_chain=qa_chain, entity_extraction_chain=entity_chain, **kwargs)

    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """Extract entities, look up info and answer question."""
        question = inputs[self.input_key]
        entity_string = self.entity_extraction_chain.run(question)
        self.callback_manager.on_text(
            "Entities Extracted:", end="\n", verbose=self.verbose
        )
        self.callback_manager.on_text(
            entity_string, color="green", end="\n", verbose=self.verbose
        )
        entities = get_entities(entity_string)
        context = ""
        for entity in entities:
            triplets = self.graph.get_entity_knowledge(entity)
context += "\n".join(triplets)
self.callback_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
self.callback_manager.on_text(
context, color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain({"question": question, "context": context})
return {self.output_key: result[self.qa_chain.output_key]}
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/graph_qa/base.html
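
A usage sketch: build a small entity graph by hand, then query it. Assumes an OpenAI key, and that KnowledgeTriple/add_triple come from the networkx_graph module imported above; the fact is made up:

# Sketch: GraphQAChain over a hand-built NetworkxEntityGraph.
from langchain.chains import GraphQAChain
from langchain.graphs.networkx_graph import KnowledgeTriple, NetworkxEntityGraph
from langchain.llms import OpenAI

graph = NetworkxEntityGraph()
graph.add_triple(KnowledgeTriple("Intel", "is located in", "Costa Rica"))
chain = GraphQAChain.from_llm(OpenAI(temperature=0), graph=graph, verbose=True)
chain.run("Where is Intel located?")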
Source code for langchain.chains.qa_generation.base
from __future__ import annotations
import json
from typing import Any, Dict, List, Optional
from pydantic import Field
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
class QAGenerationChain(Chain):
    llm_chain: LLMChain
    text_splitter: TextSplitter = Field(
        default=RecursiveCharacterTextSplitter(chunk_overlap=500)
    )
    input_key: str = "text"
    output_key: str = "questions"
    k: Optional[int] = None

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> QAGenerationChain:
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        chain = LLMChain(llm=llm, prompt=_prompt)
        return cls(llm_chain=chain, **kwargs)

    @property
    def _chain_type(self) -> str:
        raise NotImplementedError

    @property
    def input_keys(self) -> List[str]:
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        docs = self.text_splitter.create_documents([inputs[self.input_key]])
        results = self.llm_chain.generate([{"text": d.page_content} for d in docs])
        qa = [json.loads(res[0].text) for res in results.generations]
        return {self.output_key: qa}

    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
        raise NotImplementedError
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/qa_generation/base.html
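
A usage sketch: generate question/answer pairs from raw text. Assumes an OpenAI key; the prompt selected by PROMPT_SELECTOR asks the model for JSON, which _call() parses with json.loads:

# Sketch: QAGenerationChain splits the text, then asks the model for QA pairs.
from langchain.chains import QAGenerationChain
from langchain.chat_models import ChatOpenAI

chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
qa_pairs = chain.run(
    "LangChain is a framework for developing applications powered by "
    "language models."
)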
Source code for langchain.chains.constitutional_ai.base
"""Chain for applying constitutional principles to the outputs of another chain."""
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.constitutional_ai.principles import PRINCIPLES
from langchain.chains.constitutional_ai.prompts import CRITIQUE_PROMPT, REVISION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
class ConstitutionalChain(Chain):
    """Chain for applying constitutional principles.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            from langchain.chains import LLMChain, ConstitutionalChain

            qa_prompt = PromptTemplate(
                template="Q: {question} A:",
                input_variables=["question"],
            )
            qa_chain = LLMChain(llm=OpenAI(), prompt=qa_prompt)

            constitutional_chain = ConstitutionalChain.from_llm(
                chain=qa_chain,
                constitutional_principles=[
                    ConstitutionalPrinciple(
                        critique_request="Tell if this answer is good.",
                        revision_request="Give a better answer.",
                    )
                ],
            )

            constitutional_chain.run(question="What is the meaning of life?")
    """

    chain: LLMChain
    constitutional_principles: List[ConstitutionalPrinciple]
    critique_chain: LLMChain
    revision_chain: LLMChain

    @classmethod
    def get_principles(
        cls, names: Optional[List[str]] = None
    ) -> List[ConstitutionalPrinciple]:
        if names is None:
            return list(PRINCIPLES.values())
        else:
            return [PRINCIPLES[name] for name in names]

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        chain: LLMChain,
        critique_prompt: BasePromptTemplate = CRITIQUE_PROMPT,
        revision_prompt: BasePromptTemplate = REVISION_PROMPT,
        **kwargs: Any,
    ) -> "ConstitutionalChain":
        """Create a chain from an LLM."""
        critique_chain = LLMChain(llm=llm, prompt=critique_prompt)
        revision_chain = LLMChain(llm=llm, prompt=revision_prompt)
        return cls(
            chain=chain,
            critique_chain=critique_chain,
            revision_chain=revision_chain,
            **kwargs,
        )

    @property
    def input_keys(self) -> List[str]:
        """Defines the input keys."""
        return self.chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Defines the output keys."""
        return ["output"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        response = self.chain.run(**inputs)
        input_prompt = self.chain.prompt.format(**inputs)
        self.callback_manager.on_text(
            text="Initial response: " + response + "\n\n",
            verbose=self.verbose,
            color="yellow",
        )
        for constitutional_principle in self.constitutional_principles:
            # Do critique
            raw_critique = self.critique_chain.run(
                input_prompt=input_prompt,
                output_from_model=response,
                critique_request=constitutional_principle.critique_request,
            )
            critique = self._parse_critique(
                output_string=raw_critique,
            ).strip()
            # Do revision
            revision = self.revision_chain.run(
                input_prompt=input_prompt,
                output_from_model=response,
                critique_request=constitutional_principle.critique_request,
                critique=critique,
                revision_request=constitutional_principle.revision_request,
            ).strip()
            response = revision
            self.callback_manager.on_text(
                text=f"Applying {constitutional_principle.name}..." + "\n\n",
                verbose=self.verbose,
                color="green",
            )
            self.callback_manager.on_text(
                text="Critique: " + critique + "\n\n",
                verbose=self.verbose,
                color="blue",
            )
            self.callback_manager.on_text(
                text="Updated response: " + revision + "\n\n",
                verbose=self.verbose,
                color="yellow",
            )
        return {"output": response}

    @staticmethod
    def _parse_critique(output_string: str) -> str:
        if "Revision request:" not in output_string:
            return output_string
        output_string = output_string.split("Revision request:")[0]
if "\n\n" in output_string:
output_string = output_string.split("\n\n")[0]
return output_string
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/constitutional_ai/base.html
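
A variant of the docstring example that pulls named principles from the built-in PRINCIPLES registry via get_principles(). Assumes an OpenAI key; "illegal" is assumed to be one of the registry keys:

# Sketch: ConstitutionalChain with a registry principle instead of a
# hand-written ConstitutionalPrinciple.
from langchain.chains import ConstitutionalChain, LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

qa_prompt = PromptTemplate(template="Q: {question} A:", input_variables=["question"])
qa_chain = LLMChain(llm=OpenAI(temperature=0), prompt=qa_prompt)
constitutional_chain = ConstitutionalChain.from_llm(
    llm=OpenAI(temperature=0),
    chain=qa_chain,
    constitutional_principles=ConstitutionalChain.get_principles(["illegal"]),
    verbose=True,
)
constitutional_chain.run(question="What is the meaning of life?")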
Source code for langchain.chains.api.base
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from pydantic import Field, root_validator
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.prompts import BasePromptTemplate
from langchain.requests import TextRequestsWrapper
from langchain.schema import BaseLanguageModel
class APIChain(Chain):
    """Chain that makes API calls and summarizes the responses to answer a question."""

    api_request_chain: LLMChain
    api_answer_chain: LLMChain
    requests_wrapper: TextRequestsWrapper = Field(exclude=True)
    api_docs: str
    question_key: str = "question"  #: :meta private:
    output_key: str = "output"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.question_key]

    @property
    def output_keys(self) -> List[str]:
        """Expect output key.

        :meta private:
        """
        return [self.output_key]

    @root_validator(pre=True)
    def validate_api_request_prompt(cls, values: Dict) -> Dict:
"""Check that api request prompt expects the right variables."""
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/api/base.html
|
43b3a98d7f13-1
|
"""Check that api request prompt expects the right variables."""
input_vars = values["api_request_chain"].prompt.input_variables
expected_vars = {"question", "api_docs"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
@root_validator(pre=True)
def validate_api_answer_prompt(cls, values: Dict) -> Dict:
"""Check that api answer prompt expects the right variables."""
input_vars = values["api_answer_chain"].prompt.input_variables
expected_vars = {"question", "api_docs", "api_url", "api_response"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
question = inputs[self.question_key]
api_url = self.api_request_chain.predict(
question=question, api_docs=self.api_docs
)
        self.callback_manager.on_text(
            api_url, color="green", end="\n", verbose=self.verbose
        )
        api_response = self.requests_wrapper.get(api_url)
        self.callback_manager.on_text(
            api_response, color="yellow", end="\n", verbose=self.verbose
        )
        answer = self.api_answer_chain.predict(
            question=question,
            api_docs=self.api_docs,
            api_url=api_url,
            api_response=api_response,
        )
        return {self.output_key: answer}

    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
        question = inputs[self.question_key]
        api_url = await self.api_request_chain.apredict(
            question=question, api_docs=self.api_docs
        )
        self.callback_manager.on_text(
            api_url, color="green", end="\n", verbose=self.verbose
        )
        api_response = await self.requests_wrapper.aget(api_url)
        self.callback_manager.on_text(
api_response, color="yellow",
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/api/base.html
|
43b3a98d7f13-3
|
api_response, color="yellow", end="\n", verbose=self.verbose
)
answer = await self.api_answer_chain.apredict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
)
return {self.output_key: answer}
[docs] @classmethod
def from_llm_and_api_docs(
cls,
llm: BaseLanguageModel,
api_docs: str,
headers: Optional[dict] = None,
api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
**kwargs: Any,
) -> APIChain:
"""Load chain from just an LLM and the api docs."""
get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
requests_wrapper = TextRequestsWrapper(headers=headers)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
            api_request_chain=get_request_chain,
            api_answer_chain=get_answer_chain,
            requests_wrapper=requests_wrapper,
            api_docs=api_docs,
            **kwargs,
        )

    @property
    def _chain_type(self) -> str:
        return "api_chain"
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/api/base.html
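
A usage sketch for from_llm_and_api_docs, assuming an OpenAI key and that langchain.chains.api ships an OPEN_METEO_DOCS api_docs string for the Open-Meteo weather API:

# Sketch: APIChain picks a URL from the docs, calls it, and summarizes.
from langchain.chains import APIChain
from langchain.chains.api import open_meteo_docs
from langchain.llms import OpenAI

chain = APIChain.from_llm_and_api_docs(
    OpenAI(temperature=0), open_meteo_docs.OPEN_METEO_DOCS, verbose=True
)
chain.run("What is the weather like right now in Munich, Germany, in Celsius?")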
Source code for langchain.chains.api.openapi.chain
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
import json
from typing import Any, Dict, List, NamedTuple, Optional, cast
from pydantic import BaseModel, Field
from requests import Response
from langchain.chains.api.openapi.requests_chain import APIRequesterChain
from langchain.chains.api.openapi.response_chain import APIResponderChain
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.requests import Requests
from langchain.tools.openapi.utils.api_models import APIOperation
class _ParamMapping(NamedTuple):
    """Mapping from parameter name to parameter value."""

    query_params: List[str]
    body_params: List[str]
    path_params: List[str]


class OpenAPIEndpointChain(Chain, BaseModel):
    """Chain interacts with an OpenAPI endpoint using natural language."""

    api_request_chain: LLMChain
    api_response_chain: Optional[LLMChain]
    api_operation: APIOperation
    requests: Requests = Field(exclude=True, default_factory=Requests)
    param_mapping: _ParamMapping = Field(alias="param_mapping")
    return_intermediate_steps: bool = False
    instructions_key: str = "instructions"  #: :meta private:
    output_key: str = "output"  #: :meta private:
    max_text_length: Optional[int] = Field(ge=0)  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/api/openapi/chain.html
|
08f094fe5e60-1
|
"""Expect input key.
:meta private:
"""
return [self.instructions_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, "intermediate_steps"]
def _construct_path(self, args: Dict[str, str]) -> str:
"""Construct the path from the deserialized input."""
path = self.api_operation.base_url + self.api_operation.path
for param in self.param_mapping.path_params:
path = path.replace(f"{{{param}}}", args.pop(param, ""))
return path
def _extract_query_params(self, args: Dict[str, str]) -> Dict[str, str]:
"""Extract the query params from the deserialized input."""
query_params = {}
for param in self.param_mapping.query_params:
if param in args:
query_params[param] = args.pop(param)
return query_params
    def _extract_body_params(self, args: Dict[str, str]) -> Optional[Dict[str, str]]:
        """Extract the request body params from the deserialized input."""
        body_params = None
        if self.param_mapping.body_params:
            body_params = {}
            for param in self.param_mapping.body_params:
                if param in args:
                    body_params[param] = args.pop(param)
        return body_params

    def deserialize_json_input(self, serialized_args: str) -> dict:
        """Use the serialized typescript dictionary.

        Resolve the path, query params dict, and optional requestBody dict.
        """
        args: dict = json.loads(serialized_args)
        path = self._construct_path(args)
        body_params = self._extract_body_params(args)
        query_params = self._extract_query_params(args)
        return {
            "url": path,
            "data": body_params,
            "params": query_params,
        }

    def _get_output(self, output: str, intermediate_steps: dict) -> dict:
"""Return the output from the API call."""
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/api/openapi/chain.html
|
08f094fe5e60-3
|
"""Return the output from the API call."""
if self.return_intermediate_steps:
return {
self.output_key: output,
"intermediate_steps": intermediate_steps,
}
else:
return {self.output_key: output}
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
intermediate_steps = {}
instructions = inputs[self.instructions_key]
instructions = instructions[: self.max_text_length]
_api_arguments = self.api_request_chain.predict_and_parse(
instructions=instructions
)
api_arguments = cast(str, _api_arguments)
intermediate_steps["request_args"] = api_arguments
self.callback_manager.on_text(
api_arguments, color="green", end="\n", verbose=self.verbose
)
if api_arguments.startswith("ERROR"):
return self._get_output(api_arguments, intermediate_steps)
elif api_arguments.startswith("MESSAGE:"):
return self._get_output(
api_arguments[len("MESSAGE:") :],
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/api/openapi/chain.html
|
08f094fe5e60-4
|
api_arguments[len("MESSAGE:") :], intermediate_steps
)
try:
request_args = self.deserialize_json_input(api_arguments)
method = getattr(self.requests, self.api_operation.method.value)
api_response: Response = method(**request_args)
if api_response.status_code != 200:
method_str = str(self.api_operation.method.value)
response_text = (
f"{api_response.status_code}: {api_response.reason}"
+ f"\nFor {method_str.upper()} {request_args['url']}\n"
+ f"Called with args: {request_args['params']}"
)
else:
response_text = api_response.text
except Exception as e:
response_text = f"Error with message {str(e)}"
response_text = response_text[: self.max_text_length]
intermediate_steps["response_text"] = response_text
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/api/openapi/chain.html
|
08f094fe5e60-5
|
intermediate_steps["response_text"] = response_text
self.callback_manager.on_text(
response_text, color="blue", end="\n", verbose=self.verbose
)
if self.api_response_chain is not None:
_answer = self.api_response_chain.predict_and_parse(
response=response_text,
instructions=instructions,
)
answer = cast(str, _answer)
self.callback_manager.on_text(
answer, color="yellow", end="\n", verbose=self.verbose
)
return self._get_output(answer, intermediate_steps)
else:
return self._get_output(response_text, intermediate_steps)
[docs] @classmethod
def from_url_and_method(
cls,
spec_url: str,
path: str,
method: str,
llm: BaseLLM,
requests: Optional[Requests] = None,
return_intermediate_steps: bool = False,
        **kwargs: Any
        # TODO: Handle async
    ) -> "OpenAPIEndpointChain":
        """Create an OpenAPIEndpoint from a spec at the specified url."""
        operation = APIOperation.from_openapi_url(spec_url, path, method)
        return cls.from_api_operation(
            operation,
            requests=requests,
            llm=llm,
            return_intermediate_steps=return_intermediate_steps,
            **kwargs,
        )

    @classmethod
    def from_api_operation(
        cls,
        operation: APIOperation,
        llm: BaseLLM,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        return_intermediate_steps: bool = False,
        raw_response: bool = False,
        **kwargs: Any
        # TODO: Handle async
    ) -> "OpenAPIEndpointChain":
        """Create an OpenAPIEndpointChain from an operation and a spec."""
        param_mapping = _ParamMapping(
            query_params=operation.query_params,
            body_params=operation.body_params,
            path_params=operation.path_params,
        )
        requests_chain = APIRequesterChain.from_llm_and_typescript(
            llm, typescript_definition=operation.to_typescript(), verbose=verbose
        )
        if raw_response:
            response_chain = None
        else:
            response_chain = APIResponderChain.from_llm(llm, verbose=verbose)
        _requests = requests or Requests()
        return cls(
            api_request_chain=requests_chain,
            api_response_chain=response_chain,
            api_operation=operation,
            requests=_requests,
            param_mapping=param_mapping,
            verbose=verbose,
            return_intermediate_steps=return_intermediate_steps,
            **kwargs,
        )
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/api/openapi/chain.html
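
A usage sketch for from_url_and_method above. The spec URL, path, and method below are placeholders, not a real service:

# Sketch: point the chain at one operation in an OpenAPI spec.
from langchain.chains.api.openapi.chain import OpenAPIEndpointChain
from langchain.llms import OpenAI

chain = OpenAPIEndpointChain.from_url_and_method(
    spec_url="https://example.com/openapi.json",  # hypothetical spec
    path="/search",
    method="get",
    llm=OpenAI(temperature=0),
    return_intermediate_steps=True,
)
result = chain("Find entries about hypothetical document embeddings")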
Source code for langchain.chains.llm_checker.base
"""Chain for question-answering with self-verification."""
from typing import Dict, List
from pydantic import Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_checker.prompt import (
    CHECK_ASSERTIONS_PROMPT,
    CREATE_DRAFT_ANSWER_PROMPT,
    LIST_ASSERTIONS_PROMPT,
    REVISED_ANSWER_PROMPT,
)
from langchain.chains.sequential import SequentialChain
from langchain.llms.base import BaseLLM
from langchain.prompts import PromptTemplate
class LLMCheckerChain(Chain):
    """Chain for question-answering with self-verification.

    Example:
        .. code-block:: python

            from langchain import OpenAI, LLMCheckerChain
            llm = OpenAI(temperature=0.7)
            checker_chain = LLMCheckerChain(llm=llm)
    """

    llm: BaseLLM
    """LLM wrapper to use."""
    create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT
    list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT
    check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
    revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT
    """Prompt to use when questioning the documents."""
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Return the singular input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        question = inputs[self.input_key]
        create_draft_answer_chain = LLMChain(
            llm=self.llm, prompt=self.create_draft_answer_prompt, output_key="statement"
        )
        list_assertions_chain = LLMChain(
            llm=self.llm, prompt=self.list_assertions_prompt, output_key="assertions"
        )
        check_assertions_chain = LLMChain(
            llm=self.llm,
            prompt=self.check_assertions_prompt,
            output_key="checked_assertions",
        )
        revised_answer_chain = LLMChain(
            llm=self.llm,
            prompt=self.revised_answer_prompt,
            output_key="revised_statement",
        )
        chains = [
            create_draft_answer_chain,
            list_assertions_chain,
            check_assertions_chain,
            revised_answer_chain,
        ]
        question_to_checked_assertions_chain = SequentialChain(
            chains=chains,
            input_variables=["question"],
            output_variables=["revised_statement"],
            verbose=True,
        )
        output = question_to_checked_assertions_chain({"question": question})
        return {self.output_key: output["revised_statement"]}

    @property
    def _chain_type(self) -> str:
        return "llm_checker_chain"
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/llm_checker/base.html
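
A usage sketch matching the docstring example: draft an answer, list and check its assertions, then revise. Assumes an OpenAI key:

# Sketch: LLMCheckerChain runs the four-step SequentialChain built in _call().
from langchain import LLMCheckerChain, OpenAI

checker_chain = LLMCheckerChain(llm=OpenAI(temperature=0.7), verbose=True)
checker_chain.run("What type of mammal lays the biggest eggs?")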
Source code for langchain.chains.llm_bash.base
"""Chain that interprets a prompt and executes bash code to perform bash operations."""
from typing import Dict, List
from pydantic import Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.prompt import PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.utilities.bash import BashProcess
[docs]class LLMBashChain(Chain):
"""Chain that interprets a prompt and executes bash code to perform bash operations.
Example:
.. code-block:: python
from langchain import LLMBashChain, OpenAI
llm_bash = LLMBashChain(llm=OpenAI())
"""
llm: BaseLanguageModel
"""LLM wrapper to use."""
input_key: str = "question" #: :meta private:
output_key: str = "answer" #: :meta private:
prompt: BasePromptTemplate = PROMPT
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
llm_executor = LLMChain(prompt=self.prompt, llm=self.llm)
bash_executor = BashProcess()
self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)
t = llm_executor.predict(question=inputs[self.input_key])
self.callback_manager.on_text(t, color="green", verbose=self.verbose)
t = t.strip()
if t.startswith("```bash"):
# Split the string into a list of substrings
command_list = t.split("\n")
print(command_list)
# Drop the opening ```bash fence and the closing ``` fence
command_list = command_list[1:-1]
output = bash_executor.run(command_list)
self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
self.callback_manager.on_text(output, color="yellow", verbose=self.verbose)
else:
raise ValueError(f"unknown format from LLM: {t}")
return {self.output_key: output}
@property
def _chain_type(self) -> str:
return "llm_bash_chain"
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/llm_bash/base.html
Source code for langchain.chains.llm_math.base
"""Chain that interprets a prompt and executes python code to do math."""
from typing import Dict, List
from pydantic import Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_math.prompt import PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.python import PythonREPL
from langchain.schema import BaseLanguageModel
[docs]class LLMMathChain(Chain):
"""Chain that interprets a prompt and executes python code to do math.
Example:
.. code-block:: python
from langchain import LLMMathChain, OpenAI
llm_math = LLMMathChain(llm=OpenAI())
"""
llm: BaseLanguageModel
"""LLM wrapper to use."""
prompt: BasePromptTemplate = PROMPT
"""Prompt to use to translate to python if neccessary."""
input_key: str = "question" #: :meta private:
output_key: str = "answer" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
def _process_llm_result(self, t: str) -> Dict[str, str]:
python_executor = PythonREPL()
self.callback_manager.on_text(t, color="green", verbose=self.verbose)
t = t.strip()
if t.startswith("```python"):
code = t[9:-4]
output = python_executor.run(code)
self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
self.callback_manager.on_text(output, color="yellow", verbose=self.verbose)
answer = "Answer: " + output
elif t.startswith("Answer:"):
answer = t
elif "Answer:" in t:
answer = "Answer: " + t.split("Answer:")[-1]
else:
raise ValueError(f"unknown format from LLM: {t}")
return {self.output_key: answer}
async def _aprocess_llm_result(self, t: str) -> Dict[str, str]:
python_executor = PythonREPL()
if self.callback_manager.is_async:
await self.callback_manager.on_text(t, color="green", verbose=self.verbose)
else:
self.callback_manager.on_text(t, color="green", verbose=self.verbose)
t = t.strip()
if t.startswith("```python"):
code = t[9:-4]
output = python_executor.run(code)
if self.callback_manager.is_async:
await self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
await self.callback_manager.on_text(
output, color="yellow", verbose=self.verbose
)
else:
await self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
await self.callback_manager.on_text(
output, color="yellow", verbose=self.verbose
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/llm_math/base.html
|
b7b40de51de8-3
|
output, color="yellow", verbose=self.verbose
)
answer = "Answer: " + output
elif t.startswith("Answer:"):
answer = t
elif "Answer:" in t:
answer = "Answer: " + t.split("Answer:")[-1]
else:
raise ValueError(f"unknown format from LLM: {t}")
return {self.output_key: answer}
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
llm_executor = LLMChain(
prompt=self.prompt, llm=self.llm, callback_manager=self.callback_manager
)
self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)
t = llm_executor.predict(question=inputs[self.input_key], stop=["```output"])
return self._process_llm_result(t)
async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
llm_executor = LLMChain(
prompt=self.prompt, llm=self.llm, callback_manager=self.callback_manager
)
if self.callback_manager.is_async:
await self.callback_manager.on_text(
inputs[self.input_key], verbose=self.verbose
)
else:
self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)
t = await llm_executor.apredict(
question=inputs[self.input_key], stop=["```output"]
)
return await self._aprocess_llm_result(t)
@property
def _chain_type(self) -> str:
return "llm_math_chain"
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/llm_math/base.html
Source code for langchain.chains.combine_documents.base
"""Base interface for chains combining documents."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
from pydantic import Field
from langchain.chains.base import Chain
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
class BaseCombineDocumentsChain(Chain, ABC):
"""Base interface for chains combining documents."""
input_key: str = "input_documents" #: :meta private:
output_key: str = "output_text" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]:
"""Return the prompt length given the documents passed in.
Returns None if the method does not depend on the prompt length.
"""
return None
@abstractmethod
def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
"""Combine documents into a single string."""
@abstractmethod
async def acombine_docs(
self, docs: List[Document], **kwargs: Any
) -> Tuple[str, dict]:
"""Combine documents into a single string asynchronously."""
def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
docs = inputs[self.input_key]
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
output, extra_return_dict = self.combine_docs(docs, **other_keys)
extra_return_dict[self.output_key] = output
return extra_return_dict
async def _acall(self, inputs: Dict[str, Any]) -> Dict[str, str]:
docs = inputs[self.input_key]
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
output, extra_return_dict = await self.acombine_docs(docs, **other_keys)
extra_return_dict[self.output_key] = output
return extra_return_dict
[docs]class AnalyzeDocumentChain(Chain):
"""Chain that splits documents, then analyzes it in pieces."""
input_key: str = "input_document" #: :meta private:
output_key: str = "output_text"
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/combine_documents/base.html
|
87cde4abbb1d-2
|
#: :meta private:
output_key: str = "output_text" #: :meta private:
text_splitter: TextSplitter = Field(default_factory=RecursiveCharacterTextSplitter)
combine_docs_chain: BaseCombineDocumentsChain
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
document = inputs[self.input_key]
docs = self.text_splitter.create_documents([document])
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
other_keys[self.combine_docs_chain.input_key] = docs
return self.combine_docs_chain(other_keys, return_only_outputs=True)
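Example usage (a minimal sketch, not part of the source file; assumes an OpenAI API key is configured and a local text file — the file name is illustrative):
from langchain import OpenAI
from langchain.chains import AnalyzeDocumentChain
from langchain.chains.summarize import load_summarize_chain
llm = OpenAI(temperature=0)
summary_chain = load_summarize_chain(llm, chain_type="map_reduce")
analyze_chain = AnalyzeDocumentChain(combine_docs_chain=summary_chain)
with open("state_of_the_union.txt") as f:
    text = f.read()
# The document is split by the default RecursiveCharacterTextSplitter,
# then the pieces are handed to the combine-documents chain.
print(analyze_chain.run(input_document=text))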
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/combine_documents/base.html
Source code for langchain.chains.conversational_retrieval.base
"""Chain for chatting with a vector database."""
from __future__ import annotations
import warnings
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from pydantic import Extra, Field, root_validator
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel, BaseRetriever, Document
from langchain.vectorstores.base import VectorStore
def _get_chat_history(chat_history: List[Tuple[str, str]]) -> str:
buffer = ""
for human_s, ai_s in chat_history:
human = "Human: " + human_s
ai = "Assistant: " + ai_s
buffer += "\n" + "\n".join([human, ai])
return buffer
class BaseConversationalRetrievalChain(Chain):
"""Chain for chatting with an index."""
combine_docs_chain: BaseCombineDocumentsChain
question_generator: LLMChain
output_key: str = "answer"
return_source_documents: bool = False
"""Whether to also return the source documents."""
get_chat_history: Optional[Callable[[Tuple[str, str]], str]] = None
"""Optional function to format the chat history; defaults to the module-level _get_chat_history."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
allow_population_by_field_name = True
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return ["question", "chat_history"]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
if self.return_source_documents:
_output_keys = _output_keys + ["source_documents"]
return _output_keys
@abstractmethod
def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
"""Get docs."""
def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
question = inputs["question"]
get_chat_history = self.get_chat_history or _get_chat_history
chat_history_str = get_chat_history(inputs["chat_history"])
if chat_history_str:
new_question = self.question_generator.run(
question=question, chat_history=chat_history_str
)
else:
new_question = question
docs = self._get_docs(new_question, inputs)
new_inputs = inputs.copy()
new_inputs["question"] = new_question
new_inputs["chat_history"] = chat_history_str
answer = self.combine_docs_chain.run(input_documents=docs, **new_inputs)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
@abstractmethod
async def _aget_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
"""Get docs."""
async def _acall(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
question = inputs["question"]
get_chat_history = self.get_chat_history or _get_chat_history
chat_history_str = get_chat_history(inputs["chat_history"])
if chat_history_str:
new_question = await self.question_generator.arun(
question=question, chat_history=chat_history_str
)
else:
new_question = question
docs = await self._aget_docs(new_question, inputs)
new_inputs = inputs.copy()
new_inputs["question"] = new_question
new_inputs["chat_history"] = chat_history_str
answer = await self.combine_docs_chain.arun(input_documents=docs, **new_inputs)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
def save(self, file_path: Union[Path, str]) -> None:
if self.get_chat_history:
raise ValueError("Chain not savable when `get_chat_history` is not None.")
super().save(file_path)
[docs]class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
"""Chain for chatting with an index."""
retriever: BaseRetriever
"""Index to connect to."""
max_tokens_limit: Optional[int] = None
"""If set, restricts the docs to return from store based on tokens, enforced only
for StuffDocumentChain"""
def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
num_docs = len(docs)
if self.max_tokens_limit and isinstance(
self.combine_docs_chain, StuffDocumentsChain
):
tokens = [
self.combine_docs_chain.llm_chain.llm.get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
docs = self.retriever.get_relevant_documents(question)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
docs = await self.retriever.aget_relevant_documents(question)
return self._reduce_tokens_below_limit(docs)
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
retriever: BaseRetriever,
condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
qa_prompt: Optional[BasePromptTemplate] = None,
chain_type: str = "stuff",
**kwargs: Any,
) -> BaseConversationalRetrievalChain:
"""Load chain from LLM."""
doc_chain = load_qa_chain(
llm,
chain_type=chain_type,
prompt=qa_prompt,
)
condense_question_chain = LLMChain(llm=llm, prompt=condense_question_prompt)
return cls(
retriever=retriever,
combine_docs_chain=doc_chain,
question_generator=condense_question_chain,
**kwargs,
)
[docs]class ChatVectorDBChain(BaseConversationalRetrievalChain):
"""Chain for chatting with a vector database."""
vectorstore: VectorStore = Field(alias="vectorstore")
top_k_docs_for_context: int = 4
search_kwargs: dict = Field(default_factory=dict)
@property
def _chain_type(self) -> str:
return "chat-vector-db"
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
warnings.warn(
"`ChatVectorDBChain` is deprecated - "
"please use `from langchain.chains import ConversationalRetrievalChain`"
)
return values
def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
vectordbkwargs = inputs.get("vectordbkwargs", {})
full_kwargs = {**self.search_kwargs, **vectordbkwargs}
return self.vectorstore.similarity_search(
question, k=self.top_k_docs_for_context, **full_kwargs
)
async def _aget_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
raise NotImplementedError("ChatVectorDBChain does not support async")
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
qa_prompt: Optional[BasePromptTemplate] = None,
chain_type: str = "stuff",
**kwargs: Any,
) -> BaseConversationalRetrievalChain:
"""Load chain from LLM."""
doc_chain = load_qa_chain(
llm,
chain_type=chain_type,
prompt=qa_prompt,
)
condense_question_chain = LLMChain(llm=llm, prompt=condense_question_prompt)
return cls(
vectorstore=vectorstore,
combine_docs_chain=doc_chain,
question_generator=condense_question_chain,
**kwargs,
)
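Example usage (a minimal sketch, not part of the source file; assumes an OpenAI API key and the faiss package are available — the indexed texts are illustrative):
from langchain import OpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
vectorstore = FAISS.from_texts(
    ["LangChain is a framework for developing LLM applications."],
    OpenAIEmbeddings(),
)
qa = ConversationalRetrievalChain.from_llm(
    llm=OpenAI(temperature=0),
    retriever=vectorstore.as_retriever(),
)
# The chain condenses the question with the chat history, retrieves docs,
# and answers under the "answer" output key.
result = qa({"question": "What is LangChain?", "chat_history": []})
print(result["answer"])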
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
Source code for langchain.chains.llm_summarization_checker.base
"""Chain for summarization with self-verification."""
from pathlib import Path
from typing import Dict, List
from pydantic import Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sequential import SequentialChain
from langchain.llms.base import BaseLLM
from langchain.prompts.prompt import PromptTemplate
PROMPTS_DIR = Path(__file__).parent / "prompts"
CREATE_ASSERTIONS_PROMPT = PromptTemplate.from_file(
PROMPTS_DIR / "create_facts.txt", ["summary"]
)
CHECK_ASSERTIONS_PROMPT = PromptTemplate.from_file(
PROMPTS_DIR / "check_facts.txt", ["assertions"]
)
REVISED_SUMMARY_PROMPT = PromptTemplate.from_file(
PROMPTS_DIR / "revise_summary.txt", ["checked_assertions", "summary"]
)
ARE_ALL_TRUE_PROMPT = PromptTemplate.from_file(
PROMPTS_DIR / "are_all_true_prompt.txt", ["checked_assertions"]
)
[docs]class LLMSummarizationCheckerChain(Chain):
"""Chain for question-answering with self-verification.
Example:
.. code-block:: python
from langchain import OpenAI, LLMSummarizationCheckerChain
llm = OpenAI(temperature=0.0)
checker_chain = LLMSummarizationCheckerChain(llm=llm)
"""
llm: BaseLLM
"""LLM wrapper to use."""
create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT
check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT
are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
max_checks: int = 2
"""Maximum number of times to check the assertions. Default to double-checking."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
return [self.output_key]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
all_true = False
count = 0
output = None
original_input = inputs[self.input_key]
chain_input = original_input
while not all_true and count < self.max_checks:
chain = SequentialChain(
chains=[
LLMChain(
llm=self.llm,
prompt=self.create_assertions_prompt,
output_key="assertions",
verbose=self.verbose,
),
LLMChain(
llm=self.llm,
prompt=self.check_assertions_prompt,
output_key="checked_assertions",
verbose=self.verbose,
),
LLMChain(
llm=self.llm,
prompt=self.revised_summary_prompt,
output_key="revised_summary",
verbose=self.verbose,
),
LLMChain(
llm=self.llm,
output_key="all_true",
prompt=self.are_all_true_prompt,
verbose=self.verbose,
),
],
input_variables=["summary"],
output_variables=["all_true", "revised_summary"],
verbose=self.verbose,
)
output = chain({"summary": chain_input})
count += 1
if output["all_true"].strip() == "True":
break
if self.verbose:
print(output["revised_summary"])
chain_input = output["revised_summary"]
if not output:
raise ValueError("No output from chain")
return {self.output_key: output["revised_summary"].strip()}
@property
def _chain_type(self) -> str:
return "llm_summarization_checker_chain"
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
Source code for langchain.chains.conversation.base
"""Chain that carries on a conversation and calls an LLM."""
from typing import Dict, List
from pydantic import Extra, Field, root_validator
from langchain.chains.conversation.prompt import PROMPT
from langchain.chains.llm import LLMChain
from langchain.memory.buffer import ConversationBufferMemory
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseMemory
[docs]class ConversationChain(LLMChain):
"""Chain to have a conversation and load context from memory.
Example:
.. code-block:: python
from langchain import ConversationChain, OpenAI
conversation = ConversationChain(llm=OpenAI())
"""
memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
"""Default memory store."""
prompt: BasePromptTemplate = PROMPT
"""Default conversation prompt to use."""
input_key: str = "input" #: :meta private:
output_key: str = "response" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Use this since so some prompt vars come from history."""
return [self.input_key]
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables
|
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/conversation/base.html
|
a312197949d0-1
|
values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
memory_keys = values["memory"].memory_variables
input_key = values["input_key"]
if input_key in memory_keys:
raise ValueError(
f"The input key {input_key} was also found in the memory keys "
f"({memory_keys}) - please provide keys that don't overlap."
)
prompt_variables = values["prompt"].input_variables
expected_keys = memory_keys + [input_key]
if set(expected_keys) != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but got {memory_keys} as inputs from "
f"memory, and {input_key} as the normal input key."
)
return values
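Example usage (a minimal sketch, not part of the source file; assumes an OpenAI API key is configured; the replies depend on the model):
from langchain import ConversationChain, OpenAI
conversation = ConversationChain(llm=OpenAI(temperature=0), verbose=True)
print(conversation.predict(input="Hi there!"))
# The default ConversationBufferMemory carries the first turn into the second.
print(conversation.predict(input="What did I just say?"))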
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/conversation/base.html
Evaluation#
Conceptual Guide
This section of the documentation covers how we approach and think about evaluation in LangChain:
both evaluation of internal chains/agents, and how we recommend that people building on top of LangChain approach evaluation.
The Problem#
It can be really hard to evaluate LangChain chains and agents.
There are two main reasons for this:
# 1: Lack of data
You generally don’t have a ton of data to evaluate your chains/agents over before starting a project.
This is usually because Large Language Models (the core of most chains/agents) are terrific few-shot and zero-shot learners,
meaning you are almost always able to get started on a particular task (text-to-SQL, question answering, etc) without
a large dataset of examples.
This is in stark contrast to traditional machine learning where you had to first collect a bunch of datapoints
before even getting started using a model.
# 2: Lack of metrics
Most chains/agents are performing tasks for which there are not very good metrics to evaluate performance.
For example, one of the most common use cases is generating text of some form.
Evaluating generated text is much more complicated than evaluating a classification prediction, or a numeric prediction.
The Solution#
LangChain attempts to tackle both of those issues.
What we have so far are initial passes at solutions - we do not think we have a perfect solution.
So we very much welcome feedback, contributions, integrations, and thoughts on this.
Here is what we have for each problem so far:
# 1: Lack of data
We have started LangChainDatasets a Community space on Hugging Face.
We intend this to be a collection of open source datasets for evaluating common chains and agents.
We have contributed five datasets of our own to start, but we very much intend this to be a community effort.
In order to contribute a dataset, you simply need to join the community and then you will be able to upload datasets.
We’re also aiming to make it as easy as possible for people to create their own datasets.
As a first pass at this, we’ve added a QAGenerationChain, which given a document comes up
with question-answer pairs that can be used to evaluate question-answering tasks over that document down the line.
See this notebook for an example of how to use this chain.
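As an illustration, a minimal sketch of using it (assuming an OpenAI API key is configured; the input text is illustrative):
from langchain.chains import QAGenerationChain
from langchain.chat_models import ChatOpenAI
chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
# Returns question/answer pairs generated from the passed-in text,
# e.g. [{"question": "...", "answer": "..."}]
qa_pairs = chain.run("LangChain is a framework for developing applications powered by language models.")
print(qa_pairs)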
# 2: Lack of metrics
We have two solutions to the lack of metrics.
The first solution is to use no metrics, and rather just rely on looking at results by eye to get a sense for how the chain/agent is performing.
To assist in this, we have developed (and will continue to develop) tracing, a UI-based visualizer of your chain and agent runs.
The second solution we recommend is to use Language Models themselves to evaluate outputs.
For this we have a few different chains and prompts aimed at tackling this issue.
The Examples#
We have created a bunch of examples combining the above two solutions to show how we internally evaluate chains and agents when we are developing.
In addition to the examples we’ve curated, we also highly welcome contributions here.
To facilitate that, we’ve included a template notebook for community members to use to build their own examples.
The existing examples we have are:
Question Answering (State of Union): A notebook showing evaluation of a question-answering task over a State-of-the-Union address.
Question Answering (Paul Graham Essay): A notebook showing evaluation of a question-answering task over a Paul Graham essay.
SQL Question Answering (Chinook): A notebook showing evaluation of a question-answering task over a SQL database (the Chinook database).
Agent Vectorstore: A notebook showing evaluation of an agent doing question answering while routing between two different vector databases.
Agent Search + Calculator: A notebook showing evaluation of an agent doing question answering using a Search engine and a Calculator as tools.
Evaluating an OpenAPI Chain: A notebook showing evaluation of an OpenAPI chain, including how to generate test data if you don’t have any.
Other Examples#
In addition, we also have some more generic resources for evaluation.
Question Answering: An overview of LLMs aimed at evaluating question answering systems in general.
Data Augmented Question Answering: An end-to-end example of evaluating a question answering system focused on a specific document (a RetrievalQAChain to be precise). This example highlights how to use LLMs to come up with question/answer examples to evaluate over, and then highlights how to use LLMs to evaluate performance on those generated examples.
Hugging Face Datasets: Covers an example of loading and using a dataset from Hugging Face for evaluation.
https:///langchain-cn.readthedocs.io/en/latest/use_cases/evaluation.html
Personal Assistants (Agents)#
Conceptual Guide
We use “personal assistant” here in a very broad sense.
Personal assistants have a few characteristics:
They can interact with the outside world
They have knowledge of your data
They remember your interactions
Really all of the functionality in LangChain is relevant for building a personal assistant.
Highlighting specific parts:
Agent Documentation (for interacting with the outside world)
Index Documentation (for giving them knowledge of your data)
Memory (for helping them remember interactions)
Specific examples of this include:
Baby AGI: a notebook implementing BabyAGI by Yohei Nakajima as LLM Chains
Baby AGI with Tools: building off the above notebook, this example substitutes in an agent with tools as the execution tools, allowing it to actually take actions.
CAMEL: an implementation of the CAMEL (Communicative Agents for “Mind” Exploration of Large Scale Language Model Society) paper, where two agents communicate with each other.
AI Plugins: an implementation of an agent that is designed to be able to use all AI Plugins.
https:///langchain-cn.readthedocs.io/en/latest/use_cases/personal_assistants.html
Extraction#
Conceptual Guide
Most APIs and databases still deal with structured information.
Therefore, in order to better work with those, it can be useful to extract structured information from text.
Examples of this include:
Extracting a structured row to insert into a database from a sentence
Extracting multiple rows to insert into a database from a long document
Extracting the correct API parameters from a user query
This work is closely related to output parsing.
Output parsers are responsible for instructing the LLM to respond in a specific format.
In this case, the output parsers specify the format of the data you would like to extract from the document.
Then, in addition to the output format instructions, the prompt should also contain the data you would like to extract information from.
While normal output parsers are good enough for basic structuring of response data,
when doing extraction you often want to extract more complicated or nested structures.
For a deep dive on extraction, we recommend checking out kor,
a library that uses the existing LangChain chain and OutputParser abstractions
but deep dives on allowing extraction of more complicated schemas.
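As a concrete illustration, here is a minimal sketch of extraction with a Pydantic output parser (assuming an OpenAI API key is configured; the schema and prompt are illustrative, not a prescribed API):
from pydantic import BaseModel, Field
from langchain import OpenAI, PromptTemplate
from langchain.output_parsers import PydanticOutputParser

class Person(BaseModel):
    name: str = Field(description="the person's name")
    age: int = Field(description="the person's age")

parser = PydanticOutputParser(pydantic_object=Person)
prompt = PromptTemplate(
    template="Extract the requested fields.\n{format_instructions}\nText: {text}",
    input_variables=["text"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
llm = OpenAI(temperature=0)
output = llm(prompt.format(text="Alice is 30 years old."))
print(parser.parse(output))  # Person(name='Alice', age=30)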
https:///langchain-cn.readthedocs.io/en/latest/use_cases/extraction.html
Interacting with APIs#
Conceptual Guide
Lots of data and information is stored behind APIs.
This page covers all resources available in LangChain for working with APIs.
Chains#
If you are just getting started and you have relatively simple APIs, you should get started with chains.
Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you
understand what is happening better.
API Chain
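For illustration, a minimal sketch of the API Chain linked above (assuming an OpenAI API key is configured; the Open-Meteo docs constant shipped with LangChain is used purely as an example):
from langchain import OpenAI
from langchain.chains import APIChain
from langchain.chains.api import open_meteo_docs
llm = OpenAI(temperature=0)
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True)
print(chain.run("What is the current temperature in Munich, in celsius?"))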
Agents#
Agents are more complex, and involve multiple queries to the LLM to understand what to do.
The downside of agents is that you have less control. The upside is that they are more powerful,
which allows you to use them on larger and more complex schemas.
OpenAPI Agent
https:///langchain-cn.readthedocs.io/en/latest/use_cases/apis.html
Question Answering over Docs#
Conceptual Guide
Question answering in this context refers to question answering over your document data.
For question answering over other types of data, please see other sources documentation like SQL database Question Answering or Interacting with APIs.
For question answering over many documents, you almost always want to create an index over the data.
This can be used to smartly access the most relevant documents for a given question, allowing you to avoid having to pass all the documents to the LLM (saving you time and money).
See this notebook for a more detailed introduction to this, but for a super quick start the steps involved are:
Load Your Documents
from langchain.document_loaders import TextLoader
loader = TextLoader('../state_of_the_union.txt')
See here for more information on how to get started with document loading.
Create Your Index
from langchain.indexes import VectorstoreIndexCreator
index = VectorstoreIndexCreator().from_loaders([loader])
The best and most popular index by far at the moment is the VectorStore index.
Query Your Index
query = "What did the president say about Ketanji Brown Jackson"
index.query(query)
Alternatively, use query_with_sources to also get back the sources involved
query = "What did the president say about Ketanji Brown Jackson"
index.query_with_sources(query)
Again, these high level interfaces obfuscate a lot of what is going on under the hood, so please see this notebook for a lower level walkthrough.
Document Question Answering#
Question answering involves fetching multiple documents, and then asking a question of them.
The LLM response will contain the answer to your question, based on the content of the documents.
The recommended way to get started using a question answering chain is:
from langchain.chains.question_answering import load_qa_chain
chain = load_qa_chain(llm, chain_type="stuff")
chain.run(input_documents=docs, question=query)
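The snippet above assumes llm and docs are already defined; a minimal sketch of preparing them (file path illustrative, assuming an OpenAI API key is configured):
from langchain.llms import OpenAI
from langchain.document_loaders import TextLoader
llm = OpenAI(temperature=0)
docs = TextLoader('../state_of_the_union.txt').load()
query = "What did the president say about Ketanji Brown Jackson"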
The following resources exist:
Question Answering Notebook: A notebook walking through how to accomplish this task.
VectorDB Question Answering Notebook: A notebook walking through how to do question answering over a vector database. This can often be useful for when you have a LOT of documents, and you don’t want to pass them all to the LLM, but rather first want to do some semantic search over embeddings.
Adding in sources#
There is also a variant of this, where in addition to responding with the answer the language model will also cite its sources (e.g. which of the passed-in documents it used).
The recommended way to get started using a question answering with sources chain is:
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
chain = load_qa_with_sources_chain(llm, chain_type="stuff")
chain({"input_documents": docs, "question": query}, return_only_outputs=True)
The following resources exist:
QA With Sources Notebook: A notebook walking through how to accomplish this task.
VectorDB QA With Sources Notebook: A notebook walking through how to do question answering with sources over a vector database. This can often be useful for when you have a LOT of documents, and you don’t want to pass them all to the LLM, but rather first want to do some semantic search over embeddings.
Additional Related Resources#
Additional related resources include:
Utilities for working with Documents: Guides on how to use several of the utilities which will prove helpful for this task, including Text Splitters (for splitting up long documents) and Embeddings & Vectorstores (useful for the above Vector DB example).
CombineDocuments Chains: A conceptual overview of specific types of chains by which you can accomplish this task.
https:///langchain-cn.readthedocs.io/en/latest/use_cases/question_answering.html
Chatbots#
Conceptual Guide
Since language models are good at producing text, that makes them ideal for creating chatbots.
Aside from the base prompts/LLMs, an important concept to know for Chatbots is memory.
Most chat-based applications rely on remembering what happened in previous interactions, which memory is designed to help with.
The following resources exist:
ChatGPT Clone: A notebook walking through how to recreate a ChatGPT-like experience with LangChain.
Conversation Memory: A notebook walking through how to use different types of conversational memory.
Conversation Agent: A notebook walking through how to create an agent optimized for conversation.
Additional related resources include:
Memory Key Concepts: Explanation of key concepts related to memory.
Memory Examples: A collection of how-to examples for working with memory.
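For illustration, a minimal chatbot sketch with windowed conversational memory (assuming an OpenAI API key is configured; the memory type and parameters are illustrative):
from langchain import ConversationChain, OpenAI
from langchain.memory import ConversationBufferWindowMemory
chatbot = ConversationChain(
    llm=OpenAI(temperature=0.7),
    memory=ConversationBufferWindowMemory(k=2),  # keep only the last 2 turns
)
print(chatbot.predict(input="Hello! My name is Ada."))
print(chatbot.predict(input="What's my name?"))  # answered from memory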
https:///langchain-cn.readthedocs.io/en/latest/use_cases/chatbots.html
Code Understanding#
Overview
LangChain is a useful tool designed to parse GitHub code repositories. By leveraging VectorStores, Conversational RetrieverChain, and GPT-4, it can answer questions in the context of an entire GitHub repository or generate new code. This documentation page outlines the essential components of the system and guides you through using LangChain for better code comprehension, contextual question answering, and code generation in GitHub repositories.
Conversational Retriever Chain#
Conversational RetrieverChain is a retrieval-focused system that interacts with the data stored in a VectorStore. Utilizing advanced techniques, like context-aware filtering and ranking, it retrieves the most relevant code snippets and information for a given user query. Conversational RetrieverChain is engineered to deliver high-quality, pertinent results while considering conversation history and context.
LangChain Workflow for Code Understanding and Generation
Index the code base: Clone the target repository, load all files within, chunk the files, and execute the indexing process. Optionally, you can skip this step and use an already indexed dataset.
Embedding and Code Store: Code snippets are embedded using a code-aware embedding model and stored in a VectorStore.
Query Understanding: GPT-4 processes user queries, grasping the context and extracting relevant details.
Construct the Retriever: Conversational RetrieverChain searches the VectorStore to identify the most relevant code snippets for a given query.
Build the Conversational Chain: Customize the retriever settings and define any user-defined filters as needed.
Ask questions: Define a list of questions to ask about the codebase, and then use the ConversationalRetrievalChain to generate context-aware answers. The LLM (GPT-4) generates comprehensive, context-aware answers based on retrieved code snippets and conversation history.
The full tutorial is available below.
Twitter the-algorithm codebase analysis with Deep Lake: A notebook walking through how to parse GitHub source code and run conversational queries over it.
https:///langchain-cn.readthedocs.io/en/latest/use_cases/code.html
Summarization#
Conceptual Guide
Summarization involves creating a smaller summary of multiple longer documents.
This can be useful for distilling long documents into the core pieces of information.
The recommended way to get started using a summarization chain is:
from langchain.chains.summarize import load_summarize_chain
chain = load_summarize_chain(llm, chain_type="map_reduce")
chain.run(docs)
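The snippet assumes llm and docs are already defined; a minimal sketch of preparing them (file path illustrative, assuming an OpenAI API key is configured):
from langchain import OpenAI
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
llm = OpenAI(temperature=0)
raw_docs = TextLoader('long_article.txt').load()
docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(raw_docs)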
The following resources exist:
Summarization Notebook: A notebook walking through how to accomplish this task.
Additional related resources include:
Utilities for working with Documents: Guides on how to use several of the utilities which will prove helpful for this task, including Text Splitters (for splitting up long documents).
https:///langchain-cn.readthedocs.io/en/latest/use_cases/summarization.html
Querying Tabular Data#
Conceptual Guide
Lots of data and information is stored in tabular data, whether it be csvs, excel sheets, or SQL tables.
This page covers all resources available in LangChain for working with data in this format.
Document Loading#
If you have text data stored in a tabular format, you may want to load the data into a Document and then index it as you would
other text/unstructured data. For this, you should use a document loader like the CSVLoader
and then you should create an index over that data, and query it that way.
Querying#
If you have more numeric tabular data, or have a large amount of data and don’t want to index it, you should get started
by looking at various chains and agents we have for dealing with this data.
Chains#
If you are just getting started, and you have relatively small/simple tabular data, you should get started with chains.
Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you
understand what is happening better.
SQL Database Chain
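For illustration, a minimal sketch of the SQL Database Chain linked above (assuming an OpenAI API key and a local Chinook.db SQLite file; the path is illustrative):
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain
db = SQLDatabase.from_uri("sqlite:///Chinook.db")
llm = OpenAI(temperature=0)
db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)
print(db_chain.run("How many employees are there?"))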
Agents#
Agents are more complex, and involve multiple queries to the LLM to understand what to do.
The downside of agents is that you have less control. The upside is that they are more powerful,
which allows you to use them on larger databases and more complex schemas.
SQL Agent
Pandas Agent
CSV Agent
https:///langchain-cn.readthedocs.io/en/latest/use_cases/tabular.html
Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Deep Lake#
In this tutorial, we are going to use LangChain + Deep Lake with GPT-4 to analyze the codebase of the Twitter algorithm.
!python3 -m pip install --upgrade langchain deeplake openai tiktoken
Define OpenAI embeddings and the Deep Lake multi-modal vector store API, and authenticate. For full documentation of Deep Lake please follow https://docs.activeloop.ai/ and the API reference https://docs.deeplake.ai/en/latest/
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import DeepLake
os.environ['OPENAI_API_KEY']='sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
embeddings = OpenAIEmbeddings()
Authenticate into Deep Lake if you want to create your own dataset and publish it. You can get an API key from the platform at https://app.activeloop.ai
!activeloop login -t <TOKEN>
1. Index the code base (optional)#
You can skip this part and jump directly to using the already indexed dataset. To begin, we will clone the repository, then parse and chunk the code base, and index it using OpenAI embeddings.
!git clone https://github.com/twitter/the-algorithm # replace any repository of your choice
Load all files inside the repository
import os
from langchain.document_loaders import TextLoader
root_dir = './the-algorithm'
docs = []
for dirpath, dirnames, filenames in os.walk(root_dir):
for file in filenames:
try:
# Completion of the truncated snippet: load each file and skip any that
# cannot be read as text.
loader = TextLoader(os.path.join(dirpath, file), encoding='utf-8')
docs.extend(loader.load_and_split())
except Exception as e:
pass
https:///langchain-cn.readthedocs.io/en/latest/use_cases/code/twitter-the-algorithm-analysis-deeplake.html