Source code for langchain.llms.promptlayer_openai (excerpt: the tail of PromptLayerOpenAIChat._agenerate)
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/llms/promptlayer_openai.html

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = await promptlayer_api_request_async(
                "langchain.PromptLayerOpenAIChat.async",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses
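A minimal usage sketch for the wrapper above, not part of the module source (assumed: the `promptlayer` and `openai` packages are installed, PROMPTLAYER_API_KEY and OPENAI_API_KEY are set, and PromptLayerOpenAIChat is exported from langchain.llms as in this release; the prompt is illustrative):

from langchain.llms import PromptLayerOpenAIChat

llm = PromptLayerOpenAIChat(pl_tags=["example"], return_pl_id=True)
result = llm.generate(["Tell me a joke."])
# With return_pl_id=True, the code above attaches the PromptLayer request id
# to each generation's generation_info dict.
for generations in result.generations:
    print(generations[0].text)
    print(generations[0].generation_info["pl_request_id"])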
Source code for langchain.llms.anthropic
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/llms/anthropic.html

"""Wrapper around Anthropic APIs."""
import re
from typing import Any, Dict, Generator, List, Mapping, Optional

from pydantic import Extra, root_validator

from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env


class Anthropic(LLM):
    r"""Wrapper around Anthropic large language models.

    To use, you should have the ``anthropic`` python package installed, and the
    environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            import anthropic
            from langchain.llms import Anthropic
            model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")

            # Simplest invocation, automatically wrapped with HUMAN_PROMPT
            # and AI_PROMPT.
            response = model("What are the biggest risks facing humanity?")

            # Or if you want to use the chat mode, build a few-shot-prompt, or
            # put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
            raw_prompt = "What are the biggest risks facing humanity?"
            prompt = f"{anthropic.HUMAN_PROMPT} {raw_prompt}{anthropic.AI_PROMPT}"
            response = model(prompt)
    """

    client: Any  #: :meta private:
    model: str = "claude-v1"
    """Model name to use."""

    max_tokens_to_sample: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: float = 1.0
    """A non-negative float that tunes the degree of randomness in generation."""

    top_k: int = 0
    """Number of most likely tokens to consider at each step."""

    top_p: float = 1
    """Total probability mass of tokens to consider at each step."""

    streaming: bool = False
    """Whether to stream the results."""

    anthropic_api_key: Optional[str] = None

    HUMAN_PROMPT: Optional[str] = None
    AI_PROMPT: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        anthropic_api_key = get_from_dict_or_env(
            values, "anthropic_api_key", "ANTHROPIC_API_KEY"
        )
        try:
            import anthropic

            values["client"] = anthropic.Client(anthropic_api_key)
            values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT
            values["AI_PROMPT"] = anthropic.AI_PROMPT
        except ImportError:
            raise ValueError(
                "Could not import anthropic python package. "
                "Please install it with `pip install anthropic`."
            )
        return values

    @property
    def _default_params(self) -> Mapping[str, Any]:
        """Get the default parameters for calling Anthropic API."""
        return {
            "max_tokens_to_sample": self.max_tokens_to_sample,
            "temperature": self.temperature,
            "top_k": self.top_k,
            "top_p": self.top_p,
        }

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "anthropic"

    def _wrap_prompt(self, prompt: str) -> str:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
            raise NameError("Please ensure the anthropic package is loaded")

        if prompt.startswith(self.HUMAN_PROMPT):
            return prompt  # Already wrapped.

        # Guard against common errors in specifying wrong number of newlines.
        corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt)
        if n_subs == 1:
            return corrected_prompt

        # As a last resort, wrap the prompt ourselves to emulate instruct-style.
        return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"

    def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
            raise NameError("Please ensure the anthropic package is loaded")

        if stop is None:
            stop = []

        # Never want model to invent new turns of Human / Assistant dialog.
        stop.extend([self.HUMAN_PROMPT, self.AI_PROMPT])

        return stop

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        r"""Call out to Anthropic's completion endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                prompt = "What are the biggest risks facing humanity?"
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                response = model(prompt)
        """
        stop = self._get_anthropic_stop(stop)
        if self.streaming:
            stream_resp = self.client.completion_stream(
                model=self.model,
                prompt=self._wrap_prompt(prompt),
                stop_sequences=stop,
                stream=True,
                **self._default_params,
            )
            current_completion = ""
            for data in stream_resp:
                delta = data["completion"][len(current_completion) :]
                current_completion = data["completion"]
                self.callback_manager.on_llm_new_token(
                    delta, verbose=self.verbose, **data
                )
            return current_completion
        response = self.client.completion(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,
        )
        return response["completion"]

    async def _acall(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call out to Anthropic's completion endpoint asynchronously."""
        stop = self._get_anthropic_stop(stop)
        if self.streaming:
            stream_resp = await self.client.acompletion_stream(
                model=self.model,
                prompt=self._wrap_prompt(prompt),
                stop_sequences=stop,
                stream=True,
                **self._default_params,
            )
            current_completion = ""
            async for data in stream_resp:
                delta = data["completion"][len(current_completion) :]
                current_completion = data["completion"]
                if self.callback_manager.is_async:
                    await self.callback_manager.on_llm_new_token(
                        delta, verbose=self.verbose, **data
                    )
                else:
                    self.callback_manager.on_llm_new_token(
                        delta, verbose=self.verbose, **data
                    )
            return current_completion
        response = await self.client.acompletion(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,
        )
        return response["completion"]

    def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
        r"""Call Anthropic completion_stream and return the resulting generator.

        BETA: this is a beta feature while we figure out the right abstraction.
        Once that happens, this interface could change.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens from Anthropic.

        Example:
            .. code-block:: python

                prompt = "Write a poem about a stream."
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                generator = anthropic.stream(prompt)
                for token in generator:
                    yield token
        """
        stop = self._get_anthropic_stop(stop)
        return self.client.completion_stream(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,
        )
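A usage sketch for the Anthropic wrapper, not part of the module source (assumed: the `anthropic` package is installed and ANTHROPIC_API_KEY is set; prompts are illustrative). The streaming events carry the cumulative completion text, which is why both _call above and this sketch compute deltas by slicing:

from langchain.llms import Anthropic

llm = Anthropic(model="claude-v1", temperature=0.7, max_tokens_to_sample=128)

# Bare prompts are wrapped with HUMAN_PROMPT/AI_PROMPT by _wrap_prompt.
print(llm("What are the biggest risks facing humanity?"))

# The beta stream() method returns the raw completion_stream generator;
# each event's "completion" field holds the full text generated so far.
previous = ""
for event in llm.stream("Write a poem about a stream."):
    print(event["completion"][len(previous):], end="", flush=True)
    previous = event["completion"]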
Source code for langchain.llms.openai
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/llms/openai.html

"""Wrapper around OpenAI APIs."""
from __future__ import annotations

import logging
import sys
import warnings
from typing import (
    Any,
    Callable,
    Dict,
    Generator,
    List,
    Mapping,
    Optional,
    Set,
    Tuple,
    Union,
)

from pydantic import Extra, Field, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.llms.base import BaseLLM
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def update_token_usage(
    keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
    """Update token usage."""
    _keys_to_use = keys.intersection(response["usage"])
    for _key in _keys_to_use:
        if _key not in token_usage:
            token_usage[_key] = response["usage"][_key]
        else:
            token_usage[_key] += response["usage"][_key]


def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
    """Update response from the stream response."""
    response["choices"][0]["text"] += stream_response["choices"][0]["text"]
    response["choices"][0]["finish_reason"] = stream_response["choices"][0][
        "finish_reason"
    ]
    response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]


def _streaming_response_template() -> Dict[str, Any]:
    return {
        "choices": [
            {
                "text": "",
                "finish_reason": None,
                "logprobs": None,
            }
        ]
    }


def _create_retry_decorator(llm: Union[BaseOpenAI, OpenAIChat]) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def completion_with_retry(llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any) -> Any:
    """Use tenacity to retry the completion call."""
    retry_decorator = _create_retry_decorator(llm)

    @retry_decorator
    def _completion_with_retry(**kwargs: Any) -> Any:
        return llm.client.create(**kwargs)

    return _completion_with_retry(**kwargs)


async def acompletion_with_retry(
    llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any
) -> Any:
    """Use tenacity to retry the async completion call."""
    retry_decorator = _create_retry_decorator(llm)

    @retry_decorator
    async def _completion_with_retry(**kwargs: Any) -> Any:
        # Use OpenAI's async api https://github.com/openai/openai-python#async-api
        return await llm.client.acreate(**kwargs)

    return await _completion_with_retry(**kwargs)


class BaseOpenAI(BaseLLM):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            openai = OpenAI(model_name="text-davinci-003")
    """

    client: Any  #: :meta private:
    model_name: str = "text-davinci-003"
    """Model name to use."""
    temperature: float = 0.7
    """What sampling temperature to use."""
    max_tokens: int = 256
    """The maximum number of tokens to generate in the completion.
    -1 returns as many tokens as possible given the prompt and
    the model's maximal context size."""
    top_p: float = 1
    """Total probability mass of tokens to consider at each step."""
    frequency_penalty: float = 0
    """Penalizes repeated tokens according to frequency."""
    presence_penalty: float = 0
    """Penalizes repeated tokens."""
    n: int = 1
    """How many completions to generate for each prompt."""
    best_of: int = 1
    """Generates best_of completions server-side and returns the "best"."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    openai_api_key: Optional[str] = None
    openai_api_base: Optional[str] = None
    openai_organization: Optional[str] = None
    batch_size: int = 20
    """Batch size to use when passing multiple documents to generate."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
    logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
    """Adjust the probability of specific tokens being generated."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    streaming: bool = False
    """Whether to stream the results or not."""

    def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]:  # type: ignore
        """Initialize the OpenAI object."""
        model_name = data.get("model_name", "")
        if model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4"):
            warnings.warn(
                "You are trying to use a chat model. This way of initializing it is "
                "no longer supported. Instead, please use: "
                "`from langchain.chat_models import ChatOpenAI`"
            )
            return OpenAIChat(**data)
        return super().__new__(cls)

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.ignore

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""WARNING! {field_name} is not a default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        openai_api_key = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        openai_api_base = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        openai_organization = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            openai.api_key = openai_api_key
            if openai_api_base:
                print("USING API_BASE: ")
                print(openai_api_base)
                openai.api_base = openai_api_base
            if openai_organization:
                print("USING ORGANIZATION: ")
                print(openai_organization)
                openai.organization = openai_organization
            values["client"] = openai.Completion
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        if values["streaming"] and values["n"] > 1:
            raise ValueError("Cannot stream results when n > 1.")
        if values["streaming"] and values["best_of"] > 1:
            raise ValueError("Cannot stream results when best_of > 1.")
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        normal_params = {
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
            "top_p": self.top_p,
            "frequency_penalty": self.frequency_penalty,
            "presence_penalty": self.presence_penalty,
            "n": self.n,
            "best_of": self.best_of,
            "request_timeout": self.request_timeout,
            "logit_bias": self.logit_bias,
        }
        return {**normal_params, **self.model_kwargs}

    def _generate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Call out to OpenAI's endpoint with k unique prompts.

        Args:
            prompts: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The full LLM output.

        Example:
            .. code-block:: python

                response = openai.generate(["Tell me a joke."])
        """
        # TODO: write a unit test for this
        params = self._invocation_params
        sub_prompts = self.get_sub_prompts(params, prompts, stop)
        choices = []
        token_usage: Dict[str, int] = {}
        # Get the token usage from the response.
        # Includes prompt, completion, and total tokens used.
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
        for _prompts in sub_prompts:
            if self.streaming:
                if len(_prompts) > 1:
                    raise ValueError("Cannot stream results with multiple prompts.")
                params["stream"] = True
                response = _streaming_response_template()
                for stream_resp in completion_with_retry(
                    self, prompt=_prompts, **params
                ):
                    self.callback_manager.on_llm_new_token(
                        stream_resp["choices"][0]["text"],
                        verbose=self.verbose,
                        logprobs=stream_resp["choices"][0]["logprobs"],
                    )
                    _update_response(response, stream_resp)
                choices.extend(response["choices"])
            else:
                response = completion_with_retry(self, prompt=_prompts, **params)
                choices.extend(response["choices"])
            if not self.streaming:
                # Can't update token usage if streaming
                update_token_usage(_keys, response, token_usage)
        return self.create_llm_result(choices, prompts, token_usage)

    async def _agenerate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Call out to OpenAI's endpoint async with k unique prompts."""
        params = self._invocation_params
        sub_prompts = self.get_sub_prompts(params, prompts, stop)
        choices = []
        token_usage: Dict[str, int] = {}
        # Get the token usage from the response.
        # Includes prompt, completion, and total tokens used.
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
        for _prompts in sub_prompts:
            if self.streaming:
                if len(_prompts) > 1:
                    raise ValueError("Cannot stream results with multiple prompts.")
                params["stream"] = True
                response = _streaming_response_template()
                async for stream_resp in await acompletion_with_retry(
                    self, prompt=_prompts, **params
                ):
                    if self.callback_manager.is_async:
                        await self.callback_manager.on_llm_new_token(
                            stream_resp["choices"][0]["text"],
                            verbose=self.verbose,
                            logprobs=stream_resp["choices"][0]["logprobs"],
                        )
                    else:
                        self.callback_manager.on_llm_new_token(
                            stream_resp["choices"][0]["text"],
                            verbose=self.verbose,
                            logprobs=stream_resp["choices"][0]["logprobs"],
                        )
                    _update_response(response, stream_resp)
                choices.extend(response["choices"])
            else:
                response = await acompletion_with_retry(self, prompt=_prompts, **params)
                choices.extend(response["choices"])
            if not self.streaming:
                # Can't update token usage if streaming
                update_token_usage(_keys, response, token_usage)
        return self.create_llm_result(choices, prompts, token_usage)

    def get_sub_prompts(
        self,
        params: Dict[str, Any],
        prompts: List[str],
        stop: Optional[List[str]] = None,
    ) -> List[List[str]]:
        """Get the sub prompts for llm call."""
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        if params["max_tokens"] == -1:
            if len(prompts) != 1:
                raise ValueError(
                    "max_tokens set to -1 not supported for multiple inputs."
                )
            params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
        sub_prompts = [
            prompts[i : i + self.batch_size]
            for i in range(0, len(prompts), self.batch_size)
        ]
        return sub_prompts

    def create_llm_result(
        self, choices: Any, prompts: List[str], token_usage: Dict[str, int]
    ) -> LLMResult:
        """Create the LLMResult from the choices and prompts."""
        generations = []
        for i, _ in enumerate(prompts):
            sub_choices = choices[i * self.n : (i + 1) * self.n]
            generations.append(
                [
                    Generation(
                        text=choice["text"],
                        generation_info=dict(
                            finish_reason=choice.get("finish_reason"),
                            logprobs=choice.get("logprobs"),
                        ),
                    )
                    for choice in sub_choices
                ]
            )
        llm_output = {"token_usage": token_usage, "model_name": self.model_name}
        return LLMResult(generations=generations, llm_output=llm_output)

    def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
        """Call OpenAI with streaming flag and return the resulting generator.

        BETA: this is a beta feature while we figure out the right abstraction.
        Once that happens, this interface could change.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens from OpenAI.

        Example:
            .. code-block:: python

                generator = openai.stream("Tell me a joke.")
                for token in generator:
                    yield token
        """
        params = self.prep_streaming_params(stop)
        generator = self.client.create(prompt=prompt, **params)

        return generator

    def prep_streaming_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
        """Prepare the params for streaming."""
        params = self._invocation_params
        if params["best_of"] != 1:
            raise ValueError("OpenAI only supports best_of == 1 for streaming")
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        params["stream"] = True
        return params

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        """Get the parameters used to invoke the model."""
        return self._default_params

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "openai"

    def get_num_tokens(self, text: str) -> int:
        """Calculate num tokens with tiktoken package."""
        # tiktoken NOT supported for Python 3.8 or below
        if sys.version_info[1] <= 8:
            return super().get_num_tokens(text)
        try:
            import tiktoken
        except ImportError:
            raise ValueError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install tiktoken`."
            )
        encoder = "gpt2"
        if self.model_name in ("text-davinci-003", "text-davinci-002"):
            encoder = "p50k_base"
        if self.model_name.startswith("code"):
            encoder = "p50k_base"
        # create a GPT-3 encoder instance
        enc = tiktoken.get_encoding(encoder)
        # encode the text using the GPT-3 encoder
        tokenized_text = enc.encode(text)
        # calculate the number of tokens in the encoded text
        return len(tokenized_text)

    def modelname_to_contextsize(self, modelname: str) -> int:
        """Calculate the maximum number of tokens possible to generate for a model.

        Args:
            modelname: The modelname we want to know the context size for.

        Returns:
            The maximum context size.

        Example:
            .. code-block:: python

                max_tokens = openai.modelname_to_contextsize("text-davinci-003")
        """
        model_token_mapping = {
            "gpt-4": 8192,
            "gpt-4-0314": 8192,
            "gpt-4-32k": 32768,
            "gpt-4-32k-0314": 32768,
            "gpt-3.5-turbo": 4096,
            "gpt-3.5-turbo-0301": 4096,
            "text-ada-001": 2049,
            "ada": 2049,
            "text-babbage-001": 2040,
            "babbage": 2049,
            "text-curie-001": 2049,
            "curie": 2049,
            "davinci": 2049,
            "text-davinci-003": 4097,
            "text-davinci-002": 4097,
            "code-davinci-002": 8001,
            "code-davinci-001": 8001,
            "code-cushman-002": 2048,
            "code-cushman-001": 2048,
        }

        context_size = model_token_mapping.get(modelname, None)

        if context_size is None:
            raise ValueError(
                f"Unknown model: {modelname}. Please provide a valid OpenAI model name. "
                "Known models are: " + ", ".join(model_token_mapping.keys())
            )

        return context_size

    def max_tokens_for_prompt(self, prompt: str) -> int:
        """Calculate the maximum number of tokens possible to generate for a prompt.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The maximum number of tokens to generate for a prompt.

        Example:
            .. code-block:: python

                max_tokens = openai.max_tokens_for_prompt("Tell me a joke.")
        """
        num_tokens = self.get_num_tokens(prompt)

        # get max context size for model by name
        max_size = self.modelname_to_contextsize(self.model_name)
        return max_size - num_tokens


class OpenAI(BaseOpenAI):
    """Generic OpenAI class that uses model name."""

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        return {**{"model": self.model_name}, **super()._invocation_params}


class AzureOpenAI(BaseOpenAI):
    """Azure-specific OpenAI class that uses deployment name."""

    deployment_name: str = ""
    """Deployment name to use."""

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {
            **{"deployment_name": self.deployment_name},
            **super()._identifying_params,
        }

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        return {**{"engine": self.deployment_name}, **super()._invocation_params}


class OpenAIChat(BaseLLM):
    """Wrapper around OpenAI Chat large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAIChat
            openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
    """

    client: Any  #: :meta private:
    model_name: str = "gpt-3.5-turbo"
    """Model name to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    openai_api_key: Optional[str] = None
    openai_api_base: Optional[str] = None
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    prefix_messages: List = Field(default_factory=list)
    """Series of messages for Chat input."""
    streaming: bool = False
    """Whether to stream the results or not."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.ignore

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        openai_api_key = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        openai_api_base = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        openai_organization = get_from_dict_or_env(
            values, "openai_organization", "OPENAI_ORGANIZATION", default=""
        )
        try:
            import openai

            openai.api_key = openai_api_key
            if openai_api_base:
                openai.api_base = openai_api_base
            if openai_organization:
                openai.organization = openai_organization
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        try:
            values["client"] = openai.ChatCompletion
        except AttributeError:
            raise ValueError(
                "`openai` has no `ChatCompletion` attribute, this is likely "
                "due to an old version of the openai package. Try upgrading it "
                "with `pip install --upgrade openai`."
            )
        warnings.warn(
            "You are trying to use a chat model. This way of initializing it is "
            "no longer supported. Instead, please use: "
            "`from langchain.chat_models import ChatOpenAI`"
        )
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        return self.model_kwargs

    def _get_chat_params(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> Tuple:
        if len(prompts) > 1:
            raise ValueError(
                f"OpenAIChat currently only supports single prompt, got {prompts}"
            )
        messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
        params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        if params.get("max_tokens") == -1:
            # for ChatGPT api, omitting max_tokens is equivalent to having no limit
            del params["max_tokens"]
        return messages, params

    def _generate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        messages, params = self._get_chat_params(prompts, stop)
        if self.streaming:
            response = ""
            params["stream"] = True
            for stream_resp in completion_with_retry(self, messages=messages, **params):
                token = stream_resp["choices"][0]["delta"].get("content", "")
                response += token
                self.callback_manager.on_llm_new_token(
                    token,
                    verbose=self.verbose,
                )
            return LLMResult(
                generations=[[Generation(text=response)]],
            )
        else:
            full_response = completion_with_retry(self, messages=messages, **params)
            llm_output = {
                "token_usage": full_response["usage"],
                "model_name": self.model_name,
            }
            return LLMResult(
                generations=[
                    [Generation(text=full_response["choices"][0]["message"]["content"])]
                ],
                llm_output=llm_output,
            )

    async def _agenerate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        messages, params = self._get_chat_params(prompts, stop)
        if self.streaming:
            response = ""
            params["stream"] = True
            async for stream_resp in await acompletion_with_retry(
                self, messages=messages, **params
            ):
                token = stream_resp["choices"][0]["delta"].get("content", "")
                response += token
                if self.callback_manager.is_async:
                    await self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
                else:
                    self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
            return LLMResult(
                generations=[[Generation(text=response)]],
            )
        else:
            full_response = await acompletion_with_retry(
                self, messages=messages, **params
            )
            llm_output = {
                "token_usage": full_response["usage"],
                "model_name": self.model_name,
            }
            return LLMResult(
                generations=[
                    [Generation(text=full_response["choices"][0]["message"]["content"])]
                ],
                llm_output=llm_output,
            )

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "openai-chat"

    def get_num_tokens(self, text: str) -> int:
        """Calculate num tokens with tiktoken package."""
        # tiktoken NOT supported for Python 3.8 or below
        if sys.version_info[1] <= 8:
            return super().get_num_tokens(text)
        try:
            import tiktoken
        except ImportError:
            raise ValueError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install tiktoken`."
            )
        # create a GPT-3.5-Turbo encoder instance
        enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
        # encode the text using the GPT-3.5-Turbo encoder
        tokenized_text = enc.encode(text)
        # calculate the number of tokens in the encoded text
        return len(tokenized_text)
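A usage sketch for the completion wrapper above, not part of the module source (assumed: the `openai` and `tiktoken` packages are installed and OPENAI_API_KEY is set; prompts are illustrative):

from langchain.llms import OpenAI

llm = OpenAI(model_name="text-davinci-003")

# Context-size helpers, used internally when max_tokens == -1:
print(llm.modelname_to_contextsize("text-davinci-003"))  # 4097, per the mapping above
print(llm.max_tokens_for_prompt("Tell me a joke."))      # 4097 minus the prompt's tokens

# Prompts are chunked into batch_size sub-lists by get_sub_prompts and the
# per-prompt Generations are reassembled by create_llm_result; token usage
# is summed across batches.
result = llm.generate(["Tell me a joke.", "Tell me a riddle."])
print(result.llm_output["token_usage"])

# Raw streaming generator (beta); requires n == 1 and best_of == 1.
for chunk in llm.stream("Tell me a joke."):
    print(chunk["choices"][0]["text"], end="")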
Source code for langchain.llms.huggingface_endpoint
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/llms/huggingface_endpoint.html

"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import Extra, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

VALID_TASKS = ("text2text-generation", "text-generation")


class HuggingFaceEndpoint(LLM):
    """Wrapper around HuggingFaceHub Inference Endpoints.

    To use, you should have the ``huggingface_hub`` python package installed, and the
    environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.

    Only supports `text-generation` and `text2text-generation` for now.

    Example:
        .. code-block:: python

            from langchain.llms import HuggingFaceEndpoint
            endpoint_url = (
                "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
            )
            hf = HuggingFaceEndpoint(
                endpoint_url=endpoint_url,
                huggingfacehub_api_token="my-api-key"
            )
    """

    endpoint_url: str = ""
    """Endpoint URL to use."""
    task: Optional[str] = None
    """Task to call the model with. Should be a task that returns `generated_text`."""
    model_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model."""

    huggingfacehub_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        huggingfacehub_api_token = get_from_dict_or_env(
            values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
        )
        try:
            from huggingface_hub.hf_api import HfApi

            try:
                HfApi(
                    endpoint="https://huggingface.co",  # Can be a Private Hub endpoint.
                    token=huggingfacehub_api_token,
                ).whoami()
            except Exception as e:
                raise ValueError(
                    "Could not authenticate with huggingface_hub. "
                    "Please check your API token."
                ) from e
        except ImportError:
            raise ValueError(
                "Could not import huggingface_hub python package. "
                "Please install it with `pip install huggingface_hub`."
            )
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"endpoint_url": self.endpoint_url, "task": self.task},
            **{"model_kwargs": _model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "huggingface_endpoint"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call out to HuggingFace Hub's inference endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = hf("Tell me a joke.")
        """
        _model_kwargs = self.model_kwargs or {}

        # payload samples
        parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}

        # HTTP headers for authorization
        headers = {
            "Authorization": f"Bearer {self.huggingfacehub_api_token}",
            "Content-Type": "application/json",
        }

        # send request
        try:
            response = requests.post(
                self.endpoint_url, headers=headers, json=parameter_payload
            )
        except requests.exceptions.RequestException as e:  # This is the correct syntax
            raise ValueError(f"Error raised by inference endpoint: {e}")
        generated_text = response.json()
        if "error" in generated_text:
            raise ValueError(
                f"Error raised by inference API: {generated_text['error']}"
            )
        if self.task == "text-generation":
            # Text generation return includes the starter text.
            text = generated_text[0]["generated_text"][len(prompt) :]
        elif self.task == "text2text-generation":
            text = generated_text[0]["generated_text"]
        else:
            raise ValueError(
                f"Got invalid task {self.task}, "
                f"currently only {VALID_TASKS} are supported"
            )
        if stop is not None:
            # This is a bit hacky, but I can't figure out a better way to enforce
            # stop tokens when making calls to huggingface_hub.
            text = enforce_stop_tokens(text, stop)
        return text
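A usage sketch for the endpoint wrapper above, not part of the module source (assumed: `huggingface_hub` is installed, HUGGINGFACEHUB_API_TOKEN is set, and the endpoint URL is a placeholder for a real deployment):

from langchain.llms import HuggingFaceEndpoint

hf = HuggingFaceEndpoint(
    endpoint_url="https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud",
    task="text-generation",  # must be "text-generation" or "text2text-generation"
    model_kwargs={"max_new_tokens": 64, "temperature": 0.7},
)
# For text-generation the wrapper strips the echoed prompt from the output;
# stop sequences are enforced client-side via enforce_stop_tokens.
print(hf("Tell me a joke.", stop=["\n\n"]))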
Source code for langchain.llms.rwkv
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/llms/rwkv.html

"""Wrapper for the RWKV model.

Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
         https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
"""
from typing import Any, Dict, List, Mapping, Optional, Set

from pydantic import BaseModel, Extra, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens


class RWKV(LLM, BaseModel):
    r"""Wrapper around RWKV language models.

    To use, you should have the ``rwkv`` python package installed, the
    pre-trained model file, and the model's config information.

    Example:
        .. code-block:: python

            from langchain.llms import RWKV
            model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32")

            # Simplest invocation
            response = model("Once upon a time, ")
    """

    model: str
    """Path to the pre-trained RWKV model file."""

    tokens_path: str
    """Path to the RWKV tokens file."""

    strategy: str = "cpu fp32"
    """Strategy to use for the model (device and precision), e.g. "cpu fp32"."""

    rwkv_verbose: bool = True
    """Print debug information."""

    temperature: float = 1.0
    """The temperature to use for sampling."""

    top_p: float = 0.5
    """The top-p value to use for sampling."""

    penalty_alpha_frequency: float = 0.4
    """Positive values penalize new tokens based on their existing frequency
    in the text so far, decreasing the model's likelihood to repeat the same
    line verbatim."""

    penalty_alpha_presence: float = 0.4
    """Positive values penalize new tokens based on whether they appear
    in the text so far, increasing the model's likelihood to talk about new topics."""

    CHUNK_LEN: int = 256
    """Batch size for prompt processing."""

    max_tokens_per_generation: int = 256
    """Maximum number of tokens to generate."""

    client: Any = None  #: :meta private:

    tokenizer: Any = None  #: :meta private:

    pipeline: Any = None  #: :meta private:

    model_tokens: Any = None  #: :meta private:

    model_state: Any = None  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {
            "verbose": self.verbose,
            "top_p": self.top_p,
            "temperature": self.temperature,
            "penalty_alpha_frequency": self.penalty_alpha_frequency,
            "penalty_alpha_presence": self.penalty_alpha_presence,
            "CHUNK_LEN": self.CHUNK_LEN,
            "max_tokens_per_generation": self.max_tokens_per_generation,
        }

    @staticmethod
    def _rwkv_param_names() -> Set[str]:
        """Get the identifying parameters."""
        return {
            "verbose",
        }

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in the environment."""
        try:
            import tokenizers
        except ImportError:
            raise ValueError(
                "Could not import tokenizers python package. "
                "Please install it with `pip install tokenizers`."
            )
        try:
            from rwkv.model import RWKV as RWKVMODEL
            from rwkv.utils import PIPELINE

            values["tokenizer"] = tokenizers.Tokenizer.from_file(values["tokens_path"])

            rwkv_keys = cls._rwkv_param_names()
            model_kwargs = {k: v for k, v in values.items() if k in rwkv_keys}
            model_kwargs["verbose"] = values["rwkv_verbose"]
            values["client"] = RWKVMODEL(
                values["model"], strategy=values["strategy"], **model_kwargs
            )
            values["pipeline"] = PIPELINE(values["client"], values["tokens_path"])
        except ImportError:
            raise ValueError(
                "Could not import rwkv python package. "
                "Please install it with `pip install rwkv`."
            )
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "model": self.model,
            **self._default_params,
            **{k: v for k, v in self.__dict__.items() if k in RWKV._rwkv_param_names()},
        }

    @property
    def _llm_type(self) -> str:
        """Return the type of llm."""
        return "rwkv-4"

    def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any:
        AVOID_REPEAT_TOKENS = []
        AVOID_REPEAT = ",:?!"
        for i in AVOID_REPEAT:
            dd = self.pipeline.encode(i)
            assert len(dd) == 1
            AVOID_REPEAT_TOKENS += dd

        tokens = [int(x) for x in _tokens]
        self.model_tokens += tokens
        out: Any = None

        while len(tokens) > 0:
            out, self.model_state = self.client.forward(
                tokens[: self.CHUNK_LEN], self.model_state
            )
            tokens = tokens[self.CHUNK_LEN :]
        END_OF_LINE = 187
        out[END_OF_LINE] += newline_adj  # adjust \n probability

        if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
            out[self.model_tokens[-1]] = -999999999
        return out

    def rwkv_generate(self, prompt: str) -> str:
        self.model_state = None
        self.model_tokens = []
        logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
        begin = len(self.model_tokens)
        out_last = begin

        occurrence: Dict = {}

        decoded = ""
        for i in range(self.max_tokens_per_generation):
            for n in occurrence:
                logits[n] -= (
                    self.penalty_alpha_presence
                    + occurrence[n] * self.penalty_alpha_frequency
                )
            token = self.pipeline.sample_logits(
                logits, temperature=self.temperature, top_p=self.top_p
            )

            END_OF_TEXT = 0
            if token == END_OF_TEXT:
                break
            if token not in occurrence:
                occurrence[token] = 1
            else:
                occurrence[token] += 1

            logits = self.run_rnn([token])
            xxx = self.tokenizer.decode(self.model_tokens[out_last:])
            if "\ufffd" not in xxx:  # avoid utf-8 display issues
                decoded += xxx
                out_last = begin + i + 1
                if i >= self.max_tokens_per_generation - 100:
                    break

        return decoded

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        r"""RWKV generation.

        Args:
            prompt: The prompt to pass into the model.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                prompt = "Once upon a time, "
                response = model(prompt)
        """
        text = self.rwkv_generate(prompt)

        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        return text
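A usage sketch for the RWKV wrapper above, not part of the module source (assumed: the `rwkv` and `tokenizers` packages are installed; both file paths below are placeholders for a locally downloaded model weight file and its tokenizer JSON):

from langchain.llms import RWKV

model = RWKV(
    model="./models/rwkv-3b-fp16.bin",      # placeholder weights path
    tokens_path="./models/tokenizer.json",  # placeholder tokenizer path
    strategy="cpu fp32",
    max_tokens_per_generation=128,
)
# Generation feeds the RNN CHUNK_LEN tokens at a time and applies the
# frequency/presence penalties accumulated in `occurrence` before sampling.
print(model("Once upon a time, "))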
Source code for langchain.llms.replicate
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/llms/replicate.html

"""Wrapper around Replicate API."""
import logging
from typing import Any, Dict, List, Mapping, Optional

from pydantic import Extra, Field, root_validator

from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class Replicate(LLM):
    """Wrapper around Replicate models.

    To use, you should have the ``replicate`` python package installed,
    and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
    You can find your token here: https://replicate.com/account

    The model param is required, but any other model parameters can also
    be passed in with the format input={model_param: value, ...}

    Example:
        .. code-block:: python

            from langchain.llms import Replicate
            replicate = Replicate(
                model=(
                    "stability-ai/stable-diffusion:"
                    "27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478"
                ),
                input={"image_dimensions": "512x512"},
            )
    """

    model: str
    input: Dict[str, Any] = Field(default_factory=dict)
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    replicate_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        replicate_api_token = get_from_dict_or_env(
            values, "REPLICATE_API_TOKEN", "REPLICATE_API_TOKEN"
        )
        values["replicate_api_token"] = replicate_api_token
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of model."""
        return "replicate"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call to replicate endpoint."""
        try:
            import replicate as replicate_python
        except ImportError:
            raise ValueError(
                "Could not import replicate python package. "
                "Please install it with `pip install replicate`."
            )

        # get the model and version
        model_str, version_str = self.model.split(":")
        model = replicate_python.models.get(model_str)
        version = model.versions.get(version_str)

        # sort through the openapi schema to get the name of the first input
        input_properties = sorted(
            version.openapi_schema["components"]["schemas"]["Input"][
                "properties"
            ].items(),
            key=lambda item: item[1].get("x-order", 0),
        )
        first_input_name = input_properties[0][0]

        inputs = {first_input_name: prompt, **self.input}
        outputs = replicate_python.run(self.model, input={**inputs})
        return outputs[0]
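A usage sketch for the Replicate wrapper above, not part of the module source (assumed: the `replicate` package is installed and REPLICATE_API_TOKEN is set; the "owner/model:version-id" string and input parameters are placeholders for a real Replicate model):

from langchain.llms import Replicate

llm = Replicate(
    model="owner/model:version-id",  # placeholder model:version string
    input={"temperature": 0.75},
)
# _call looks up the model's OpenAPI schema, takes the first input property
# (ordered by x-order), and binds the prompt to that parameter name.
print(llm("Tell me a joke."))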
Source code for langchain.chains.llm_requests
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/llm_requests.html

"""Chain that hits a URL and then uses an LLM to parse results."""
from __future__ import annotations

from typing import Dict, List

from pydantic import Extra, Field, root_validator

from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain.requests import TextRequestsWrapper

DEFAULT_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"  # noqa: E501
}


class LLMRequestsChain(Chain):
    """Chain that hits a URL and then uses an LLM to parse results."""

    llm_chain: LLMChain
    requests_wrapper: TextRequestsWrapper = Field(
        default_factory=TextRequestsWrapper, exclude=True
    )
    text_length: int = 8000
    requests_key: str = "requests_result"  #: :meta private:
    input_key: str = "url"  #: :meta private:
    output_key: str = "output"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the prompt expects.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.

        :meta private:
        """
        return [self.output_key]

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        try:
            from bs4 import BeautifulSoup  # noqa: F401
        except ImportError:
            raise ValueError(
                "Could not import bs4 python package. "
                "Please install it with `pip install bs4`."
            )
        return values

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        from bs4 import BeautifulSoup

        # Other keys are assumed to be needed for LLM prediction
        other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
        url = inputs[self.input_key]
        res = self.requests_wrapper.get(url)
        # extract the text from the html
        soup = BeautifulSoup(res, "html.parser")
        other_keys[self.requests_key] = soup.get_text()[: self.text_length]
        result = self.llm_chain.predict(**other_keys)
        return {self.output_key: result}

    @property
    def _chain_type(self) -> str:
        return "llm_requests_chain"
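A usage sketch for the chain above, not part of the module source (assumed: `bs4` is installed and an OpenAI key is configured; the prompt template and URL are illustrative):

from langchain.chains import LLMChain, LLMRequestsChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

template = """Between >>> and <<< is the raw text of a web page.
Extract the page title from it.
>>> {requests_result} <<<
Title:"""
prompt = PromptTemplate(input_variables=["requests_result"], template=template)
chain = LLMRequestsChain(llm_chain=LLMChain(llm=OpenAI(), prompt=prompt))
# The chain fetches the URL, strips HTML with BeautifulSoup, truncates the
# text to text_length characters, and passes it to the LLM as requests_result.
print(chain({"url": "https://example.com"})["output"])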
Source code for langchain.chains.transform
https://langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/transform.html

"""Chain that runs an arbitrary python function."""
from typing import Callable, Dict, List

from langchain.chains.base import Chain


class TransformChain(Chain):
    """Chain that transforms the chain output.

    Example:
        .. code-block:: python

            from langchain import TransformChain
            transform_chain = TransformChain(
                input_variables=["text"],
                output_variables=["entities"],
                transform=func,
            )
    """

    input_variables: List[str]
    output_variables: List[str]
    transform: Callable[[Dict[str, str]], Dict[str, str]]

    @property
    def input_keys(self) -> List[str]:
        """Expect input keys.

        :meta private:
        """
        return self.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Return output keys.

        :meta private:
        """
        return self.output_variables

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        return self.transform(inputs)
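A concrete version of the docstring example above, not part of the module source (the function and variable names are illustrative); note that `transform` receives the function itself, not the result of calling it:

from langchain.chains import TransformChain

def extract_first_line(inputs: dict) -> dict:
    # A pure-Python step: map the input dict to the declared output variables.
    return {"first_line": inputs["text"].split("\n")[0]}

transform_chain = TransformChain(
    input_variables=["text"],
    output_variables=["first_line"],
    transform=extract_first_line,
)
print(transform_chain({"text": "line one\nline two"})["first_line"])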
f688b4c96f7d-0
Source code for langchain.chains.mapreduce """Map-reduce chain. Splits up a document, sends the smaller parts to the LLM with one prompt, then combines the results with another one. """ from __future__ import annotations from typing import Dict, List from pydantic import Extra from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.docstore.document import Document from langchain.llms.base import BaseLLM from langchain.prompts.base import BasePromptTemplate from langchain.text_splitter import TextSplitter [docs]class MapReduceChain(Chain): """Map-reduce chain.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" text_splitter: TextSplitter """Text splitter to use.""" input_key: str = "input_text" #: :meta private: output_key: str = "output_text" #: :meta private: [docs] @classmethod def from_params( cls, llm: BaseLLM, prompt: BasePromptTemplate, text_splitter: TextSplitter ) -> MapReduceChain: """Construct a map-reduce chain that uses the chain for map and reduce.""" llm_chain = LLMChain(llm=llm, prompt=prompt) reduce_chain = StuffDocumentsChain(llm_chain=llm_chain)
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/mapreduce.html
f688b4c96f7d-1
= StuffDocumentsChain(llm_chain=llm_chain) combine_documents_chain = MapReduceDocumentsChain( llm_chain=llm_chain, combine_document_chain=reduce_chain ) return cls( combine_documents_chain=combine_documents_chain, text_splitter=text_splitter ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: # Split the larger text into smaller chunks. texts = self.text_splitter.split_text(inputs[self.input_key]) docs = [Document(page_content=text) for text in texts] outputs = self.combine_documents_chain.run(input_documents=docs) return {self.output_key: outputs} By Harrison Chase
https:///langchain-cn.readthedocs.io/en/latest/_modules/langchain/chains/mapreduce.html
f688b4c96f7d-2
return {self.output_key: outputs} By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 18, 2023.
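A rough end-to-end sketch of `from_params` follows; the prompt text, model, and splitter choice are illustrative assumptions, and the same map prompt is reused for the reduce step, as the classmethod builds both from one LLMChain.

    from langchain.chains.mapreduce import MapReduceChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate
    from langchain.text_splitter import CharacterTextSplitter

    prompt = PromptTemplate(
        input_variables=["text"],
        template="Summarize this text in one sentence:\n\n{text}",
    )
    chain = MapReduceChain.from_params(
        llm=OpenAI(temperature=0),
        prompt=prompt,
        text_splitter=CharacterTextSplitter(),
    )
    # The chain's single input key is "input_text"; run() accepts it positionally.
    summary = chain.run(long_document)  # long_document: any large string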
Source code for langchain.chains.moderation

"""Pass input through a moderation endpoint."""
from typing import Any, Dict, List, Optional

from pydantic import root_validator

from langchain.chains.base import Chain
from langchain.utils import get_from_dict_or_env


[docs]class OpenAIModerationChain(Chain):
    """Pass input through a moderation endpoint.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can
    be passed in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.chains import OpenAIModerationChain
            moderation = OpenAIModerationChain()
    """

    client: Any  #: :meta private:
    model_name: Optional[str] = None
    """Moderation model name to use."""
    error: bool = False
    """Whether or not to error if bad content was found."""
    input_key: str = "input"  #: :meta private:
    output_key: str = "output"  #: :meta private:
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
        openai_api_key = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        openai_organization = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            openai.api_key = openai_api_key
            if openai_organization:
                openai.organization = openai_organization
            values["client"] = openai.Moderation
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return [self.output_key]

    def _moderate(self, text: str, results: dict) -> str:
        if results["flagged"]:
            error_str = "Text was found that violates OpenAI's content policy."
            if self.error:
                raise ValueError(error_str)
            else:
                return error_str
        return text

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        text = inputs[self.input_key]
        results = self.client.create(text)
        output = self._moderate(text, results["results"][0])
        return {self.output_key: output}
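A usage sketch contrasting the two `error` modes (assumes `OPENAI_API_KEY` is set in the environment; `some_user_text` stands in for arbitrary input):

    from langchain.chains import OpenAIModerationChain

    some_user_text = "..."  # any user-submitted string

    # Default: flagged text is replaced by a policy-violation message.
    moderation = OpenAIModerationChain()
    checked = moderation.run(some_user_text)

    # With error=True, flagged text raises ValueError instead of being rewritten.
    strict = OpenAIModerationChain(error=True)
    try:
        strict.run(some_user_text)
    except ValueError as err:
        print(f"Blocked: {err}")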
Source code for langchain.chains.sequential

"""Chain pipeline where the outputs of one step feed directly into the next."""
from typing import Dict, List

from pydantic import Extra, root_validator

from langchain.chains.base import Chain
from langchain.input import get_color_mapping


[docs]class SequentialChain(Chain):
    """Chain where the outputs of one chain feed directly into the next."""

    chains: List[Chain]
    input_variables: List[str]
    output_variables: List[str]  #: :meta private:
    return_all: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Return expected input keys to the chain.

        :meta private:
        """
        return self.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return self.output_variables

    @root_validator(pre=True)
    def validate_chains(cls, values: Dict) -> Dict:
        """Validate that the correct inputs exist for all chains."""
        chains = values["chains"]
        input_variables = values["input_variables"]
        memory_keys = list()
        if "memory" in values and values["memory"] is not None:
            # Validate that input variables are consistent with memory keys.
            memory_keys = values["memory"].memory_variables
            if set(input_variables).intersection(set(memory_keys)):
                overlapping_keys = set(input_variables) & set(memory_keys)
                raise ValueError(
                    f"The input key(s) {', '.join(overlapping_keys)} are found "
                    f"in the Memory keys ({memory_keys}) - please use input and "
                    f"memory keys that don't overlap."
                )
        known_variables = set(input_variables + memory_keys)
        for chain in chains:
            missing_vars = set(chain.input_keys).difference(known_variables)
            if missing_vars:
                raise ValueError(
                    f"Missing required input keys: {missing_vars}, "
                    f"only had {known_variables}"
                )
            overlapping_keys = known_variables.intersection(chain.output_keys)
            if overlapping_keys:
                raise ValueError(
                    f"Chain returned keys that already exist: {overlapping_keys}"
                )
            known_variables |= set(chain.output_keys)
        if "output_variables" not in values:
            if values.get("return_all", False):
                output_keys = known_variables.difference(input_variables)
            else:
                output_keys = chains[-1].output_keys
            values["output_variables"] = output_keys
        else:
            missing_vars = set(values["output_variables"]).difference(known_variables)
            if missing_vars:
                raise ValueError(
                    f"Expected output variables that were not found: {missing_vars}."
                )
        return values

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        known_values = inputs.copy()
        for chain in self.chains:
            outputs = chain(known_values, return_only_outputs=True)
            known_values.update(outputs)
        return {k: known_values[k] for k in self.output_variables}
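A usage sketch with two single-purpose LLMChains, where the first chain's `synopsis` output becomes the second chain's input (prompt texts and model settings are illustrative):

    from langchain.chains import LLMChain, SequentialChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    llm = OpenAI(temperature=0.7)
    synopsis_chain = LLMChain(
        llm=llm,
        prompt=PromptTemplate(
            input_variables=["title"],
            template="Write a one-paragraph synopsis of a play titled {title}.",
        ),
        output_key="synopsis",
    )
    review_chain = LLMChain(
        llm=llm,
        prompt=PromptTemplate(
            input_variables=["synopsis"],
            template="Write a short review of this synopsis:\n{synopsis}",
        ),
        output_key="review",
    )
    overall = SequentialChain(
        chains=[synopsis_chain, review_chain],
        input_variables=["title"],
        output_variables=["synopsis", "review"],
    )
    result = overall({"title": "Tragedy at Sunset on the Beach"})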
[docs]class SimpleSequentialChain(Chain):
    """Simple chain where the outputs of one step feed directly into the next."""

    chains: List[Chain]
    strip_outputs: bool = False
    input_key: str = "input"  #: :meta private:
    output_key: str = "output"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return [self.output_key]

    @root_validator()
    def validate_chains(cls, values: Dict) -> Dict:
        """Validate that chains are all single input/output."""
        for chain in values["chains"]:
            if len(chain.input_keys) != 1:
                raise ValueError(
                    "Chains used in SimpleSequentialChain should all have one input, "
                    f"got {chain} with {len(chain.input_keys)} inputs."
                )
            if len(chain.output_keys) != 1:
                raise ValueError(
                    "Chains used in SimpleSequentialChain should all have one output, "
                    f"got {chain} with {len(chain.output_keys)} outputs."
                )
        return values

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        _input = inputs[self.input_key]
        color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
        for i, chain in enumerate(self.chains):
            _input = chain.run(_input)
            if self.strip_outputs:
                _input = _input.strip()
            self.callback_manager.on_text(
                _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose
            )
        return {self.output_key: _input}
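For the single-input/single-output case, the same pipeline collapses to a sketch like the following, reusing the two chains from the SequentialChain example above (`verbose` is inherited from `Chain`):

    from langchain.chains import SimpleSequentialChain

    pipeline = SimpleSequentialChain(
        chains=[synopsis_chain, review_chain],
        strip_outputs=True,
        verbose=True,
    )
    review = pipeline.run("Tragedy at Sunset on the Beach")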
Source code for langchain.chains.llm

"""Chain that just formats a prompt and calls an LLM."""
from __future__ import annotations

from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

from pydantic import Extra

from langchain.chains.base import Chain
from langchain.input import get_colored_text
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseLanguageModel, LLMResult, PromptValue


[docs]class LLMChain(Chain):
    """Chain to run queries against LLMs.

    Example:
        .. code-block:: python

            from langchain import LLMChain, OpenAI, PromptTemplate
            prompt_template = "Tell me a {adjective} joke"
            prompt = PromptTemplate(
                input_variables=["adjective"], template=prompt_template
            )
            llm_chain = LLMChain(llm=OpenAI(), prompt=prompt)
    """

    prompt: BasePromptTemplate
    """Prompt object to use."""
    llm: BaseLanguageModel
    output_key: str = "text"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the prompt expects.

        :meta private:
        """
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        return self.apply([inputs])[0]

[docs]    def generate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = self.prep_prompts(input_list)
        return self.llm.generate_prompt(prompts, stop)

[docs]    async def agenerate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = await self.aprep_prompts(input_list)
        return await self.llm.agenerate_prompt(prompts, stop)

[docs]    def prep_prompts(
        self, input_list: List[Dict[str, Any]]
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            self.callback_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, it should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop

[docs]    async def aprep_prompts(
        self, input_list: List[Dict[str, Any]]
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if self.callback_manager.is_async:
                await self.callback_manager.on_text(
                    _text, end="\n", verbose=self.verbose
                )
            else:
                self.callback_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, it should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop
[docs]    def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        response = self.generate(input_list)
        return self.create_outputs(response)

[docs]    async def aapply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        response = await self.agenerate(input_list)
        return self.create_outputs(response)

[docs]    def create_outputs(self, response: LLMResult) -> List[Dict[str, str]]:
        """Create outputs from response."""
        return [
            # Get the text of the top generated string.
            {self.output_key: generation[0].text}
            for generation in response.generations
        ]

    async def _acall(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        return (await self.aapply([inputs]))[0]

[docs]    def predict(self, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm_chain.predict(adjective="funny")
        """
        return self(kwargs)[self.output_key]

[docs]    async def apredict(self, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = await llm_chain.apredict(adjective="funny")
        """
        return (await self.acall(kwargs))[self.output_key]

[docs]    def predict_and_parse(
        self, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, str]]:
        """Call predict and then parse the results."""
        result = self.predict(**kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

[docs]    async def apredict_and_parse(
        self, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, str]]:
        """Call apredict and then parse the results."""
        result = await self.apredict(**kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

[docs]    def apply_and_parse(
        self, input_list: List[Dict[str, Any]]
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        result = self.apply(input_list)
        return self._parse_result(result)

    def _parse_result(
        self, result: List[Dict[str, str]]
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        if self.prompt.output_parser is not None:
            return [
                self.prompt.output_parser.parse(res[self.output_key])
                for res in result
            ]
        else:
            return result

[docs]    async def aapply_and_parse(
        self, input_list: List[Dict[str, Any]]
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call aapply and then parse the results."""
        result = await self.aapply(input_list)
        return self._parse_result(result)

    @property
    def _chain_type(self) -> str:
        return "llm_chain"

[docs]    @classmethod
    def from_string(cls, llm: BaseLanguageModel, template: str) -> Chain:
        """Create LLMChain from LLM and template."""
        prompt_template = PromptTemplate.from_template(template)
        return cls(llm=llm, prompt=prompt_template)
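A sketch tying the convenience methods together (model and template text are illustrative):

    from langchain.chains import LLMChain
    from langchain.llms import OpenAI

    chain = LLMChain.from_string(
        llm=OpenAI(temperature=0.9),
        template="Suggest a name for a company that makes {product}.",
    )

    # predict() formats the prompt from kwargs and returns the completion text.
    name = chain.predict(product="colorful socks")

    # apply() batches many inputs into a single generate call.
    names = chain.apply([{"product": "socks"}, {"product": "kites"}])
    # -> [{"text": "..."}, {"text": "..."}]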
Source code for langchain.chains.loading

"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union

import yaml

from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.retrieval_qa.base import VectorDBQA
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub

URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"


def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
    """Load LLM chain from config dict."""
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")

    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    else:
        raise ValueError("One of `prompt` or `prompt_path` must be present.")

    return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
    """Load hypothetical document embedder chain from config dict."""
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
    if "embeddings" in kwargs:
        embeddings = kwargs.pop("embeddings")
    else:
        raise ValueError("`embeddings` must be present.")
    return HypotheticalDocumentEmbedder(
        llm_chain=llm_chain, base_embeddings=embeddings, **config
    )


def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")

    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")

    if "document_prompt" in config:
        prompt_config = config.pop("document_prompt")
        document_prompt = load_prompt_from_config(prompt_config)
    elif "document_prompt_path" in config:
        document_prompt = load_prompt(config.pop("document_prompt_path"))
    else:
        raise ValueError(
            "One of `document_prompt` or `document_prompt_path` must be present."
        )

    return StuffDocumentsChain(
        llm_chain=llm_chain, document_prompt=document_prompt, **config
    )


def _load_map_reduce_documents_chain(
    config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")

    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")

    if "combine_document_chain" in config:
        combine_document_chain_config = config.pop("combine_document_chain")
        combine_document_chain = load_chain_from_config(combine_document_chain_config)
    elif "combine_document_chain_path" in config:
        combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
    else:
        raise ValueError(
            "One of `combine_document_chain` or "
            "`combine_document_chain_path` must be present."
        )
    if "collapse_document_chain" in config:
        collapse_document_chain_config = config.pop("collapse_document_chain")
        if collapse_document_chain_config is None:
            collapse_document_chain = None
        else:
            collapse_document_chain = load_chain_from_config(
                collapse_document_chain_config
            )
    elif "collapse_document_chain_path" in config:
        collapse_document_chain = load_chain(
            config.pop("collapse_document_chain_path")
        )
    return MapReduceDocumentsChain(
        llm_chain=llm_chain,
        combine_document_chain=combine_document_chain,
        collapse_document_chain=collapse_document_chain,
        **config,
    )


def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    return LLMBashChain(llm=llm, prompt=prompt, **config)


def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "create_draft_answer_prompt" in config:
        create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
        create_draft_answer_prompt = load_prompt_from_config(
            create_draft_answer_prompt_config
        )
    elif "create_draft_answer_prompt_path" in config:
        create_draft_answer_prompt = load_prompt(
            config.pop("create_draft_answer_prompt_path")
        )
    if "list_assertions_prompt" in config:
        list_assertions_prompt_config = config.pop("list_assertions_prompt")
        list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
    elif "list_assertions_prompt_path" in config:
        list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
    if "check_assertions_prompt" in config:
        check_assertions_prompt_config = config.pop("check_assertions_prompt")
        check_assertions_prompt = load_prompt_from_config(
            check_assertions_prompt_config
        )
    elif "check_assertions_prompt_path" in config:
        check_assertions_prompt = load_prompt(
            config.pop("check_assertions_prompt_path")
        )
    if "revised_answer_prompt" in config:
        revised_answer_prompt_config = config.pop("revised_answer_prompt")
        revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
    elif "revised_answer_prompt_path" in config:
        revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
    return LLMCheckerChain(
        llm=llm,
        create_draft_answer_prompt=create_draft_answer_prompt,
        list_assertions_prompt=list_assertions_prompt,
        check_assertions_prompt=check_assertions_prompt,
        revised_answer_prompt=revised_answer_prompt,
        **config,
    )


def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    return LLMMathChain(llm=llm, prompt=prompt, **config)


def _load_map_rerank_documents_chain(
    config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
    return MapRerankDocumentsChain(llm_chain=llm_chain, **config)


def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    else:
        raise ValueError("One of `prompt` or `prompt_path` must be present.")
    return PALChain(llm=llm, prompt=prompt, **config)


def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
    if "initial_llm_chain" in config:
        initial_llm_chain_config = config.pop("initial_llm_chain")
        initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
    elif "initial_llm_chain_path" in config:
        initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
    else:
        raise ValueError(
            "One of `initial_llm_chain` or `initial_llm_chain_path` must be present."
        )
    if "refine_llm_chain" in config:
        refine_llm_chain_config = config.pop("refine_llm_chain")
        refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
    elif "refine_llm_chain_path" in config:
        refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
    else:
        raise ValueError(
            "One of `refine_llm_chain` or `refine_llm_chain_path` must be present."
        )
    if "document_prompt" in config:
        prompt_config = config.pop("document_prompt")
        document_prompt = load_prompt_from_config(prompt_config)
    elif "document_prompt_path" in config:
        document_prompt = load_prompt(config.pop("document_prompt_path"))
    return RefineDocumentsChain(
        initial_llm_chain=initial_llm_chain,
        refine_llm_chain=refine_llm_chain,
        document_prompt=document_prompt,
        **config,
    )


def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(
            combine_documents_chain_config
        )
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(
            config.pop("combine_documents_chain_path")
        )
    else:
        raise ValueError(
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
    return QAWithSourcesChain(
        combine_documents_chain=combine_documents_chain, **config
    )


def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
    if "database" in kwargs:
        database = kwargs.pop("database")
    else:
        raise ValueError("`database` must be present.")
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)


def _load_vector_db_qa_with_sources_chain(
    config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
    if "vectorstore" in kwargs:
        vectorstore = kwargs.pop("vectorstore")
    else:
        raise ValueError("`vectorstore` must be present.")
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(
            combine_documents_chain_config
        )
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(
            config.pop("combine_documents_chain_path")
        )
    else:
        raise ValueError(
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
    return VectorDBQAWithSourcesChain(
        combine_documents_chain=combine_documents_chain,
        vectorstore=vectorstore,
        **config,
    )


def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
    if "vectorstore" in kwargs:
        vectorstore = kwargs.pop("vectorstore")
    else:
        raise ValueError("`vectorstore` must be present.")
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(
            combine_documents_chain_config
        )
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(
            config.pop("combine_documents_chain_path")
        )
    else:
        raise ValueError(
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
    return VectorDBQA(
        combine_documents_chain=combine_documents_chain,
        vectorstore=vectorstore,
        **config,
    )


def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
    if "api_request_chain" in config:
        api_request_chain_config = config.pop("api_request_chain")
        api_request_chain = load_chain_from_config(api_request_chain_config)
    elif "api_request_chain_path" in config:
        api_request_chain = load_chain(config.pop("api_request_chain_path"))
    else:
        raise ValueError(
            "One of `api_request_chain` or `api_request_chain_path` must be present."
        )
    if "api_answer_chain" in config:
        api_answer_chain_config = config.pop("api_answer_chain")
        api_answer_chain = load_chain_from_config(api_answer_chain_config)
    elif "api_answer_chain_path" in config:
        api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
    else:
        raise ValueError(
            "One of `api_answer_chain` or `api_answer_chain_path` must be present."
        )
    if "requests_wrapper" in kwargs:
        requests_wrapper = kwargs.pop("requests_wrapper")
    else:
        raise ValueError("`requests_wrapper` must be present.")
    return APIChain(
        api_request_chain=api_request_chain,
        api_answer_chain=api_answer_chain,
        requests_wrapper=requests_wrapper,
        **config,
    )


def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
    if "requests_wrapper" in kwargs:
        requests_wrapper = kwargs.pop("requests_wrapper")
        return LLMRequestsChain(
            llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
        )
    else:
        return LLMRequestsChain(llm_chain=llm_chain, **config)


type_to_loader_dict = {
    "api_chain": _load_api_chain,
    "hyde_chain": _load_hyde_chain,
    "llm_chain": _load_llm_chain,
    "llm_bash_chain": _load_llm_bash_chain,
    "llm_checker_chain": _load_llm_checker_chain,
    "llm_math_chain": _load_llm_math_chain,
    "llm_requests_chain": _load_llm_requests_chain,
    "pal_chain": _load_pal_chain,
    "qa_with_sources_chain": _load_qa_with_sources_chain,
    "stuff_documents_chain": _load_stuff_documents_chain,
    "map_reduce_documents_chain": _load_map_reduce_documents_chain,
    "map_rerank_documents_chain": _load_map_rerank_documents_chain,
    "refine_documents_chain": _load_refine_documents_chain,
    "sql_database_chain": _load_sql_database_chain,
    "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
    "vector_db_qa": _load_vector_db_qa,
}
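The registry above is keyed by the `_type` value written into serialized chain configs; `load_chain_from_config` (defined next) pops that key and dispatches to the matching loader. A sketch, reusing the inline config shape shown after `_load_llm_chain` above:

    config = {
        "_type": "llm_chain",
        "llm": {"_type": "openai", "temperature": 0.0},
        "prompt": {
            "_type": "prompt",
            "input_variables": ["adjective"],
            "template": "Tell me a {adjective} joke",
        },
    }
    chain = load_chain_from_config(config)
    # Loaders that need live objects receive them as kwargs, e.g.
    # load_chain_from_config(qa_config, vectorstore=vectorstore).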
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
    """Load chain from Config Dict."""
    if "_type" not in config:
        raise ValueError("Must specify a chain Type in config")
    config_type = config.pop("_type")

    if config_type not in type_to_loader_dict:
        raise ValueError(f"Loading {config_type} chain not supported")

    chain_loader = type_to_loader_dict[config_type]
    return chain_loader(config, **kwargs)


[docs]def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
    """Unified method for loading a chain from LangChainHub or local fs."""
    if hub_result := try_load_from_hub(
        path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
    ):
        return hub_result
    else:
        return _load_chain_from_file(path, **kwargs)


def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
    """Load chain from file."""
    # Convert file to Path object.
    if isinstance(file, str):
        file_path = Path(file)
    else:
        file_path = file
    # Load from either json or yaml.
    if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix == ".yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    else:
        raise ValueError("File type must be json or yaml")
    # Override default 'verbose' and
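A usage sketch for the public entry point (file and hub paths are illustrative):

    from langchain.chains import load_chain

    # From a local serialized config; the suffix picks the json/yaml parser.
    chain = load_chain("my_chain.json")

    # From LangChainHub, via the lc:// scheme handled by try_load_from_hub.
    chain = load_chain("lc://chains/llm-math/chain.json")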