copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
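For instance, a minimal sketch of duplicating a wrapper while overriding a field (assumes an existing llm instance whose class defines a temperature field; both names are illustrative):
llm_copy = llm.copy(update={"temperature": 0.2}, deep=True)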
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
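A minimal sketch of calling generate, assuming an existing llm instance (the prompts and stop sequence are illustrative):
result = llm.generate(["Tell me a joke.", "Tell me a poem."], stop=["\n\n"])
print(len(result.generations))  # one list of Generation objects per prompt
print(result.llm_output)        # provider-specific metadata, if any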
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
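For example, a quick token-count sketch (assuming an existing llm instance; the text is illustrative):
n = llm.get_num_tokens("How many tokens does this sentence use?")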
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
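A minimal sketch of serializing a model to JSON (assuming an existing llm instance; the excluded field name is illustrative):
print(llm.json(exclude={"callback_manager"}, indent=2))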
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
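A saved LLM can later be reloaded; a minimal sketch, assuming the load_llm helper from langchain.llms.loading:
.. code-block:: python
from langchain.llms.loading import load_llm
llm = load_llm("path/llm.yaml")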
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.SelfHostedHuggingFaceLLM[source]#
Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Only supports text-generation and text2text-generation for now.
Example using from_model_id:
from langchain.llms import SelfHostedHuggingFaceLLM
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-large", task="text2text-generation",
hardware=gpu
)
Example passing a function that generates a pipeline (because the pipeline is not serializable):
from langchain.llms import SelfHostedHuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def get_pipeline():
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer
)
return pipe
hf = SelfHostedHuggingFaceLLM(
model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
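Once constructed, the wrapper is called like any other LLM; a minimal sketch (the prompt is illustrative, and inference runs on the remote hardware):
print(hf("What is the capital of France?"))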
Validators
set_callback_manager » callback_manager
set_verbose » verbose
field device: int = 0#
Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc.
field hardware: Any = None#
Remote hardware to send the inference function to.
field inference_fn: Callable = <function _generate_text>#
Inference function to send to the remote hardware.
field load_fn_kwargs: Optional[dict] = None#
Keyword arguments to pass to the model load function.
field model_id: str = 'gpt2'#
Hugging Face model_id to load the model.
field model_kwargs: Optional[dict] = None#
Keyword arguments to pass to the model.
field model_load_fn: Callable = <function _load_transformer>#
Function to load the model remotely on the server.
field model_reqs: List[str] = ['./', 'transformers', 'torch']#
Requirements to install on hardware to inference the model.
field task: str = 'text-generation'#
Hugging Face task (either “text-generation” or “text2text-generation”).
__call__(prompt: str, stop: Optional[List[str]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
classmethod from_pipeline(pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any) → langchain.llms.base.LLM#
Init the SelfHostedPipeline from a pipeline object or string.
generate(prompts: List[str], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.SelfHostedPipeline[source]#
Run model inference on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Example for custom pipeline and inference functions:
from langchain.llms import SelfHostedPipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def load_pipeline():
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
return pipeline(
"text-generation", model=model, tokenizer=tokenizer,
max_new_tokens=10
)
def inference_fn(pipeline, prompt, stop = None):
return pipeline(prompt)[0]["generated_text"]
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
llm = SelfHostedPipeline(
model_load_fn=load_pipeline,
hardware=gpu,
model_reqs=["./", "torch", "transformers"], inference_fn=inference_fn
)
Example for <2GB model (can be serialized and sent directly to the server):
from langchain.llms import SelfHostedPipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
my_model = ...
llm = SelfHostedPipeline.from_pipeline(
pipeline=my_model,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing model path for larger models:
from langchain.llms import SelfHostedPipeline
import runhouse as rh
import pickle
from transformers import pipeline
generator = pipeline(model="gpt2")
rh.blob(pickle.dumps(generator), path="models/pipeline.pkl"
).save().to(gpu, path="models")
llm = SelfHostedPipeline.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Validators
set_callback_manager » callback_manager
set_verbose » verbose
field hardware: Any = None#
Remote hardware to send the inference function to.
field inference_fn: Callable = <function _generate_text>#
Inference function to send to the remote hardware.
field load_fn_kwargs: Optional[dict] = None#
Keyword arguments to pass to the model load function.
field model_load_fn: Callable [Required]#
Function to load the model remotely on the server.
field model_reqs: List[str] = ['./', 'torch']#
Requirements to install on hardware to inference the model.
__call__(prompt: str, stop: Optional[List[str]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
classmethod from_pipeline(pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any) → langchain.llms.base.LLM[source]#
Init the SelfHostedPipeline from a pipeline object or string.
generate(prompts: List[str], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.StochasticAI[source]#
Wrapper around StochasticAI large language models.
To use, you should have the environment variable STOCHASTICAI_API_KEY
set with your API key.
Example
from langchain.llms import StochasticAI
stochasticai = StochasticAI(api_url="")
Validators
build_extra » all fields
set_callback_manager » callback_manager
set_verbose » verbose
validate_environment » all fields
field api_url: str = ''#
Base URL of the StochasticAI model API to call.
field model_kwargs: Dict[str, Any] [Optional]#
Holds any model parameters valid for create call not
explicitly specified.
__call__(prompt: str, stop: Optional[List[str]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.Writer[source]#
Wrapper around Writer large language models.
To use, you should have the environment variable WRITER_API_KEY
set with your API key.
Example
from langchain import Writer
writer = Writer(model_id="palmyra-base")
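Once constructed, the wrapper is called like any other LLM; a minimal sketch with an illustrative prompt:
print(writer("Write a tagline for a coffee shop."))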
Validators
set_callback_manager » callback_manager
set_verbose » verbose
validate_environment » all fields
field base_url: Optional[str] = None#
Base url to use, if None decides based on model name.
field beam_search_diversity_rate: float = 1.0#
Only applies to beam search, i.e. when the beam width is >1.
A higher value encourages beam search to return a more diverse
set of candidates
field beam_width: Optional[int] = None#
The number of concurrent candidates to keep track of during
beam search
field length: int = 256#
The maximum number of tokens to generate in the completion.
field length_pentaly: float = 1.0#
Only applies to beam search, i.e. when the beam width is >1.
Larger values penalize long candidates more heavily, thus preferring
shorter candidates
field logprobs: bool = False#
Whether to return log probabilities.
field model_id: str = 'palmyra-base'#
Model name to use.
field random_seed: int = 0#
The model generates random results.
Changing the random seed alone will produce a different response
with similar characteristics. It is possible to reproduce results
by fixing the random seed (assuming all other hyperparameters
are also fixed)
field repetition_penalty: float = 1.0#
Penalizes repeated tokens according to frequency.
field stop: Optional[List[str]] = None#
Sequences at which completion generation will stop.
field temperature: float = 1.0#
What sampling temperature to use.
field tokens_to_generate: int = 24#
Max number of tokens to generate.
field top_k: int = 1#
The number of highest probability vocabulary tokens to
keep for top-k-filtering.
field top_p: float = 1.0#
Total probability mass of tokens to consider at each step.
__call__(prompt: str, stop: Optional[List[str]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
Embeddings#
Wrappers around embedding modules.
pydantic model langchain.embeddings.AlephAlphaAsymmetricSemanticEmbedding[source]#
Wrapper for Aleph Alpha’s Asymmetric Embeddings
AA provides you with an endpoint to embed a document and a query.
The models were optimized to make the embeddings of documents and
the query for a document as similar as possible.
To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/
Example
from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding
embeddings = AlephAlphaAsymmetricSemanticEmbedding()
document = "This is a content of the document"
query = "What is the content of the document?"
doc_result = embeddings.embed_documents([document])
query_result = embeddings.embed_query(query)
field compress_to_size: Optional[int] = 128#
Should the returned embeddings come back as an original 5120-dim vector,
or should it be compressed to 128-dim.
field contextual_control_threshold: Optional[int] = None#
Attention control parameters only apply to those tokens that have
explicitly been set in the request.
field control_log_additive: Optional[bool] = True#
Apply controls on prompt items by adding the log(control_factor)
to attention scores.
field hosting: Optional[str] = 'https://api.aleph-alpha.com'#
Optional parameter that specifies which datacenters may process the request.
field model: Optional[str] = 'luminous-base'#
Model name to use.
field normalize: Optional[bool] = True#
Should returned embeddings be normalized
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to Aleph Alpha’s asymmetric Document endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to Aleph Alpha’s asymmetric, query embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.AlephAlphaSymmetricSemanticEmbedding[source]#
The symmetric version of the Aleph Alpha’s semantic embeddings.
The main difference is that here, both the documents and
queries are embedded with a SemanticRepresentation.Symmetric
Example
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to Aleph Alpha’s Document endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to Aleph Alpha’s symmetric, query embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.CohereEmbeddings[source]#
Wrapper around Cohere embedding models.
To use, you should have the cohere python package installed, and the
environment variable COHERE_API_KEY set with your API key or pass it
as a named parameter to the constructor.
Example
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(model="medium", cohere_api_key="my-api-key")
field model: str = 'large'#
Model name to use.
field truncate: Optional[str] = None#
Truncate embeddings that are too long from start or end (“NONE”|”START”|”END”)
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to Cohere’s embedding endpoint.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to Cohere’s embedding endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.FakeEmbeddings[source]#
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed search docs.
embed_query(text: str) → List[float][source]#
Embed query text.
pydantic model langchain.embeddings.HuggingFaceEmbeddings[source]#
Wrapper around sentence_transformers embedding models.
To use, you should have the sentence_transformers python package installed.
Example
from langchain.embeddings import HuggingFaceEmbeddings
model_name = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceEmbeddings(model_name=model_name)
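Embedding documents and queries then looks like the following minimal sketch (the texts are illustrative):
doc_vectors = hf.embed_documents(["LangChain is a framework.", "Embeddings map text to vectors."])
query_vector = hf.embed_query("What is LangChain?")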
field model_name: str = 'sentence-transformers/all-mpnet-base-v2'#
Model name to use.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace transformer model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace transformer model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.HuggingFaceHubEmbeddings[source]#
Wrapper around HuggingFaceHub embedding models.
To use, you should have the huggingface_hub python package installed, and the
environment variable HUGGINGFACEHUB_API_TOKEN set with your API token, or pass
it as a named parameter to the constructor.
Example
from langchain.embeddings import HuggingFaceHubEmbeddings
repo_id = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceHubEmbeddings(
repo_id=repo_id,
task="feature-extraction",
huggingfacehub_api_token="my-api-key",
)
field model_kwargs: Optional[dict] = None#
Keyword arguments to pass to the model.
field repo_id: str = 'sentence-transformers/all-mpnet-base-v2'#
Model name to use.
field task: Optional[str] = 'feature-extraction'#
Task to call the model with.
embed_documents(texts: List[str]) → List[List[float]][source]#
Call out to HuggingFaceHub’s embedding endpoint for embedding search docs.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to HuggingFaceHub’s embedding endpoint for embedding query text.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.HuggingFaceInstructEmbeddings[source]#
Wrapper around sentence_transformers embedding models.
To use, you should have the sentence_transformers
and InstructorEmbedding python package installed.
Example
from langchain.embeddings import HuggingFaceInstructEmbeddings
model_name = "hkunlp/instructor-large"
hf = HuggingFaceInstructEmbeddings(model_name=model_name)
field embed_instruction: str = 'Represent the document for retrieval: '#
Instruction to use for embedding documents.
field model_name: str = 'hkunlp/instructor-large'#
Model name to use.
field query_instruction: str = 'Represent the question for retrieving supporting documents: '#
Instruction to use for embedding query.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace instruct model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace instruct model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.LlamaCppEmbeddings[source]#
Wrapper around llama.cpp embedding models.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: abetlen/llama-cpp-python
Example
from langchain.embeddings import LlamaCppEmbeddings
llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
field f16_kv: bool = False#
Use half-precision for key/value cache.
field logits_all: bool = False#
Return logits for all tokens, not just the last token.
field n_batch: Optional[int] = 8#
Number of tokens to process in parallel.
Should be a number between 1 and n_ctx.
field n_ctx: int = 512#
Token context window.
field n_parts: int = -1#
Number of parts to split the model into.
If -1, the number of parts is automatically determined.
field n_threads: Optional[int] = None#
Number of threads to use. If None, the number
of threads is automatically determined.
field seed: int = -1#
Seed. If -1, a random seed is used.
field use_mlock: bool = False#
Force system to keep model in RAM.
field vocab_only: bool = False#
Only load the vocabulary, no weights.
embed_documents(texts: List[str]) → List[List[float]][source]#
Embed a list of documents using the Llama model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Embed a query using the Llama model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.OpenAIEmbeddings[source]#
Wrapper around OpenAI embedding models.
To use, you should have the openai python package installed, and the
environment variable OPENAI_API_KEY set with your API key or pass it
as a named parameter to the constructor.
Example
from langchain.embeddings import OpenAIEmbeddings
openai = OpenAIEmbeddings(openai_api_key="my-api-key")
In order to use the library with Microsoft Azure endpoints, you need to set
the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and, optionally, the
API_VERSION.
The OPENAI_API_TYPE must be set to ‘azure’ and the others correspond to
the properties of your endpoint.
In addition, the deployment name must be passed as the model parameter.
Example
import os
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings(model="your-embeddings-deployment-name")
text = "This is a test query."
query_result = embeddings.embed_query(text)
field chunk_size: int = 1000#
Maximum number of texts to embed in each batch
field max_retries: int = 6#
Maximum number of retries to make when generating.
embed_documents(texts: List[str], chunk_size: Optional[int] = 0) → List[List[float]][source]#
Call out to OpenAI’s embedding endpoint for embedding search docs.
Parameters
texts – The list of texts to embed.
chunk_size – The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Call out to OpenAI’s embedding endpoint for embedding query text.
Parameters
text – The text to embed.
Returns
Embedding for the text.
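A minimal sketch of embedding a batch of documents with an explicit chunk size (reusing the openai instance from the example above; the texts are illustrative):
doc_vectors = openai.embed_documents(["hello world", "goodbye world"], chunk_size=1000)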
pydantic model langchain.embeddings.SagemakerEndpointEmbeddings[source]#
Wrapper around custom Sagemaker Inference Endpoints.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
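Example (a hedged sketch: the content handler below assumes the endpoint accepts and returns JSON with the shown keys, and the endpoint name, region, and profile name are placeholders):
import json
from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase
class ContentHandler(ContentHandlerBase):
    content_type = "application/json"
    accepts = "application/json"
    def transform_input(self, prompts, model_kwargs):
        # Serialize the input texts and model kwargs into the request body.
        return json.dumps({"inputs": prompts, **model_kwargs}).encode("utf-8")
    def transform_output(self, output):
        # Parse the endpoint response and return the embedding vectors.
        response_json = json.loads(output.read().decode("utf-8"))
        return response_json["embeddings"]
embeddings = SagemakerEndpointEmbeddings(
    endpoint_name="my-endpoint-name",    # placeholder
    region_name="us-west-2",             # placeholder
    credentials_profile_name="default",  # placeholder
    content_handler=ContentHandler(),
)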
field content_handler: langchain.llms.sagemaker_endpoint.ContentHandlerBase [Required]#
The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
field credentials_profile_name: Optional[str] = None#
The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
field endpoint_kwargs: Optional[Dict] = None#
Optional attributes passed to the invoke_endpoint
function. See the boto3 docs for more info:
https://boto3.amazonaws.com/v1/documentation/api/latest/index.html
field endpoint_name: str = ''#
The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region.
field model_kwargs: Optional[Dict] = None#
Keyword arguments to pass to the model.
field region_name: str = ''#
The AWS region where the Sagemaker model is deployed, e.g. us-west-2.
embed_documents(texts: List[str], chunk_size: int = 64) → List[List[float]][source]#
Compute doc embeddings using a SageMaker Inference Endpoint.
Parameters
texts – The list of texts to embed.
chunk_size – The chunk size defines how many input texts will
be grouped together as a request. If None, will use the
chunk size specified by the class.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a SageMaker inference endpoint.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.SelfHostedEmbeddings[source]#
Runs custom embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Example using a model load function:
from langchain.embeddings import SelfHostedEmbeddings
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
def get_pipeline():
model_id = "facebook/bart-large"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
embeddings = SelfHostedEmbeddings(
model_load_fn=get_pipeline,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing in a pipeline path:
from langchain.embeddings import SelfHostedHFEmbeddings
import runhouse as rh
import pickle
from transformers import pipeline
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
rh.blob(pickle.dumps(pipeline),
path="models/pipeline.pkl").save().to(gpu, path="models")
embeddings = SelfHostedHFEmbeddings.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Validators
set_callback_manager » callback_manager
set_verbose » verbose
field inference_fn: Callable = <function _embed_documents>#
Inference function to extract the embeddings on the remote hardware.
field inference_kwargs: Any = None#
Any kwargs to pass to the model’s inference function.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace transformer model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace transformer model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.SelfHostedHuggingFaceEmbeddings[source]#
Runs sentence_transformers embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Example
from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
import runhouse as rh
model_name = "sentence-transformers/all-mpnet-base-v2"
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
Validators
set_callback_manager » callback_manager
set_verbose » verbose
field hardware: Any = None#
Remote hardware to send the inference function to.
field inference_fn: Callable = <function _embed_documents>#
Inference function to extract the embeddings.
field load_fn_kwargs: Optional[dict] = None#
Keyword arguments to pass to the model load function.
field model_id: str = 'sentence-transformers/all-mpnet-base-v2'#
Model name to use.
field model_load_fn: Callable = <function load_embedding_model>#
Function to load the model remotely on the server.
field model_reqs: List[str] = ['./', 'sentence_transformers', 'torch']#
Requirements to install on hardware to inference the model.
pydantic model langchain.embeddings.SelfHostedHuggingFaceInstructEmbeddings[source]#
Runs InstructorEmbedding embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Example
from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
import runhouse as rh
model_name = "hkunlp/instructor-large"
gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
hf = SelfHostedHuggingFaceInstructEmbeddings(
model_name=model_name, hardware=gpu)
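Embedding calls then run on the remote hardware; a minimal sketch with an illustrative query:
query_vector = hf.embed_query("What is self-hosted inference?")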
Validators
set_callback_manager » callback_manager
set_verbose » verbose
field embed_instruction: str = 'Represent the document for retrieval: '#
Instruction to use for embedding documents.
field model_id: str = 'hkunlp/instructor-large'#
Model name to use.
field model_reqs: List[str] = ['./', 'InstructorEmbedding', 'torch']#
Requirements to install on hardware to inference the model.
field query_instruction: str = 'Represent the question for retrieving supporting documents: '#
Instruction to use for embedding query.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a HuggingFace instruct model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a HuggingFace instruct model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
pydantic model langchain.embeddings.TensorflowHubEmbeddings[source]#
Wrapper around tensorflow_hub embedding models.
To use, you should have the tensorflow_text python package installed.
Example
from langchain.embeddings import TensorflowHubEmbeddings
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
tf = TensorflowHubEmbeddings(model_url=url)
field model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3'#
Model name to use.
embed_documents(texts: List[str]) → List[List[float]][source]#
Compute doc embeddings using a TensorflowHub embedding model.
Parameters
texts – The list of texts to embed.
Returns
List of embeddings, one for each text.
embed_query(text: str) → List[float][source]#
Compute query embeddings using a TensorflowHub embedding model.
Parameters
text – The text to embed.
Returns
Embeddings for the text.
Contents
Installation
Environment Setup
Building a Language Model Application: LLMs
Building a Language Model Application: Chat Models
Getting Started#
This tutorial gives you a quick walkthrough of how to build an end-to-end language model application with LangChain.
Installation#
To get started, install LangChain with the following command:
pip install langchain
# or
conda install langchain -c conda-forge
Environment Setup#
Using LangChain usually requires integrations with one or more model providers, data stores, APIs, etc.
For this example, we will use OpenAI's API, so we first need to install its SDK:
pip install openai
Next, we need to set the environment variable in the terminal.
export OPENAI_API_KEY="..."
Alternatively, you can set the environment variable from inside a Jupyter notebook or Python script. Here is an example of setting it in code:
import os
os.environ["OPENAI_API_KEY"] = "..."
Building a Language Model Application: LLMs#
Now that we have installed LangChain and set up our environment, we can start building our language model application.
LangChain provides many modules that can be used to build language model applications. They can be combined to create more complex applications, or used individually for simple ones.
LLMs: Get predictions from a language model
The most basic building block of LangChain is calling an LLM on some input. Let's walk through a simple example of how to do this. For this purpose, let's pretend we are building a service that generates a company name based on what the company makes.
To do this, we first need to import the LLM wrapper, for example OpenAI.
from langchain.llms import OpenAI
Next, we can initialize the LLM wrapper with any arguments. In this example, we want the outputs to be more random, so we initialize it with a high temperature (temperature is also an OpenAI API parameter; the term is borrowed from physics, where it describes how fast molecules move on average, and here it stands for randomness).
llm = OpenAI(temperature=0.9)
We can now call it on some input!
text = "请用中文给生产彩色袜子的公司起个好名字。"
print(llm(text))
色活靓履
For more details on how to use LLMs within LangChain, see the LLM getting started guide.
Prompt Templates: Manage prompts for language models
Calling an LLM is a great first step, but it's just the beginning. Normally when you use an LLM in an application, you are not sending user input directly to the LLM. You will usually take the user input, simplify it as your business scenario requires, and then shape and adjust it into a prompt suited to the current scenario before sending it to the LLM.
For example, in the previous example, the text we passed in was hard-coded to ask for a name for a company that makes colorful socks. In this imaginary service, what we would want to do is take only the user input describing what the company does, and then format the prompt with that information.
This is easy to do with LangChain!
First, let's define the prompt template:
from langchain.prompts import PromptTemplate
prompt = PromptTemplate(
input_variables=["product"],
template="请用中文给生产{product}的公司起个好名字",
)
Now let's see how this works! We can call the .format method to format it.
print(prompt.format(product="彩色袜子"))
请用中文给生产彩色袜子的公司起个好名字
For more details, check out the getting started guide for prompt templates.
Chains: Combine LLMs and prompt templates in multi-step workflows
Up to this point, we have used the PromptTemplate and LLM primitives on their own. But they can not only be used individually; they can also be "linked" together and used in combination.
In LangChain, a "chain" is made up of "links", which can be primitives like LLMs or other chains.
For example, the LLMChain is the most core type of chain, consisting of a PromptTemplate and an LLM.
Extending the previous example, we can construct an LLMChain that takes user input, formats it with a PromptTemplate, and then passes the formatted response to an LLM.
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["product"],
template="请用中文给生产{product}的公司起个好名字",
)
We can now create a very simple chain that will take user input, format it with the PromptTemplate, and then send it to the LLM:
from langchain.chains import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
Now we can run that chain, specifying only the product, to get the result we want!
chain.run("彩色袜子")
# -> '\n\n彩色星足!'
The first chain ran successfully - an LLM chain! This is one of the simpler types of chains, but understanding how it works will set you up well for working with more complex chains.
For more details, check out the getting started guide for chains.
Agents: Dynamically call chains based on user input
So far, the chains we have looked at run in a predetermined order.
Agents no longer do this: they use an LLM to determine which actions to take and in what order. An action can either be using a tool and observing its output, or returning a result to the user.
Agents, when used correctly, can be extremely powerful. In this tutorial, we show you how to easily use agents through the simplest, highest-level API.
In order to load agents, you should understand the following concepts:
Tool: A function that performs a specific task. This can be things like Google Search, a database lookup, a Python REPL, or another chain. The interface for a tool is currently a function that takes a string as input and returns a string as output.
LLM: The language model powering the agent. Note: the agent uses the language model itself to decide which tools to use.
Agent: The agent to use, specified as a string referencing the name of an agent class. This guide focuses on the simplest, highest-level API, so it only covers using the standard supported agents. If you want to implement a custom agent, see the documentation for custom agents (coming soon).
Agents: For a list of supported agents and their specifications, see here.
Tools: For a list of predefined tools and their specifications, see here.
The following example also requires the SerpAPI Python package to be installed.
pip install google-search-results
Then set the appropriate environment variable.
import os
os.environ["SERPAPI_API_KEY"] = "..."
Now we can get started!
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
# First, let's load the language model we're going to use to control the agent.
llm = OpenAI(temperature=0)
# Next, let's load some tools to use. Note that the "llm-math" tool uses an LLM, so we need to pass that in.
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# Now let's test it out!
agent.run("What was the high temperature in SF yesterday in Fahrenheit? What is that number raised to the .023 power?")
> Entering new AgentExecutor chain...
I need to find the temperature first, then use the calculator to raise it to the .023 power.
Action: Search
Action Input: "High temperature in SF yesterday"
Observation: San Francisco Temperature Yesterday. Maximum temperature yesterday: 57 °F (at 1:56 pm) Minimum temperature yesterday: 49 °F (at 1:56 am) Average temperature ...
Thought: I now have the temperature, so I can use the calculator to raise it to the .023 power.
Action: Calculator
Action Input: 57^.023
Observation: Answer: 1.0974509573251117
Thought: I now know the final answer
Final Answer: The high temperature in SF yesterday in Fahrenheit raised to the .023 power is 1.0974509573251117.
> Finished chain.
Memory: Add state to chains and agents
So far, all the chains and agents we've gone through have been stateless. But often, you may want a chain or agent to have some concept of "memory" so that it can remember information about its previous interactions. The clearest and simplest example of this is designing a chatbot: you want it to remember previous messages so it can use that context to have a better conversation. This would be a kind of "short-term memory". On the more complex side, you could imagine a chain or agent remembering key pieces of information over time; this would be a form of "long-term memory". For more concrete ideas on the latter, see this paper.
LangChain provides several chains created specifically for this purpose. This notebook walks through using one of those chains (the ConversationChain) with two different types of memory.
By default, the ConversationChain has a simple type of memory that remembers all previous inputs/outputs and adds them to the context that is passed along. Let's take a look at using this chain (setting verbose=True so we can see the prompt).
from langchain import OpenAI, ConversationChain
llm = OpenAI(temperature=0)
conversation = ConversationChain(llm=llm, verbose=True)
conversation.predict(input="Hi there!")
> Entering new chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI:
> Finished chain.
' Hello! How are you today?'
conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
> Entering new chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI: Hello! How are you today?
Human: I'm doing well! Just having a conversation with an AI.
AI:
> Finished chain.
" That's great! What would you like to talk about?"
Building a Language Model Application: Chat Models#
Similarly, you can use chat models instead of LLMs. Chat models are a variation on language models. While chat models use language models under the hood, the interface they expose is a bit different: rather than exposing a "text in, text out" API, they expose an interface where "chat messages" are the inputs and the outputs.
The chat model API is fairly new, so we are still figuring out the right abstractions.
Get message completions from a chat model
You can get chat completions by passing one or more messages to the chat model. The response will be a message. The message types currently supported in LangChain are AIMessage, HumanMessage, SystemMessage, and ChatMessage; ChatMessage takes in an arbitrary role parameter. Most of the time, you'll just be dealing with HumanMessage, AIMessage, and SystemMessage.
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
chat = ChatOpenAI(temperature=0)
You can get a completion by passing in a single message:
chat([HumanMessage(content="翻译句子从英文到中文. I love programming.")])
# -> AIMessage(content="我喜欢编程。", additional_kwargs={})
You can also pass in multiple messages for OpenAI's GPT-3.5 Turbo and GPT-4 models.
messages = [
SystemMessage(content="你是我的英文翻译中文助理"),
HumanMessage(content="翻译句子从英文到中文. I love programming.")
]
chat(messages)
# -> AIMessage(content="我喜欢编程。", additional_kwargs={})
You can go one step further and use the generate function to generate completions for multiple sets of messages. This returns an LLMResult with an additional message parameter.
batch_messages = [
[
SystemMessage(content="你是我的英文翻译中文助理"),
HumanMessage(content="翻译句子从英文到中文. I love programming.")
],
[
SystemMessage(content="你是我的英文翻译中文助理"),
HumanMessage(content="翻译句子从英文到中文. I love artificial intelligence.")
],
]
result = chat.generate(batch_messages)
result
# -> LLMResult(generations=[[ChatGeneration(text='我喜欢编程。', generation_info=None, message=AIMessage(content='我喜欢编程。', additional_kwargs={}))], [ChatGeneration(text='我喜爱人工智能。', generation_info=None, message=AIMessage(content='我喜爱人工智能。', additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 95, 'completion_tokens': 19, 'total_tokens': 114}, 'model_name': 'gpt-3.5-turbo'})
You can recover things like token usage from this LLMResult:
result.llm_output['token_usage']
# -> {'prompt_tokens': 95, 'completion_tokens': 19, 'total_tokens': 114}
Chat Prompt Templates
Similar to LLMs, you can make use of templating by using a MessagePromptTemplate. You can build a ChatPromptTemplate from one or more MessagePromptTemplates. You can use ChatPromptTemplate's format_prompt method, which returns a PromptValue that you can convert to a string or a Message object, depending on whether you want to use the formatted value as input to an LLM or a chat model.
For convenience, there is a from_template method exposed on the template. If you were to use this template, this is what it would look like:
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
chat = ChatOpenAI(temperature=0)
template="You are a helpful assistant that translates {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template="{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
# get a chat completion from the formatted messages
chat(chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages())
# -> AIMessage(content="J'aime programmer.", additional_kwargs={})
Using chat models in chains
The LLMChain discussed above can also be used with chat models:
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
chat = ChatOpenAI(temperature=0)
template="You are a helpful assistant that translates {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template="{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chain = LLMChain(llm=chat, prompt=chat_prompt)
chain.run(input_language="English", output_language="French", text="I love programming.")
# -> "J'aime programmer."
Using chat models in agents
Agents can also be used with chat models; you can initialize one using AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION as the agent type.
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
# First, let's load the language model we're going to use to control the agent.
chat = ChatOpenAI(temperature=0)
# Next, let's load some tools to use. Note that the llm-math tool uses an LLM, so we need to pass that in.
llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
agent = initialize_agent(tools, chat, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# Now let's test it out!
agent.run("Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?")
> Entering new AgentExecutor chain...
Thought: I need to use a search engine to find Olivia Wilde's boyfriend and a calculator to raise his age to the 0.23 power.
Action:
{
"action": "Search",
"action_input": "Olivia Wilde boyfriend"
}
Observation: Sudeikis and Wilde's relationship ended in November 2020. Wilde was publicly served with court documents regarding child custody while she was presenting Don't Worry Darling at CinemaCon 2022. In January 2021, Wilde began dating singer Harry Styles after meeting during the filming of Don't Worry Darling.
Thought:I need to use a search engine to find Harry Styles' current age.
Action:
{
"action": "Search",
"action_input": "Harry Styles age"
}
Observation: 29 years
Thought:Now I need to calculate 29 raised to the 0.23 power.
Action:
{
"action": "Calculator",
"action_input": "29^0.23"
}
Observation: Answer: 2.169459462491557
Thought:I now know the final answer.
Final Answer: 2.169459462491557
> Finished chain.
'2.169459462491557'
Memory: Add state to chains and agents
You can use Memory with chains and agents initialized with chat models. The main difference between this and Memory for LLMs is that rather than trying to condense all previous messages into a string, we can keep them as their own unique memory object.
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template("下面是人类和人工智能之间的友好对话。人工智能很健谈,提供了许多来自上下文的具体细节。如果人工智能不知道回答一个问题,它会诚实地说出自己不知道。"),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
llm = ChatOpenAI(temperature=0)
memory = ConversationBufferMemory(return_messages=True)
conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm)
conversation.predict(input="Hi there!")
# -> 'Hello! How can I assist you today?'
conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
# -> "That sounds like fun! I'm happy to chat with you. Is there anything specific you'd like to talk about?"
conversation.predict(input="Tell me about yourself.")
# -> "Sure! I am an AI language model created by OpenAI. I was trained on a large dataset of text from the internet, which allows me to understand and generate human-like language. I can answer questions, provide information, and even have conversations like this one. Is there anything else you'd like to know about me?"