Source code for langchain.vectorstores.sklearn

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = DEFAULT_K,
        fetch_k: int = DEFAULT_FETCH_K,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if self._embedding_function is None:
            raise ValueError(
                "For MMR search, you must specify an embedding function on creation."
            )
        embedding = self._embedding_function.embed_query(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
        )
        return docs

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        persist_path: Optional[str] = None,
        **kwargs: Any,
    ) -> "SKLearnVectorStore":
        vs = SKLearnVectorStore(embedding, persist_path=persist_path, **kwargs)
        vs.add_texts(texts, metadatas=metadatas, ids=ids)
        return vs
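A minimal usage sketch of the store above (not from the original source). It uses `FakeEmbeddings` purely for demonstration; in practice you would swap in a real embedding model such as `OpenAIEmbeddings`, and the texts here are made up:

```python
from langchain.embeddings.fake import FakeEmbeddings
from langchain.vectorstores import SKLearnVectorStore

embeddings = FakeEmbeddings(size=16)  # demo stand-in for a real embedding model
store = SKLearnVectorStore.from_texts(
    texts=["apples are red", "bananas are yellow", "grapes are purple"],
    embedding=embeddings,
)
# MMR balances relevance (lambda_mult -> 1) against diversity
# (lambda_mult -> 0) among the fetch_k candidates it retrieves first.
docs = store.max_marginal_relevance_search("fruit colors", k=2, fetch_k=3)
```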

Source code for langchain.vectorstores.starrocks

"""Wrapper around open source StarRocks VectorSearch capability."""
from __future__ import annotations

import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple

from pydantic import BaseSettings

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore

logger = logging.getLogger()
DEBUG = False


def has_mul_sub_str(s: str, *args: Any) -> bool:
    """
    Check if a string contains multiple substrings.

    Args:
        s: The string to check
        *args: The substrings to check for in the string

    Returns:
        bool: True if all substrings are present in the string, False otherwise
    """
    for a in args:
        if a not in s:
            return False
    return True


def debug_output(s: Any) -> None:
    """
    Print a debug message if DEBUG is True.

    Args:
        s: The message to print
    """
    if DEBUG:
        print(s)


def get_named_result(connection: Any, query: str) -> List[dict[str, Any]]:
    """
    Get a named result from a query.

    Args:
        connection: The connection to the database
        query: The query to execute

    Returns:
        List[dict[str, Any]]: The result of the query
    """
    cursor = connection.cursor()
    cursor.execute(query)
    columns = cursor.description
    result = []
    for value in cursor.fetchall():
        r = {}
        for idx, datum in enumerate(value):
            k = columns[idx][0]
            r[k] = datum
        result.append(r)
    debug_output(result)
    cursor.close()
    return result


class StarRocksSettings(BaseSettings):
    """StarRocks client configuration.

    Attributes:
        host (str) : Hostname of the StarRocks backend. Defaults to 'localhost'.
        port (int) : MySQL-protocol port to connect to. Defaults to 9030.
        username (str) : Username to log in with. Defaults to 'root'.
        password (str) : Password to log in with. Defaults to ''.
        database (str) : Database name to find the table. Defaults to 'default'.
        table (str) : Table name to operate on. Defaults to 'langchain'.

        column_map (Dict) : Column map to project column names onto LangChain
            semantics. Must have the keys `id`, `document`, `embedding`, and
            `metadata`. For example:

            .. code-block:: python

                {
                    'id': 'text_id',
                    'embedding': 'text_embedding',
                    'document': 'text_plain',
                    'metadata': 'metadata_dictionary_in_json',
                }

            Defaults to the identity map.
    """

    host: str = "localhost"
    port: int = 9030
    username: str = "root"
    password: str = ""
    column_map: Dict[str, str] = {
        "id": "id",
        "document": "document",
        "embedding": "embedding",
        "metadata": "metadata",
    }
    database: str = "default"
    table: str = "langchain"

    def __getitem__(self, item: str) -> Any:
        return getattr(self, item)

    class Config:
        env_file = ".env"
        env_prefix = "starrocks_"
        env_file_encoding = "utf-8"


class StarRocks(VectorStore):
    """Wrapper around the StarRocks vector database.

    You need the `pymysql` python package and a valid account to connect to
    StarRocks.

    Right now StarRocks has only implemented the `cosine_similarity` function
    to compute the distance between two vectors, and there is no vector index
    yet, so every search has to iterate over all vectors and compute the
    spatial distance.

    For more information, please visit
        [StarRocks official site](https://www.starrocks.io/)
        [StarRocks github](https://github.com/StarRocks/starrocks)
    """

    def __init__(
        self,
        embedding: Embeddings,
        config: Optional[StarRocksSettings] = None,
        **kwargs: Any,
    ) -> None:
        """StarRocks wrapper for LangChain.

        Args:
            embedding (Embeddings): Text embedding model.
            config (StarRocksSettings): Configuration for the StarRocks client.
        """
        try:
            import pymysql  # type: ignore[import]
        except ImportError:
            raise ImportError(
                "Could not import pymysql python package. "
                "Please install it with `pip install pymysql`."
            )
        try:
            from tqdm import tqdm

            self.pgbar = tqdm
        except ImportError:
            # Fall back to a no-op wrapper if tqdm is not installed
            self.pgbar = lambda x, **kwargs: x
        super().__init__()
        if config is not None:
            self.config = config
        else:
            self.config = StarRocksSettings()
        assert self.config
        assert self.config.host and self.config.port
        assert self.config.column_map and self.config.database and self.config.table
        for k in ["id", "embedding", "document", "metadata"]:
            assert k in self.config.column_map
        # initialize the schema
        dim = len(embedding.embed_query("test"))

        self.schema = f"""\
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
    {self.config.column_map['id']} string,
    {self.config.column_map['document']} string,
    {self.config.column_map['embedding']} array<float>,
    {self.config.column_map['metadata']} string
) ENGINE = OLAP PRIMARY KEY(id) DISTRIBUTED BY HASH(id) \
  PROPERTIES ("replication_num" = "1")\
"""
        self.dim = dim
        self.BS = "\\"
        self.must_escape = ("\\", "'")
        self.embedding_function = embedding
        self.dist_order = "DESC"
        debug_output(self.config)

        # Create a connection to StarRocks
        self.connection = pymysql.connect(
            host=self.config.host,
            port=self.config.port,
            user=self.config.username,
            password=self.config.password,
            database=self.config.database,
            **kwargs,
        )
        debug_output(self.schema)
        get_named_result(self.connection, self.schema)

    def escape_str(self, value: str) -> str:
        return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)

    def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str:
        ks = ",".join(column_names)
        embed_tuple_index = tuple(column_names).index(
            self.config.column_map["embedding"]
        )
        _data = []
        for n in transac:
            n = ",".join(
                [
                    f"'{self.escape_str(str(_n))}'"
                    if idx != embed_tuple_index
                    else f"array<float>{str(_n)}"
                    for (idx, _n) in enumerate(n)
                ]
            )
            _data.append(f"({n})")
        i_str = f"""
        INSERT INTO
            {self.config.database}.{self.config.table}({ks})
        VALUES
        {','.join(_data)}
        """
        return i_str

    def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None:
        _insert_query = self._build_insert_sql(transac, column_names)
        debug_output(_insert_query)
        get_named_result(self.connection, _insert_query)

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        batch_size: int = 32,
        ids: Optional[Iterable[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Insert more texts through the embeddings and add them to the VectorStore.

        Args:
            texts: Iterable of strings to add to the VectorStore.
            ids: Optional list of ids to associate with the texts.
            batch_size: Batch size of insertion.
            metadatas: Optional metadata dicts to attach to the texts.

        Returns:
            List of ids from adding the texts into the VectorStore.
        """
        # Embed and create the documents
        ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts]
        colmap_ = self.config.column_map
        transac = []
        column_names = {
            colmap_["id"]: ids,
            colmap_["document"]: texts,
            colmap_["embedding"]: self.embedding_function.embed_documents(list(texts)),
        }
        metadatas = metadatas or [{} for _ in texts]
        column_names[colmap_["metadata"]] = map(json.dumps, metadatas)
        assert len(set(colmap_) - set(column_names)) >= 0
        keys, values = zip(*column_names.items())
        try:
            t = None
            for v in self.pgbar(
                zip(*values), desc="Inserting data...", total=len(metadatas)
            ):
                assert (
                    len(v[keys.index(self.config.column_map["embedding"])]) == self.dim
                )
                transac.append(v)
                if len(transac) == batch_size:
                    if t:
                        t.join()
                    t = Thread(target=self._insert, args=[transac, keys])
                    t.start()
                    transac = []
            if len(transac) > 0:
                if t:
                    t.join()
                self._insert(transac, keys)
            return [i for i in ids]
        except Exception as e:
            logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
            return []

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[Dict[Any, Any]]] = None,
        config: Optional[StarRocksSettings] = None,
        text_ids: Optional[Iterable[str]] = None,
        batch_size: int = 32,
        **kwargs: Any,
    ) -> StarRocks:
        """Create a StarRocks wrapper from existing texts.

        Args:
            texts (List[str]): List or tuple of strings to be added.
            embedding (Embeddings): Function to extract text embeddings.
            config (StarRocksSettings, optional): StarRocks configuration.
            text_ids (Optional[Iterable], optional): IDs for the texts.
                Defaults to None.
            batch_size (int, optional): Batch size when transmitting data to
                StarRocks. Defaults to 32.
            metadatas (List[dict], optional): Metadata for the texts.
                Defaults to None.

        Returns:
            StarRocks Index
        """
        ctx = cls(embedding, config, **kwargs)
        ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
        return ctx

    def __repr__(self) -> str:
        """Text representation for the StarRocks vector store: prints backend,
        username, and schema. Easy to use with `str(StarRocks())`.

        Returns:
            repr: string to show connection info and data schema
        """
        _repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ "
        _repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
        _repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n"
        width = 25
        fields = 3
        _repr += "-" * (width * fields + 1) + "\n"
        columns = ["name", "type", "key"]
        _repr += f"|\033[94m{columns[0]:24s}\033[0m|\033[96m{columns[1]:24s}"
        _repr += f"\033[0m|\033[96m{columns[2]:24s}\033[0m|\n"
        _repr += "-" * (width * fields + 1) + "\n"
        q_str = f"DESC {self.config.database}.{self.config.table}"
        debug_output(q_str)
        rs = get_named_result(self.connection, q_str)
        for r in rs:
            _repr += f"|\033[94m{r['Field']:24s}\033[0m|\033[96m{r['Type']:24s}"
            _repr += f"\033[0m|\033[96m{r['Key']:24s}\033[0m|\n"
        _repr += "-" * (width * fields + 1) + "\n"
        return _repr

    def _build_query_sql(
        self, q_emb: List[float], topk: int, where_str: Optional[str] = None
    ) -> str:
        q_emb_str = ",".join(map(str, q_emb))
        if where_str:
            where_str = f"WHERE {where_str}"
        else:
            where_str = ""

        q_str = f"""
        SELECT {self.config.column_map['document']},
            {self.config.column_map['metadata']},
            cosine_similarity_norm(array<float>[{q_emb_str}],
              {self.config.column_map['embedding']}) as dist
        FROM {self.config.database}.{self.config.table}
        {where_str}
        ORDER BY dist {self.dist_order}
        LIMIT {topk}
        """
        debug_output(q_str)
        return q_str

    def similarity_search(
        self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
    ) -> List[Document]:
        """Perform a similarity search with StarRocks.

        Args:
            query (str): query string
            k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            where_str (Optional[str], optional): WHERE condition string.
                Defaults to None.

            NOTE: Do not let end users fill this out, and always be mindful of
            SQL injection. When dealing with metadata, remember to use
            `{self.metadata_column}.attribute` instead of `attribute` alone.
            The default name for the metadata column is `metadata`.

        Returns:
            List[Document]: List of Documents
        """
        return self.similarity_search_by_vector(
            self.embedding_function.embed_query(query), k, where_str, **kwargs
        )

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        where_str: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Perform a similarity search with StarRocks by vector.

        Args:
            embedding (List[float]): query embedding
            k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            where_str (Optional[str], optional): WHERE condition string.
                Defaults to None.

            NOTE: Do not let end users fill this out, and always be mindful of
            SQL injection. When dealing with metadata, remember to use
            `{self.metadata_column}.attribute` instead of `attribute` alone.
            The default name for the metadata column is `metadata`.

        Returns:
            List[Document]: List of Documents
        """
        q_str = self._build_query_sql(embedding, k, where_str)
        try:
            return [
                Document(
                    page_content=r[self.config.column_map["document"]],
                    metadata=json.loads(r[self.config.column_map["metadata"]]),
                )
                for r in get_named_result(self.connection, q_str)
            ]
        except Exception as e:
            logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
            return []

    def similarity_search_with_relevance_scores(
        self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Perform a similarity search with StarRocks.

        Args:
            query (str): query string
            k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            where_str (Optional[str], optional): WHERE condition string.
                Defaults to None.

            NOTE: Do not let end users fill this out, and always be mindful of
            SQL injection. When dealing with metadata, remember to use
            `{self.metadata_column}.attribute` instead of `attribute` alone.
            The default name for the metadata column is `metadata`.

        Returns:
            List[Tuple[Document, float]]: List of documents with their scores
        """
        q_str = self._build_query_sql(
            self.embedding_function.embed_query(query), k, where_str
        )
        try:
            return [
                (
                    Document(
                        page_content=r[self.config.column_map["document"]],
                        metadata=json.loads(r[self.config.column_map["metadata"]]),
                    ),
                    r["dist"],
                )
                for r in get_named_result(self.connection, q_str)
            ]
        except Exception as e:
            logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
            return []

    def drop(self) -> None:
        """Helper function: drop the underlying table."""
        get_named_result(
            self.connection,
            f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}",
        )

    @property
    def metadata_column(self) -> str:
        return self.config.column_map["metadata"]
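A minimal usage sketch of the wrapper above (not part of the original source). It assumes a reachable StarRocks frontend and `pip install pymysql`; the host, credentials, and choice of `OpenAIEmbeddings` are illustrative only:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.starrocks import StarRocks, StarRocksSettings

# Settings can also come from a .env file via the `starrocks_` env prefix
# (e.g. starrocks_host, starrocks_port), per the Config class above.
settings = StarRocksSettings(
    host="127.0.0.1",  # placeholder; point at your own StarRocks FE
    port=9030,
    username="root",
    password="",
    database="default",
    table="langchain",
)
store = StarRocks.from_texts(
    ["StarRocks is an OLAP database", "pgvector lives in Postgres"],
    OpenAIEmbeddings(),
    config=settings,
)
# where_str is interpolated into the SQL as-is; never pass end-user input.
docs = store.similarity_search("What is StarRocks?", k=2)
```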

Source code for langchain.vectorstores.supabase

from __future__ import annotations

import uuid
from itertools import repeat
from typing import (
    TYPE_CHECKING,
    Any,
    Iterable,
    List,
    Optional,
    Tuple,
    Type,
    Union,
)

import numpy as np

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance

if TYPE_CHECKING:
    import supabase


class SupabaseVectorStore(VectorStore):
    """VectorStore for a Supabase postgres database.

    Assumes you have the `pgvector` extension installed and a `match_documents`
    (or similar) function. For more details:
    https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase

    You can implement your own `match_documents` function in order to limit the
    search space to a subset of documents based on your own authorization or
    business logic.

    Note that the Supabase Python client does not yet support async operations.

    If you'd like to use `max_marginal_relevance_search`, please review the
    instructions below on modifying the `match_documents` function to return
    matched embeddings.
    """

    _client: supabase.client.Client
    # This is the embedding function. Don't confuse it with the embedding
    # vectors. We should perhaps rename the underlying Embeddings base class
    # to EmbeddingFunction or something similar.
    _embedding: Embeddings
    table_name: str
    query_name: str

    def __init__(
        self,
        client: supabase.client.Client,
        embedding: Embeddings,
        table_name: str,
        query_name: Union[str, None] = None,
    ) -> None:
        """Initialize with supabase client."""
        try:
            import supabase  # noqa: F401
        except ImportError:
            raise ValueError(
                "Could not import supabase python package. "
                "Please install it with `pip install supabase`."
            )
        self._client = client
        self._embedding: Embeddings = embedding
        self.table_name = table_name or "documents"
        self.query_name = query_name or "match_documents"

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict[Any, Any]]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        ids = ids or [str(uuid.uuid4()) for _ in texts]
        docs = self._texts_to_documents(texts, metadatas)
        vectors = self._embedding.embed_documents(list(texts))
        return self.add_vectors(vectors, docs, ids)

    @classmethod
    def from_texts(
        cls: Type["SupabaseVectorStore"],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        client: Optional[supabase.client.Client] = None,
        table_name: Optional[str] = "documents",
        query_name: Union[str, None] = "match_documents",
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> "SupabaseVectorStore":
        """Return VectorStore initialized from texts and embeddings."""
        if not client:
            raise ValueError("Supabase client is required.")
        if not table_name:
            raise ValueError("Supabase document table_name is required.")

        embeddings = embedding.embed_documents(texts)
        ids = [str(uuid.uuid4()) for _ in texts]
        docs = cls._texts_to_documents(texts, metadatas)
        _ids = cls._add_vectors(client, table_name, embeddings, docs, ids)

        return cls(
            client=client,
            embedding=embedding,
            table_name=table_name,
            query_name=query_name,
        )

    def add_vectors(
        self,
        vectors: List[List[float]],
        documents: List[Document],
        ids: List[str],
    ) -> List[str]:
        return self._add_vectors(self._client, self.table_name, vectors, documents, ids)

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        vectors = self._embedding.embed_documents([query])
        return self.similarity_search_by_vector(vectors[0], k)

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        result = self.similarity_search_by_vector_with_relevance_scores(embedding, k)
        documents = [doc for doc, _ in result]
        return documents

    def similarity_search_with_relevance_scores(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        vectors = self._embedding.embed_documents([query])
        return self.similarity_search_by_vector_with_relevance_scores(vectors[0], k)

    def similarity_search_by_vector_with_relevance_scores(
        self, query: List[float], k: int
    ) -> List[Tuple[Document, float]]:
        match_documents_params = dict(query_embedding=query, match_count=k)
        res = self._client.rpc(self.query_name, match_documents_params).execute()

        match_result = [
            (
                Document(
                    metadata=search.get("metadata", {}),  # type: ignore
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
            )
            for search in res.data
            if search.get("content")
        ]

        return match_result

    def similarity_search_by_vector_returning_embeddings(
        self, query: List[float], k: int
    ) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
        match_documents_params = dict(query_embedding=query, match_count=k)
        res = self._client.rpc(self.query_name, match_documents_params).execute()

        match_result = [
            (
                Document(
                    metadata=search.get("metadata", {}),  # type: ignore
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
                # Supabase returns a vector type as its string representation (!).
                # This is a hack to convert the string to a numpy array.
                np.fromstring(
                    search.get("embedding", "").strip("[]"), np.float32, sep=","
                ),
            )
            for search in res.data
            if search.get("content")
        ]

        return match_result

    @staticmethod
    def _texts_to_documents(
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict[Any, Any]]] = None,
    ) -> List[Document]:
        """Return list of Documents from list of texts and metadatas."""
        if metadatas is None:
            metadatas = repeat({})

        docs = [
            Document(page_content=text, metadata=metadata)
            for text, metadata in zip(texts, metadatas)
        ]

        return docs

    @staticmethod
    def _add_vectors(
        client: supabase.client.Client,
        table_name: str,
        vectors: List[List[float]],
        documents: List[Document],
        ids: List[str],
    ) -> List[str]:
        """Add vectors to Supabase table."""
        rows: List[dict[str, Any]] = [
            {
                "id": ids[idx],
                "content": documents[idx].page_content,
                "embedding": embedding,
                "metadata": documents[idx].metadata,  # type: ignore
            }
            for idx, embedding in enumerate(vectors)
        ]

        # According to the SupabaseVectorStore JS implementation, the best
        # chunk size is 500
        chunk_size = 500
        id_list: List[str] = []
        for i in range(0, len(rows), chunk_size):
            chunk = rows[i : i + chunk_size]

            result = client.from_(table_name).upsert(chunk).execute()  # type: ignore

            if len(result.data) == 0:
                raise Exception("Error inserting: No rows added")

            # VectorStore.add_vectors returns ids as strings
            ids = [str(i.get("id")) for i in result.data if i.get("id")]
            id_list.extend(ids)

        return id_list

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        result = self.similarity_search_by_vector_returning_embeddings(
            embedding, fetch_k
        )

        matched_documents = [doc_tuple[0] for doc_tuple in result]
        matched_embeddings = [doc_tuple[2] for doc_tuple in result]

        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            matched_embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )

        filtered_documents = [matched_documents[i] for i in mmr_selected]
        return filtered_documents

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.

        `max_marginal_relevance_search` requires that `query_name` returns
        matched embeddings alongside the matched documents. The following
        function demonstrates how to do this:

        ```sql
        CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
                                                   match_count int)
            RETURNS TABLE(
                id uuid,
                content text,
                metadata jsonb,
                embedding vector(1536),
                similarity float)
            LANGUAGE plpgsql
            AS $$
            # variable_conflict use_column
        BEGIN
            RETURN query
            SELECT
                id,
                content,
                metadata,
                embedding,
                1 - (docstore.embedding <=> query_embedding) AS similarity
            FROM
                docstore
            ORDER BY
                docstore.embedding <=> query_embedding
            LIMIT match_count;
        END;
        $$;
        ```
        """
        embedding = self._embedding.embed_documents([query])
        docs = self.max_marginal_relevance_search_by_vector(
            embedding[0], k, fetch_k, lambda_mult=lambda_mult
        )
        return docs

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
        """Delete by vector IDs.

        Args:
            ids: List of ids to delete.
        """
        if ids is None:
            raise ValueError("No ids provided to delete.")

        rows: List[dict[str, Any]] = [
            {
                "id": id,
            }
            for id in ids
        ]

        # TODO: Check if this can be done in bulk
        for row in rows:
            self._client.from_(self.table_name).delete().eq("id", row["id"]).execute()
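A minimal end-to-end sketch of the store above (not part of the original source). It assumes a Supabase project with the `pgvector` extension, a `documents` table, and a `match_documents` RPC as described in the class docstring; the environment variable names and `OpenAIEmbeddings` choice are illustrative:

```python
import os

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore
from supabase.client import create_client

supabase = create_client(
    os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"]
)
store = SupabaseVectorStore.from_texts(
    ["pgvector stores embeddings in Postgres"],
    OpenAIEmbeddings(),
    client=supabase,
    table_name="documents",
    query_name="match_documents",
)
# MMR additionally requires a query function that also returns embeddings,
# e.g. the match_documents_embeddings SQL shown in the docstring above.
docs = store.similarity_search("where are embeddings stored?", k=1)
```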

Source code for langchain.vectorstores.tair

"""Wrapper around Tair Vector."""
from __future__ import annotations

import json
import logging
import uuid
from typing import Any, Iterable, List, Optional, Type

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore

logger = logging.getLogger(__name__)


def _uuid_key() -> str:
    return uuid.uuid4().hex


class Tair(VectorStore):
    """Wrapper around Tair Vector store."""

    def __init__(
        self,
        embedding_function: Embeddings,
        url: str,
        index_name: str,
        content_key: str = "content",
        metadata_key: str = "metadata",
        search_params: Optional[dict] = None,
        **kwargs: Any,
    ):
        self.embedding_function = embedding_function
        self.index_name = index_name
        try:
            from tair import Tair as TairClient
        except ImportError:
            raise ImportError(
                "Could not import tair python package. "
                "Please install it with `pip install tair`."
            )
        try:
            # connect to tair from url
            client = TairClient.from_url(url, **kwargs)
        except ValueError as e:
            raise ValueError(f"Tair failed to connect: {e}")

        self.client = client
        self.content_key = content_key
        self.metadata_key = metadata_key
        self.search_params = search_params

    def create_index_if_not_exist(
        self,
        dim: int,
        distance_type: str,
        index_type: str,
        data_type: str,
        **kwargs: Any,
    ) -> bool:
        index = self.client.tvs_get_index(self.index_name)
        if index is not None:
            logger.info("Index already exists")
            return False
        self.client.tvs_create_index(
            self.index_name,
            dim,
            distance_type,
            index_type,
            data_type,
            **kwargs,
        )
        return True

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Add texts data to an existing index."""
        ids = []
        keys = kwargs.get("keys", None)
        # Write data to tair
        pipeline = self.client.pipeline(transaction=False)
        embeddings = self.embedding_function.embed_documents(list(texts))
        for i, text in enumerate(texts):
            # Use the provided key, otherwise use a default key
            key = keys[i] if keys else _uuid_key()
            metadata = metadatas[i] if metadatas else {}
            pipeline.tvs_hset(
                self.index_name,
                key,
                embeddings[i],
                False,
                **{
                    self.content_key: text,
                    self.metadata_key: json.dumps(metadata),
                },
            )
            ids.append(key)
        pipeline.execute()
        return ids

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """
        Returns the most similar indexed documents to the query text.

        Args:
            query (str): The query text for which to find similar documents.
            k (int): The number of documents to return. Default is 4.

        Returns:
            List[Document]: A list of documents that are most similar to the
                query text.
        """
        # Create an embedding vector from the user query
        embedding = self.embedding_function.embed_query(query)

        keys_and_scores = self.client.tvs_knnsearch(
            self.index_name, k, embedding, False, None, **kwargs
        )

        pipeline = self.client.pipeline(transaction=False)
        for key, _ in keys_and_scores:
            pipeline.tvs_hmget(
                self.index_name, key, self.metadata_key, self.content_key
            )
        docs = pipeline.execute()

        return [
            Document(
                page_content=d[1],
                metadata=json.loads(d[0]),
            )
            for d in docs
        ]

    @classmethod
    def from_texts(
        cls: Type[Tair],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        index_name: str = "langchain",
        content_key: str = "content",
        metadata_key: str = "metadata",
        **kwargs: Any,
    ) -> Tair:
        try:
            from tair import tairvector
        except ImportError:
            raise ValueError(
                "Could not import tair python package. "
                "Please install it with `pip install tair`."
            )
        url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
        if "tair_url" in kwargs:
            kwargs.pop("tair_url")

        distance_type = tairvector.DistanceMetric.InnerProduct
        if "distance_type" in kwargs:
            distance_type = kwargs.pop("distance_type")
        index_type = tairvector.IndexType.HNSW
        if "index_type" in kwargs:
            index_type = kwargs.pop("index_type")
        data_type = tairvector.DataType.Float32
        if "data_type" in kwargs:
            data_type = kwargs.pop("data_type")
        index_params = {}
        if "index_params" in kwargs:
            index_params = kwargs.pop("index_params")
        search_params = {}
        if "search_params" in kwargs:
            search_params = kwargs.pop("search_params")

        keys = None
        if "keys" in kwargs:
            keys = kwargs.pop("keys")
        try:
            tair_vector_store = cls(
                embedding,
                url,
                index_name,
                content_key=content_key,
                metadata_key=metadata_key,
                search_params=search_params,
                **kwargs,
            )
        except ValueError as e:
            raise ValueError(f"tair failed to connect: {e}")

        # Create embeddings for documents
        embeddings = embedding.embed_documents(texts)
        tair_vector_store.create_index_if_not_exist(
            len(embeddings[0]),
            distance_type,
            index_type,
            data_type,
            **index_params,
        )
        tair_vector_store.add_texts(texts, metadatas, keys=keys)
        return tair_vector_store

    @classmethod
    def from_documents(
        cls,
        documents: List[Document],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        index_name: str = "langchain",
        content_key: str = "content",
        metadata_key: str = "metadata",
        **kwargs: Any,
    ) -> Tair:
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]

        return cls.from_texts(
            texts, embedding, metadatas, index_name, content_key, metadata_key, **kwargs
        )

    @staticmethod
    def drop_index(
        index_name: str = "langchain",
        **kwargs: Any,
    ) -> bool:
        """
        Drop an existing index.

        Args:
            index_name (str): Name of the index to drop.

        Returns:
            bool: True if the index is dropped successfully.
        """
        try:
            from tair import Tair as TairClient
        except ImportError:
            raise ValueError(
                "Could not import tair python package. "
                "Please install it with `pip install tair`."
            )
        url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")

        try:
            if "tair_url" in kwargs:
                kwargs.pop("tair_url")
            client = TairClient.from_url(url=url, **kwargs)
        except ValueError as e:
            raise ValueError(f"Tair connection error: {e}")
        # delete the index
        ret = client.tvs_del_index(index_name)
        if ret == 0:
            # index does not exist
            logger.info("Index does not exist")
            return False
        return True

    @classmethod
    def from_existing_index(
        cls,
        embedding: Embeddings,
        index_name: str = "langchain",
        content_key: str = "content",
        metadata_key: str = "metadata",
        **kwargs: Any,
    ) -> Tair:
        """Connect to an existing Tair index."""
        url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")

        search_params = {}
        if "search_params" in kwargs:
            search_params = kwargs.pop("search_params")

        return cls(
            embedding,
            url,
            index_name,
            content_key=content_key,
            metadata_key=metadata_key,
            search_params=search_params,
            **kwargs,
        )
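A minimal usage sketch of the wrapper above (not part of the original source). It assumes a reachable Tair instance and `pip install tair`; the connection DSN and the `OpenAIEmbeddings` choice are placeholders:

```python
import os

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Tair

# from_texts reads the connection URL from the `tair_url` kwarg or the
# TAIR_URL environment variable; this DSN is a placeholder.
os.environ["TAIR_URL"] = "redis://user:password@localhost:6379"

store = Tair.from_texts(
    ["Tair is a Redis-compatible store with vector search"],
    OpenAIEmbeddings(),
    index_name="langchain",
)
docs = store.similarity_search("what is Tair?", k=1)
```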

Source code for langchain.vectorstores.tigris

from __future__ import annotations

import itertools
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple

from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import VectorStore

if TYPE_CHECKING:
    from tigrisdb import TigrisClient
    from tigrisdb import VectorStore as TigrisVectorStore
    from tigrisdb.types.filters import Filter as TigrisFilter
    from tigrisdb.types.vector import Document as TigrisDocument


class Tigris(VectorStore):
    def __init__(self, client: TigrisClient, embeddings: Embeddings, index_name: str):
        """Initialize the Tigris vector store."""
        try:
            import tigrisdb  # noqa: F401
        except ImportError:
            raise ValueError(
                "Could not import tigrisdb python package. "
                "Please install it with `pip install tigrisdb`"
            )

        self._embed_fn = embeddings
        self._vector_store = TigrisVectorStore(client.get_search(), index_name)

    @property
    def search_index(self) -> TigrisVectorStore:
        return self._vector_store

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids for documents.
                Ids will be autogenerated if not provided.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        docs = self._prep_docs(texts, metadatas, ids)
        result = self.search_index.add_documents(docs)
        return [r.id for r in result]

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[TigrisFilter] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to query."""
        docs_with_scores = self.similarity_search_with_score(query, k, filter)
        return [doc for doc, _ in docs_with_scores]

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[TigrisFilter] = None,
    ) -> List[Tuple[Document, float]]:
        """Run similarity search with Tigris, returning distances.

        Args:
            query (str): Query text to search for.
            k (int): Number of results to return. Defaults to 4.
            filter (Optional[TigrisFilter]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of documents most similar to
                the query text, with distance as a float.
        """
        vector = self._embed_fn.embed_query(query)
        result = self.search_index.similarity_search(
            vector=vector, k=k, filter_by=filter
        )
        docs: List[Tuple[Document, float]] = []
        for r in result:
            docs.append(
                (
                    Document(
                        page_content=r.doc["text"], metadata=r.doc.get("metadata")
                    ),
                    r.score,
                )
            )
        return docs

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        client: Optional[TigrisClient] = None,
        index_name: Optional[str] = None,
        **kwargs: Any,
    ) -> Tigris:
        """Return VectorStore initialized from texts and embeddings."""
        if not index_name:
            raise ValueError("`index_name` is required")

        if not client:
            client = TigrisClient()
        store = cls(client, embedding, index_name)
        store.add_texts(texts=texts, metadatas=metadatas, ids=ids)
        return store

    def _prep_docs(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]],
        ids: Optional[List[str]],
    ) -> List[TigrisDocument]:
        embeddings: List[List[float]] = self._embed_fn.embed_documents(list(texts))
        docs: List[TigrisDocument] = []
        for t, m, e, _id in itertools.zip_longest(
            texts, metadatas or [], embeddings or [], ids or []
        ):
            doc: TigrisDocument = {
                "text": t,
                "embeddings": e or [],
                "metadata": m or {},
            }
            if _id:
                doc["id"] = _id
            docs.append(doc)
        return docs
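A minimal usage sketch of the wrapper above (not part of the original source). It assumes `pip install tigrisdb` and that the tigrisdb client can already find your project credentials (its configuration mechanism is not shown here); the index name and `OpenAIEmbeddings` choice are placeholders:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Tigris

# from_texts constructs a default TigrisClient() when none is passed, so the
# tigrisdb client must already be configured with your credentials.
store = Tigris.from_texts(
    ["Tigris pairs a document database with vector search"],
    OpenAIEmbeddings(),
    index_name="my_embeddings",
)
docs = store.similarity_search("vector search databases", k=1)
```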

Source code for langchain.vectorstores.typesense

"""Wrapper around Typesense vector search."""
from __future__ import annotations

import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorStore

if TYPE_CHECKING:
    from typesense.client import Client
    from typesense.collection import Collection


class Typesense(VectorStore):
    """Wrapper around Typesense vector search.

    To use, you should have the ``typesense`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings.openai import OpenAIEmbeddings
            from langchain.vectorstores import Typesense
            import typesense

            node = {
                "host": "localhost",  # For Typesense Cloud use xxx.a1.typesense.net
                "port": "8108",       # For Typesense Cloud use 443
                "protocol": "http",   # For Typesense Cloud use https
            }
            typesense_client = typesense.Client(
                {
                    "nodes": [node],
                    "api_key": "<API_KEY>",
                    "connection_timeout_seconds": 2,
                }
            )
            typesense_collection_name = "langchain-memory"

            embedding = OpenAIEmbeddings()
            vectorstore = Typesense(
                typesense_client=typesense_client,
                embedding=embedding,
                typesense_collection_name=typesense_collection_name,
                text_key="text",
            )
    """

    def __init__(
        self,
        typesense_client: Client,
        embedding: Embeddings,
        *,
        typesense_collection_name: Optional[str] = None,
        text_key: str = "text",
    ):
        """Initialize with Typesense client."""
        try:
            from typesense import Client
        except ImportError:
            raise ValueError(
                "Could not import typesense python package. "
                "Please install it with `pip install typesense`."
            )
        if not isinstance(typesense_client, Client):
            raise ValueError(
                f"typesense_client should be an instance of typesense.Client, "
                f"got {type(typesense_client)}"
            )
        self._typesense_client = typesense_client
        self._embedding = embedding
        self._typesense_collection_name = (
            typesense_collection_name or f"langchain-{str(uuid.uuid4())}"
        )
        self._text_key = text_key

    @property
    def _collection(self) -> Collection:
        return self._typesense_client.collections[self._typesense_collection_name]

    def _prep_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]],
        ids: Optional[List[str]],
    ) -> List[dict]:
        """Embed and create the documents."""
        _ids = ids or (str(uuid.uuid4()) for _ in texts)
        _metadatas: Iterable[dict] = metadatas or ({} for _ in texts)
        embedded_texts = self._embedding.embed_documents(list(texts))
        return [
            {"id": _id, "vec": vec, f"{self._text_key}": text, "metadata": metadata}
            for _id, vec, text, metadata in zip(_ids, embedded_texts, texts, _metadatas)
        ]

    def _create_collection(self, num_dim: int) -> None:
        fields = [
            {"name": "vec", "type": "float[]", "num_dim": num_dim},
            {"name": f"{self._text_key}", "type": "string"},
            {"name": ".*", "type": "auto"},
        ]
        self._typesense_client.collections.create(
            {"name": self._typesense_collection_name, "fields": fields}
        )

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embedding and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids to associate with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        from typesense.exceptions import ObjectNotFound

        docs = self._prep_texts(texts, metadatas, ids)
        try:
            self._collection.documents.import_(docs, {"action": "upsert"})
        except ObjectNotFound:
            # Create the collection if it doesn't already exist
            self._create_collection(len(docs[0]["vec"]))
            self._collection.documents.import_(docs, {"action": "upsert"})
        return [doc["id"] for doc in docs]

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 10,
        filter: Optional[str] = "",
    ) -> List[Tuple[Document, float]]:
        """Return typesense documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 10. Note that
                Typesense returns a minimum of 10 results.
            filter: typesense filter_by expression to filter documents on.

        Returns:
            List of Documents most similar to the query and a score for each.
        """
        embedded_query = [str(x) for x in self._embedding.embed_query(query)]
        query_obj = {
            "q": "*",
            "vector_query": f'vec:([{",".join(embedded_query)}], k:{k})',
            "filter_by": filter,
            "collection": self._typesense_collection_name,
        }
        docs = []
        response = self._typesense_client.multi_search.perform(
            {"searches": [query_obj]}, {}
        )
        for hit in response["results"][0]["hits"]:
            document = hit["document"]
            metadata = document["metadata"]
            text = document[self._text_key]
            score = hit["vector_distance"]
            docs.append((Document(page_content=text, metadata=metadata), score))
        return docs

    def similarity_search(
        self,
        query: str,
        k: int = 10,
        filter: Optional[str] = "",
        **kwargs: Any,
    ) -> List[Document]:
        """Return typesense documents most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 10. Note that
                Typesense returns a minimum of 10 results.
            filter: typesense filter_by expression to filter documents on.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter)
        return [doc for doc, _ in docs_and_score]

    @classmethod
    def from_client_params(
        cls,
        embedding: Embeddings,
        *,
        host: str = "localhost",
        port: Union[str, int] = "8108",
        protocol: str = "http",
        typesense_api_key: Optional[str] = None,
        connection_timeout_seconds: int = 2,
        **kwargs: Any,
    ) -> Typesense:
        """Initialize Typesense directly from client parameters.

        Example:
            .. code-block:: python

                from langchain.embeddings.openai import OpenAIEmbeddings
                from langchain.vectorstores import Typesense

                # Pass in typesense_api_key as a kwarg or set the env var
                # "TYPESENSE_API_KEY".
                vectorstore = Typesense(
                    OpenAIEmbeddings(),
                    host="localhost",
                    port="8108",
                    protocol="http",
                    typesense_collection_name="langchain-memory",
                )
        """
        try:
            from typesense import Client
        except ImportError:
            raise ValueError(
                "Could not import typesense python package. "
                "Please install it with `pip install typesense`."
            )
        node = {
            "host": host,
            "port": str(port),
            "protocol": protocol,
        }
        typesense_api_key = typesense_api_key or get_from_env(
            "typesense_api_key", "TYPESENSE_API_KEY"
        )
        client_config = {
            "nodes": [node],
            "api_key": typesense_api_key,
            "connection_timeout_seconds": connection_timeout_seconds,
        }
        return cls(Client(client_config), embedding, **kwargs)

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        typesense_client: Optional[Client] = None,
        typesense_client_params: Optional[dict] = None,
        typesense_collection_name: Optional[str] = None,
        text_key: str = "text",
        **kwargs: Any,
    ) -> Typesense:
        """Construct a Typesense wrapper from raw text."""
        if typesense_client:
            vectorstore = cls(typesense_client, embedding, **kwargs)
        elif typesense_client_params:
            vectorstore = cls.from_client_params(
                embedding, **typesense_client_params, **kwargs
            )
        else:
            raise ValueError(
                "Must specify one of typesense_client or typesense_client_params."
            )
        vectorstore.add_texts(texts, metadatas=metadatas, ids=ids)
        return vectorstore
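A minimal usage sketch of `from_texts` with `typesense_client_params` (not part of the original source). It assumes a local Typesense server; the API key is a placeholder and could instead come from the `TYPESENSE_API_KEY` environment variable, and `OpenAIEmbeddings` is illustrative:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Typesense

store = Typesense.from_texts(
    ["Typesense is an open source search engine"],
    OpenAIEmbeddings(),
    typesense_client_params={
        "host": "localhost",
        "port": "8108",
        "protocol": "http",
        "typesense_api_key": "<API_KEY>",  # placeholder
    },
    typesense_collection_name="langchain-memory",
)
# k defaults to 10; Typesense returns at least 10 results when available.
docs = store.similarity_search("open source search", k=10)
```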

Source code for langchain.vectorstores.utils

"""Utility functions for working with vectors and vectorstores."""
from typing import List

import numpy as np

from langchain.math_utils import cosine_similarity


def maximal_marginal_relevance(
    query_embedding: np.ndarray,
    embedding_list: list,
    lambda_mult: float = 0.5,
    k: int = 4,
) -> List[int]:
    """Calculate maximal marginal relevance."""
    if min(k, len(embedding_list)) <= 0:
        return []
    if query_embedding.ndim == 1:
        query_embedding = np.expand_dims(query_embedding, axis=0)
    similarity_to_query = cosine_similarity(query_embedding, embedding_list)[0]
    most_similar = int(np.argmax(similarity_to_query))
    idxs = [most_similar]
    selected = np.array([embedding_list[most_similar]])
    while len(idxs) < min(k, len(embedding_list)):
        best_score = -np.inf
        idx_to_add = -1
        similarity_to_selected = cosine_similarity(embedding_list, selected)
        for i, query_score in enumerate(similarity_to_query):
            if i in idxs:
                continue
            redundant_score = max(similarity_to_selected[i])
            equation_score = (
                lambda_mult * query_score - (1 - lambda_mult) * redundant_score
            )
            if equation_score > best_score:
                best_score = equation_score
                idx_to_add = i
        idxs.append(idx_to_add)
        selected = np.append(selected, [embedding_list[idx_to_add]], axis=0)
    return idxs
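The loop above greedily picks the candidate maximizing `lambda_mult * sim(query, candidate) - (1 - lambda_mult) * max(sim(candidate, selected))`. A tiny worked sketch (not part of the original source; the vectors are made up for illustration):

```python
import numpy as np
from langchain.vectorstores.utils import maximal_marginal_relevance

query = np.array([1.0, 0.0])
candidates = [
    np.array([1.0, 0.0]),    # identical to the query
    np.array([0.99, 0.14]),  # near-duplicate of the first candidate
    np.array([0.6, 0.8]),    # less relevant but more diverse
]
# With lambda_mult=0.3 the redundancy penalty dominates, so after picking
# index 0 the diverse candidate wins: [0, 2].
print(maximal_marginal_relevance(query, candidates, lambda_mult=0.3, k=2))
# With lambda_mult=1.0 only query similarity counts: [0, 1].
print(maximal_marginal_relevance(query, candidates, lambda_mult=1.0, k=2))
```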

Source code for langchain.vectorstores.vectara

"""Wrapper around Vectara vector database."""
from __future__ import annotations

import json
import logging
import os
from hashlib import md5
from typing import Any, Iterable, List, Optional, Tuple, Type

import requests
from pydantic import Field

from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore, VectorStoreRetriever


class Vectara(VectorStore):
    """Implementation of Vector Store using Vectara (https://vectara.com).

    Example:
        .. code-block:: python

            from langchain.vectorstores import Vectara

            vectorstore = Vectara(
                vectara_customer_id=vectara_customer_id,
                vectara_corpus_id=vectara_corpus_id,
                vectara_api_key=vectara_api_key
            )
    """

    def __init__(
        self,
        vectara_customer_id: Optional[str] = None,
        vectara_corpus_id: Optional[str] = None,
        vectara_api_key: Optional[str] = None,
    ):
        """Initialize with Vectara API."""
        self._vectara_customer_id = vectara_customer_id or os.environ.get(
            "VECTARA_CUSTOMER_ID"
        )
        self._vectara_corpus_id = vectara_corpus_id or os.environ.get(
            "VECTARA_CORPUS_ID"
        )
        self._vectara_api_key = vectara_api_key or os.environ.get("VECTARA_API_KEY")
        if (
            self._vectara_customer_id is None
            or self._vectara_corpus_id is None
            or self._vectara_api_key is None
        ):
            logging.warning(
                "Can't find Vectara credentials, customer_id or corpus_id in "
                "environment."
            )
        else:
            logging.debug(f"Using corpus id {self._vectara_corpus_id}")
        self._session = requests.Session()  # to reuse connections
        adapter = requests.adapters.HTTPAdapter(max_retries=3)
        self._session.mount("http://", adapter)

    def _get_post_headers(self) -> dict:
        """Returns headers that should be attached to each post request."""
        return {
            "x-api-key": self._vectara_api_key,
            "customer-id": self._vectara_customer_id,
            "Content-Type": "application/json",
        }

    def _delete_doc(self, doc_id: str) -> bool:
        """
        Delete a document from the Vectara corpus.

        Args:
            doc_id (str): ID of the document to delete.

        Returns:
            bool: True if the deletion was successful, False otherwise.
        """
        body = {
            "customer_id": self._vectara_customer_id,
            "corpus_id": self._vectara_corpus_id,
            "document_id": doc_id,
        }
        response = self._session.post(
            "https://api.vectara.io/v1/delete-doc",
            data=json.dumps(body),
            verify=True,
            headers=self._get_post_headers(),
        )
        if response.status_code != 200:
            logging.error(
                f"Delete request failed for doc_id = {doc_id} with status code "
                f"{response.status_code}, reason {response.reason}, text "
                f"{response.text}"
            )
            return False
        return True

    def _index_doc(self, doc: dict) -> str:
        request: dict[str, Any] = {}
        request["customer_id"] = self._vectara_customer_id
        request["corpus_id"] = self._vectara_corpus_id
        request["document"] = doc

        response = self._session.post(
            headers=self._get_post_headers(),
            url="https://api.vectara.io/v1/core/index",
            data=json.dumps(request),
            timeout=30,
            verify=True,
        )

        status_code = response.status_code

        result = response.json()
        status_str = result["status"]["code"] if "status" in result else None
        if status_code == 409 or status_str and (status_str == "ALREADY_EXISTS"):
            return "E_ALREADY_EXISTS"
        elif status_str and (status_str == "FORBIDDEN"):
            return "E_NO_PERMISSIONS"
        else:
            return "E_SUCCEEDED"

    def add_files(
        self,
        files_list: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """
        Vectara provides a way to add documents directly via its API, where
        pre-processing and chunking occur internally in an optimal way.
        This method provides a way to use that API in LangChain.

        Args:
            files_list: Iterable of strings, each representing a local file path.
                Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx,
                etc.; see the API docs for the full list.
            metadatas: Optional list of metadatas associated with each file.

        Returns:
            List of ids associated with each of the files indexed.
        """
        doc_ids = []
        for inx, file in enumerate(files_list):
            if not os.path.exists(file):
                logging.error(f"File {file} does not exist, skipping")
                continue
            md = metadatas[inx] if metadatas else {}
            files: dict = {
                "file": (file, open(file, "rb")),
                "doc_metadata": json.dumps(md),
            }
            headers = self._get_post_headers()
            headers.pop("Content-Type")
            response = self._session.post(
                f"https://api.vectara.io/upload?c={self._vectara_customer_id}&o={self._vectara_corpus_id}&d=True",
                files=files,
                verify=True,
                headers=headers,
            )

            if response.status_code == 409:
                doc_id = response.json()["document"]["documentId"]
                logging.info(
                    f"File {file} already exists on Vectara (doc_id={doc_id}), skipping"
                )
            elif response.status_code == 200:
                doc_id = response.json()["document"]["documentId"]
                doc_ids.append(doc_id)
            else:
                logging.info(f"Error indexing file {file}: {response.json()}")

        return doc_ids

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        doc_metadata: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            doc_metadata: Optional metadata for the document.

        This function indexes all the input text strings in the Vectara corpus
        as a single Vectara document, where each input text is considered a
        "part" and the metadata are associated with each part.
        If 'doc_metadata' is provided, it is associated with the Vectara document.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        doc_hash = md5()
        for t in texts:
            doc_hash.update(t.encode())
        doc_id = doc_hash.hexdigest()
        if metadatas is None:
            metadatas = [{} for _ in texts]
        if doc_metadata:
            doc_metadata["source"] = "langchain"
        else:
            doc_metadata = {"source": "langchain"}
        doc = {
            "document_id": doc_id,
            "metadataJson": json.dumps(doc_metadata),
            "parts": [
                {"text": text, "metadataJson": json.dumps(md)}
                for text, md in zip(texts, metadatas)
            ],
        }
        success_str = self._index_doc(doc)
        if success_str == "E_ALREADY_EXISTS":
            self._delete_doc(doc_id)
            self._index_doc(doc)
        elif success_str == "E_NO_PERMISSIONS":
            print(
                """No permissions to add document to Vectara.
                Check your corpus ID, customer ID and API key"""
            )
        return [doc_id]

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 5,
        lambda_val: float = 0.025,
        filter: Optional[str] = None,
        n_sentence_context: int = 0,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return Vectara documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 5.
            lambda_val: lexical match parameter for hybrid search.
            filter: Expression to filter on metadata. For example, a filter can
                be "doc.rating > 3.0 and part.lang = 'deu'"; see
                https://docs.vectara.com/docs/search-apis/sql/filter-overview
                for more details.
            n_sentence_context: number of sentences before/after the matching
                segment to add.

        Returns:
            List of Documents most similar to the query and score for each.
        """
        data = json.dumps(
            {
                "query": [
                    {
                        "query": query,
                        "start": 0,
                        "num_results": k,
                        "context_config": {
                            "sentences_before": n_sentence_context,
                            "sentences_after": n_sentence_context,
                        },
                        "corpus_key": [
                            {
                                "customer_id": self._vectara_customer_id,
                                "corpus_id": self._vectara_corpus_id,
                                "metadataFilter": filter,
0bcb3a155e00-6
"metadataFilter": filter, "lexical_interpolation_config": {"lambda": lambda_val}, } ], } ] } ) response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/query", data=data, timeout=10, ) if response.status_code != 200: logging.error( "Query failed %s", f"(code {response.status_code}, reason {response.reason}, details " f"{response.text})", ) return [] result = response.json() responses = result["responseSet"][0]["response"] vectara_default_metadata = ["lang", "len", "offset"] docs = [ ( Document( page_content=x["text"], metadata={ m["name"]: m["value"] for m in x["metadata"] if m["name"] not in vectara_default_metadata }, ), x["score"], ) for x in responses ] return docs [docs] def similarity_search( self, query: str, k: int = 5, lambda_val: float = 0.025, filter: Optional[str] = None, n_sentence_context: int = 0, **kwargs: Any, ) -> List[Document]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5.
            lambda_val: lexical match parameter for hybrid search.
            filter: SQL-like filter expression on document or part metadata,
                for example "doc.rating > 3.0 and part.lang = 'deu'"; see
                https://docs.vectara.com/docs/search-apis/sql/filter-overview
                for more details.
            n_sentence_context: number of sentences before/after the matching
                segment to add.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(
            query,
            k=k,
            lambda_val=lambda_val,
            filter=filter,
            n_sentence_context=n_sentence_context,
            **kwargs,
        )
        return [doc for doc, _ in docs_and_scores]

    @classmethod
    def from_texts(
        cls: Type[Vectara],
        texts: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> Vectara:
        """Construct Vectara wrapper from raw documents.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import Vectara
                vectara = Vectara.from_texts(
                    texts,
                    vectara_customer_id=customer_id,
                    vectara_corpus_id=corpus_id,
                    vectara_api_key=api_key,
                )
        """
        # Note: Vectara generates its own embeddings, so we ignore the provided
        # embeddings (required by interface).
        doc_metadata = kwargs.pop("doc_metadata", {})
        vectara = cls(**kwargs)
        vectara.add_texts(texts, metadatas, doc_metadata=doc_metadata, **kwargs)
        return vectara

    @classmethod
    def from_files(
        cls: Type[Vectara],
        files: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> Vectara:
        """Construct Vectara wrapper from a list of file paths.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import Vectara
                vectara = Vectara.from_files(
                    files_list,
                    vectara_customer_id=customer_id,
                    vectara_corpus_id=corpus_id,
                    vectara_api_key=api_key,
                )
        """
        # Note: Vectara generates its own embeddings, so we ignore the provided
        # embeddings (required by interface).
        vectara = cls(**kwargs)
        vectara.add_files(files, metadatas)
        return vectara

    def as_retriever(self, **kwargs: Any) -> VectaraRetriever:
        return VectaraRetriever(vectorstore=self, **kwargs)


class VectaraRetriever(VectorStoreRetriever):
    vectorstore: Vectara
    search_kwargs: dict = Field(
        default_factory=lambda: {
            "lambda_val": 0.025,
            "k": 5,
            "filter": "",
            "n_sentence_context": 0,
        }
    )
    """Search params.

    k: Number of Documents to return. Defaults to 5.
    lambda_val: lexical match parameter for hybrid search.
    filter: SQL-like filter expression on document or part metadata,
        for example "doc.rating > 3.0 and part.lang = 'deu'"; see
        https://docs.vectara.com/docs/search-apis/sql/filter-overview
        for more details.
    n_sentence_context: number of sentences before/after the matching
        segment to add.
    """

    def add_texts(
        self,
        texts: List[str],
        metadatas: Optional[List[dict]] = None,
        doc_metadata: Optional[dict] = None,
    ) -> None:
        """Add texts to the Vectara vectorstore.

        Args:
            texts (List[str]): The texts to add.
            metadatas (List[dict]): Metadata dicts; must line up with the texts.
            doc_metadata (dict): Optional metadata for the enclosing document.
        """
        self.vectorstore.add_texts(texts, metadatas, doc_metadata)
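Taken together, from_texts, similarity_search, and as_retriever cover the typical workflow. The following is a minimal usage sketch, not part of the original module: the customer ID, corpus ID, API key, and sample texts are placeholders, and the filter assumes 'topic' is configured as a filterable document attribute on the corpus. Vectara computes embeddings server-side, so no Embeddings object is needed.

from langchain import Vectara

# Index a few texts as parts of a single Vectara document (placeholder credentials).
vectara = Vectara.from_texts(
    ["LangChain integrates many vector stores.", "Vectara hosts the index remotely."],
    vectara_customer_id="<CUSTOMER_ID>",  # placeholder
    vectara_corpus_id="<CORPUS_ID>",      # placeholder
    vectara_api_key="<API_KEY>",          # placeholder
    doc_metadata={"topic": "vectorstores"},
)

# Hybrid search: lambda_val mixes lexical matching into the neural ranking,
# and filter uses Vectara's SQL-like metadata filter syntax.
docs = vectara.similarity_search(
    "which stores are hosted?",
    k=2,
    lambda_val=0.1,
    filter="doc.topic = 'vectorstores'",
)

# Or expose the store as a retriever for use in chains.
retriever = vectara.as_retriever(search_kwargs={"k": 2})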
Source code for langchain.vectorstores.weaviate

"""Wrapper around weaviate vector database."""
from __future__ import annotations

import datetime
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
from uuid import uuid4

import numpy as np

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance


def _default_schema(index_name: str) -> Dict:
    return {
        "class": index_name,
        "properties": [
            {
                "name": "text",
                "dataType": ["text"],
            }
        ],
    }


def _create_weaviate_client(**kwargs: Any) -> Any:
    client = kwargs.get("client")
    if client is not None:
        return client

    weaviate_url = get_from_dict_or_env(kwargs, "weaviate_url", "WEAVIATE_URL")

    try:
        # the weaviate api key param should not be mandatory
        weaviate_api_key = get_from_dict_or_env(
            kwargs, "weaviate_api_key", "WEAVIATE_API_KEY", None
        )
    except ValueError:
        weaviate_api_key = None

    try:
        import weaviate
    except ImportError:
        raise ValueError(
            "Could not import weaviate python package. "
            "Please install it with `pip install weaviate-client`"
        )

    auth = (
        weaviate.auth.AuthApiKey(api_key=weaviate_api_key)
        if weaviate_api_key is not None
        else None
    )
    client = weaviate.Client(weaviate_url, auth_client_secret=auth)
    return client


def _default_score_normalizer(val: float) -> float:
    return 1 - 1 / (1 + np.exp(val))


def _json_serializable(value: Any) -> Any:
    if isinstance(value, datetime.datetime):
        return value.isoformat()
    return value


class Weaviate(VectorStore):
    """Wrapper around Weaviate vector database.

    To use, you should have the ``weaviate-client`` python package installed.

    Example:
        .. code-block:: python

            import weaviate
            from langchain.vectorstores import Weaviate

            client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
            weaviate = Weaviate(client, index_name, text_key)
    """

    def __init__(
        self,
        client: Any,
        index_name: str,
        text_key: str,
        embedding: Optional[Embeddings] = None,
        attributes: Optional[List[str]] = None,
        relevance_score_fn: Optional[
            Callable[[float], float]
        ] = _default_score_normalizer,
        by_text: bool = True,
    ):
        """Initialize with Weaviate client."""
        try:
            import weaviate
        except ImportError:
            raise ValueError(
                "Could not import weaviate python package. "
                "Please install it with `pip install weaviate-client`."
            )
        if not isinstance(client, weaviate.Client):
            raise ValueError(
                f"client should be an instance of weaviate.Client, got {type(client)}"
            )
        self._client = client
        self._index_name = index_name
        self._embedding = embedding
        self._text_key = text_key
        self._query_attrs = [self._text_key]
        self._relevance_score_fn = relevance_score_fn
        self._by_text = by_text
        if attributes is not None:
            self._query_attrs.extend(attributes)

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Upload texts with metadata (properties) to Weaviate."""
        from weaviate.util import get_valid_uuid

        ids = []
        with self._client.batch as batch:
            for i, text in enumerate(texts):
                data_properties = {self._text_key: text}
                if metadatas is not None:
                    for key, val in metadatas[i].items():
                        data_properties[key] = _json_serializable(val)

                # Allow for ids (consistent w/ other methods)
                # or uuids (backwards compatible w/ existing arg).
                # If the UUID of one of the objects already exists,
                # the existing object will be replaced by the new object.
                _id = get_valid_uuid(uuid4())
                if "uuids" in kwargs:
                    _id = kwargs["uuids"][i]
                elif "ids" in kwargs:
                    _id = kwargs["ids"][i]

                if self._embedding is not None:
                    vector = self._embedding.embed_documents([text])[0]
                else:
                    vector = None
                batch.add_data_object(
                    data_object=data_properties,
                    class_name=self._index_name,
                    uuid=_id,
                    vector=vector,
                )
                ids.append(_id)
        return ids

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        if self._by_text:
            return self.similarity_search_by_text(query, k, **kwargs)
        else:
            if self._embedding is None:
                raise ValueError(
                    "_embedding cannot be None for similarity_search when "
                    "_by_text=False"
                )
            embedding = self._embedding.embed_query(query)
            return self.similarity_search_by_vector(embedding, k, **kwargs)

    def similarity_search_by_text(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        content: Dict[str, Any] = {"concepts": [query]}
        if kwargs.get("search_distance"):
            content["certainty"] = kwargs.get("search_distance")
        query_obj = self._client.query.get(self._index_name, self._query_attrs)
        if kwargs.get("where_filter"):
            query_obj = query_obj.with_where(kwargs.get("where_filter"))
        if kwargs.get("additional"):
            query_obj = query_obj.with_additional(kwargs.get("additional"))
        result = query_obj.with_near_text(content).with_limit(k).do()
        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")
        docs = []
        for res in result["data"]["Get"][self._index_name]:
            text = res.pop(self._text_key)
            docs.append(Document(page_content=text, metadata=res))
        return docs

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Look up similar documents by embedding vector in Weaviate."""
        vector = {"vector": embedding}
        query_obj = self._client.query.get(self._index_name, self._query_attrs)
        if kwargs.get("where_filter"):
            query_obj = query_obj.with_where(kwargs.get("where_filter"))
        if kwargs.get("additional"):
            query_obj = query_obj.with_additional(kwargs.get("additional"))
        result = query_obj.with_near_vector(vector).with_limit(k).do()
        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")
        docs = []
        for res in result["data"]["Get"][self._index_name]:
            text = res.pop(self._text_key)
            docs.append(Document(page_content=text, metadata=res))
        return docs
    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if self._embedding is not None:
            embedding = self._embedding.embed_query(query)
        else:
            raise ValueError(
                "max_marginal_relevance_search requires a suitable Embeddings object"
            )
        return self.max_marginal_relevance_search_by_vector(
            embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
        )

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        vector = {"vector": embedding}
        query_obj = self._client.query.get(self._index_name, self._query_attrs)
        if kwargs.get("where_filter"):
            query_obj = query_obj.with_where(kwargs.get("where_filter"))
        results = (
            query_obj.with_additional("vector")
            .with_near_vector(vector)
            .with_limit(fetch_k)
            .do()
        )
        payload = results["data"]["Get"][self._index_name]
        embeddings = [result["_additional"]["vector"] for result in payload]
        mmr_selected = maximal_marginal_relevance(
            np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
        )
        docs = []
        for idx in mmr_selected:
            text = payload[idx].pop(self._text_key)
            payload[idx].pop("_additional")
            meta = payload[idx]
            docs.append(Document(page_content=text, metadata=meta))
        return docs

    def similarity_search_with_score(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Return documents most similar to the query text, with a raw
        similarity score (dot product of stored and query vectors) for each.
        Higher score represents more similarity.
        """
        if self._embedding is None:
            raise ValueError(
                "_embedding cannot be None for similarity_search_with_score"
            )
        content: Dict[str, Any] = {"concepts": [query]}
        if kwargs.get("search_distance"):
            content["certainty"] = kwargs.get("search_distance")
        query_obj = self._client.query.get(self._index_name, self._query_attrs)
        if not self._by_text:
            embedding = self._embedding.embed_query(query)
            vector = {"vector": embedding}
            result = (
                query_obj.with_near_vector(vector)
                .with_limit(k)
                .with_additional("vector")
                .do()
            )
        else:
            result = (
                query_obj.with_near_text(content)
                .with_limit(k)
                .with_additional("vector")
                .do()
            )
        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")
        docs_and_scores = []
        for res in result["data"]["Get"][self._index_name]:
            text = res.pop(self._text_key)
            score = np.dot(
                res["_additional"]["vector"], self._embedding.embed_query(query)
            )
            docs_and_scores.append((Document(page_content=text, metadata=res), score))
        return docs_and_scores

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and relevance scores, normalized on a scale from 0 to 1.

        0 is dissimilar, 1 is most similar.
        """
        if self._relevance_score_fn is None:
            raise ValueError(
                "relevance_score_fn must be provided to"
                " Weaviate constructor to normalize scores"
            )
        docs_and_scores = self.similarity_search_with_score(query, k=k, **kwargs)
        return [
            (doc, self._relevance_score_fn(score)) for doc, score in docs_and_scores
        ]

    @classmethod
    def from_texts(
        cls: Type[Weaviate],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> Weaviate:
        """Construct Weaviate wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Creates a new index for the embeddings in the Weaviate instance.
            3. Adds the documents to the newly created Weaviate index.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain.vectorstores.weaviate import Weaviate
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                weaviate = Weaviate.from_texts(
                    texts,
                    embeddings,
weaviate = Weaviate.from_texts( texts, embeddings, weaviate_url="http://localhost:8080" ) """ client = _create_weaviate_client(**kwargs) from weaviate.util import get_valid_uuid index_name = kwargs.get("index_name", f"LangChain_{uuid4().hex}") embeddings = embedding.embed_documents(texts) if embedding else None text_key = "text" schema = _default_schema(index_name) attributes = list(metadatas[0].keys()) if metadatas else None # check whether the index already exists if not client.schema.contains(schema): client.schema.create_class(schema) with client.batch as batch: for i, text in enumerate(texts): data_properties = { text_key: text, } if metadatas is not None: for key in metadatas[i].keys(): data_properties[key] = metadatas[i][key] # If the UUID of one of the objects already exists # then the existing objectwill be replaced by the new object. if "uuids" in kwargs: _id = kwargs["uuids"][i] else: _id = get_valid_uuid(uuid4()) # if an embedding strategy is not provided, we let # weaviate create the embedding. Note that this will only # work if weaviate has been installed with a vectorizer module # like text2vec-contextionary for example params = { "uuid": _id, "data_object": data_properties, "class_name": index_name, } if embeddings is not None:
"class_name": index_name, } if embeddings is not None: params["vector"] = embeddings[i] batch.add_data_object(**params) batch.flush() relevance_score_fn = kwargs.get("relevance_score_fn") by_text: bool = kwargs.get("by_text", False) return cls( client, index_name, text_key, embedding=embedding, attributes=attributes, relevance_score_fn=relevance_score_fn, by_text=by_text, ) [docs] def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError("No ids provided to delete.") # TODO: Check if this can be done in bulk for id in ids: self._client.data_object.delete(uuid=id)
Source code for langchain.vectorstores.docarray.base

from abc import ABC
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type

import numpy as np
from pydantic import Field

from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance

if TYPE_CHECKING:
    from docarray import BaseDoc
    from docarray.index.abstract import BaseDocIndex


def _check_docarray_import() -> None:
    try:
        import docarray

        da_version = docarray.__version__.split(".")
        if int(da_version[0]) == 0 and int(da_version[1]) <= 31:
            raise ValueError(
                f"To use the DocArrayHnswSearch VectorStore the docarray "
                f"version >=0.32.0 is expected, received: {docarray.__version__}. "
                f"To upgrade, please run: `pip install -U docarray`."
            )
    except ImportError:
        raise ImportError(
            "Could not import docarray python package. "
            'Please install it with `pip install "langchain[docarray]"`.'
        )


class DocArrayIndex(VectorStore, ABC):
    def __init__(
        self,
        doc_index: "BaseDocIndex",
        embedding: Embeddings,
    ):
        """Initialize a vector store from DocArray's DocIndex."""
        self.doc_index = doc_index
        self.embedding = embedding

    @staticmethod
    def _get_doc_cls(**embeddings_params: Any) -> Type["BaseDoc"]:
        """Get docarray Document class describing the schema of DocIndex."""
        from docarray import BaseDoc
        from docarray.typing import NdArray

        class DocArrayDoc(BaseDoc):
            text: Optional[str]
            embedding: Optional[NdArray] = Field(**embeddings_params)
            metadata: Optional[dict]

        return DocArrayDoc

    @property
    def doc_cls(self) -> Type["BaseDoc"]:
        if self.doc_index._schema is None:
            raise ValueError("doc_index expected to have non-null _schema attribute.")
        return self.doc_index._schema

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        ids: List[str] = []
        embeddings = self.embedding.embed_documents(list(texts))
        for i, (t, e) in enumerate(zip(texts, embeddings)):
            m = metadatas[i] if metadatas else {}
            doc = self.doc_cls(text=t, embedding=e, metadata=m)
            self.doc_index.index([doc])
            ids.append(str(doc.id))
        return ids

    def similarity_search_with_score(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of documents most similar to the query text and
            cosine distance in float for each.
            Lower score represents more similarity.
        """
        query_embedding = self.embedding.embed_query(query)
        query_doc = self.doc_cls(embedding=query_embedding)  # type: ignore
        docs, scores = self.doc_index.find(query_doc, search_field="embedding", limit=k)
        result = [
            (Document(page_content=doc.text, metadata=doc.metadata), score)
            for doc, score in zip(docs, scores)
        ]
        return result

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        results = self.similarity_search_with_score(query, k=k, **kwargs)
        return [doc for doc, _ in results]

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and relevance scores, normalized on a scale from 0 to 1.

        0 is dissimilar, 1 is most similar.
        """
        raise NotImplementedError

    def similarity_search_by_vector(
""" raise NotImplementedError [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ query_doc = self.doc_cls(embedding=embedding) # type: ignore docs = self.doc_index.find( query_doc, search_field="embedding", limit=k ).documents result = [ Document(page_content=doc.text, metadata=doc.metadata) for doc in docs ] return result [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """
        query_embedding = self.embedding.embed_query(query)
        query_doc = self.doc_cls(embedding=query_embedding)  # type: ignore
        docs = self.doc_index.find(
            query_doc, search_field="embedding", limit=fetch_k
        ).documents

        # Forward lambda_mult so the requested degree of diversity takes effect.
        mmr_selected = maximal_marginal_relevance(
            np.array(query_embedding), docs.embedding, lambda_mult=lambda_mult, k=k
        )
        results = [
            Document(page_content=docs[idx].text, metadata=docs[idx].metadata)
            for idx in mmr_selected
        ]
        return results
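The selection itself is delegated to the maximal_marginal_relevance helper imported at the top of this module (its k and lambda_mult keyword arguments appear in the calls above). A small numeric sketch of its behavior with toy vectors, illustrative only: with lambda_mult=1.0 selection is pure relevance, so the two candidates closest to the query should win; with lambda_mult=0.0 it is pure diversity, so the second pick should come from the other cluster.

import numpy as np
from langchain.vectorstores.utils import maximal_marginal_relevance

query = np.array([1.0, 0.0])
candidates = [
    [0.9, 0.1],    # close to the query
    [0.89, 0.11],  # near-duplicate of the first candidate
    [0.1, 0.9],    # far from the query, but adds diversity
]

# Both calls return indices into `candidates`, most relevant first.
relevant = maximal_marginal_relevance(query, candidates, lambda_mult=1.0, k=2)
diverse = maximal_marginal_relevance(query, candidates, lambda_mult=0.0, k=2)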
Source code for langchain.vectorstores.docarray.hnsw

"""Wrapper around Hnswlib store."""
from __future__ import annotations

from typing import Any, List, Literal, Optional

from langchain.embeddings.base import Embeddings
from langchain.vectorstores.docarray.base import (
    DocArrayIndex,
    _check_docarray_import,
)


class DocArrayHnswSearch(DocArrayIndex):
    """Wrapper around HnswLib storage.

    To use it, you should have the ``docarray`` package with version >=0.32.0
    installed. You can install it with `pip install "langchain[docarray]"`.
    """

    @classmethod
    def from_params(
        cls,
        embedding: Embeddings,
        work_dir: str,
        n_dim: int,
        dist_metric: Literal["cosine", "ip", "l2"] = "cosine",
        max_elements: int = 1024,
        index: bool = True,
        ef_construction: int = 200,
        ef: int = 10,
        M: int = 16,
        allow_replace_deleted: bool = True,
        num_threads: int = 1,
        **kwargs: Any,
    ) -> DocArrayHnswSearch:
        """Initialize DocArrayHnswSearch store.

        Args:
            embedding (Embeddings): Embedding function.
            work_dir (str): path to the location where all the data will be stored.
            n_dim (int): dimension of an embedding.
            dist_metric (str): Distance metric for DocArrayHnswSearch can be one of:
                "cosine", "ip", and "l2". Defaults to "cosine".
"cosine", "ip", and "l2". Defaults to "cosine". max_elements (int): Maximum number of vectors that can be stored. Defaults to 1024. index (bool): Whether an index should be built for this field. Defaults to True. ef_construction (int): defines a construction time/accuracy trade-off. Defaults to 200. ef (int): parameter controlling query time/accuracy trade-off. Defaults to 10. M (int): parameter that defines the maximum number of outgoing connections in the graph. Defaults to 16. allow_replace_deleted (bool): Enables replacing of deleted elements with new added ones. Defaults to True. num_threads (int): Sets the number of cpu threads to use. Defaults to 1. **kwargs: Other keyword arguments to be passed to the get_doc_cls method. """ _check_docarray_import() from docarray.index import HnswDocumentIndex doc_cls = cls._get_doc_cls( dim=n_dim, space=dist_metric, max_elements=max_elements, index=index, ef_construction=ef_construction, ef=ef, M=M, allow_replace_deleted=allow_replace_deleted, num_threads=num_threads, **kwargs, ) doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir) # type: ignore return cls(doc_index, embedding) [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, work_dir: Optional[str] = None,
        n_dim: Optional[int] = None,
        **kwargs: Any,
    ) -> DocArrayHnswSearch:
        """Create a DocArrayHnswSearch store and insert data.

        Args:
            texts (List[str]): Text data.
            embedding (Embeddings): Embedding function.
            metadatas (Optional[List[dict]]): Metadata for each text if it exists.
                Defaults to None.
            work_dir (str): path to the location where all the data will be stored.
            n_dim (int): dimension of an embedding.
            **kwargs: Other keyword arguments to be passed to the __init__ method.

        Returns:
            DocArrayHnswSearch Vector Store
        """
        if work_dir is None:
            raise ValueError("`work_dir` parameter has not been set.")
        if n_dim is None:
            raise ValueError("`n_dim` parameter has not been set.")

        store = cls.from_params(embedding, work_dir, n_dim, **kwargs)
        store.add_texts(texts=texts, metadatas=metadatas)
        return store
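A minimal sketch of building an on-disk index with the class above, not part of the original module. The work_dir path is a placeholder, and n_dim must match the embedding size: 1536 below assumes OpenAI's default text-embedding-ada-002 model, so adjust it for your embedding function.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.docarray.hnsw import DocArrayHnswSearch

store = DocArrayHnswSearch.from_texts(
    ["alpha text", "beta text", "gamma text"],
    OpenAIEmbeddings(),
    work_dir="/tmp/hnsw_index",  # placeholder path; index files persist here
    n_dim=1536,                  # must equal the embedding dimension
    dist_metric="cosine",        # forwarded to from_params via **kwargs
)
docs = store.similarity_search("something alpha-like", k=1)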
Source code for langchain.vectorstores.docarray.in_memory

"""Wrapper around in-memory storage."""
from __future__ import annotations

from typing import Any, Dict, List, Literal, Optional

from langchain.embeddings.base import Embeddings
from langchain.vectorstores.docarray.base import (
    DocArrayIndex,
    _check_docarray_import,
)


class DocArrayInMemorySearch(DocArrayIndex):
    """Wrapper around in-memory storage for exact search.

    To use it, you should have the ``docarray`` package with version >=0.32.0
    installed. You can install it with `pip install "langchain[docarray]"`.
    """

    @classmethod
    def from_params(
        cls,
        embedding: Embeddings,
        metric: Literal[
            "cosine_sim", "euclidean_dist", "sqeuclidean_dist"
        ] = "cosine_sim",
        **kwargs: Any,
    ) -> DocArrayInMemorySearch:
        """Initialize DocArrayInMemorySearch store.

        Args:
            embedding (Embeddings): Embedding function.
            metric (str): metric for exact nearest-neighbor search.
                Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
                Defaults to "cosine_sim".
            **kwargs: Other keyword arguments to be passed to the get_doc_cls method.
        """
        _check_docarray_import()
        from docarray.index import InMemoryExactNNIndex

        doc_cls = cls._get_doc_cls(space=metric, **kwargs)
        doc_index = InMemoryExactNNIndex[doc_cls]()  # type: ignore
        return cls(doc_index, embedding)

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[Dict[Any, Any]]] = None,
        **kwargs: Any,
    ) -> DocArrayInMemorySearch:
        """Create a DocArrayInMemorySearch store and insert data.

        Args:
            texts (List[str]): Text data.
            embedding (Embeddings): Embedding function.
            metadatas (Optional[List[Dict[Any, Any]]]): Metadata for each text
                if it exists. Defaults to None.
            metric (str): metric for exact nearest-neighbor search.
                Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
                Defaults to "cosine_sim".

        Returns:
            DocArrayInMemorySearch Vector Store
        """
        store = cls.from_params(embedding, **kwargs)
        store.add_texts(texts=texts, metadatas=metadatas)
        return store
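A matching sketch for the in-memory variant, not part of the original module; it is handy for tests and small corpora since nothing is persisted and search is exact rather than approximate. OpenAI credentials are assumed for the embedding function.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.docarray.in_memory import DocArrayInMemorySearch

store = DocArrayInMemorySearch.from_texts(
    ["alpha text", "beta text"],
    OpenAIEmbeddings(),
    metric="cosine_sim",  # forwarded to from_params via **kwargs
)
docs_and_scores = store.similarity_search_with_score("alpha", k=1)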