{
"data": {
"edges": [
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "Agent",
"id": "Agent-ImgzA",
"name": "response",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-ZNoa2",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-Agent-ImgzA{œdataTypeœ:œAgentœ,œidœ:œAgent-ImgzAœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-ZNoa2{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-ZNoa2œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "Agent-ImgzA",
"sourceHandle": "{œdataTypeœ: œAgentœ, œidœ: œAgent-ImgzAœ, œnameœ: œresponseœ, œoutput_typesœ: [œMessageœ]}",
"target": "ChatOutput-ZNoa2",
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-ZNoa2œ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "Agent",
"id": "Agent-cj2PH",
"name": "response",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "Agent-ImgzA",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-Agent-cj2PH{œdataTypeœ:œAgentœ,œidœ:œAgent-cj2PHœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-Agent-ImgzA{œfieldNameœ:œinput_valueœ,œidœ:œAgent-ImgzAœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "Agent-cj2PH",
"sourceHandle": "{œdataTypeœ: œAgentœ, œidœ: œAgent-cj2PHœ, œnameœ: œresponseœ, œoutput_typesœ: [œMessageœ]}",
"target": "Agent-ImgzA",
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œAgent-ImgzAœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "Agent",
"id": "Agent-rPh1n",
"name": "response",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "Agent-cj2PH",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-Agent-rPh1n{œdataTypeœ:œAgentœ,œidœ:œAgent-rPh1nœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-Agent-cj2PH{œfieldNameœ:œinput_valueœ,œidœ:œAgent-cj2PHœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "Agent-rPh1n",
"sourceHandle": "{œdataTypeœ: œAgentœ, œidœ: œAgent-rPh1nœ, œnameœ: œresponseœ, œoutput_typesœ: [œMessageœ]}",
"target": "Agent-cj2PH",
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œAgent-cj2PHœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "SearchAPI",
"id": "SearchAPI-Aez0t",
"name": "api_build_tool",
"output_types": [
"Tool"
]
},
"targetHandle": {
"fieldName": "tools",
"id": "Agent-rPh1n",
"inputTypes": [
"Tool",
"BaseTool",
"StructuredTool"
],
"type": "other"
}
},
"id": "reactflow__edge-SearchAPI-Aez0t{œdataTypeœ:œSearchAPIœ,œidœ:œSearchAPI-Aez0tœ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-Agent-rPh1n{œfieldNameœ:œtoolsœ,œidœ:œAgent-rPh1nœ,œinputTypesœ:[œToolœ,œBaseToolœ,œStructuredToolœ],œtypeœ:œotherœ}",
"source": "SearchAPI-Aez0t",
"sourceHandle": "{œdataTypeœ: œSearchAPIœ, œidœ: œSearchAPI-Aez0tœ, œnameœ: œapi_build_toolœ, œoutput_typesœ: [œToolœ]}",
"target": "Agent-rPh1n",
"targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œAgent-rPh1nœ, œinputTypesœ: [œToolœ, œBaseToolœ, œStructuredToolœ], œtypeœ: œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "url_content_fetcher",
"id": "url_content_fetcher-AyGpn",
"name": "api_build_tool",
"output_types": [
"Tool"
]
},
"targetHandle": {
"fieldName": "tools",
"id": "Agent-cj2PH",
"inputTypes": [
"Tool",
"BaseTool",
"StructuredTool"
],
"type": "other"
}
},
"id": "reactflow__edge-url_content_fetcher-AyGpn{œdataTypeœ:œurl_content_fetcherœ,œidœ:œurl_content_fetcher-AyGpnœ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-Agent-cj2PH{œfieldNameœ:œtoolsœ,œidœ:œAgent-cj2PHœ,œinputTypesœ:[œToolœ,œBaseToolœ,œStructuredToolœ],œtypeœ:œotherœ}",
"source": "url_content_fetcher-AyGpn",
"sourceHandle": "{œdataTypeœ: œurl_content_fetcherœ, œidœ: œurl_content_fetcher-AyGpnœ, œnameœ: œapi_build_toolœ, œoutput_typesœ: [œToolœ]}",
"target": "Agent-cj2PH",
"targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œAgent-cj2PHœ, œinputTypesœ: [œToolœ, œBaseToolœ, œStructuredToolœ], œtypeœ: œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "CalculatorTool",
"id": "CalculatorTool-dGfrj",
"name": "api_build_tool",
"output_types": [
"Tool"
]
},
"targetHandle": {
"fieldName": "tools",
"id": "Agent-ImgzA",
"inputTypes": [
"Tool",
"BaseTool",
"StructuredTool"
],
"type": "other"
}
},
"id": "reactflow__edge-CalculatorTool-dGfrj{œdataTypeœ:œCalculatorToolœ,œidœ:œCalculatorTool-dGfrjœ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-Agent-ImgzA{œfieldNameœ:œtoolsœ,œidœ:œAgent-ImgzAœ,œinputTypesœ:[œToolœ,œBaseToolœ,œStructuredToolœ],œtypeœ:œotherœ}",
"source": "CalculatorTool-dGfrj",
"sourceHandle": "{œdataTypeœ: œCalculatorToolœ, œidœ: œCalculatorTool-dGfrjœ, œnameœ: œapi_build_toolœ, œoutput_typesœ: [œToolœ]}",
"target": "Agent-ImgzA",
"targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œAgent-ImgzAœ, œinputTypesœ: [œToolœ, œBaseToolœ, œStructuredToolœ], œtypeœ: œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "ChatInput",
"id": "ChatInput-CIU0F",
"name": "message",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "Agent-rPh1n",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-ChatInput-CIU0F{œdataTypeœ:œChatInputœ,œidœ:œChatInput-CIU0Fœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Agent-rPh1n{œfieldNameœ:œinput_valueœ,œidœ:œAgent-rPh1nœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "ChatInput-CIU0F",
"sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-CIU0Fœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}",
"target": "Agent-rPh1n",
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œAgent-rPh1nœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
}
],
"nodes": [
{
"data": {
"id": "ChatInput-CIU0F",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Get chat inputs from the Playground.",
"display_name": "Chat Input",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"should_store_message",
"sender",
"sender_name",
"session_id",
"files"
],
"frozen": false,
"icon": "MessagesSquare",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Message",
"method": "message_response",
"name": "message",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"background_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Background Color",
"dynamic": false,
"info": "The background color of the icon.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "background_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"chat_icon": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Icon",
"dynamic": false,
"info": "The icon of the message.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "chat_icon",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, FileInput, MessageTextInput, MultilineInput, Output\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_USER, MESSAGE_SENDER_USER\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\"background_color\": background_color, \"text_color\": text_color, \"icon\": icon},\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
"advanced": true,
"display_name": "Files",
"dynamic": false,
"fileTypes": [
"txt",
"md",
"mdx",
"csv",
"json",
"yaml",
"yml",
"xml",
"html",
"htm",
"pdf",
"docx",
"py",
"sh",
"sql",
"js",
"ts",
"tsx",
"jpg",
"jpeg",
"png",
"bmp",
"image"
],
"file_path": "",
"info": "Files to be sent with the message.",
"list": true,
"name": "files",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "file",
"value": ""
},
"input_value": {
"_input_type": "MultilineInput",
"advanced": false,
"display_name": "Text",
"dynamic": false,
"info": "Message to be passed as input.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "Create a travel itinerary for a trip from São Paulo to Uberlândia, MG on August 23, 2024. The traveler enjoys drinking beer, eating pão de queijo, and drinking special coffee."
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Sender Type",
"dynamic": false,
"info": "Type of sender.",
"name": "sender",
"options": [
"Machine",
"User"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "str",
"value": "User"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Name of the sender.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "User"
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"should_store_message": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Store Messages",
"dynamic": false,
"info": "Store the message in the history.",
"list": false,
"name": "should_store_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"text_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Text Color",
"dynamic": false,
"info": "The text color of the name",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "text_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
}
},
"type": "ChatInput"
},
"dragging": false,
"height": 234,
"id": "ChatInput-CIU0F",
"position": {
"x": 1756.77096149088,
"y": 305.19157712497963
},
"positionAbsolute": {
"x": 1756.77096149088,
"y": 305.19157712497963
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"description": "Display a chat message in the Playground.",
"display_name": "Chat Output",
"id": "ChatOutput-ZNoa2",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Display a chat message in the Playground.",
"display_name": "Chat Output",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"should_store_message",
"sender",
"sender_name",
"session_id",
"data_template",
"background_color",
"chat_icon",
"text_color"
],
"frozen": false,
"icon": "MessagesSquare",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Message",
"method": "message_response",
"name": "message",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"background_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Background Color",
"dynamic": false,
"info": "The background color of the icon.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "background_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"chat_icon": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Icon",
"dynamic": false,
"info": "The icon of the message.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "chat_icon",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MessageTextInput, Output\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n\n inputs = [\n MessageInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n source_dict[\"source\"] = source\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n message = self.input_value if isinstance(self.input_value, Message) else Message(text=self.input_value)\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"data_template": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Data Template",
"dynamic": false,
"info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "data_template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "{text}"
},
"input_value": {
"_input_type": "MessageInput",
"advanced": false,
"display_name": "Text",
"dynamic": false,
"info": "Message to be passed as output.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Sender Type",
"dynamic": false,
"info": "Type of sender.",
"name": "sender",
"options": [
"Machine",
"User"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Machine"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Name of the sender.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "AI"
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"should_store_message": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Store Messages",
"dynamic": false,
"info": "Store the message in the history.",
"list": false,
"name": "should_store_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"text_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Text Color",
"dynamic": false,
"info": "The text color of the name",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "text_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
},
"tool_mode": false
},
"type": "ChatOutput"
},
"dragging": false,
"height": 234,
"id": "ChatOutput-ZNoa2",
"position": {
"x": 4349.229697347143,
"y": 620.5490494265098
},
"positionAbsolute": {
"x": 4349.229697347143,
"y": 620.5490494265098
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"id": "SearchAPI-Aez0t",
"node": {
"base_classes": [
"Data",
"list",
"Tool"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Call the searchapi.io API with result limiting",
"display_name": "Search API",
"documentation": "https://www.searchapi.io/docs/google",
"edited": false,
"field_order": [
"engine",
"api_key",
"input_value",
"search_params"
],
"frozen": false,
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"official": false,
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Data",
"method": "run_model",
"name": "api_run_model",
"required_inputs": [
"api_key"
],
"selected": "Data",
"types": [
"Data"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Tool",
"method": "build_tool",
"name": "api_build_tool",
"required_inputs": [
"api_key"
],
"selected": "Tool",
"types": [
"Tool"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "SearchAPI API Key",
"dynamic": false,
"info": "",
"input_types": [
"Message"
],
"load_from_db": true,
"name": "api_key",
"password": true,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "str",
"value": "EnE19gGWNyewCPsMj5c1fMGx"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Any\n\nfrom langchain.tools import StructuredTool\nfrom langchain_community.utilities.searchapi import SearchApiAPIWrapper\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import DictInput, IntInput, MessageTextInput, MultilineInput, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass SearchAPIComponent(LCToolComponent):\n display_name: str = \"Search API\"\n description: str = \"Call the searchapi.io API with result limiting\"\n name = \"SearchAPI\"\n documentation: str = \"https://www.searchapi.io/docs/google\"\n\n inputs = [\n MessageTextInput(name=\"engine\", display_name=\"Engine\", value=\"google\"),\n SecretStrInput(name=\"api_key\", display_name=\"SearchAPI API Key\", required=True),\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input\",\n ),\n DictInput(name=\"search_params\", display_name=\"Search parameters\", advanced=True, is_list=True),\n IntInput(name=\"max_results\", display_name=\"Max Results\", value=5, advanced=True),\n IntInput(name=\"max_snippet_length\", display_name=\"Max Snippet Length\", value=100, advanced=True),\n ]\n\n class SearchAPISchema(BaseModel):\n query: str = Field(..., description=\"The search query\")\n params: dict[str, Any] = Field(default_factory=dict, description=\"Additional search parameters\")\n max_results: int = Field(5, description=\"Maximum number of results to return\")\n max_snippet_length: int = Field(100, description=\"Maximum length of each result snippet\")\n\n def _build_wrapper(self):\n return SearchApiAPIWrapper(engine=self.engine, searchapi_api_key=self.api_key)\n\n def build_tool(self) -> Tool:\n wrapper = self._build_wrapper()\n\n def search_func(\n query: str, params: dict[str, Any] | None = None, max_results: int = 5, max_snippet_length: int = 100\n ) -> list[dict[str, Any]]:\n params = params or {}\n full_results = wrapper.results(query=query, **params)\n organic_results = full_results.get(\"organic_results\", [])[:max_results]\n\n limited_results = []\n for result in organic_results:\n limited_result = {\n \"title\": result.get(\"title\", \"\")[:max_snippet_length],\n \"link\": result.get(\"link\", \"\"),\n \"snippet\": result.get(\"snippet\", \"\")[:max_snippet_length],\n }\n limited_results.append(limited_result)\n\n return limited_results\n\n tool = StructuredTool.from_function(\n name=\"search_api\",\n description=\"Search for recent results using searchapi.io with result limiting\",\n func=search_func,\n args_schema=self.SearchAPISchema,\n )\n\n self.status = f\"Search API Tool created with engine: {self.engine}\"\n return tool\n\n def run_model(self) -> list[Data]:\n tool = self.build_tool()\n results = tool.run(\n {\n \"query\": self.input_value,\n \"params\": self.search_params or {},\n \"max_results\": self.max_results,\n \"max_snippet_length\": self.max_snippet_length,\n }\n )\n\n data_list = [Data(data=result, text=result.get(\"snippet\", \"\")) for result in results]\n\n self.status = data_list\n return data_list\n"
},
"engine": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Engine",
"dynamic": false,
"info": "",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "engine",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "google"
},
"input_value": {
"_input_type": "MultilineInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "langflow docs"
},
"max_results": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Results",
"dynamic": false,
"info": "",
"list": false,
"name": "max_results",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 5
},
"max_snippet_length": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Snippet Length",
"dynamic": false,
"info": "",
"list": false,
"name": "max_snippet_length",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 100
},
"search_params": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Search parameters",
"dynamic": false,
"info": "",
"list": true,
"name": "search_params",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
}
}
},
"type": "SearchAPI"
},
"dragging": false,
"height": 407,
"id": "SearchAPI-Aez0t",
"position": {
"x": 2101.519951743063,
"y": 949.7032293566349
},
"positionAbsolute": {
"x": 2101.519951743063,
"y": 949.7032293566349
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"id": "url_content_fetcher-AyGpn",
"node": {
"base_classes": [
"Data",
"list",
"Tool"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Fetch content from a single URL.",
"display_name": "URL Content Fetcher",
"documentation": "https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/web_base",
"edited": true,
"field_order": [
"url",
"fetch_params"
],
"frozen": false,
"icon": "globe",
"lf_version": "1.0.19.post2",
"official": false,
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Data",
"hidden": true,
"method": "run_model",
"name": "api_run_model",
"selected": "Data",
"types": [
"Data",
"list"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Tool",
"method": "build_tool",
"name": "api_build_tool",
"selected": "Tool",
"types": [
"Tool"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Union, Optional\r\nfrom langchain_community.document_loaders.web_base import WebBaseLoader\r\nfrom langflow.base.langchain_utilities.model import LCToolComponent\r\nfrom langflow.inputs import MessageTextInput, DictInput\r\nfrom langflow.schema import Data\r\nfrom langflow.field_typing import Tool\r\nfrom langchain.tools import StructuredTool\r\nfrom pydantic import BaseModel, Field\r\n\r\nclass URLToolComponent(LCToolComponent):\r\n display_name: str = \"URL Content Fetcher\"\r\n description: str = \"Fetch content from a single URL.\"\r\n name = \"url_content_fetcher\"\r\n documentation: str = \"https://python.langchain.com/docs/modules/data_connection/document_loaders/integrations/web_base\"\r\n icon=\"globe\"\r\n \r\n inputs = [\r\n MessageTextInput(\r\n name=\"url\",\r\n display_name=\"URL\",\r\n info=\"Enter a single URL to fetch content from.\",\r\n ),\r\n DictInput(name=\"fetch_params\", display_name=\"Fetch parameters\", advanced=True, is_list=True),\r\n ]\r\n\r\n class URLContentFetcherSchema(BaseModel):\r\n url: str = Field(..., description=\"The URL to fetch content from\")\r\n fetch_params: Optional[dict] = Field(default=None, description=\"Additional parameters for fetching\")\r\n\r\n def run_model(self) -> Union[Data, list[Data]]:\r\n wrapper = self._build_wrapper()\r\n content = wrapper.load()[0]\r\n data = Data(data={\"content\": content.page_content, \"metadata\": content.metadata}, \r\n text=content.page_content[:500])\r\n self.status = data\r\n return data\r\n\r\n def build_tool(self) -> Tool:\r\n return StructuredTool.from_function(\r\n name=\"url_content_fetcher\",\r\n description=\"Fetch content from a single URL. Input should be a URL string only.\",\r\n func=self._fetch_url_content,\r\n args_schema=self.URLContentFetcherSchema,\r\n )\r\n\r\n def _build_wrapper(self):\r\n return WebBaseLoader(web_paths=[self.url], encoding=\"utf-8\", **self.fetch_params or {})\r\n\r\n def _fetch_url_content(self, url: str, fetch_params: Optional[dict] = None) -> dict:\r\n loader = WebBaseLoader(web_paths=[url], encoding=\"utf-8\", **(fetch_params or {}))\r\n content = loader.load()[0]\r\n return {\r\n \"content\": content.page_content,\r\n \"metadata\": content.metadata\r\n }"
},
"fetch_params": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Fetch parameters",
"dynamic": false,
"info": "",
"list": true,
"name": "fetch_params",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"url": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "URL",
"dynamic": false,
"info": "Enter a single URL to fetch content from.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "url",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
}
},
"type": "url_content_fetcher"
},
"dragging": false,
"height": 234,
"id": "url_content_fetcher-AyGpn",
"position": {
"x": 2834.525991812012,
"y": 939.6518333549263
},
"positionAbsolute": {
"x": 2834.525991812012,
"y": 939.6518333549263
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"id": "CalculatorTool-dGfrj",
"node": {
"base_classes": [
"Data",
"list",
"Sequence",
"Tool"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Perform basic arithmetic operations on a given expression.",
"display_name": "Calculator",
"documentation": "",
"edited": false,
"field_order": [
"expression"
],
"frozen": false,
"icon": "calculator",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"official": false,
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Data",
"method": "run_model",
"name": "api_run_model",
"required_inputs": [],
"selected": "Data",
"types": [
"Data"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Tool",
"method": "build_tool",
"name": "api_build_tool",
"required_inputs": [],
"selected": "Tool",
"types": [
"Tool"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "import ast\nimport operator\n\nfrom langchain.tools import StructuredTool\nfrom langchain_core.tools import ToolException\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import MessageTextInput\nfrom langflow.schema import Data\n\n\nclass CalculatorToolComponent(LCToolComponent):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n name = \"CalculatorTool\"\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n ),\n ]\n\n class CalculatorToolSchema(BaseModel):\n expression: str = Field(..., description=\"The arithmetic expression to evaluate.\")\n\n def run_model(self) -> list[Data]:\n return self._evaluate_expression(self.expression)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"calculator\",\n description=\"Evaluate basic arithmetic expressions. Input should be a string containing the expression.\",\n func=self._eval_expr_with_error,\n args_schema=self.CalculatorToolSchema,\n )\n\n def _eval_expr(self, node):\n # Define the allowed operators\n operators = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n if isinstance(node, ast.Num):\n return node.n\n if isinstance(node, ast.BinOp):\n return operators[type(node.op)](self._eval_expr(node.left), self._eval_expr(node.right))\n if isinstance(node, ast.UnaryOp):\n return operators[type(node.op)](self._eval_expr(node.operand))\n if isinstance(node, ast.Call):\n msg = (\n \"Function calls like sqrt(), sin(), cos() etc. are not supported. \"\n \"Only basic arithmetic operations (+, -, *, /, **) are allowed.\"\n )\n raise TypeError(msg)\n msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(msg)\n\n def _eval_expr_with_error(self, expression: str) -> list[Data]:\n try:\n return self._evaluate_expression(expression)\n except Exception as e:\n raise ToolException(str(e)) from e\n\n def _evaluate_expression(self, expression: str) -> list[Data]:\n try:\n # Parse the expression and evaluate it\n tree = ast.parse(expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n # Format the result to a reasonable number of decimal places\n formatted_result = f\"{result:.6f}\".rstrip(\"0\").rstrip(\".\")\n\n self.status = formatted_result\n return [Data(data={\"result\": formatted_result})]\n\n except (SyntaxError, TypeError, KeyError) as e:\n error_message = f\"Invalid expression: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message, \"input\": expression})]\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return [Data(data={\"error\": error_message, \"input\": expression})]\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error evaluating expression\")\n error_message = f\"Error: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message, \"input\": expression})]\n"
},
"expression": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Expression",
"dynamic": false,
"info": "The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "expression",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "2+2"
}
}
},
"type": "CalculatorTool"
},
"dragging": false,
"height": 254,
"id": "CalculatorTool-dGfrj",
"position": {
"x": 3546.599894399727,
"y": 972.1522299506486
},
"positionAbsolute": {
"x": 3546.599894399727,
"y": 972.1522299506486
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"description": "Define the agent's instructions, then enter a task to complete using tools.",
"display_name": "City Selection Agent",
"id": "Agent-rPh1n",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Define the agent's instructions, then enter a task to complete using tools.",
"display_name": "City Selection Agent",
"documentation": "",
"edited": false,
"field_order": [
"agent_llm",
"max_tokens",
"model_kwargs",
"json_mode",
"output_schema",
"model_name",
"openai_api_base",
"api_key",
"temperature",
"seed",
"output_parser",
"system_prompt",
"tools",
"input_value",
"handle_parsing_errors",
"verbose",
"max_iterations",
"agent_description",
"memory",
"sender",
"sender_name",
"n_messages",
"session_id",
"order",
"template",
"add_current_date_tool"
],
"frozen": false,
"icon": "bot",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Response",
"method": "message_response",
"name": "response",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"add_current_date_tool": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Current Date",
"dynamic": false,
"info": "If true, will add a tool to the agent that returns the current date.",
"list": false,
"name": "add_current_date_tool",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"agent_description": {
"_input_type": "MultilineInput",
"advanced": true,
"display_name": "Agent Description",
"dynamic": false,
"info": "The description of the agent. This is only used when in Tool Mode. Defaults to 'A helpful assistant with access to the following tools:' and tools are added dynamically.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "agent_description",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "A helpful assistant with access to the following tools:"
},
"agent_llm": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"display_name": "Model Provider",
"dynamic": false,
"info": "The provider of the language model that the agent will use to generate responses.",
"input_types": [],
"name": "agent_llm",
"options": [
"Amazon Bedrock",
"Anthropic",
"Azure OpenAI",
"Groq",
"NVIDIA",
"OpenAI",
"Custom"
],
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "OpenAI"
},
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenAI API Key",
"dynamic": false,
"info": "The OpenAI API Key to use for the OpenAI model.",
"input_types": [
"Message"
],
"load_from_db": false,
"name": "api_key",
"password": true,
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n llm_model, display_name = self.get_llm()\n self.model_name = get_model_name(llm_model, display_name=display_name)\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.chat_history = await self.get_memory_data()\n\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise ValueError(msg)\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n raise ValueError(msg)\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = 
MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n def update_build_config(self, build_config: dotdict, field_value: str, field_name: str | None = None) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n 
\"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n return build_config\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Handle Parse Errors",
"dynamic": false,
"info": "Should the Agent fix errors when reading user input for better processing?",
"list": false,
"name": "handle_parsing_errors",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"input_value": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "The input provided by the user for the agent to process.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"json_mode": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "JSON Mode",
"dynamic": false,
"info": "If True, it will output JSON regardless of passing a schema.",
"list": false,
"name": "json_mode",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"max_iterations": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Iterations",
"dynamic": false,
"info": "The maximum number of attempts the agent can make to complete its task before it stops.",
"list": false,
"name": "max_iterations",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 15
},
"max_tokens": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Tokens",
"dynamic": false,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"list": false,
"name": "max_tokens",
"placeholder": "",
"range_spec": {
"max": 128000,
"min": 0,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"memory": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "External Memory",
"dynamic": false,
"info": "Retrieve messages from an external memory. If empty, it will use the Langflow tables.",
"input_types": [
"BaseChatMessageHistory"
],
"list": false,
"name": "memory",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"model_kwargs": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Model Kwargs",
"dynamic": false,
"info": "Additional keyword arguments to pass to the model.",
"list": false,
"name": "model_kwargs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"model_name": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"display_name": "Model Name",
"dynamic": false,
"info": "",
"name": "model_name",
"options": [
"gpt-4o-mini",
"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "gpt-4o-mini"
},
"n_messages": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Number of Messages",
"dynamic": false,
"info": "Number of messages to retrieve.",
"list": false,
"name": "n_messages",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 100
},
"openai_api_base": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "OpenAI API Base",
"dynamic": false,
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
"list": false,
"load_from_db": false,
"name": "openai_api_base",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"order": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Order",
"dynamic": false,
"info": "Order of the messages.",
"name": "order",
"options": [
"Ascending",
"Descending"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Ascending"
},
"output_parser": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "Output Parser",
"dynamic": false,
"info": "The parser to use to parse the output of the model",
"input_types": [
"OutputParser"
],
"list": false,
"name": "output_parser",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"output_schema": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Schema",
"dynamic": false,
"info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]",
"list": true,
"name": "output_schema",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"seed": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Seed",
"dynamic": false,
"info": "The seed controls the reproducibility of the job.",
"list": false,
"name": "seed",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 1
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Sender Type",
"dynamic": false,
"info": "Filter by sender type.",
"name": "sender",
"options": [
"Machine",
"User",
"Machine and User"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Machine and User"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Filter by sender name.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"system_prompt": {
"_input_type": "MultilineInput",
"advanced": false,
"display_name": "Agent Instructions",
"dynamic": false,
"info": "System Prompt: Initial instructions and context provided to guide the agent's behavior.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "system_prompt",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "You are a helpful assistant that can use tools to answer questions and perform tasks."
},
"temperature": {
"_input_type": "FloatInput",
"advanced": true,
"display_name": "Temperature",
"dynamic": false,
"info": "",
"list": false,
"name": "temperature",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "float",
"value": 0.1
},
"template": {
"_input_type": "MultilineInput",
"advanced": true,
"display_name": "Template",
"dynamic": false,
"info": "The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "{sender_name}: {text}"
},
"tools": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Tools",
"dynamic": false,
"info": "These are the tools that the agent can use to help with tasks.",
"input_types": [
"Tool",
"BaseTool",
"StructuredTool"
],
"list": true,
"name": "tools",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"verbose": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Verbose",
"dynamic": false,
"info": "",
"list": false,
"name": "verbose",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
}
},
"tool_mode": false
},
"type": "Agent"
},
"dragging": true,
"height": 650,
"id": "Agent-rPh1n",
"position": {
"x": 2472.7748760933105,
"y": 335.66187210240537
},
"positionAbsolute": {
"x": 2472.7748760933105,
"y": 335.66187210240537
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"description": "Define the agent's instructions, then enter a task to complete using tools.",
"display_name": "Local Expert Agent",
"id": "Agent-cj2PH",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Define the agent's instructions, then enter a task to complete using tools.",
"display_name": "Local Expert Agent",
"documentation": "",
"edited": false,
"field_order": [
"agent_llm",
"max_tokens",
"model_kwargs",
"json_mode",
"output_schema",
"model_name",
"openai_api_base",
"api_key",
"temperature",
"seed",
"output_parser",
"system_prompt",
"tools",
"input_value",
"handle_parsing_errors",
"verbose",
"max_iterations",
"agent_description",
"memory",
"sender",
"sender_name",
"n_messages",
"session_id",
"order",
"template",
"add_current_date_tool"
],
"frozen": false,
"icon": "bot",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Response",
"method": "message_response",
"name": "response",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"add_current_date_tool": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Current Date",
"dynamic": false,
"info": "If true, will add a tool to the agent that returns the current date.",
"list": false,
"name": "add_current_date_tool",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"agent_description": {
"_input_type": "MultilineInput",
"advanced": true,
"display_name": "Agent Description",
"dynamic": false,
"info": "The description of the agent. This is only used when in Tool Mode. Defaults to 'A helpful assistant with access to the following tools:' and tools are added dynamically.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "agent_description",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "A helpful assistant with access to the following tools:"
},
"agent_llm": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"display_name": "Model Provider",
"dynamic": false,
"info": "The provider of the language model that the agent will use to generate responses.",
"input_types": [],
"name": "agent_llm",
"options": [
"Amazon Bedrock",
"Anthropic",
"Azure OpenAI",
"Groq",
"NVIDIA",
"OpenAI",
"Custom"
],
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "OpenAI"
},
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenAI API Key",
"dynamic": false,
"info": "The OpenAI API Key to use for the OpenAI model.",
"input_types": [
"Message"
],
"load_from_db": false,
"name": "api_key",
"password": true,
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n llm_model, display_name = self.get_llm()\n self.model_name = get_model_name(llm_model, display_name=display_name)\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.chat_history = await self.get_memory_data()\n\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise ValueError(msg)\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n raise ValueError(msg)\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = 
MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n def update_build_config(self, build_config: dotdict, field_value: str, field_name: str | None = None) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n 
\"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n return build_config\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Handle Parse Errors",
"dynamic": false,
"info": "Should the Agent fix errors when reading user input for better processing?",
"list": false,
"name": "handle_parsing_errors",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"input_value": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "The input provided by the user for the agent to process.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"json_mode": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "JSON Mode",
"dynamic": false,
"info": "If True, it will output JSON regardless of passing a schema.",
"list": false,
"name": "json_mode",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"max_iterations": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Iterations",
"dynamic": false,
"info": "The maximum number of attempts the agent can make to complete its task before it stops.",
"list": false,
"name": "max_iterations",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 15
},
"max_tokens": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Tokens",
"dynamic": false,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"list": false,
"name": "max_tokens",
"placeholder": "",
"range_spec": {
"max": 128000,
"min": 0,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"memory": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "External Memory",
"dynamic": false,
"info": "Retrieve messages from an external memory. If empty, it will use the Langflow tables.",
"input_types": [
"BaseChatMessageHistory"
],
"list": false,
"name": "memory",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"model_kwargs": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Model Kwargs",
"dynamic": false,
"info": "Additional keyword arguments to pass to the model.",
"list": false,
"name": "model_kwargs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"model_name": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"display_name": "Model Name",
"dynamic": false,
"info": "",
"name": "model_name",
"options": [
"gpt-4o-mini",
"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "gpt-4o-mini"
},
"n_messages": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Number of Messages",
"dynamic": false,
"info": "Number of messages to retrieve.",
"list": false,
"name": "n_messages",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 100
},
"openai_api_base": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "OpenAI API Base",
"dynamic": false,
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
"list": false,
"load_from_db": false,
"name": "openai_api_base",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"order": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Order",
"dynamic": false,
"info": "Order of the messages.",
"name": "order",
"options": [
"Ascending",
"Descending"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Ascending"
},
"output_parser": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "Output Parser",
"dynamic": false,
"info": "The parser to use to parse the output of the model",
"input_types": [
"OutputParser"
],
"list": false,
"name": "output_parser",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"output_schema": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Schema",
"dynamic": false,
"info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]",
"list": true,
"name": "output_schema",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"seed": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Seed",
"dynamic": false,
"info": "The seed controls the reproducibility of the job.",
"list": false,
"name": "seed",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 1
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Sender Type",
"dynamic": false,
"info": "Filter by sender type.",
"name": "sender",
"options": [
"Machine",
"User",
"Machine and User"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Machine and User"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Filter by sender name.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"system_prompt": {
"_input_type": "MultilineInput",
"advanced": false,
"display_name": "Agent Instructions",
"dynamic": false,
"info": "System Prompt: Initial instructions and context provided to guide the agent's behavior.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "system_prompt",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "You are a knowledgeable Local Expert with extensive information about the selected city, its attractions, and customs. Your goal is to provide the BEST insights about the city. Compile an in-depth guide for travelers, including key attractions, local customs, special events, and daily activity recommendations. Focus on hidden gems and local hotspots. Your final output should be a comprehensive city guide, rich in cultural insights and practical tips."
},
"temperature": {
"_input_type": "FloatInput",
"advanced": true,
"display_name": "Temperature",
"dynamic": false,
"info": "",
"list": false,
"name": "temperature",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "float",
"value": 0.1
},
"template": {
"_input_type": "MultilineInput",
"advanced": true,
"display_name": "Template",
"dynamic": false,
"info": "The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "{sender_name}: {text}"
},
"tools": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Tools",
"dynamic": false,
"info": "These are the tools that the agent can use to help with tasks.",
"input_types": [
"Tool",
"BaseTool",
"StructuredTool"
],
"list": true,
"name": "tools",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"verbose": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Verbose",
"dynamic": false,
"info": "",
"list": false,
"name": "verbose",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
}
},
"tool_mode": false
},
"type": "Agent"
},
"dragging": false,
"height": 650,
"id": "Agent-cj2PH",
"position": {
"x": 3185.66991544494,
"y": 355.95841004876377
},
"positionAbsolute": {
"x": 3185.66991544494,
"y": 355.95841004876377
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"description": "Define the agent's instructions, then enter a task to complete using tools.",
"display_name": "Travel Concierge Agent",
"id": "Agent-ImgzA",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Define the agent's instructions, then enter a task to complete using tools.",
"display_name": "Travel Concierge Agent",
"documentation": "",
"edited": false,
"field_order": [
"agent_llm",
"max_tokens",
"model_kwargs",
"json_mode",
"output_schema",
"model_name",
"openai_api_base",
"api_key",
"temperature",
"seed",
"output_parser",
"system_prompt",
"tools",
"input_value",
"handle_parsing_errors",
"verbose",
"max_iterations",
"agent_description",
"memory",
"sender",
"sender_name",
"n_messages",
"session_id",
"order",
"template",
"add_current_date_tool"
],
"frozen": false,
"icon": "bot",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Response",
"method": "message_response",
"name": "response",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"add_current_date_tool": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Current Date",
"dynamic": false,
"info": "If true, will add a tool to the agent that returns the current date.",
"list": false,
"name": "add_current_date_tool",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"agent_description": {
"_input_type": "MultilineInput",
"advanced": true,
"display_name": "Agent Description",
"dynamic": false,
"info": "The description of the agent. This is only used when in Tool Mode. Defaults to 'A helpful assistant with access to the following tools:' and tools are added dynamically.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "agent_description",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "A helpful assistant with access to the following tools:"
},
"agent_llm": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"display_name": "Model Provider",
"dynamic": false,
"info": "The provider of the language model that the agent will use to generate responses.",
"input_types": [],
"name": "agent_llm",
"options": [
"Amazon Bedrock",
"Anthropic",
"Azure OpenAI",
"Groq",
"NVIDIA",
"OpenAI",
"Custom"
],
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "OpenAI"
},
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenAI API Key",
"dynamic": false,
"info": "The OpenAI API Key to use for the OpenAI model.",
"input_types": [
"Message"
],
"load_from_db": false,
"name": "api_key",
"password": true,
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n llm_model, display_name = self.get_llm()\n self.model_name = get_model_name(llm_model, display_name=display_name)\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.chat_history = await self.get_memory_data()\n\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise ValueError(msg)\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n raise ValueError(msg)\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = 
MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n def update_build_config(self, build_config: dotdict, field_value: str, field_name: str | None = None) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n 
\"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = component_class.update_build_config(build_config, field_value, field_name)\n\n return build_config\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Handle Parse Errors",
"dynamic": false,
"info": "Should the Agent fix errors when reading user input for better processing?",
"list": false,
"name": "handle_parsing_errors",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"input_value": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "The input provided by the user for the agent to process.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"json_mode": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "JSON Mode",
"dynamic": false,
"info": "If True, it will output JSON regardless of passing a schema.",
"list": false,
"name": "json_mode",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"max_iterations": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Iterations",
"dynamic": false,
"info": "The maximum number of attempts the agent can make to complete its task before it stops.",
"list": false,
"name": "max_iterations",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 15
},
"max_tokens": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Tokens",
"dynamic": false,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"list": false,
"name": "max_tokens",
"placeholder": "",
"range_spec": {
"max": 128000,
"min": 0,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"memory": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "External Memory",
"dynamic": false,
"info": "Retrieve messages from an external memory. If empty, it will use the Langflow tables.",
"input_types": [
"BaseChatMessageHistory"
],
"list": false,
"name": "memory",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"model_kwargs": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Model Kwargs",
"dynamic": false,
"info": "Additional keyword arguments to pass to the model.",
"list": false,
"name": "model_kwargs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"model_name": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"display_name": "Model Name",
"dynamic": false,
"info": "",
"name": "model_name",
"options": [
"gpt-4o-mini",
"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "gpt-4o-mini"
},
"n_messages": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Number of Messages",
"dynamic": false,
"info": "Number of messages to retrieve.",
"list": false,
"name": "n_messages",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 100
},
"openai_api_base": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "OpenAI API Base",
"dynamic": false,
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
"list": false,
"load_from_db": false,
"name": "openai_api_base",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"order": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Order",
"dynamic": false,
"info": "Order of the messages.",
"name": "order",
"options": [
"Ascending",
"Descending"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Ascending"
},
"output_parser": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "Output Parser",
"dynamic": false,
"info": "The parser to use to parse the output of the model",
"input_types": [
"OutputParser"
],
"list": false,
"name": "output_parser",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"output_schema": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Schema",
"dynamic": false,
"info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]",
"list": true,
"name": "output_schema",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"seed": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Seed",
"dynamic": false,
"info": "The seed controls the reproducibility of the job.",
"list": false,
"name": "seed",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 1
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Sender Type",
"dynamic": false,
"info": "Filter by sender type.",
"name": "sender",
"options": [
"Machine",
"User",
"Machine and User"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Machine and User"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Filter by sender name.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"system_prompt": {
"_input_type": "MultilineInput",
"advanced": false,
"display_name": "Agent Instructions",
"dynamic": false,
"info": "System Prompt: Initial instructions and context provided to guide the agent's behavior.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "system_prompt",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "You are an Amazing Travel Concierge, a specialist in travel planning and logistics with decades of experience. Your goal is to create the most amazing travel itineraries with budget and packing suggestions for the city. Expand the city guide into a full 7-day travel itinerary with detailed per-day plans. Include weather forecasts, places to eat, packing suggestions, and a budget breakdown. Suggest actual places to visit, hotels to stay, and restaurants to go to. Your final output should be a complete expanded travel plan, formatted as markdown, encompassing a daily schedule, anticipated weather conditions, recommended clothing and items to pack, and a detailed budget."
},
"temperature": {
"_input_type": "FloatInput",
"advanced": true,
"display_name": "Temperature",
"dynamic": false,
"info": "",
"list": false,
"name": "temperature",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "float",
"value": 0.1
},
"template": {
"_input_type": "MultilineInput",
"advanced": true,
"display_name": "Template",
"dynamic": false,
"info": "The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "{sender_name}: {text}"
},
"tools": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Tools",
"dynamic": false,
"info": "These are the tools that the agent can use to help with tasks.",
"input_types": [
"Tool",
"BaseTool",
"StructuredTool"
],
"list": true,
"name": "tools",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"verbose": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Verbose",
"dynamic": false,
"info": "",
"list": false,
"name": "verbose",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
}
},
"tool_mode": false
},
"type": "Agent"
},
"dragging": false,
"height": 650,
"id": "Agent-ImgzA",
"position": {
"x": 3889.695953842898,
"y": 370.3161168611889
},
"positionAbsolute": {
"x": 3889.695953842898,
"y": 370.3161168611889
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"id": "note-jCp1N",
"node": {
"description": "# Travel Planning Agents \n\nThe travel planning system is a smart setup that uses several specialized \"agents\" to help plan incredible trips. Imagine each agent as a travel expert focusing on a part of your journey. Here's how it works:\n\n- **User-Friendly Start:** You start by telling the system about your travel needs—where you want to go and what you love to do.\n\n- **Data Collection:** The agents uses its tools to gather current info about various destinations, like the best travel times, weather, and costs.\n\n- **Three Key Agents:**\n - **City Selection Agent:** Picks the best places to visit based on your likes and current data.\n - **Local Expert Agent:** Gathers interesting details about what to do and see in the chosen city.\n - **Travel Concierge Agent:** Builds a day-by-day plan that includes where to stay, eat, and explore!\n\n- **Tools and Data:** Each agent uses tools to find and organize the latest information so you get recommendations that are both accurate and exciting.\n\n- **Final Plan:** Once everything is put together, you receive a complete, easy-to-follow travel itinerary, perfect for your adventure!\n",
"display_name": "",
"documentation": "",
"template": {}
},
"type": "note"
},
"dragging": false,
"height": 636,
"id": "note-jCp1N",
"position": {
"x": 1076.3710803600266,
"y": 92.06058855045646
},
"positionAbsolute": {
"x": 1076.3710803600266,
"y": 92.06058855045646
},
"resizing": false,
"selected": false,
"style": {
"height": 636,
"width": 600
},
"type": "noteNode",
"width": 600
},
{
"data": {
"id": "note-jgIF0",
"node": {
"description": "# **City Selection Agent**\n - **Purpose:** This agent evaluates potential travel destinations based on user input and external data sources.\n - **Core Functions:** Analyzes factors such as weather, local events, and travel costs to recommend optimal cities.\n - **Tools Utilized:** Employs APIs and data-fetching tools to gather real-time information for decision-making.\n",
"display_name": "",
"documentation": "",
"template": {
"backgroundColor": "blue"
}
},
"type": "note"
},
"dragging": false,
"height": 362,
"id": "note-jgIF0",
"position": {
"x": 2122.4146132377227,
"y": 485.2212661145467
},
"positionAbsolute": {
"x": 2122.4146132377227,
"y": 485.2212661145467
},
"resizing": false,
"selected": false,
"style": {
"height": 362,
"width": 331
},
"type": "noteNode",
"width": 331
},
{
"data": {
"id": "note-NTTln",
"node": {
"description": "# **Local Expert Agent**\n - **Purpose:** Focused on gathering and providing an in-depth guide to the selected city.\n - **Core Functions:** Compiles insights into cultural attractions, local customs, and unique experiences.\n - **Tools Utilized:** Uses web content fetchers and data APIs to collect detailed local insights and enhance the user understanding with hidden gems.\n",
"display_name": "",
"documentation": "",
"template": {
"backgroundColor": "blue"
}
},
"type": "note"
},
"dragging": false,
"height": 366,
"id": "note-NTTln",
"position": {
"x": 2827.660803823376,
"y": 488.6092281195304
},
"positionAbsolute": {
"x": 2827.660803823376,
"y": 488.6092281195304
},
"resizing": false,
"selected": false,
"style": {
"height": 366,
"width": 351
},
"type": "noteNode",
"width": 351
},
{
"data": {
"id": "note-45aOQ",
"node": {
"description": "# **Travel Concierge Agent**\n - **Purpose:** Crafts detailed travel itineraries that are customized to the traveler's interests and needs.\n - **Core Functions:** Offers a comprehensive daily schedule, including accommodations, dining spots, and activities.\n - **Tools Utilized:** Integrates calculators and data tools for accurate budget planning and itinerary logistics.",
"display_name": "",
"documentation": "",
"template": {
"backgroundColor": "blue"
}
},
"type": "note"
},
"dragging": false,
"height": 344,
"id": "note-45aOQ",
"position": {
"x": 3536.084279543714,
"y": 496.3155992003396
},
"positionAbsolute": {
"x": 3536.084279543714,
"y": 496.3155992003396
},
"resizing": false,
"selected": false,
"style": {
"height": 344,
"width": 344
},
"type": "noteNode",
"width": 344
},
{
"data": {
"id": "note-elTLU",
"node": {
"description": "## Configure the agent by obtaining your OpenAI API key from [platform.openai.com](https://platform.openai.com). Under \"Model Provider\", choose:\n- OpenAI: Default, requires only API key\n- Anthropic/Azure/Groq/NVIDIA: Each requires their own API keys\n- Custom: Use your own model endpoint + authentication\n\nSelect model and input API key before running the flow.",
"display_name": "",
"documentation": "",
"template": {
"backgroundColor": "rose"
}
},
"type": "note"
},
"dragging": false,
"height": 325,
"id": "note-elTLU",
"position": {
"x": 2463.3881993480218,
"y": 42.83594355441298
},
"positionAbsolute": {
"x": 2463.3881993480218,
"y": 42.83594355441298
},
"selected": false,
"type": "noteNode",
"width": 325
}
],
"viewport": {
"x": -1078.5758749396496,
"y": -166.63499501100648,
"zoom": 0.6513143480813044
}
},
"description": "starterProjects.travelPlanning.description",
"endpoint_name": null,
"gradient": "0",
"icon": "Plane",
"id": "d6d33090-44c4-4a4b-8d06-c93fcf426446",
"is_component": false,
"last_tested_version": "1.0.19.post2",
"name": "starterProjects.travelPlanning.name",
"tags": [
"agents",
"openai"
]
}