|
import base64 |
|
from mimetypes import guess_type |
|
from dotenv import load_dotenv |
|
from typing import TypedDict, Annotated, List |
|
from langgraph.graph.message import add_messages |
|
from langchain_core.messages import AnyMessage |
|
from langchain_openai import ChatOpenAI |
|
from langgraph.prebuilt import ToolNode |
|
from langgraph.graph import START, StateGraph |
|
from langgraph.prebuilt import tools_condition |
|
from langchain_tavily import TavilySearch |
|
from langchain_community.tools import RequestsGetTool |
|
from langchain_community.utilities.requests import TextRequestsWrapper |
|
from openai import OpenAI, audio |
|
import pandas as pd |
|
from langchain_experimental.tools.python.tool import PythonREPLTool |
|
|
|
load_dotenv() |
|
|
|
|
|
# Candidate OpenAI model identifiers; exactly one is selected below.
gpt1 = 'gpt-4o'

gpt2 = 'gpt-4.1-2025-04-14'

gpt3 = 'o3-mini'

# Chat model used by the agent; switch by pointing at gpt1/gpt2/gpt3.
model = ChatOpenAI(model=gpt3)
|
|
|
def integer_comparison(numb1: int, numb2: int) -> int:
    """
    Three-way comparison of two integers.

    Given input parameters
    * numb1: an integer number,
    * numb2: an integer number,
    This function returns
    * 0 if numb1 is equal to numb2
    * 1 if numb1 is strictly bigger than numb2
    * -1 if numb1 is strictly smaller than numb2
    """
    # Guard-clause form: handle both strict orderings first,
    # leaving equality as the fall-through case.
    if numb1 > numb2:
        return 1
    if numb1 < numb2:
        return -1
    return 0
|
|
|
def local_image_to_data_url(image_path: str) -> str:
    """Read a local image file and return it encoded as a base64 data URL."""
    # Fall back to a generic binary MIME type when the extension is unknown.
    guessed, _ = guess_type(image_path)
    mime = guessed or "application/octet-stream"

    with open(image_path, "rb") as img:
        payload = base64.b64encode(img.read()).decode("utf-8")

    return f"data:{mime};base64,{payload}"
|
|
|
def describe_a_photo(file: str) -> str:
    """
    Given input parameters
    * file: file name of an image (located under assets/) to be described in detail,
    This function returns
    * A string containing the description of the image
    """
    data_url = local_image_to_data_url(f"assets/{file}")
    client = OpenAI()
    # Fix: every element of a multimodal `content` list must be a typed part.
    # The original passed a bare string, which the Chat Completions API
    # rejects; the prompt text is now wrapped as a {"type": "text"} part.
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Describe what you see in this image:",
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": data_url,
                        "detail": "auto",
                    },
                },
            ],
        }
    ]
    resp = client.chat.completions.create(model="gpt-4o", messages=messages)
    return resp.choices[0].message.content
|
|
|
def transcript_an_audio(file: str) -> str:
    """
    Given input parameters
    * file: file name of an audio (located under assets/) to be transcripted
    This function returns
    * A string containing the transcription of the audio file
    """
    # Fix: the transcription endpoint lives on a client instance.
    # The original called `audio.transcriptions.create` on the bare
    # `openai.audio` module (imported at the top of the file), which
    # exposes no such callable; requests must go through OpenAI().
    client = OpenAI()
    with open(f"assets/{file}", "rb") as audio_file:
        resp = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
        )
    return resp.text
|
|
|
def read_an_excel(file: str) -> str:
    """
    Given input parameters
    * file: file name of an excel file (located under assets/) to be attached
    This function returns
    * A string containing the excel rows as text using json format
    """
    df = pd.read_excel(f"assets/{file}")
    # Fix: str(list-of-dicts) produces a Python repr (single quotes,
    # Timestamp(...) objects) rather than the JSON the docstring promises;
    # to_json emits actual record-oriented JSON.
    return df.to_json(orient="records")
|
|
|
def load_python_script(file: str) -> str:
    """
    Given input parameters
    * file: file name of a python script file (located under assets/) to be executed
    This function returns
    * A string containing the file content, the python script
    """
    # Fix: the original read bytes and returned str(data), which yields the
    # bytes repr ("b'...'" with escaped newlines) rather than the script
    # source. Read as UTF-8 text and return the content directly.
    with open(f"assets/{file}", "r", encoding="utf-8") as f:
        return f.read()
|
|
|
|
|
# Plain-text HTTP wrapper backing the page-visit tool below.
requests_wrapper = TextRequestsWrapper()


# Tool letting the agent fetch the raw content of a URL via HTTP GET.
# NOTE(review): allow_dangerous_requests=True allows the LLM to request
# arbitrary URLs (including internal/localhost addresses) — confirm this
# SSRF exposure is acceptable for the deployment environment.
visit_tool = RequestsGetTool(
    requests_wrapper=requests_wrapper,
    allow_dangerous_requests=True
)
|
|
|
|
|
|
|
# Tool belt handed to the model: web search, raw URL fetching, the local
# helper functions defined above (bound as tools from their docstrings),
# and a Python REPL for ad-hoc code execution.
tools = [TavilySearch(max_results=5),
         visit_tool,
         integer_comparison,
         describe_a_photo,
         transcript_an_audio,
         read_an_excel,
         load_python_script,
         PythonREPLTool()]


# Chat model wired to emit structured tool calls for any tool above.
llm_with_tools = model.bind_tools(tools)
|
|
|
|
|
|
|
class AgentState(TypedDict):
    # Conversation history; the add_messages reducer appends/merges new
    # messages on each graph step instead of overwriting the list.
    messages: Annotated[List[AnyMessage], add_messages]
|
|
|
def assistant(state: AgentState):
    """Single LLM step: run the tool-bound model over the conversation so far."""
    reply = llm_with_tools.invoke(state["messages"])
    # add_messages merges this single-element list into the state.
    return {"messages": [reply]}
|
|
|
def create_and_compile_oai_agent():
    """
    Build and compile the tool-using agent graph.

    Returns
    * A compiled LangGraph runnable implementing an assistant/tools loop:
      the assistant node invokes the tool-bound model, tools_condition
      routes to the tool executor whenever the reply contains tool calls,
      and control returns to the assistant after each tool run.
    """
    from openai import OpenAI
    import os

    # Fail fast on missing/invalid credentials before wiring the graph.
    # NOTE(review): the original bound this to an unused `models` local;
    # the call is retained purely as a connectivity/credential check —
    # confirm it is wanted, otherwise drop it to avoid a network round-trip.
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
    client.models.list()

    builder = StateGraph(AgentState)

    # Nodes: one LLM step, one tool executor.
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    # Edges: start at the assistant; branch to "tools" when the last
    # message carries tool calls (tools_condition), otherwise terminate;
    # each tool run hands control back to the assistant.
    builder.add_edge(START, "assistant")
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")
    return builder.compile()
|
|
|
|