"""WhatsApp-reply helpers plus a smolagents CodeAgent wired to an event-creation tool."""

import os

import yaml  # BUG FIX: yaml.safe_load was called below but yaml was never imported (NameError)
from huggingface_hub import InferenceClient
from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    HfApiModel,
    InferenceClientModel,
    tool,
)

from tools.final_answer import FinalAnswerTool

# SECURITY(review): a live-looking Nebius API credential was committed in source.
# Rotate this key and supply it via the NEBIUS_API_KEY environment variable; the
# literal is retained only as a backward-compatible fallback for existing setups.
nebius = os.environ.get(
    "NEBIUS_API_KEY",
    'eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDExNTU5MzMyNDgzNjQxMjMzNjgyMyIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTkwNDI5MjA3MSwidXVpZCI6IjNjNWRhMzFlLWU5MmQtNDhlMy04YTY3LTZhODBmMTU3OTE0MiIsIm5hbWUiOiJXYWdlbnQiLCJleHBpcmVzX2F0IjoiMjAzMC0wNS0wNlQxMDowMToxMSswMDAwIn0.4Oqy2ACWBTdKuq1yOlCW96w93791Og_AYnrhgbqp49g',
)

# Hugging Face token for the hf-inference provider (None when unset).
HF_TOKEN = os.environ.get("HF_TOKEN")


def tt(msg):
    """Generate a short reply to a WhatsApp message via raw text generation.

    Args:
        msg: incoming WhatsApp message text.

    Returns:
        The generated reply (capped at 10 new tokens).
    """
    client = InferenceClient(
        provider="hf-inference",
        api_key=HF_TOKEN,
    )
    return client.text_generation(
        "reply to the following whatsapp message: " + msg,
        max_new_tokens=10,
        model="Qwen/QwQ-32B",
    )


def tc(msg):
    """Answer *msg* through the chat-completion endpoint.

    Args:
        msg: user message content.

    Returns:
        The assistant's reply text (at most 30 tokens).
    """
    client = InferenceClient(
        provider="hf-inference",
        api_key=HF_TOKEN,
    )
    messages = [{"role": "user", "content": msg}]
    result = client.chat_completion(
        messages,
        max_tokens=30,
        model="Qwen/Qwen2.5-Coder-32B-Instruct",
    )
    return result.choices[0].message.content


def name_date(n, d):
    """Pair an event name with its date: return [n, d]."""
    return [n, d]


@tool
def create_event_tool(title: str, date_time: str) -> str:
    """
    Returns the created event.
    Args:
        title: the name of the event you want to create.
        date_time: the date and time of the event.
    """
    # BUG FIX: the Args description previously read "the date and time for which
    # you want the report" (copy-pasted from a weather tool). smolagents exposes
    # this docstring to the agent LLM as the tool spec, so the wrong wording
    # actively misled tool selection.
    return str(name_date(title, date_time))


final_answer = FinalAnswerTool()

# Nebius-hosted Qwen coder model; low temperature for more deterministic tool calls.
model = InferenceClientModel(
    provider="nebius",
    model_id="Qwen/Qwen2.5-Coder-7B-Instruct",
    token=nebius,
    max_tokens=2096,
    temperature=0.1,
)

# Load the agent's system prompt templates from prompts.yaml.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[final_answer, create_event_tool],  # add your tools here (don't remove final_answer)
    max_steps=4,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,  # pass the system prompt to CodeAgent
)


def ag(msg):
    """Run the agent on *msg* and return its result."""
    return agent.run(msg)