Aroyzz-o6 committed on
Commit
77e12bf
·
verified ·
1 Parent(s): 4b6d634

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +242 -113
app.py CHANGED
@@ -1,138 +1,267 @@
1
  from dotenv import load_dotenv
 
2
  import google.generativeai as genai
 
3
  import json
 
4
  import os
 
5
  import requests
 
6
  from pypdf import PdfReader
 
7
  import gradio as gr
 
8
  import re
9
 
10
- # The faulty import has been removed from here
11
 
12
  load_dotenv(override=True)
 
13
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
14
 
15
- # ... (the rest of your functions like push, record_user_details, etc. remain the same) ...
 
16
  def push(text):
17
- requests.post(
18
- "https://api.pushover.net/1/messages.json",
19
- data={
20
- "token": os.getenv("PUSHOVER_TOKEN"),
21
- "user": os.getenv("PUSHOVER_USER"),
22
- "message": text,
23
- }
24
- )
 
 
 
 
 
 
 
 
 
 
25
 
26
  def record_user_details(email, name="Name not provided", notes="not provided"):
27
- push(f"Recording interest from {name} with email {email} and notes {notes}")
28
- return {"recorded": "ok"}
 
 
 
 
29
 
30
  def record_unknown_question(question):
31
- push(f"Recording {question} asked that I couldn't answer")
32
- return {"recorded": "ok"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  tools = {
35
- "function_declarations": [
36
- {"name": "record_user_details", "description": "Record that a user is interested in being in touch and provided an email address."},
37
- {"name": "record_unknown_question", "description": "Record any question that couldn't be answered as you didn't know the answer."}
38
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  }
40
 
 
 
 
 
 
 
41
  class Me:
42
- def __init__(self):
43
- self.model = genai.GenerativeModel("gemini-1.5-flash")
44
- self.name = "Aritra Roy"
45
- # ... (Your __init__ logic remains the same) ...
46
- reader = PdfReader("me/linkedin.pdf")
47
- self.linkedin = ""
48
- for page in reader.pages:
49
- text = page.extract_text()
50
- if text:
51
- self.linkedin += text
52
- with open("me/summary.txt", "r", encoding="utf-8") as f:
53
- self.summary = f.read()
54
-
55
- def handle_tool_call(self, tool_call):
56
- # ... (Your handle_tool_call logic remains the same) ...
57
- tool_name = tool_call.name
58
- arguments = tool_call.args
59
- print(f"Tool called: {tool_name} with args {arguments}", flush=True)
60
- if tool_name == "record_user_details":
61
- result = record_user_details(**arguments)
62
- elif tool_name == "record_unknown_question":
63
- result = record_unknown_question(**arguments)
64
- else:
65
- result = {}
66
- return {"role": "tool", "parts": [json.dumps(result)], "tool_call_id": tool_call.name + "_id"}
67
-
68
-
69
- def system_prompt(self):
70
- # ... (Your system_prompt logic remains the same) ...
71
- prompt = f"You are acting as {self.name}. You are answering questions on {self.name}'s website, At no point you can say you are an AI assistant. YOU ARE {self.name}, ANSWERING TO POTENTIAL CLIENTS OR HIRERS." \
72
- f"particularly questions related to {self.name}'s career, background, skills and experience. " \
73
- f"Be professional and engaging. If you don't know the answer to any question, under no circumstances you can make up an answer, use your " \
74
- f"record_unknown_question tool. If the user shares their email or wants to connect, use " \
75
- f"record_user_details to record it."
76
- prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
77
- prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
78
- return prompt
79
-
80
- def chat(self, message, history):
81
- # Using the more efficient history management
82
- if not history:
83
- messages = [{"role": "user", "parts": [self.system_prompt()]}]
84
- else:
85
- messages = []
86
- for user_msg, model_msg in history:
87
- messages.append({"role": "user", "parts": [user_msg]})
88
- messages.append({"role": "model", "parts": [model_msg]})
89
- messages.append({"role": "user", "parts": [message]})
90
-
91
- done = False
92
- while not done:
93
- response = self.model.generate_content(messages, tools=tools)
94
- candidate = response.candidates[0]
95
- finish_reason = candidate.finish_reason
96
- if finish_reason == "TOOL_USE":
97
- tool_call = candidate.content.parts[0].function_call
98
- result = self.handle_tool_call(tool_call)
99
- messages.append({"role": "model", "parts": [candidate.content.parts[0]]})
100
- messages.append(result)
101
- else:
102
- done = True
103
- return candidate.content.parts[0].text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
  if __name__ == "__main__":
106
- me = Me()
107
-
108
- # Your theme definition remains the same
109
- theme = gr.themes.Default(
110
- primary_hue="teal",
111
- font=gr.themes.GoogleFont("Inter")
112
- ).set(
113
- body_background_fill="#1a202c",
114
- body_background_fill_dark="#1a202c",
115
- button_primary_background_fill="#38b2ac",
116
- button_primary_background_fill_dark="#38b2ac",
117
- )
118
-
119
- # --- KEY CHANGE: Add Custom CSS to remove padding ---
120
- # This CSS will be injected into the Gradio app to override its styles.
121
- custom_css = """
122
- /* Remove the outer padding from the main Gradio container */
123
- .gradio-container { padding: 0 !important; }
124
- /* Ensure the chat window takes up the full available height */
125
- #chatbot { height: 100% !important; }
126
- """
127
-
128
- # Launch the UI, passing in the new custom CSS
129
- with gr.Blocks(theme=theme, css=custom_css) as demo:
130
- # We define the Chatbot component separately to give it an ID
131
- chatbot = gr.Chatbot(
132
- elem_id="chatbot", # Assign an ID for CSS targeting
133
- height=600 # Set a base height
134
- )
135
- # Then we pass the chatbot component to the ChatInterface
136
- gr.ChatInterface(me.chat, chatbot=chatbot)
137
-
138
- demo.launch()
 
1
  from dotenv import load_dotenv
2
+
3
  import google.generativeai as genai
4
+
5
  import json
6
+
7
  import os
8
+
9
  import requests
10
+
11
  from pypdf import PdfReader
12
+
13
  import gradio as gr
14
+
15
  import re
16
 
17
+
18
 
19
  load_dotenv(override=True)
20
+
21
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
22
 
23
+
24
+
25
def push(text):
    """Send *text* as a push notification through the Pushover API.

    Credentials are read from the PUSHOVER_TOKEN / PUSHOVER_USER
    environment variables.
    """
    payload = {
        "token": os.getenv("PUSHOVER_TOKEN"),
        "user": os.getenv("PUSHOVER_USER"),
        "message": text,
    }
    requests.post("https://api.pushover.net/1/messages.json", data=payload)
42
+
43
+
44
 
45
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Report a visitor's contact details via push notification.

    Returns a small status dict so the tool-calling loop has a result
    to hand back to the model.
    """
    note = f"Recording interest from {name} with email {email} and notes {notes}"
    push(note)
    return {"recorded": "ok"}
50
+
51
+
52
 
53
def record_unknown_question(question):
    """Log a question the persona could not answer, via push notification."""
    note = f"Recording {question} asked that I couldn't answer"
    push(note)
    return {"recorded": "ok"}
58
+
59
+
60
+
61
# Standalone schema description for the record_user_details tool.
# NOTE(review): not referenced by the `tools` dict below — confirm whether
# it is still needed or should be wired in.
record_user_details_json = {
    "name": "record_user_details",
    "description": (
        "Record that a user is interested in being in touch and provided an "
        "email address. Expects a JSON object: { 'email': string, 'name': "
        "string (optional), 'notes': string (optional) }"
    ),
}
68
+
69
+
70
+
71
# Standalone schema description for the record_unknown_question tool.
# NOTE(review): not referenced by the `tools` dict below — confirm whether
# it is still needed or should be wired in.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": (
        "Record any question that couldn't be answered. "
        "Expects a JSON object: { 'question': string }"
    ),
}
78
+
79
+
80
 
81
# Gemini tool configuration: the function declarations the model may call.
# BUGFIX: each declaration now carries a `parameters` schema (OpenAPI-style,
# per the Gemini function-calling format). Without it the model has no way
# to know which arguments to supply, and record_user_details(**{}) would
# fail on its required `email` parameter. The dangling empty dict entries
# from the previous version are removed.
tools = {
    "function_declarations": [
        {
            "name": "record_user_details",
            "description": "Record that a user is interested in being in touch and provided an email address.",
            "parameters": {
                "type": "OBJECT",
                "properties": {
                    "email": {
                        "type": "STRING",
                        "description": "The user's email address",
                    },
                    "name": {
                        "type": "STRING",
                        "description": "The user's name, if they provided it",
                    },
                    "notes": {
                        "type": "STRING",
                        "description": "Any additional context worth recording",
                    },
                },
                "required": ["email"],
            },
        },
        {
            "name": "record_unknown_question",
            "description": "Record any question that couldn't be answered as you didn't know the answer.",
            "parameters": {
                "type": "OBJECT",
                "properties": {
                    "question": {
                        "type": "STRING",
                        "description": "The question that could not be answered",
                    },
                },
                "required": ["question"],
            },
        },
    ]
}
108
 
109
+
110
+
111
+
112
+
113
+
114
+
115
class Me:
    """Chat persona that answers website questions as Aritra Roy.

    Loads profile context (LinkedIn PDF export + summary text) at
    construction time and drives a Gemini chat loop that can invoke the
    module-level recording tools.
    """

    def __init__(self):
        # Gemini model used for all completions.
        self.model = genai.GenerativeModel("gemini-2.0-flash")
        self.name = "Aritra Roy"
        # Concatenate text from every page of the LinkedIn export.
        reader = PdfReader("me/linkedin.pdf")
        self.linkedin = ""
        for page in reader.pages:
            text = page.extract_text()
            if text:  # extract_text() can return None for image-only pages
                self.linkedin += text
        with open("me/summary.txt", "r", encoding="utf-8") as f:
            self.summary = f.read()

    def handle_tool_call(self, tool_call):
        """Dispatch a Gemini function call to the matching local tool.

        Returns a conversation message carrying the tool result as a
        FunctionResponse part, ready to append to the message list.
        """
        tool_name = tool_call.name
        arguments = dict(tool_call.args)
        print(f"Tool called: {tool_name} with args {arguments}", flush=True)
        if tool_name == "record_user_details":
            result = record_user_details(**arguments)
        elif tool_name == "record_unknown_question":
            result = record_unknown_question(**arguments)
        else:
            # Unknown tool name: report an empty result rather than crash.
            result = {}
        # BUGFIX: Gemini only accepts roles "user"/"model"/"function"; the
        # previous {"role": "tool", "parts": [json-string]} shape (plus a
        # non-standard "tool_call_id" key) is rejected by the API. Tool
        # output must go back as a FunctionResponse part.
        return {
            "role": "function",
            "parts": [
                genai.protos.Part(
                    function_response=genai.protos.FunctionResponse(
                        name=tool_name,
                        response={"result": result},
                    )
                )
            ],
        }

    def system_prompt(self):
        """Build the persona system prompt from name, summary and LinkedIn text."""
        prompt = f"You are acting as {self.name}. You are answering questions on {self.name}'s website, At no point you can say you are an AI assistant. YOU ARE {self.name}, ANSWERING TO POTENTIAL CLIENTS OR HIRERS." \
                 f"particularly questions related to {self.name}'s career, background, skills and experience. " \
                 f"Be professional and engaging. If you don't know the answer to any question, under no circumstances you can make up an answer, use your " \
                 f"record_unknown_question tool. If the user shares their email or wants to connect, use " \
                 f"record_user_details to record it."
        prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
        prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
        return prompt

    def chat(self, message, history):
        """Gradio ChatInterface callback: return the reply to *message*.

        *history* is a list of {"role": ..., "content": ...} dicts
        (ChatInterface type="messages").
        """
        messages = [{"role": "user", "parts": [self.system_prompt()]}]
        for msg in history:
            # BUGFIX: the previous loop replayed every turn as role "user",
            # discarding the model's side of the conversation. Map Gradio's
            # "assistant" role onto Gemini's "model".
            role = "model" if msg["role"] == "assistant" else "user"
            messages.append({"role": role, "parts": [msg["content"]]})
        messages.append({"role": "user", "parts": [message]})

        # Hardcoded fallback: record details directly when the user shares
        # an email address or introduces themselves by name.
        email_match = re.search(
            r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b", message
        )
        if email_match or "my name" in message.lower():
            print("User provided email or name, forcing tool fallback...")
            # BUGFIX: when the regex matched, record the matched address
            # instead of always storing the entire message as the email.
            record_user_details(
                email=email_match.group(0) if email_match else message
            )
            push(f"User details recorded: {message}")
            return "I've recorded your details for review."

        done = False
        while not done:
            response = self.model.generate_content(messages, tools=tools)
            candidate = response.candidates[0]
            part = candidate.content.parts[0]
            # BUGFIX: finish_reason is an enum and never equals the string
            # "TOOL_USE", so tool calls were silently never executed.
            # Detect a populated function_call part instead.
            function_call = getattr(part, "function_call", None)
            if function_call and function_call.name:
                result = self.handle_tool_call(function_call)
                messages.append({"role": "model", "parts": [part]})
                messages.append(result)
            else:
                done = True
        return part.text
258
+
259
+
260
+
261
+
262
 
263
if __name__ == "__main__":
    # Build the persona and serve it through a Gradio chat UI.
    me = Me()
    interface = gr.ChatInterface(me.chat, type="messages")
    interface.launch()