Commit 552d19a

1 Parent(s): 7ae5410

app.py CHANGED
@@ -780,7 +780,6 @@ class GaiaLevel1Agent:

        if genai and GOOGLE_GEMINI_API_KEY:
            try:
-                # Corrected: Initialize client explicitly with the GOOGLE_GEMINI_API_KEY
                self.genai_client = genai.Client(api_key=GOOGLE_GEMINI_API_KEY)
                gaia_logger.info(f"Google GenAI Client initialized successfully with GOOGLE_GEMINI_API_KEY. Will use model '{self.llm_model_name}'.")
            except Exception as e:
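
Note: the hunk above only drops a stale comment; the explicit genai.Client(api_key=...) initialization is unchanged. For readers unfamiliar with the google-genai SDK, a minimal, illustrative sketch of that initialization pattern follows. The model id and prompt are placeholders, not values taken from this commit.

import os
from google import genai

# Explicit client construction with an API key read from the environment,
# mirroring the pattern used in the agent code above.
client = genai.Client(api_key=os.environ["GOOGLE_GEMINI_API_KEY"])
response = client.models.generate_content(
    model="gemini-2.0-flash",              # placeholder model id
    contents="Reply with a single word.",  # placeholder prompt
)
print(response.text)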
@@ -1150,8 +1149,13 @@ class GaiaLevel1Agent:


        try:
-
-
+            # Corrected: Define generation and safety settings directly for GenerateContentConfig
+            # generation_config parameters
+            temp = 0.1
+            top_p_val = 0.95
+            max_tokens = 1024
+
+            # safety_settings parameters
            current_safety_settings_list_of_dicts = [
                {"category": types.HarmCategory.HARM_CATEGORY_HARASSMENT, "threshold": types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE},
                {"category": types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, "threshold": types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE},

@@ -1160,9 +1164,11 @@ class GaiaLevel1Agent:
            ]
            sdk_safety_settings = [types.SafetySetting(category=s["category"], threshold=s["threshold"]) for s in current_safety_settings_list_of_dicts]

-            #
+            # Create the main config object for the generate_content call
            api_call_config = types.GenerateContentConfig(
-
+                temperature=temp,
+                top_p=top_p_val,
+                max_output_tokens=max_tokens,
                safety_settings=sdk_safety_settings
            )

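The two hunks above replace an opaque configuration block with explicit generation parameters (temperature, top_p, max_output_tokens) and safety settings, all carried by a single types.GenerateContentConfig. A hedged sketch of how such a config object is consumed by the SDK follows; the client construction, model id, and prompt are placeholders rather than code from this commit.

from google import genai
from google.genai import types

client = genai.Client(api_key="YOUR_KEY")  # placeholder; the agent reads GOOGLE_GEMINI_API_KEY

# Assemble the same kind of config object built in the diff above.
config = types.GenerateContentConfig(
    temperature=0.1,
    top_p=0.95,
    max_output_tokens=1024,
    safety_settings=[
        types.SafetySetting(
            category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
            threshold=types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        ),
    ],
)

# The config is handed to generate_content via the config parameter.
response = client.models.generate_content(
    model="gemini-2.0-flash",                      # placeholder model id
    contents="Summarize the retrieved context.",   # placeholder prompt
    config=config,
)
print(response.text)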
@@ -1211,17 +1217,9 @@ class GaiaLevel1Agent:
            gaia_logger.info(f"LLM Raw Full Answer (first 200 chars): {llm_answer_text[:200]}...")
            return self._parse_llm_output(llm_answer_text)

-        except ValueError as ve:
-
-
-                fr_from_ex = "Unknown (from ValueError)"
-                match_fr = re.search(r"finish_reason.*?is\s*(\w+)", str(ve), re.IGNORECASE)
-                if match_fr: fr_from_ex = match_fr.group(1)
-                return {"model_answer": "LLM Error: Invalid response state",
-                        "reasoning_trace": f"Could not parse LLM response. Finish reason possibly {fr_from_ex}. Details: {str(ve)[:150]}"}
-            else:
-                gaia_logger.error(f"ValueError during Google GenAI call or processing: {ve}", exc_info=True)
-                return {"model_answer": "LLM Error: Value error", "reasoning_trace": f"A value error occurred: {str(ve)}"}
+        except ValueError as ve:  # Catch pydantic.ValidationError specifically if possible, or broader ValueError
+            gaia_logger.error(f"ValueError during Google GenAI call or processing: {ve}", exc_info=True)  # Log with full traceback
+            return {"model_answer": "LLM Error: Value error in config", "reasoning_trace": f"A value error occurred: {str(ve)}"}
        except Exception as e:
            gaia_logger.error(f"Error calling Google GenAI API: {e}", exc_info=True)
            error_type_name = type(e).__name__
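The hunk above removes a handler that tried to recover the model's finish reason by regex-matching the text of a ValueError; the replacement simply logs the error and returns a generic message. An alternative worth noting, sketched below on the assumption that the response object exposes candidates and prompt_feedback (as the google-genai response types do), is to inspect the response state directly instead of parsing exception text.

def describe_response_state(response) -> str:
    # Illustrative helper, not part of this commit: summarize why a response
    # may lack usable text, without parsing exception messages.
    feedback = getattr(response, "prompt_feedback", None)
    if feedback is not None and getattr(feedback, "block_reason", None):
        return f"Prompt blocked: {feedback.block_reason}"
    candidates = getattr(response, "candidates", None)
    if candidates:
        return f"Finish reason: {candidates[0].finish_reason}"
    return "No candidates returned"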
@@ -1468,5 +1466,4 @@ if __name__ == "__main__":


    print("-"*(60 + len(" GAIA Level 1 Agent - RAG, FileProc, Video Analysis ")) + "\n")
-    # Removed ssr=False as it was causing a TypeError with the Gradio version in the environment
    demo.launch(server_name="0.0.0.0", server_port=7860, debug=False, share=False)
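
The final hunk drops an outdated comment about ssr=False; the launch call itself keeps the same arguments. For reference, a minimal stand-in Gradio app using those launch arguments is sketched below; the interface is a placeholder, not the agent's actual UI.

import gradio as gr

with gr.Blocks() as demo:
    question = gr.Textbox(label="Question")
    answer = gr.Textbox(label="Answer")
    # Placeholder handler; the real app presumably wires the GAIA agent in here.
    question.submit(lambda q: f"You asked: {q}", inputs=question, outputs=answer)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, debug=False, share=False)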