# GRC_framework/utils.py
# Audit categories and the checks performed under each. The "AI Audit" entries
# are keys into ANALYSIS_TYPES / JSON_SCHEMAS below; the remaining categories
# are human-readable checklist items.
ai_audit_analysis_categories = {
    "AI Audit": [
        "sentiment_analysis",
        "emotion_detection",
        "political_bias_detection",
        "stress_level_detection",
        "empathy_level_assessment",
        "mood_detection",
        "toxicity_detection"
    ],
    "GDPR": [
        "Data Handling and Processing",
        "Consent and Transparency",
        "Data Security",
        "Environmental Impact"
    ],
    "Toxicity": [
        "Content Moderation",
        "Reporting Mechanism",
        "Content Guidelines",
        "User Education"
    ],
    "Legal": [
        "Privacy Policy",
        "Data Retention",
        "Consent Mechanism",
        "GDPR Compliance"
    ],
    "Context": [
        "Ethical AI",
        "Bias Mitigation",
        "Fairness Assessment",
        "Explainability"
    ],
    "Governance": [
        "Model Development",
        "Data Quality",
        "Bias Mitigation",
        "Fairness Assessment",
        "Explainability",
        "User Input"
    ],
    "RiskManagement": [
        "Corporate Ethics",
        "Board Management",
        "Stakeholder Engagement",
        "Risk Management"
    ],
    "Robustness": [
        "System Reliability",
        "Quality Assurance",
        "Stress Testing",
        "Fail-Safe Procedures"
    ],
    "Sustainability": [
        "Renewable Resources",
        "Waste Reduction",
        "Energy Efficiency",
        "Sustainable Practices"
    ]
}

# Standard template for system prompts (kept for reference; get_system_prompt
# builds its prompt inline).
STANDARD_PROMPT_TEMPLATE = "You are a data analysis assistant capable of {analysis_type} analysis. {specific_instruction} Respond with your analysis in JSON format. The JSON schema should include '{json_schema}'."

def get_system_prompt(analysis_type: str) -> str:
    # ANALYSIS_TYPES and JSON_SCHEMAS are defined below; they exist by the time
    # this function is called at runtime.
    specific_instruction = ANALYSIS_TYPES.get(analysis_type, "Perform the analysis as per the specified type.")
    json_schema = JSON_SCHEMAS.get(analysis_type, {})
    json_schema_str = ', '.join([f"'{key}': {value}" for key, value in json_schema.items()])
    return (f"You are a data analyst API capable of {analysis_type} analysis. "
            f"{specific_instruction} Please respond with your analysis directly in JSON format "
            f"(without using Markdown code blocks or any other formatting). "
            f"Always include 'confidence_score': number (0-1), rounded to two decimals, for the result of the analysis. "
            f"The JSON schema should include: {{{json_schema_str}}}.")

ANALYSIS_TYPES = {
    "sentiment_analysis": "Analyze the sentiment of the provided text. Determine whether the sentiment is positive, negative, or neutral and provide a confidence score.",
    "emotion_detection": "Detect and identify the primary emotions expressed in the provided text. Provide a score for the intensity of the detected emotion.",
    "political_bias_detection": "Detect any political bias in the provided text, identifying leaning towards particular ideologies or parties.",
    "stress_level_detection": "Analyze the text to assess stress levels, identifying triggers and intensity of stress.",
    "empathy_level_assessment": "Assess the level of empathy expressed in the text, identifying empathetic responses and tendencies.",
    "mood_detection": "Detect the mood of the individual based on textual cues, ranging from happy to sad, calm to angry.",
    "toxicity_detection": "Identify and assess the level of toxicity in the provided text. Determine whether the text contains harmful, offensive, or inappropriate content and provide a score indicating the severity of the toxicity."
}

JSON_SCHEMAS = {
    "sentiment_analysis": {
        "sentiment": "string (positive, negative, neutral)",
        "confidence_score": "number (0-1)",
        "text_snippets": "array of strings (specific text portions contributing to sentiment)"
    },
    "emotion_detection": {
        "emotion": "string (primary emotion detected)",
        "confidence_score": "number (0-1)",
        "secondary_emotions": "array of objects (secondary emotions and their scores)"
    },
    "political_bias_detection": {
        "bias": "string (left, right, neutral)",
        "confidence_score": "number (0-1)",
        "bias_indicators": "array of strings (elements indicating bias)",
        "political_alignment_score": "number (quantifying degree of political bias)"
    },
    "stress_level_detection": {
        "stress_level": "string",
        "stress_triggers": "array of strings"
    },
    "empathy_level_assessment": {
        "empathy_level": "string",
        "empathetic_responses": "array of strings"
    },
    "mood_detection": {
        "mood": "string",
        "mood_intensity": "number"
    },
    "toxicity_detection": {
        "toxicity_level": "string (none, low, medium, high)",
        "toxicity_flags": "array of strings (specific words or phrases contributing to toxicity)",
        "contextual_factors": "array of objects (additional contextual elements influencing toxicity interpretation)"
    }
}
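
# Minimal usage sketch (an added illustration, not part of the original module):
# builds a system prompt for every check configured under the "AI Audit"
# category. Run this file directly to print them; the guard keeps the demo out
# of normal imports.
if __name__ == "__main__":
    for analysis_type in ai_audit_analysis_categories["AI Audit"]:
        prompt = get_system_prompt(analysis_type)
        print(f"--- {analysis_type} ---")
        print(prompt)
        print()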