# Maps each audit category to the checks it covers. "AI Audit" lists the
# machine-readable analysis types; the remaining categories are review areas.
ai_audit_analysis_categories = {
    "AI Audit": [
        "sentiment_analysis",
        "emotion_detection",
        "political_bias_detection",
        "stress_level_detection",
        "empathy_level_assessment",
        "mood_detection",
        "toxicity_detection",
    ],
    "GDPR": [
        "Data Handling and Processing",
        "Consent and Transparency",
        "Data Security",
        "Environmental Impact",
    ],
    "Toxicity": [
        "Content Moderation",
        "Reporting Mechanism",
        "Content Guidelines",
        "User Education",
    ],
    "Legal": [
        "Privacy Policy",
        "Data Retention",
        "Consent Mechanism",
        "GDPR Compliance",
    ],
    "Context": [
        "Ethical AI",
        "Bias Mitigation",
        "Fairness Assessment",
        "Explainability",
    ],
    "Governance": [
        "Model Development",
        "Data Quality",
        "Bias Mitigation",
        "Fairness Assessment",
        "Explainability",
        "User Input",
    ],
    "RiskManagement": [
        "Corporate Ethics",
        "Board Management",
        "Stakeholder Engagement",
        "Risk Management",
    ],
    "Robustness": [
        "System Reliability",
        "Quality Assurance",
        "Stress Testing",
        "Fail-Safe Procedures",
    ],
    "Sustainability": [
        "Renewable Resources",
        "Waste Reduction",
        "Energy Efficiency",
        "Sustainable Practices",
    ],
}
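

# Illustrative helper (an assumption, not part of the original module): one way
# to consume the map above is to flatten it into (category, check) pairs, e.g.
# to drive an audit run or render a checklist.
def iter_audit_checks():
    """Yield (category, check) pairs from ai_audit_analysis_categories."""
    for category, checks in ai_audit_analysis_categories.items():
        for check in checks:
            yield category, check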






# Define a standard template for prompts
STANDARD_PROMPT_TEMPLATE = "You are a data analysis assistant capable of {analysis_type} analysis. {specific_instruction} Respond with your analysis in JSON format. The JSON schema should include '{json_schema}'."
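
# Illustrative only (the template is not filled anywhere in this module):
# it can be populated with str.format, e.g.
#
#   STANDARD_PROMPT_TEMPLATE.format(
#       analysis_type="sentiment_analysis",
#       specific_instruction=ANALYSIS_TYPES["sentiment_analysis"],
#       json_schema="'sentiment': string, 'confidence_score': number (0-1)",
#   )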




def get_system_prompt(analysis_type: str) -> str:
    """Build the system prompt for the given analysis type.

    Looks up the instruction in ANALYSIS_TYPES and the expected output
    schema in JSON_SCHEMAS (both defined below; Python resolves the names
    at call time).
    """
    specific_instruction = ANALYSIS_TYPES.get(analysis_type, "Perform the analysis as per the specified type.")
    json_schema = JSON_SCHEMAS.get(analysis_type, {})
    json_schema_str = ', '.join(f"'{key}': {value}" for key, value in json_schema.items())
    return (f"You are a data analyst API capable of {analysis_type} analysis. "
            f"{specific_instruction} Please respond with your analysis directly in JSON format "
            f"(without using Markdown code blocks or any other formatting). "
            f"Always include 'confidence_score': number (0-1), rounded to two decimals, for the result of the analysis. "
            f"The JSON schema should include: {{{json_schema_str}}}.")




# Per-analysis instruction snippets injected into the system prompt.
ANALYSIS_TYPES = {
    "sentiment_analysis": "Analyze the sentiment of the provided text. Determine whether the sentiment is positive, negative, or neutral and provide a confidence score.",
    "emotion_detection": "Detect and identify the primary emotions expressed in the provided text. Provide a score for the intensity of the detected emotion.",
    "political_bias_detection": "Detect any political bias in the provided text, identifying leaning towards particular ideologies or parties.",
    "stress_level_detection": "Analyze the text to assess stress levels, identifying triggers and intensity of stress.",
    "empathy_level_assessment": "Assess the level of empathy expressed in the text, identifying empathetic responses and tendencies.",
    "mood_detection": "Detect the mood of the individual based on textual cues, ranging from happy to sad, calm to angry.",
    "toxicity_detection": "Identify and assess the level of toxicity in the provided text. Determine whether the text contains harmful, offensive, or inappropriate content and provide a score indicating the severity of the toxicity."
}


# Expected JSON output shape per analysis type; each value describes the field.
JSON_SCHEMAS = {
    "sentiment_analysis": {
        "sentiment": "string (positive, negative, neutral)",
        "confidence_score": "number (0-1)",
        "text_snippets": "array of strings (specific text portions contributing to sentiment)"
    },
    "emotion_detection": {
        "emotion": "string (primary emotion detected)",
        "confidence_score": "number (0-1)",
        "secondary_emotions": "array of objects (secondary emotions and their scores)"
    },
    "political_bias_detection": {
        "bias": "string (left, right, neutral)",
        "confidence_score": "number (0-1)",
        "bias_indicators": "array of strings (elements indicating bias)",
        "political_alignment_score": "number (quantifying degree of political bias)"
    },
    "stress_level_detection": {
        "stress_level": "string",
        "stress_triggers": "array of strings"
    },
    "empathy_level_assessment": {
        "empathy_level": "string",
        "empathetic_responses": "array of strings"
    },
    "mood_detection": {
        "mood": "string",
        "mood_intensity": "number"
    },
    "toxicity_detection": {
        "toxicity_level": "string (none, low, medium, high)",
        "toxicity_flags": "array of strings (specific words or phrases contributing to toxicity)",
        "contextual_factors": "array of objects (additional contextual elements influencing toxicity interpretation)"
    }
}
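

# Minimal usage sketch (an assumption: the module may also be run directly
# as a smoke test). Prints the generated system prompt for each analysis type.
if __name__ == "__main__":
    for analysis in ANALYSIS_TYPES:
        print(f"--- {analysis} ---")
        print(get_system_prompt(analysis))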