File size: 8,126 Bytes
61b67b5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
363e4fa
 
 
61b67b5
 
 
 
 
cbdc259
363e4fa
61b67b5
 
e81e2ba
 
61b67b5
 
 
 
 
 
 
e7d05e6
 
61b67b5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225

import os
import requests
import gradio as gr
requests.adapters.DEFAULT_TIMEOUT = 60
import time
import openai
from openai import OpenAI
from utils import ai_audit_analysis_categories, get_system_prompt, ANALYSIS_TYPES
import json


# Create Global Variables

# OpenAI client shared by all request functions below.
# SECURITY FIX: the API key was previously hard-coded in this file. A secret
# committed to source control is compromised and must be revoked; the key is
# now read from the OPENAI_API_KEY environment variable and can also be
# replaced at runtime through update_api_key() in the UI.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", ""))

# Conversation state shared across the UI callbacks.
# (A module-level `global` statement is a no-op, so plain assignments suffice.)
bot_last_message = ""        # most recent assistant reply
complete_chat_history = []   # list of (user_message, bot_message) tuples


# /////////////////// *****************************///////////////// Utitlity Functions
#region Utility Functions

# Function to update the OpenAI API key
def update_api_key(new_api_key):
    """Replace the global OpenAI client with one using *new_api_key*.

    Returns a status string for display in the UI.

    Bug fix: the original reported "updated successfully" even when the
    input was blank and no update had actually happened; a blank or
    whitespace-only key is now reported as rejected.
    """
    global client
    if new_api_key.strip() != "":
        client = OpenAI(api_key=new_api_key)
        return "API Key updated successfully"
    return "API Key was empty - key not updated"


def load_chatboat_last_message():
    """Fetch the chatbot's most recent reply (used by the assessment tab)."""
    last_reply = bot_last_message
    return last_reply

def load_chatboat_complet_history():
    """Render the full stored conversation as one plain-text transcript.

    Each stored turn contributes a "\\nUser: ...\\nAssistant: ..." segment,
    in order, matching the output of incremental concatenation.
    """
    segments = [
        f"\nUser: {user_message}\nAssistant: {bot_message}"
        for user_message, bot_message in complete_chat_history
    ]
    return "".join(segments)


def format_json_result_to_html(result):
    """Render an analysis-result dict as an HTML snippet.

    A list value becomes a bolded heading followed by <br>-separated items;
    a scalar value becomes a single bolded "Key: value" line. Keys are
    title-cased. Leading/trailing whitespace is stripped from the result.
    """
    pieces = []
    for key, value in result.items():
        heading = f"<strong>{key.title()}:</strong>"
        if isinstance(value, list):
            pieces.append(heading + "<br>" + "<br>".join(value) + "<br><br>")
        else:
            pieces.append(f"{heading} {value}<br>")
    return "".join(pieces).strip()

def format_json_result(result):
    """Render an analysis-result dict as readable plain text.

    List values get their own "Key:" line followed by one item per line;
    scalar values become "Key: value". Keys are title-cased and the final
    string is stripped of surrounding whitespace.
    """
    parts = []
    for key, value in result.items():
        label = key.title()
        if isinstance(value, list):
            parts.append(f"{label}:\n" + "\n".join(value) + "\n\n")
        else:
            parts.append(f"{label}: {value}\n")
    return "".join(parts).strip()

# Function to dynamically format the JSON result into Markdown format
def format_result_to_markdown(result):
    """Render an analysis-result dict as Markdown.

    Keys become bold title-cased headers; list values are rendered as a
    bullet list, scalars inline. The final string is stripped.
    """
    chunks = []
    for key, value in result.items():
        header = f"**{key.title()}**: "
        if isinstance(value, list):
            bullets = "\n".join(f"- {item}" for item in value)
            chunks.append(header + "\n" + bullets + "\n\n")
        else:
            chunks.append(header + f"{value}\n\n")
    return "".join(chunks).strip()

#endregion






# /////////////////// *****************************///////////////// Conversation with Open Ai Chatboat
#region Conversation with Open Ai Chatboat
# A Normal call to OpenAI API
def chat(system_prompt, user_prompt, model='gpt-3.5-turbo', temperature=0):
    """Send one system+user exchange to OpenAI and return the reply text.

    Bug fix: the `model` and `temperature` parameters were accepted but
    ignored — the request always hard-coded "gpt-3.5-turbo" and left the
    temperature at the API default. They are now forwarded to the API, so
    the defaults preserve the documented intent (deterministic output).
    """
    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        model=model,
        temperature=temperature,
    )
    return response.choices[0].message.content

# Lets format the prompt from the chat_history so that its looks good on the UI
def format_chat_prompt(message, chat_history, max_convo_length):
    """Flatten the most recent turns plus the new message into one prompt.

    Only the last *max_convo_length* (user, assistant) pairs are included;
    the new message is appended with a trailing "Assistant:" cue for the
    model to complete.
    """
    recent_turns = chat_history[-max_convo_length:]
    lines = [
        f"\nUser: {user_message}\nAssistant: {bot_message}"
        for user_message, bot_message in recent_turns
    ]
    lines.append(f"\nUser: {message}\nAssistant:")
    return "".join(lines)


# This function gets a message from user, passes it to chat gpt and return the output
def get_response_from_chatboat(message, chat_history, max_convo_length=10):
    """Handle one UI chat turn: query the model and record the exchange.

    Appends the (message, reply) pair both to the per-session
    `chat_history` (returned to the Chatbot widget) and to the
    module-level history used by the assessment tab, and remembers the
    reply as the last bot message. Returns ("", chat_history) so the
    input textbox is cleared.
    """
    global bot_last_message, complete_chat_history
    prompt = format_chat_prompt(message, chat_history, max_convo_length)
    reply = chat(
        system_prompt='You are a friendly chatbot. Generate the output for only the Assistant.',
        user_prompt=prompt,
    )
    for history in (chat_history, complete_chat_history):
        history.append((message, reply))
    bot_last_message = reply
    return "", chat_history

#endregion



def analyse_current_conversation(text, analysis_type):
    """Run the selected audit analysis over *text* via the OpenAI API.

    Validates the analysis type and the input length, sends the text with
    the category's system prompt (which asks the model for JSON output),
    and returns the parsed result formatted as Markdown. Returns a
    human-readable message string on any validation failure.

    Bug fix: the original called json.loads() directly on the raw model
    output, so any non-JSON reply crashed the UI with a JSONDecodeError.
    The parse is now guarded and falls back to the raw reply text.
    """
    if ANALYSIS_TYPES.get(analysis_type, None) is None:
        return f"Analysis type {analysis_type} is not implemented yet, please choose another category"

    if not text:
        return f"No text provided to analyze for {analysis_type}, please provide text or load from chatboat history"

    # Very short inputs do not carry enough signal for a meaningful audit.
    word_count = len(text.split())
    if word_count < 20:
        return f" The text is too short to analyze for {analysis_type}, please provide a large text"

    system_prompt = get_system_prompt(analysis_type)

    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": text}
        ],
        model="gpt-3.5-turbo",
    )

    analysis_result = response.choices[0].message.content
    print(analysis_result)

    # The model is *asked* to emit JSON but may not comply; show the raw
    # reply rather than crashing the UI.
    try:
        parsed_result = json.loads(analysis_result)
    except json.JSONDecodeError:
        return analysis_result

    return format_result_to_markdown(parsed_result)





#region UI Related Functions

def update_dropdown(main_category):
    """Refresh the sub-category dropdown for the newly selected main category."""
    subcategories = ai_audit_analysis_categories.get(main_category, [])
    print(subcategories)
    default_choice = subcategories[0] if subcategories else None
    return gr.Dropdown(choices=subcategories, value=default_choice)


def update_analysis_type(subcategory):
    """Placeholder change-handler for the sub-category dropdown.

    Currently only logs the selection; kept because the UI wires it to
    `sub_category_dropdown.change`. (Removed a dead `pass` statement that
    preceded the print.)
    """
    print(subcategory)



#endregion



# Build the Gradio UI: a logo header, an API-key field, and two tabs
# (free-form chat playground + audit/assessment of chat text).
with gr.Blocks() as demo:

    gr.Markdown("<center><img src='https://huggingface.co/spaces/abdulnim/GRC_framework/resolve/main/logo.png' alt='Align X' width='150'/></center>")


    # Add a text field for the API key
    api_key_field = gr.Textbox(label="Enter your Chatgpt OpenAI API Key")
    update_api_key_btn = gr.Button("Update API Key")
    # NOTE(review): update_api_key returns a status string, but outputs=[]
    # discards it — the user never sees whether the update succeeded.
    update_api_key_btn.click(update_api_key, inputs=[api_key_field], outputs=[])

    # gr.Markdown("# AI Audit and GRC Framework!")
    gr.Markdown("# AlignX Demo")

    with gr.Tabs():
        # Tab 1: converse with the OpenAI-backed chatbot. Both the Submit
        # button and pressing Enter in the textbox trigger the same handler,
        # which clears the textbox and appends the turn to the chatbot.
        with gr.TabItem("Prompt Testing"):
            gr.Markdown("## Prompt Testing")
            chatbot = gr.Chatbot(height=600)
            msg = gr.Textbox(label="Write something for the chatbot here")
            clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")
            submit_btn = gr.Button("Submit")
            submit_btn.click(get_response_from_chatboat, inputs=[msg, chatbot], outputs=[msg, chatbot])
            msg.submit(get_response_from_chatboat, inputs=[msg, chatbot], outputs=[msg, chatbot])

        # Tab 2: load chat text (last message or full history) or paste
        # custom text, pick an audit category, and run the analysis.
        with gr.TabItem("Prompt Assessment"):
            gr.Markdown("## Prompt Assessment")
            gr.Markdown("Load your chatbot text or write your own to  and analyze it")
            text_field = gr.Textbox(label="Text to Process", interactive=True, lines=2)

            # Radio button and dropdown list
            # Seed the dropdown from the first main category so the UI has
            # a valid selection before any user interaction.
            initial_main_category = next(iter(ai_audit_analysis_categories))
            initial_sub_categories = ai_audit_analysis_categories[initial_main_category]

            main_category_radio = gr.Radio(list(ai_audit_analysis_categories.keys()), label="Main Audit Categories", value=initial_main_category)
            sub_category_dropdown = gr.Dropdown(choices=initial_sub_categories, label="Sub Categories", value=initial_sub_categories[0])
            # Update the dropdown based on the radio selection
            main_category_radio.change(fn=update_dropdown, inputs= main_category_radio, outputs=sub_category_dropdown)
            sub_category_dropdown.change(fn=update_analysis_type, inputs=sub_category_dropdown)

            load_last_message_btn = gr.Button("Load Last Message")
            load_complete_conv_btn = gr.Button("Load Complete Chat History")
            process_btn = gr.Button("Process")
            # analysis_result = gr.Label()
            analysis_result = gr.Markdown()
            load_last_message_btn.click(load_chatboat_last_message, inputs=[], outputs=text_field)
            load_complete_conv_btn.click(load_chatboat_complet_history, inputs=[], outputs=text_field)
            process_btn.click(analyse_current_conversation, inputs=[text_field, sub_category_dropdown], outputs=analysis_result)

# share=True exposes the app via a public Gradio tunnel URL.
demo.launch(share=True)