# Try Before You Bias (TBYB) — Streamlit UI for general and task-oriented
# bias evaluation of text-to-image models (Hugging Face Spaces app).
| import streamlit as st | |
| import model_inferencing as MINFER | |
| import general_bias_measurement as GBM | |
| import model_comparison as MCOMP | |
| import user_evaluation_variables | |
| import pandas as pd | |
| import numpy as np | |
| import json | |
| import csv | |
| from itertools import cycle | |
| import random | |
| import time | |
| import datetime | |
| import zipfile | |
| from io import BytesIO, StringIO | |
def completed_setup(tabs, modelID):
    """Populate the UI tabs once *modelID* has been loaded.

    Announces readiness on the two evaluation tabs, shows a placeholder on
    the images tab until generated images are displayed, then mounts the
    interactive evaluation widgets.

    Args:
        tabs: sequence of Streamlit tab containers (0: general bias,
            1: task-oriented bias, 3: generated images).
        modelID: identifier of the loaded text-to-image model (display only).
    """
    with tabs[0]:
        st.write("\U0001F917 ", modelID, " has been loaded!")
        st.write("Ready for General Bias Evaluation")
        # general_bias_eval_setup(tabs[0])
    with tabs[1]:
        st.write("\U0001F917 ", modelID, " has been loaded!")
        st.write("Ready for Task-Oriented Bias Evaluation")
    with tabs[3]:
        # Show the "waiting" placeholder only while no image group has been
        # rendered in the UI yet.
        if not all([user_evaluation_variables.OBJECT_IMAGES_IN_UI, user_evaluation_variables.OCCUPATION_IMAGES_IN_UI, user_evaluation_variables.TASK_IMAGES_IN_UI]):
            st.write("\U0001F917 ", modelID, " has been loaded!")
            st.write("Waiting for Images to be generated.")
        # if any([user_evaluation_variables.OBJECT_IMAGES_IN_UI, user_evaluation_variables.OCCUPATION_IMAGES_IN_UI,
        #         user_evaluation_variables.TASK_IMAGES_IN_UI]):
        update_images_tab(tabs[3])
    # Mount the evaluation set-up widgets after the status messages above.
    with tabs[0]:
        general_bias_eval_setup(tabs[0], modelID, tabs[3])
    with tabs[1]:
        task_oriented_bias_eval_setup(tabs[1],modelID, tabs[3])
def general_bias_eval_setup(tab, modelID, imagesTab):
    """Render the general bias evaluation set-up widgets on *tab*.

    Shows two editable tables (generation parameters and image-type
    checkboxes) plus an Evaluate button; after a completed 'general' run,
    renders the metrics and save/download controls.

    Args:
        tab: Streamlit tab container hosting these widgets.
        modelID: identifier of the loaded model, passed to the evaluation.
        imagesTab: tab container forwarded to the evaluation routine.
    """
    # Editable generation parameters (values are strings; cast to int
    # downstream by the evaluation routine).
    generalBiasSetupDF_EVAL = pd.DataFrame(
        {
            "GEN Eval. Variable": ["No. Images to Generate per prompt", "No. Inference Steps", "Image Size (N x N)"],
            "GEN Values": ["10", "100", "512"],
        }
    )
    # Which image groups to generate (objects / actions / occupations).
    generalBiasSetupDF_TYPE = pd.DataFrame(
        {
            "Image Types": ["Objects", "Person in Frame", "Occupations / Label"],
            "Check": [True, True, True],
        }
    )
    tableColumn1, tableColumn2 = st.columns(2)
    with tab:
        with tableColumn1:
            # Parameter table: variable names locked, values editable.
            GENValTable = st.data_editor(
                generalBiasSetupDF_EVAL,
                column_config={
                    "GEN Eval. Variable": st.column_config.Column(
                        "Variable",
                        help="General Bias Evaluation variable to control extent of evaluations",
                        width=None,
                        required=None,
                        disabled=True,
                    ),
                    "GEN Values": st.column_config.Column(
                        "Values",
                        help="Input values in this column",
                        width=None,
                        required=True,
                        disabled=False,
                    ),
                },
                hide_index=True,
                num_rows="fixed",
            )
        with tableColumn2:
            # Checkbox table: only the "Check" column is user-editable.
            GENCheckTable = st.data_editor(
                generalBiasSetupDF_TYPE,
                column_config={
                    "Check": st.column_config.CheckboxColumn(
                        "Select",
                        help="Select the types of images you want to generate",
                        default=False,
                    )
                },
                disabled=["Image Types"],
                hide_index=True,
                num_rows="fixed",
            )
        if st.button('Evaluate!', key="EVAL_BUTTON_GEN"):
            initiate_general_bias_evaluation(tab, modelID, [GENValTable, GENCheckTable], imagesTab)
            st.rerun()
        # After a finished 'general' run, show metrics and offer saving.
        if user_evaluation_variables.RUN_TIME and user_evaluation_variables.CURRENT_EVAL_TYPE == 'general':
            GBM.output_eval_results(user_evaluation_variables.EVAL_METRICS, 21, 'general')
            st.write("\U0001F553 Time Taken: ", user_evaluation_variables.RUN_TIME)
            saveEvalsButton = st.button("Save + Upload Evaluations", key='SAVE_EVAL_GEN')
            saveDistButton = st.button("Download Object Distribution", key='SAVE_TOP_GEN')
            if saveEvalsButton:
                st.write("Saving and uploading evaluations")
                user_evaluation_variables.update_evaluation_table('general',False)
                user_evaluation_variables.reset_variables('general')
            if saveDistButton:
                download_word_distribution_csv(user_evaluation_variables.EVAL_METRICS,
                                               user_evaluation_variables.EVAL_ID, 'general')
def task_oriented_bias_eval_setup(tab,modelID,imagesTab):
    """Render the task-oriented bias evaluation set-up widgets on *tab*.

    Shows an editable parameter table and a free-text target word plus an
    Evaluate button; after a completed 'task-oriented' run, renders the
    metrics and save/download controls.

    Args:
        tab: Streamlit tab container hosting these widgets.
        modelID: identifier of the loaded model, passed to the evaluation.
        imagesTab: tab container forwarded to the evaluation routine.
    """
    # Editable generation parameters (values are strings; cast to int
    # downstream by the evaluation routine).
    biasSetupDF_EVAL = pd.DataFrame(
        {
            "TO Eval. Variable": ["No. Images to Generate per prompt", "No. Inference Steps", "Image Size (N x N)"],
            "TO Values": ["10", "100", "512"],
        }
    )
    with tab:
        TOValTable = st.data_editor(
            biasSetupDF_EVAL,
            column_config={
                "TO Eval. Variable": st.column_config.Column(
                    "Variable",
                    help="General Bias Evaluation variable to control extent of evaluations",
                    width=None,
                    required=None,
                    disabled=True,
                ),
                "TO Values": st.column_config.Column(
                    "Values",
                    help="Input values in this column",
                    width=None,
                    required=True,
                    disabled=False,
                ),
            },
            hide_index=True,
            num_rows="fixed",
        )
        # Single-token target used to filter COCO captions downstream.
        target = st.text_input('What is the single-token target of your task-oriented evaluation study '
                               'e.g.: "burger", "coffee", "men", "women"')
        if st.button('Evaluate!', key="EVAL_BUTTON_TO"):
            if len(target) > 0:
                initiate_task_oriented_bias_evaluation(tab, modelID, TOValTable, target, imagesTab)
                st.rerun()
            else:
                st.error('Please input a target for your task-oriented analysis', icon="🚨")
                # update_images_tab(imagesTab)
        # After a finished 'task-oriented' run, show metrics and offer saving.
        if user_evaluation_variables.RUN_TIME and user_evaluation_variables.CURRENT_EVAL_TYPE == 'task-oriented':
            GBM.output_eval_results(user_evaluation_variables.EVAL_METRICS, 21, 'task-oriented')
            st.write("\U0001F553 Time Taken: ", user_evaluation_variables.RUN_TIME)
            saveEvalsButton = st.button("Save + Upload Evaluations", key='SAVE_EVAL_TASK')
            saveDistButton = st.button("Download Object Distribution", key='SAVE_TOP_TASK')
            if saveEvalsButton:
                st.write("Saving and uploading evaluations")
                user_evaluation_variables.update_evaluation_table('task-oriented',False)
                user_evaluation_variables.reset_variables('task-oriented')
            if saveDistButton:
                download_word_distribution_csv(user_evaluation_variables.EVAL_METRICS,
                                               user_evaluation_variables.EVAL_ID, user_evaluation_variables.TASK_TARGET)
                # update_images_tab(imagesTab)
def download_word_distribution_csv(data, evalID, evalType):
    """Write the word-distribution summary of an evaluation to a local CSV.

    Args:
        data: evaluation metrics tuple — data[0] maps object word -> count,
            data[1] holds the normalized counts (aligned with data[0]),
            data[2] is the distribution-bias score, data[3]/data[4] are the
            per-image Jaccard hallucination / generative miss-rate values.
        evalID: evaluation identifier, used in the output file name.
        evalType: evaluation label (e.g. 'general' or the task target),
            also used in the file name.
    """
    outPath = './' + evalID + '_' + evalType + '_word_distribution.csv'
    with open(outPath, 'w', newline='') as outFile:
        writer = csv.writer(outFile)
        # Summary metrics first, then the per-word distribution table.
        writer.writerow(["Evaluation ID", evalID])
        writer.writerow(["Distribution Bias", data[2]])
        writer.writerow(["Jaccard hallucination", np.mean(data[3])])
        writer.writerow(["Generative Miss Rate", np.mean(data[4])])
        writer.writerow(['Position', 'Object', 'No. Occurences', 'Normalized'])
        for position, ((word, count), norm) in enumerate(zip(data[0].items(), data[1])):
            writer.writerow([position, word, count, norm])
    st.success('Successfully downloaded word distribution data!', icon="✅")
def initiate_general_bias_evaluation(tab, modelID, specs, imagesTab):
    """Run a full general-bias evaluation: generate images and score them.

    Reads object/occupation word lists from ./data, builds prompts, generates
    images with the loaded model, evaluates them, and stores all results in
    the module-level ``user_evaluation_variables``.

    Args:
        tab: Streamlit tab container for progress/status output.
        modelID: identifier of the loaded model (display only here).
        specs: [values_table, check_table] — the two edited DataFrames from
            general_bias_eval_setup (string values; cast to int where used).
        imagesTab: images tab container (currently unused in this body —
            presumably kept for parity with the task-oriented routine).
    """
    startTime = time.time()
    objectData = None
    occupationData = None
    objects = []
    actions = []
    occupations = []
    occupationDescriptors = []
    objectPrompts = None
    occupationPrompts = None
    objectImages = []
    objectCaptions = []
    occupationImages = []
    occupationCaptions = []
    evaluationImages = []
    evaluationCaptions = []
    with tab:
        st.write("Initiating General Bias Evaluation Experiments with the following setup:")
        st.write(" ***Model*** = ", modelID)
        infoColumn1, infoColumn2 = st.columns(2)
        with infoColumn1:
            st.write(" ***No. Images per prompt*** = ", specs[0]["GEN Values"][0])
            st.write(" ***No. Steps*** = ", specs[0]["GEN Values"][1])
            st.write(" ***Image Size*** = ", specs[0]["GEN Values"][2], "$\\times$", specs[0]["GEN Values"][2])
        with infoColumn2:
            st.write(" ***Objects*** = ", specs[1]["Check"][0])
            st.write(" ***Objects and Actions*** = ", specs[1]["Check"][1])
            st.write(" ***Occupations*** = ", specs[1]["Check"][2])
        st.markdown("___")
        # Load only the word lists selected in the checkbox table.
        if specs[1]["Check"][0]:
            objectData = read_csv_to_list("./data/list_of_objects.csv")
        if specs[1]["Check"][2]:
            occupationData = read_csv_to_list("./data/list_of_occupations.csv")
        if objectData == None and occupationData == None:
            st.error('Make sure that at least one of the "Objects" or "Occupations" rows are checked', icon="🚨")
        else:
            # First CSV row is a header; column 0 is the word, the rest are
            # its associated actions/descriptors.
            if specs[1]["Check"][0]:
                for row in objectData[1:]:
                    objects.append(row[0])
                # Actions only make sense together with objects.
                if specs[1]["Check"][1]:
                    for row in objectData[1:]:
                        actions.append(row[1:])
            if specs[1]["Check"][2]:
                for row in occupationData[1:]:
                    occupations.append(row[0])
                    occupationDescriptors.append(row[1:])
            with infoColumn1:
                st.write("***No. Objects*** = ", len(objects))
                st.write("***No. Actions*** = ", len(actions)*3)
            with infoColumn2:
                st.write("***No. Occupations*** = ", len(occupations))
                st.write("***No. Occupation Descriptors*** = ", len(occupationDescriptors)*3)
            if len(objects) > 0:
                objectPrompts = MINFER.construct_general_bias_evaluation_prompts(objects, actions)
            if len(occupations) > 0:
                occupationPrompts = MINFER.construct_general_bias_evaluation_prompts(occupations, occupationDescriptors)
            # Generate each selected image group and pool everything for a
            # single evaluation pass.
            if objectPrompts is not None:
                OBJECTprogressBar = st.progress(0, text="Generating Object-related images. Please wait.")
                objectImages, objectCaptions = MINFER.generate_test_images(OBJECTprogressBar, "Generating Object-related images. Please wait.",
                                                                           objectPrompts, int(specs[0]["GEN Values"][0]),
                                                                           int(specs[0]["GEN Values"][1]), int(specs[0]["GEN Values"][2]))
                evaluationImages+=objectImages
                evaluationCaptions+=objectCaptions[0]
                TXTObjectPrompts = ""
            if occupationPrompts is not None:
                OCCprogressBar = st.progress(0, text="Generating Occupation-related images. Please wait.")
                occupationImages, occupationCaptions = MINFER.generate_test_images(OCCprogressBar, "Generating Occupation-related images. Please wait.",
                                                                                   occupationPrompts, int(specs[0]["GEN Values"][0]),
                                                                                   int(specs[0]["GEN Values"][1]), int(specs[0]["GEN Values"][2]))
                evaluationImages += occupationImages
                evaluationCaptions += occupationCaptions[0]
            if len(evaluationImages) > 0:
                EVALprogressBar = st.progress(0, text="Evaluating "+modelID+" Model Images. Please wait.")
                user_evaluation_variables.EVAL_METRICS = GBM.evaluate_t2i_model_images(evaluationImages, evaluationCaptions, EVALprogressBar, False, "GENERAL")
                # GBM.output_eval_results(user_evaluation_variables.EVAL_METRICS, 21)
                elapsedTime = time.time() - startTime
                # st.write("\U0001F553 Time Taken: ", str(datetime.timedelta(seconds=elapsedTime)).split(".")[0])
                # Persist the run configuration and results in the shared
                # module-level state for the results/save UI.
                user_evaluation_variables.NO_SAMPLES = len(evaluationImages)
                user_evaluation_variables.RESOLUTION = specs[0]["GEN Values"][2] + "x" + specs[0]["GEN Values"][2]
                user_evaluation_variables.INFERENCE_STEPS = int(specs[0]["GEN Values"][1])
                user_evaluation_variables.GEN_OBJECTS = bool(specs[1]["Check"][0])
                user_evaluation_variables.GEN_ACTIONS = bool(specs[1]["Check"][1])
                user_evaluation_variables.GEN_OCCUPATIONS = bool(specs[1]["Check"][2])
                user_evaluation_variables.DIST_BIAS = float(f"{user_evaluation_variables.EVAL_METRICS[2]:.4f}")
                user_evaluation_variables.HALLUCINATION = float(f"{np.mean(user_evaluation_variables.EVAL_METRICS[3]):.4f}")
                user_evaluation_variables.MISS_RATE = float(f"{np.mean(user_evaluation_variables.EVAL_METRICS[4]):.4f}")
                user_evaluation_variables.EVAL_ID = MCOMP.get_evaluation_id('general', True)
                user_evaluation_variables.DATE = datetime.datetime.utcnow().strftime('%d-%m-%Y')
                user_evaluation_variables.TIME = datetime.datetime.utcnow().strftime('%H:%M:%S')
                user_evaluation_variables.RUN_TIME = str(datetime.timedelta(seconds=elapsedTime)).split(".")[0]
                user_evaluation_variables.OBJECT_IMAGES =objectImages
                user_evaluation_variables.OBJECT_CAPTIONS = objectCaptions
                user_evaluation_variables.OCCUPATION_IMAGES = occupationImages
                user_evaluation_variables.OCCUPATION_CAPTIONS = occupationCaptions
                user_evaluation_variables.CURRENT_EVAL_TYPE = 'general'
def initiate_task_oriented_bias_evaluation(tab, modelID, specs, target, imagesTab):
    """Run a task-oriented bias evaluation for a single target word.

    Extracts up to 50 COCO captions containing *target*, generates images
    from them, evaluates the images, and stores all results in the
    module-level ``user_evaluation_variables``.

    Args:
        tab: Streamlit tab container for progress/status output.
        modelID: identifier of the loaded model (display only here).
        specs: the edited "TO Values" DataFrame (string values; cast to int
            where used).
        target: single-token target word; matched case-insensitively.
        imagesTab: images tab container (currently unused in this body —
            presumably kept for a later UI refresh).
    """
    startTime = time.time()
    TASKImages = []
    TASKCaptions = []
    with tab:
        st.write("Initiating Task-Oriented Bias Evaluation Experiments with the following setup:")
        st.write(" ***Model*** = ", modelID)
        infoColumn1, infoColumn2 = st.columns(2)
        st.write(" ***No. Images per prompt*** = ", specs["TO Values"][0])
        st.write(" ***No. Steps*** = ", specs["TO Values"][1])
        st.write(" ***Image Size*** = ", specs["TO Values"][2], "$\\times$", specs["TO Values"][2])
        st.write(" ***Target*** = ", target.lower())
        st.markdown("___")
        # Number of COCO captions to harvest for the target word.
        captionsToExtract = 50
        if (captionsToExtract * int(specs['TO Values'][0])) < 30:
            st.error('There should be at least 30 images generated, You are attempting to generate:\t'
                     + str(captionsToExtract * int(specs['TO Values'][0]))+'.\nPlease readjust your No. Images per prompt',
                     icon="🚨")
        else:
            COCOLoadingBar = st.progress(0, text="Scanning through COCO Dataset for relevant prompts. Please wait")
            prompts, cocoIDs = get_COCO_captions('./data/COCO_captions.json', target.lower(), COCOLoadingBar, captionsToExtract)
            # Report how successful the caption search was.
            if len(prompts) == 0:
                st.error('Woops! Could not find **ANY** relevant COCO prompts for the target: '+target.lower()+
                         '\nPlease input a different target', icon="🚨")
            elif len(prompts) > 0 and len(prompts) < captionsToExtract:
                st.warning('WARNING: Only found '+str(len(prompts))+ ' relevant COCO prompts for the target: '+target.lower()+
                           '\nWill work with these. Nothing to worry about!', icon="⚠️")
            else:
                st.success('Successfully found '+str(captionsToExtract)+' relevant COCO prompts', icon="✅")
            if len(prompts) > 0:
                # Preview the first ten (ID, caption) pairs in the UI.
                COCOUIOutput = []
                for id, pr in zip(cocoIDs, prompts):
                    COCOUIOutput.append([id, pr])
                st.write('**Here are some of the randomised '+'"'+target.lower()+'"'+' captions extracted from the COCO dataset**')
                COCOUIOutput.insert(0, ('ID', 'Caption'))
                st.table(COCOUIOutput[:11])
                TASKprogressBar = st.progress(0, text="Generating Task-oriented images. Please wait.")
                TASKImages, TASKCaptions = MINFER.generate_task_oriented_images(TASKprogressBar,"Generating Task-oriented images. Please wait.",
                                                                                prompts, cocoIDs, int(specs["TO Values"][0]),
                                                                                int(specs["TO Values"][1]), int(specs["TO Values"][2]))
                EVALprogressBar = st.progress(0, text="Evaluating " + modelID + " Model Images. Please wait.")
                user_evaluation_variables.EVAL_METRICS = GBM.evaluate_t2i_model_images(TASKImages, TASKCaptions[0], EVALprogressBar, False, "TASK")
                # GBM.output_eval_results(user_evaluation_variables.EVAL_METRICS, 21)
                elapsedTime = time.time() - startTime
                # st.write("\U0001F553 Time Taken: ", str(datetime.timedelta(seconds=elapsedTime)).split(".")[0])
                # Persist the run configuration and results in the shared
                # module-level state for the results/save UI.
                user_evaluation_variables.NO_SAMPLES = len(TASKImages)
                user_evaluation_variables.RESOLUTION = specs["TO Values"][2]+"x"+specs["TO Values"][2]
                user_evaluation_variables.INFERENCE_STEPS = int(specs["TO Values"][1])
                user_evaluation_variables.DIST_BIAS = float(f"{user_evaluation_variables.EVAL_METRICS[2]:.4f}")
                user_evaluation_variables.HALLUCINATION = float(f"{np.mean(user_evaluation_variables.EVAL_METRICS[3]):.4f}")
                user_evaluation_variables.MISS_RATE = float(f"{np.mean(user_evaluation_variables.EVAL_METRICS[4]):.4f}")
                user_evaluation_variables.TASK_TARGET = target.lower()
                user_evaluation_variables.EVAL_ID = MCOMP.get_evaluation_id('task-oriented', True)
                user_evaluation_variables.DATE = datetime.datetime.utcnow().strftime('%d-%m-%Y')
                user_evaluation_variables.TIME = datetime.datetime.utcnow().strftime('%H:%M:%S')
                user_evaluation_variables.RUN_TIME = str(datetime.timedelta(seconds=elapsedTime)).split(".")[0]
                user_evaluation_variables.TASK_IMAGES = TASKImages
                user_evaluation_variables.TASK_CAPTIONS = TASKCaptions
                user_evaluation_variables.TASK_COCOIDs = cocoIDs
                user_evaluation_variables.CURRENT_EVAL_TYPE = 'task-oriented'
def download_and_zip_images(zipImagePath, images, captions, imageType):
    """Bundle generated images plus a CSV of their prompts into a zip file.

    Args:
        zipImagePath: destination path of the zip archive.
        images: list of PIL-style images (must support .save to JPEG).
        captions: pair where captions[0] are the prompt strings and
            captions[1] the per-image file names.
        imageType: 'object', 'occupation', or anything else for
            task-oriented runs (selects the CSV name and layout).
    """
    promptFileNames = {
        'object': 'object_prompts.csv',
        'occupation': 'occupation_prompts.csv',
    }
    csvFileName = promptFileNames.get(imageType, 'task-oriented_prompts.csv')
    with st.spinner("Zipping images..."):
        with zipfile.ZipFile(zipImagePath, 'w') as img_zip:
            # Each image is encoded to JPEG in memory and streamed into
            # the archive under its caption-derived file name.
            for idx, image in enumerate(images):
                imageBuffer = BytesIO()
                image.save(imageBuffer, 'JPEG')
                img_zip.writestr(captions[1][idx], imageBuffer.getvalue())
            # Saving prompt data as accompanying csv file
            promptBuffer = StringIO()
            promptWriter = csv.writer(promptBuffer)
            if imageType in ['object', 'occupation']:
                promptWriter.writerow(['No.', 'Prompt'])
                for number, prompt in enumerate(captions[0], start=1):
                    promptWriter.writerow([number, prompt])
            else:
                # Task-oriented prompts are keyed by their COCO caption IDs.
                promptWriter.writerow(['COCO ID', 'Prompt'])
                for prompt, cocoID in zip(captions[0], user_evaluation_variables.TASK_COCOIDs):
                    promptWriter.writerow([cocoID, prompt])
            img_zip.writestr(csvFileName, promptBuffer.getvalue())
    st.success('Successfully zipped and downloaded images!', icon="✅")
def update_images_tab(imagesTab):
    """Render all currently stored generated images on the images tab.

    For each non-empty image group (objects, occupations, task-oriented) in
    ``user_evaluation_variables``, shows an expander with the prompt list, a
    3-column image grid, and a save-to-zip button. Also flips the
    corresponding *_IN_UI flag so other tabs know the group is displayed.

    Args:
        imagesTab: Streamlit tab container to render into.
    """
    with imagesTab:
        if len(user_evaluation_variables.OBJECT_IMAGES) > 0:
            with st.expander('Object-related Images'):
                user_evaluation_variables.OBJECT_IMAGES_IN_UI = True
                # Build a numbered, newline-separated prompt listing.
                TXTObjectPrompts = ""
                for prompt, ii in zip(user_evaluation_variables.OBJECT_CAPTIONS[0], range(len(user_evaluation_variables.OBJECT_CAPTIONS[0]))):
                    TXTObjectPrompts += str(1 + ii) + '. ' + prompt + '\n'
                st.write("**Object-related General Bias Evaluation Images**")
                st.write("Number of Generated Images = ", len(user_evaluation_variables.OBJECT_IMAGES))
                st.write("Corresponding Number of *unique* Captions = ", len(user_evaluation_variables.OBJECT_CAPTIONS[0]))
                st.text_area("***List of Object Prompts***",
                             TXTObjectPrompts,
                             height=400,
                             disabled=False,
                             key='TEXT_AREA_OBJECT')
                # Lay the images out in a repeating 3-column grid.
                cols = cycle(st.columns(3))
                for idx, image in enumerate(user_evaluation_variables.OBJECT_IMAGES):
                    next(cols).image(image, width=225, caption=user_evaluation_variables.OBJECT_CAPTIONS[1][idx])
                saveObjectImages = st.button("Save Object-related Images")
                if saveObjectImages:
                    zipPath = 'TBYB_' + user_evaluation_variables.USERNAME + '_' + user_evaluation_variables.EVAL_ID + '_object_related_images.zip'
                    download_and_zip_images(zipPath, user_evaluation_variables.OBJECT_IMAGES,
                                            user_evaluation_variables.OBJECT_CAPTIONS, 'object')
        if len(user_evaluation_variables.OCCUPATION_IMAGES) > 0:
            user_evaluation_variables.OCCUPATION_IMAGES_IN_UI = True
            with st.expander('Occupation-related Images'):
                TXTOccupationPrompts = ""
                for prompt, ii in zip(user_evaluation_variables.OCCUPATION_CAPTIONS[0], range(len(user_evaluation_variables.OCCUPATION_CAPTIONS[0]))):
                    TXTOccupationPrompts += str(1 + ii) + '. ' + prompt + '\n'
                st.write("**Occupation-related General Bias Evaluation Images**")
                st.write("Number of Generated Images = ", len(user_evaluation_variables.OCCUPATION_IMAGES))
                st.write("Corresponding Number of *unique* Captions = ", len(user_evaluation_variables.OCCUPATION_CAPTIONS[0]))
                st.text_area("***List of Occupation Prompts***",
                             TXTOccupationPrompts,
                             height=400,
                             disabled=False,
                             key='TEXT_AREA_OCCU')
                cols = cycle(st.columns(3))
                for idx, image in enumerate(user_evaluation_variables.OCCUPATION_IMAGES):
                    next(cols).image(image, width=225, caption=user_evaluation_variables.OCCUPATION_CAPTIONS[1][idx])
                saveOccupationImages = st.button("Save Occupation-related Images")
                if saveOccupationImages:
                    zipPath = 'TBYB_' + user_evaluation_variables.USERNAME + '_' + user_evaluation_variables.EVAL_ID + '_occupation_related_images.zip'
                    download_and_zip_images(zipPath, user_evaluation_variables.OCCUPATION_IMAGES,
                                            user_evaluation_variables.OCCUPATION_CAPTIONS, 'occupation')
        if len(user_evaluation_variables.TASK_IMAGES) > 0:
            with st.expander(user_evaluation_variables.TASK_TARGET+'-related Images'):
                user_evaluation_variables.TASK_IMAGES_IN_UI = True
                # Task prompts are labelled with their COCO caption IDs.
                TXTTaskPrompts = ""
                for prompt, id in zip(user_evaluation_variables.TASK_CAPTIONS[0], user_evaluation_variables.TASK_COCOIDs):
                    TXTTaskPrompts += "ID_" + str(id) + '. ' + prompt + '\n'
                st.write("**Task-oriented Bias Evaluation Images. Target** = ", user_evaluation_variables.TASK_TARGET)
                st.write("Number of Generated Images = ", len(user_evaluation_variables.TASK_IMAGES))
                st.write("Corresponding Number of *unique* Captions = ", len(user_evaluation_variables.TASK_CAPTIONS[0]))
                st.text_area("***List of Task-Oriented Prompts***",
                             TXTTaskPrompts,
                             height=400,
                             disabled=False,
                             key='TEXT_AREA_TASK')
                cols = cycle(st.columns(3))
                for idx, image in enumerate(user_evaluation_variables.TASK_IMAGES):
                    next(cols).image(image, width=225, caption=user_evaluation_variables.TASK_CAPTIONS[1][idx])
                saveTaskImages = st.button("Save Task-oriented Images")
                if saveTaskImages:
                    zipPath = 'TBYB_' + user_evaluation_variables.USERNAME + '_' + user_evaluation_variables.EVAL_ID + '_'+ user_evaluation_variables.TASK_TARGET+'-oriented_images.zip'
                    download_and_zip_images(zipPath, user_evaluation_variables.TASK_IMAGES,
                                            user_evaluation_variables.TASK_CAPTIONS, 'task-oriented')
def get_COCO_captions(filePath, target, progressBar, NPrompts=50):
    """Collect up to *NPrompts* COCO captions containing *target* as a word.

    The annotation list is shuffled with a fixed seed so the same captions
    are selected on every run for a given target.

    Fixes over the previous version: the file handle is closed via a context
    manager, the global ``random`` module state is no longer reseeded (a
    local ``random.Random(42)`` yields the identical shuffle), and scanning
    stops as soon as *NPrompts* captions have been collected instead of
    walking the entire annotation list.

    Args:
        filePath: path to a COCO-style captions JSON ({'annotations': [...]}).
        target: word to match (caller passes it lowercased; matched against
            the space-split lowercase caption).
        progressBar: Streamlit-progress-like object with a
            ``progress(fraction, text=...)`` method.
        NPrompts: maximum number of captions to collect.

    Returns:
        Tuple (captions, ids) — lowercase caption strings and their
        annotation IDs as strings, aligned by index.
    """
    with open(filePath) as fp:
        captionData = json.load(fp)
    COCOCaptions = []
    COCOIDs = []
    # Deterministic shuffle; identical ordering to random.seed(42) +
    # random.shuffle, but without touching the global RNG state.
    random.Random(42).shuffle(captionData['annotations'])
    for anno in captionData['annotations']:
        caption = anno.get('caption').lower()
        if target in caption.split(' '):
            COCOCaptions.append(caption)
            COCOIDs.append(str(anno.get('id')))
            progressBar.progress(len(COCOCaptions) / NPrompts,
                                 text="Scanning through COCO Dataset for relevant prompts. Please wait")
            if len(COCOCaptions) == NPrompts:
                break
    return (COCOCaptions, COCOIDs)
def read_csv_to_list(filePath):
    """Read the CSV file at *filePath* into a list of rows.

    Args:
        filePath: path to the CSV file.

    Returns:
        List of rows, each a list of string fields.
    """
    with open(filePath, 'r', newline='') as csvfile:
        return list(csv.reader(csvfile))