import gradio as gr
from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline
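
# Load both models once at import time so they are not re-initialized on every request.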
caption_model_name = "Salesforce/blip-image-captioning-large"
caption_processor = BlipProcessor.from_pretrained(caption_model_name)
caption_model = BlipForConditionalGeneration.from_pretrained(caption_model_name)

emotion_model_name = "SamLowe/roberta-base-go_emotions"
emotion_classifier = pipeline("text-classification", model=emotion_model_name)


def generate_caption_and_analyze_emotions(image):
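    """Caption the uploaded image with BLIP, then classify the caption's emotion."""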
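    # Generate a caption for the uploaded image.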
    caption_inputs = caption_processor(images=image, return_tensors="pt")
    caption = caption_model.generate(**caption_inputs)
    decoded_caption = caption_processor.decode(caption[0], skip_special_tokens=True)
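
    # Classify the emotion expressed by the caption text.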
    results = emotion_classifier(decoded_caption)
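
    # Fall back to the plain caption when the top prediction is neutral or low-confidence.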
    if results[0]['label'] == 'neutral' or results[0]['score'] <= 0.40:
        final_output = f"The sentiment of the image is not clear; the image shows {decoded_caption}."
    else:
        final_output = f"The sentiment of the image is {results[0]['label']}."

    return final_output
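

# Gradio components: an image upload in, a sentiment description out.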
inputs = gr.Image(type="pil", label="Upload an image")
outputs = gr.Textbox(label="Sentiment Analysis")

app = gr.Interface(fn=generate_caption_and_analyze_emotions, inputs=inputs, outputs=outputs)

if __name__ == "__main__":
    app.launch()