import gradio as gr
from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline

# Load the image captioning model and its processor once at startup
caption_model_name = "Salesforce/blip-image-captioning-large"
caption_processor = BlipProcessor.from_pretrained(caption_model_name)
caption_model = BlipForConditionalGeneration.from_pretrained(caption_model_name)

# Load the emotion analysis pipeline once at startup instead of on every request
emotion_model_name = "SamLowe/roberta-base-go_emotions"
emotion_classifier = pipeline("text-classification", model=emotion_model_name)

def generate_caption_and_analyze_emotions(image):
    # Preprocess the image for caption generation
    caption_inputs = caption_processor(images=image, return_tensors="pt")
    # Generate caption token IDs with the caption model
    caption = caption_model.generate(**caption_inputs)
    # Decode the generated token IDs into a text caption
    decoded_caption = caption_processor.decode(caption[0], skip_special_tokens=True)
    # Classify the caption's emotion; the top-scoring label comes first
    results = emotion_classifier(decoded_caption)
    # Treat neutral or low-confidence (score <= 0.40) predictions as inconclusive
    if results[0]["label"] == "neutral" or results[0]["score"] <= 0.40:
        final_output = f"Sentiment of the image is not clear; the image shows {decoded_caption}."
    else:
        final_output = f"Sentiment of the image shows {results[0]['label']}."
    return final_output
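
# A minimal local smoke test (commented out); the filename "example.jpg" is a
# hypothetical placeholder for any image on disk:
# from PIL import Image
# print(generate_caption_and_analyze_emotions(Image.open("example.jpg")))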

# Define the Gradio interface components
# (the old gr.inputs / gr.outputs namespaces were removed in Gradio 3+)
inputs = gr.Image(type="pil", label="Upload an image")
outputs = gr.Textbox(label="Sentiment Analysis")

# Create the Gradio app
app = gr.Interface(fn=generate_caption_and_analyze_emotions, inputs=inputs, outputs=outputs)
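
# When launched locally, Gradio serves on http://127.0.0.1:7860 by default;
# on a Hugging Face Space the launch() call below is run automatically.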
# Launch the Gradio app
if __name__ == "__main__":
    app.launch()