Commit de04e86
Parent(s): 7ebe50e
app.py CHANGED
@@ -1,25 +1,41 @@
 import gradio as gr
-from transformers import BlipProcessor, BlipForConditionalGeneration
+from transformers import BlipProcessor, BlipForConditionalGeneration, RobertaTokenizer, RobertaForSequenceClassification

-# Load the model and tokenizer
+# Load the image captioning model and tokenizer
+caption_model_name = "Salesforce/blip-image-captioning-large"
+caption_processor = BlipProcessor.from_pretrained(caption_model_name)
+caption_model = BlipForConditionalGeneration.from_pretrained(caption_model_name)

+# Load the emotion analysis model and tokenizer
+emotion_model_name = "SamLowe/roberta-base-go_emotions"
+emotion_tokenizer = RobertaTokenizer.from_pretrained(emotion_model_name)
+emotion_model = RobertaForSequenceClassification.from_pretrained(emotion_model_name)

+def generate_caption_and_analyze_emotions(image):
+    # Preprocess the image for caption generation
+    caption_inputs = caption_processor(images=image, return_tensors="pt")
+
+    # Generate caption using the caption model
+    caption = caption_model.generate(**caption_inputs)

 # Decode the output caption
-decoded_caption =
+    decoded_caption = caption_processor.decode(caption[0], skip_special_tokens=True)
+
+    # Analyze emotions of the generated caption
+    # Preprocess the caption for emotion analysis
+    emotion_inputs = emotion_tokenizer(decoded_caption, return_tensors="pt")
+    emotion_outputs = emotion_model(**emotion_inputs)
+
+    # Get the predicted emotion label from the model's id2label mapping
+    emotion_label = emotion_model.config.id2label[emotion_outputs.logits.argmax().item()]
+
+    # Prepare the final output with sentiment information
+    final_output = f"The sentiment in the provided image shows: {emotion_label}.\n\nGenerated Caption: {decoded_caption}"
+    return final_output

 # Define the Gradio interface
 inputs = gr.inputs.Image(label="Upload an image")
-outputs = gr.outputs.Textbox(label="Generated Caption")
+outputs = gr.outputs.Textbox(label="Generated Caption and Sentiment Analysis")

-# Create the Gradio app
-gr.Interface(fn=
+# Create the Gradio app
+gr.Interface(fn=generate_caption_and_analyze_emotions, inputs=inputs, outputs=outputs).launch()
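
For a quick sanity check outside the Gradio UI, the new function can be called directly on a PIL image. A minimal sketch, assuming the app code above has already been run and that test.jpg is a stand-in for any local image:

from PIL import Image

# test.jpg is a hypothetical local file; any RGB image works
image = Image.open("test.jpg").convert("RGB")
print(generate_caption_and_analyze_emotions(image))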
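
One caveat on the emotion stage: SamLowe/roberta-base-go_emotions is trained as a multi-label classifier, so taking a single argmax over the logits is a simplification that keeps only the top emotion. A sketch of the same text stage via the transformers pipeline API, which applies the label mapping and per-label scores for you (the caption string here is made up):

from transformers import pipeline

# Returns a score for every emotion label in the model
emotion_pipe = pipeline("text-classification", model="SamLowe/roberta-base-go_emotions", top_k=None)
scores = emotion_pipe("a dog running across a sunny beach")[0]
top = max(scores, key=lambda s: s["score"])  # highest-scoring label, e.g. {'label': 'joy', ...}
print(top)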
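
Note also that gr.inputs.Image and gr.outputs.Textbox belong to Gradio's legacy namespace, deprecated in Gradio 3.x and removed in 4.x. A sketch of the same interface with the current component classes, assuming Gradio 3 or later:

import gradio as gr

# type="pil" hands the function a PIL image, matching the processor call above
demo = gr.Interface(
    fn=generate_caption_and_analyze_emotions,
    inputs=gr.Image(type="pil", label="Upload an image"),
    outputs=gr.Textbox(label="Generated Caption and Sentiment Analysis"),
)
demo.launch()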