# streamlit_app.py
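# Run with: streamlit run streamlit_app.py
# Assumes the streamlit and transformers packages are installed, plus a backend such as PyTorch.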
import streamlit as st
from transformers import pipeline

# Cache loaded text-classification pipelines so each model is instantiated only once per session
@st.cache_resource
def load_pipeline(model_name):
    return pipeline("text-classification", model=model_name)
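
# Note: a text-classification pipeline returns a list of dicts, roughly (values illustrative):
#   load_pipeline(model_name)("I love this!") -> [{'label': 'POSITIVE', 'score': 0.99}]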

# Initialize session state for conversation history, bot response, and selected model
if 'conversation_history' not in st.session_state:
    st.session_state.conversation_history = ""
if 'bot_response' not in st.session_state:
    st.session_state.bot_response = ""
if 'selected_model' not in st.session_state:
    st.session_state.selected_model = "distilbert/distilbert-base-uncased-finetuned-sst-2-english"

def classify_text(user_message):
    # Update the conversation history
    st.session_state.conversation_history += f"User: {user_message}\n"
    pipe = load_pipeline(st.session_state.selected_model)
    result = pipe(user_message)[0]  # pipe returns a list of results
    st.session_state.conversation_history += f"Bot: {result['label']} (Score: {result['score']:.2f})\n"
    st.session_state.bot_response = result
    return result

# Sidebar options
st.sidebar.title("App Settings")

# Model selection
model_options = {
    "DistilBERT Sentiment Analysis": "distilbert/distilbert-base-uncased-finetuned-sst-2-english",
    "BERT Multilingual Sentiment Analysis": "nlptown/bert-base-multilingual-uncased-sentiment"
}
selected_model = st.sidebar.selectbox("Select model:", list(model_options.keys()))
st.session_state.selected_model = model_options[selected_model]

show_history = st.sidebar.checkbox("Show conversation history", value=True)
character_limit = st.sidebar.slider("Set character limit for input:", min_value=50, max_value=500, value=200)

# Session reset button
if st.sidebar.button("Reset Conversation"):
    st.session_state.conversation_history = ""
    st.session_state.bot_response = ""
    st.sidebar.success("Conversation history cleared.")

# Streamlit app layout
st.title("🧠 Text Classification Bot")
st.subheader("Classify your text with a sentiment analysis model!")

# Input field with character limit
user_message = st.text_input(f"Enter your message (max {character_limit} characters):", max_chars=character_limit)

# Send button to generate classification
if st.button("Classify"):
    if user_message:
        # Get classification from the selected model
        classification_result = classify_text(user_message)
        
        # Display bot's response in a dedicated area
        st.markdown("### Classification Result")
        st.success(f"**Label:** {classification_result['label']}\n**Score:** {classification_result['score']:.2f}")
        
        if show_history:
            # Display conversation history in a text area for better scrolling
            st.write("### Conversation History")
            st.text_area("Conversation", value=st.session_state.conversation_history, height=250, max_chars=None)
    else:
        # Show a warning if no message is provided
        st.warning("Please enter a message before classifying.")

# About section
st.markdown("---")
st.markdown("### About this App")
st.info("This app uses pre-trained models for sentiment analysis. You can select a model and enter text to see its classification and sentiment score.")

st.sidebar.markdown("---")
st.sidebar.write("Created by [Your Name](https://github.com/yourprofile)")