import gradio as gr
import numpy as np
import re
import pickle
import nltk
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
from sklearn.preprocessing import LabelEncoder  # class of the pickled label encoder
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import inflect

# Ensure the NLTK stopword list and WordNet data are downloaded
nltk.download('stopwords')
nltk.download('wordnet')
# Load the tokenizer, label encoder, and model
def load_resources():
    tokenizer = AutoTokenizer.from_pretrained('./transformer_tokenizer')
    with open('./label_encoder_tf.pickle', 'rb') as handle:
        encoder = pickle.load(handle)
    model = TFAutoModelForSequenceClassification.from_pretrained('./transformer_model')
    return tokenizer, encoder, model

tokenizer, encoder, model = load_resources()
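
# Note: the relative paths above assume the fine-tuned model, the tokenizer
# files, and the pickled LabelEncoder sit next to this app.py (e.g. in the
# same Space repository); adjust them if the artifacts live elsewhere.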
# Preprocessing functions
def expand_contractions(text, contractions_dict):
    # Match longer contractions first (e.g. "can't've" before "can't") so the
    # alternation never truncates a match; escape keys to keep them literal.
    sorted_keys = sorted(contractions_dict.keys(), key=len, reverse=True)
    contractions_pattern = re.compile('({})'.format('|'.join(map(re.escape, sorted_keys))),
                                      flags=re.IGNORECASE | re.DOTALL)

    def expand_match(contraction):
        match = contraction.group(0)
        expanded_contraction = contractions_dict.get(match.lower(), match)
        # Preserve a leading capital (e.g. "Can't" -> "Cannot"); skipped for
        # keys that start with an apostrophe, such as "'cause".
        if match[0].isupper():
            expanded_contraction = expanded_contraction[0].upper() + expanded_contraction[1:]
        return expanded_contraction

    expanded_text = contractions_pattern.sub(expand_match, text)
    return re.sub("'", "", expanded_text)

def convert_numbers_to_words(text):
    p = inflect.engine()
    words = text.split()
    return ' '.join([p.number_to_words(word) if word.isdigit() else word for word in words])

def preprocess_text(text):
    # Keys (and values) are lowercase because the text is lowercased before expansion.
    contractions_dict = {
        "ain't": "am not", "aren't": "are not", "can't": "cannot", "can't've": "cannot have", "'cause": "because",
        "could've": "could have", "couldn't": "could not", "couldn't've": "could not have", "didn't": "did not",
        "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hadn't've": "had not have", "hasn't": "has not",
        "haven't": "have not", "he'd": "he had", "he'd've": "he would have", "he'll": "he will", "he'll've": "he will have",
        "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
        "i'd": "i had", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have", "i'm": "i am", "i've": "i have",
        "isn't": "is not", "it'd": "it had", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have",
        "it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not",
        "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have",
        "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not",
        "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have",
        "she'd": "she had", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is",
        "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have",
        "so's": "so is", "that'd": "that had", "that'd've": "that would have", "that's": "that is", "there'd": "there had",
        "there'd've": "there would have", "there's": "there is", "they'd": "they had", "they'd've": "they would have",
        "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have",
        "wasn't": "was not", "we'd": "we had", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have",
        "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have",
        "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have",
        "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have",
        "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not",
        "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have",
        "y'all": "you all", "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are",
        "y'all've": "you all have", "you'd": "you had", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have",
        "you're": "you are", "you've": "you have"
    }
    text = text.lower()
    text = expand_contractions(text, contractions_dict)
    text = convert_numbers_to_words(text)
    text = re.sub(r'[^\w\s]', '', text)  # drop punctuation
    stop_words = set(stopwords.words('english'))
    text = ' '.join([word for word in text.split() if word not in stop_words])
    lemmatizer = WordNetLemmatizer()
    text = ' '.join([lemmatizer.lemmatize(word) for word in text.split()])
    return text
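
# Rough example of the full pipeline (assuming NLTK's standard English
# stopword list): preprocess_text("I can't call you at 4") should come out
# as something like "cannot call four" -- the contraction expanded, the
# digit spelled out, punctuation and stopwords dropped, survivors lemmatized.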

# Define the prediction function
def predict_spam(text):
    preprocessed_text = preprocess_text(text)
    encoding = tokenizer(preprocessed_text, return_tensors='tf', truncation=True, padding=True)
    prediction = model(encoding).logits
    predicted_label = np.argmax(prediction, axis=1)
    decoded_label = encoder.inverse_transform(predicted_label)
    return decoded_label[0]
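
# Hypothetical usage -- the actual label strings depend on what the pickled
# LabelEncoder was fit on (commonly "ham"/"spam" for SMS spam datasets):
#   predict_spam("WINNER!! Claim your free prize now")  # -> "spam"
#   predict_spam("Are we still on for lunch today?")    # -> "ham"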

# Create the Gradio interface
iface = gr.Interface(fn=predict_spam,
                     inputs=gr.Textbox(lines=2, placeholder="Enter SMS message here..."),
                     outputs="text",
                     title="SMS Spam Classification with Transformer Model",
                     description="Enter an SMS message to classify it as spam or ham.")

# Launch the interface
iface.launch(share=True)
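
# Note: share=True asks Gradio for a temporary public gradio.live link, which
# only matters when running locally; on Hugging Face Spaces the app is already
# hosted publicly and Gradio ignores the flag with a warning.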