import gradio as gr
import torch
from transformers import RobertaForSequenceClassification, RobertaTokenizer

# Load the saved model and tokenizer
model_path = "Charankarnati18/TASK3"  # Your HuggingFace model repo
tokenizer = RobertaTokenizer.from_pretrained(model_path)
model = RobertaForSequenceClassification.from_pretrained(model_path)

# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Label mapping (index i corresponds to class i in the model's output)
label_map = {0: "Non-toxic", 1: "Slightly Toxic", 2: "Highly Toxic"}


def predict_toxicity(text):
    # Tokenize input and move tensors to the same device as the model
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=64)
    inputs = {k: v.to(device) for k, v in inputs.items()}

    # Make prediction
    model.eval()
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probabilities = torch.softmax(logits, dim=1)
        prediction = torch.argmax(logits, dim=1).item()

    # Convert probabilities to percentages
    probs = probabilities[0].cpu().numpy() * 100

    # Create results dictionary, keyed by the same labels as label_map
    results = {
        "Prediction": label_map[prediction],
        "Non-toxic": f"{probs[0]:.2f}%",
        "Slightly Toxic": f"{probs[1]:.2f}%",
        "Highly Toxic": f"{probs[2]:.2f}%"
    }

    return results


# Create Gradio interface
demo = gr.Interface(
    fn=predict_toxicity,
    inputs=gr.Textbox(placeholder="Enter text to analyze for toxicity...", lines=5),
    outputs=gr.JSON(),
    title="Text Toxicity Analyzer",
    description="This app analyzes text and classifies it as non-toxic, slightly toxic, or highly toxic.",
    examples=[
        ["This is a wonderful day!"],
        ["I don't really like this product."],
        ["You are an absolute idiot and I hate you."]
    ],
    theme=gr.themes.Base()
)

# Launch the app
demo.launch()