PatoFlamejanteTV committed
Commit 6216139 · verified · 1 Parent(s): 430d0d4

Create app.py

Files changed (1)
  1. app.py +57 -0
app.py ADDED
@@ -0,0 +1,57 @@
+ """
+ app.py
+ A simple Gradio front-end to demo the multi-model safety analyzer.
+ Push to a Hugging Face Space (runtime: python) and ensure required models are accessible.
+ """
+
+ import gradio as gr
+ import json
+ from classifier import analyze_text, MODEL_HANDLES, load_models
+
+ # Ensure models are loaded; load_models() may already run at import time,
+ # so a repeated call is tolerated and failures are swallowed here.
+ try:
+     load_models()
+ except Exception:
+     pass
+
+ def pretty_result(text: str):
+     res = analyze_text(text or "")
+     # Build a human-readable report alongside the raw JSON.
+     out_lines = []
+     out_lines.append("Normalized:\n" + (res.get("normalized") or ""))
+     out_lines.append(f"\nEntropy: {res.get('entropy', 0.0):.2f}\n")
+     if res.get("heuristic_flags"):
+         out_lines.append("Heuristic flags:")
+         for h in res["heuristic_flags"]:
+             out_lines.append(f"- {h.get('type')}: {h.get('explain')}")
+             if "matches" in h:
+                 out_lines.append(f"  matches: {h['matches']}")
+     if res.get("model_flags"):
+         out_lines.append("\nModel flags:")
+         for m in res["model_flags"]:
+             model = m.get("model", "unknown")
+             label = m.get("label", "")
+             score = m.get("score", None)
+             out_lines.append(f"- {m.get('type')} | {model} | {label} | score={score}")
+             out_lines.append(f"  explain: {m.get('explain')}")
+     if not res.get("heuristic_flags") and not res.get("model_flags"):
+         out_lines.append("\nNo flags detected (no guarantees).")
+     out_lines.append("\nNotes: " + res.get("notes", ""))
+     return "\n".join(out_lines), json.dumps(res, indent=2, ensure_ascii=False)
+
+ demo_description = """
+ # Text Safety Analyzer (multi-model)
+ Paste text or a prompt below. The system runs heuristics + multiple models (toxicity/harm + URL detection) and returns
+ explainable flags. This is a detection tool; human review is recommended for any enforcement action.
+ """
+
+ with gr.Blocks() as demo:
+     gr.Markdown(demo_description)
+     txt = gr.Textbox(lines=8, placeholder="Paste text, prompt, or suspicious content here...")
+     analyze_btn = gr.Button("Analyze")
+     out_text = gr.Textbox(lines=14, label="Result (human readable)")
+     out_json = gr.Textbox(lines=20, label="Raw JSON result")
+     analyze_btn.click(fn=pretty_result, inputs=txt, outputs=[out_text, out_json])
+
+ if __name__ == "__main__":
+     demo.launch()
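
Note that the classifier module imported above is not part of this commit. As a rough sketch of what app.py expects (not the real implementation, which presumably wraps the actual toxicity/harm and URL-detection models), a hypothetical stub exposing analyze_text, MODEL_HANDLES, and load_models with the dict keys that pretty_result reads (normalized, entropy, heuristic_flags, model_flags, notes) could look like this:

# classifier.py: hypothetical stub so app.py can run without the real models.
# Only the return shape consumed by pretty_result() matters here.
import math
from collections import Counter

MODEL_HANDLES = {}  # the real module would map model names to loaded pipelines

def load_models():
    # No-op in the stub; the real version would download and load models.
    pass

def _entropy(text: str) -> float:
    # Shannon entropy in bits per character (assumed meaning of "entropy").
    if not text:
        return 0.0
    counts = Counter(text)
    n = len(text)
    return -sum((c / n) * math.log2(c / n) for c in counts.values())

def analyze_text(text: str) -> dict:
    # Keys mirror exactly what app.py reads.
    return {
        "normalized": text.strip(),
        "entropy": _entropy(text),
        "heuristic_flags": [],  # e.g. {"type": ..., "explain": ..., "matches": [...]}
        "model_flags": [],      # e.g. {"type": ..., "model": ..., "label": ..., "score": ..., "explain": ...}
        "notes": "stub classifier: no models loaded",
    }

With a stub like that in place, `python app.py` launches the Gradio UI locally; for a Space, requirements.txt must include at least gradio, plus whatever the real classifier depends on (likely transformers and torch, though that is an assumption).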