akshit4857 committed on
Commit d916e82 · verified
1 Parent(s): 66b8862

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +368 -226
src/streamlit_app.py CHANGED
@@ -1,10 +1,13 @@
  """
- Truth Detector - Simple & Accurate Edition
  """

  import os
  import io
  import numpy as np
  import streamlit as st
  from transformers import pipeline, logging as hf_logging
  from PIL import Image
@@ -15,277 +18,416 @@ import urllib.parse
  import math
  import warnings

- # --- Setup: Silence the technical noise ---
- warnings.filterwarnings("ignore")
  hf_logging.set_verbosity_error()
  os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
  matplotlib.use('Agg')

  st.set_page_config(
-     page_title="Truth Detector",
-     page_icon="🔍",
      layout="wide",
      initial_sidebar_state="collapsed"
  )

- # ==========================================
- # 🧠 THE AI BRAINS (Expert Configuration)
- # ==========================================
-
- # 1. Bot Detector: Checks if text was written by ChatGPT/AI
- MODEL_FAKE = "openai-community/roberta-base-openai-detector"
-
- # 2. Mood Scanner: Checks if the reviewer is Happy, Sad, or Angry
- MODEL_MOOD = "cardiffnlp/twitter-roberta-base-sentiment-latest"

- # 3. Grammar Checker: Checks if sentences make sense to a human
- MODEL_GRAMMAR = "textattack/roberta-base-CoLA"

- # 4. Image Checker A: High Precision
- MODEL_IMG_A = "dima806/ai_generated_image_detection"
-
- # 5. Image Checker B: High Reliability
- MODEL_IMG_B = "umm-maybe/AI-image-detector"
-
- # ==========================================
-
- # --- Secrets Management ---
- def get_token():
-     if hasattr(st, "secrets") and "HF_TOKEN" in st.secrets:
-         return st.secrets["HF_TOKEN"]
-     return os.environ.get("HF_TOKEN")

- HF_TOKEN = get_token()

- # --- Simple & Clean UI Design ---
  def inject_custom_css():
      st.markdown("""
      <style>
-     /* Force a clean white/light look */
-     .stApp { background-color: #FFFFFF; color: #333333; }
-
-     /* Headers */
-     h1 { color: #FF4B4B; font-weight: 800; text-align: center; }
-     h3 { color: #333333; font-weight: 700; }
-
-     /* Stats Cards */
-     .result-card {
-         background-color: #F8F9FA;
-         border-radius: 15px;
-         padding: 20px;
-         text-align: center;
-         border: 1px solid #E0E0E0;
-         box-shadow: 0 2px 5px rgba(0,0,0,0.05);
-     }
-     .result-value { font-size: 28px; font-weight: 900; color: #333; }
-     .result-label { font-size: 14px; color: #666; text-transform: uppercase; margin-top: 5px; }
-
-     /* Buttons */
-     .stButton>button {
-         border-radius: 50px;
-         width: 100%;
-         background-color: #FF4B4B;
-         color: white;
-         font-weight: bold;
-         border: none;
-         padding: 10px;
-     }
-     .stButton>button:hover { background-color: #D43F3F; color: white; }
      </style>
      """, unsafe_allow_html=True)

- # --- Load Models (The "Squad") ---
  @st.cache_resource(show_spinner=False)
- def load_ai_squad():
-     squad = {}
      try:
-         # Load Text Models
-         squad['fake'] = pipeline("text-classification", model=MODEL_FAKE, token=HF_TOKEN)
-         squad['mood'] = pipeline("sentiment-analysis", model=MODEL_MOOD, tokenizer=MODEL_MOOD, token=HF_TOKEN)
-         squad['grammar'] = pipeline("text-classification", model=MODEL_GRAMMAR, token=HF_TOKEN)
-
-         # Load Image Models
-         try:
-             squad['img_a'] = pipeline("image-classification", model=MODEL_IMG_A, token=HF_TOKEN)
-             squad['img_b'] = pipeline("image-classification", model=MODEL_IMG_B, token=HF_TOKEN)
-             squad['img_status'] = "Double Check Mode (High Accuracy)"
-         except:
-             if 'img_a' not in squad:
-                 squad['img_a'] = pipeline("image-classification", model=MODEL_IMG_A, token=HF_TOKEN)
-             squad['img_status'] = "Single Check Mode (Standard Accuracy)"
-
      except Exception as e:
-         return None, str(e)
-     return squad, None
-
- # --- Logic: Analyze Text ---
- def check_text(text, squad):
-     # 1. Bot Check
-     res_fake = squad['fake'](text[:512])[0]
-     if res_fake['label'] == 'Fake':
-         bot_score = res_fake['score']
-     else:
-         bot_score = 1 - res_fake['score']

-     # 2. Mood Check
-     res_mood = squad['mood'](text[:512])[0]

-     # 3. Grammar Check
-     res_grammar = squad['grammar'](text[:512])[0]
-     # Label 1 = Good Grammar
-     if res_grammar['label'] == 'LABEL_1':
-         grammar_score = res_grammar['score']
-     else:
-         grammar_score = 1 - res_grammar['score']

      return {
-         "bot_score": bot_score * 100,
-         "mood_label": res_mood['label'],
-         "grammar_score": grammar_score * 100
      }

- # --- Logic: Analyze Image ---
- def check_image(img, squad):
-     score_a = 0.0
-     score_b = 0.0
-
-     ai_words = ['fake', 'artificial', 'ai', 'generated']

-     # Check Brain A
-     if 'img_a' in squad:
-         for r in squad['img_a'](img):
-             if any(w in r['label'].lower() for w in ai_words):
-                 score_a = r['score']

-     # Check Brain B
-     if 'img_b' in squad:
-         for r in squad['img_b'](img):
-             if any(w in r['label'].lower() for w in ai_words):
-                 score_b = r['score']
      else:
-         score_b = score_a # Fallback if B is missing
-
-     avg_ai = (score_a + score_b) / 2
-     match_level = 1.0 - abs(score_a - score_b) # 1.0 = They agree perfectly

-     return {"ai_chance": avg_ai * 100, "match": match_level}

- # --- Helpers ---
- def get_image_from_url(url):
      try:
          headers = {'User-Agent': 'Mozilla/5.0'}
-         r = requests.get(url, headers=headers, timeout=5, stream=True)
-         return Image.open(io.BytesIO(r.content)).convert("RGB")
-     except: return None

- # --- MAIN APP UI ---
- def main():
-     inject_custom_css()

-     st.markdown("<h1>🔎 Review & Image Truth Detector</h1>", unsafe_allow_html=True)
-     st.markdown("<p style='text-align: center;'>Use AI to spot Fake Reviews and AI Images instantly.</p>", unsafe_allow_html=True)

-     with st.spinner("🚀 Loading AI Models..."):
-         squad, err = load_ai_squad()

-     if not squad:
-         st.error(f"Error loading models: {err}")
-         st.stop()

-     tab1, tab2 = st.tabs(["📝 Text Check", "📸 Image Check"])

-     # --- TEXT CHECKER ---
      with tab1:
-         col1, col2 = st.columns([2, 1])
          with col1:
-             txt = st.text_area("Paste Review Here:", height=150, placeholder="e.g. This product is amazing!")
          with col2:
-             if st.button("Analyze Text"):
-                 if txt:
-                     res = check_text(txt, squad)
-                     st.session_state['text_res'] = res
-
-         if 'text_res' in st.session_state:
-             res = st.session_state['text_res']
-             st.markdown("---")
-
-             c1, c2, c3 = st.columns(3)
-
-             # Simple Bot Score Card
-             color = "red" if res['bot_score'] > 50 else "green"
-             c1.markdown(f"""
-             <div class="result-card">
-                 <div class="result-value" style="color:{color}">{res['bot_score']:.0f}%</div>
-                 <div class="result-label">Bot Probability</div>
-             </div>""", unsafe_allow_html=True)
-
-             # Grammar Card
-             c2.markdown(f"""
-             <div class="result-card">
-                 <div class="result-value">{res['grammar_score']:.0f}%</div>
-                 <div class="result-label">Grammar Quality</div>
-             </div>""", unsafe_allow_html=True)
-
-             # Mood Card
-             c3.markdown(f"""
-             <div class="result-card">
-                 <div class="result-value">{res['mood_label']}</div>
-                 <div class="result-label">Overall Mood</div>
-             </div>""", unsafe_allow_html=True)
-
-             st.write("")
-
-             # Simple Verdict
-             if res['bot_score'] > 70:
-                 st.error("🚨 **Verdict: FAKE / BOT**")
-                 st.write("This text looks highly suspicious. It matches patterns used by AI generators like ChatGPT.")
-             elif res['bot_score'] > 40:
-                 st.warning("🤔 **Verdict: UNSURE**")
-                 st.write("It has some weird patterns, but could be a unique human writing style.")
-             else:
-                 st.success("✅ **Verdict: REAL HUMAN**")
-                 st.write("The writing style is natural and likely written by a real person.")
-
-     # --- IMAGE CHECKER ---
      with tab2:
-         st.caption(f"System Status: {squad.get('img_status')}")
-         url = st.text_input("Paste Image Link:", placeholder="https://example.com/image.jpg")

-         if st.button("Scan Image"):
-             if url:
-                 with st.spinner("Scanning pixels..."):
-                     img = get_image_from_url(url)
-                     if img:
-                         ic1, ic2 = st.columns([1, 1])
-                         with ic1:
-                             st.image(img, use_column_width=True, caption="Your Image")
-                         with ic2:
-                             data = check_image(img, squad)
-                             ai_score = data['ai_chance']
-
-                             # Simple Results
-                             if data['match'] < 0.6:
-                                 st.warning("🤔 **Confusing Image**")
-                                 st.write("Our models disagree. This happens with heavy filters.")
-                             elif ai_score > 60:
-                                 st.error(f"🤖 **Verdict: AI GENERATED** ({ai_score:.0f}%)")
-                                 st.write("We found digital artifacts common in tools like Midjourney.")
-                             else:
-                                 st.success(f"📸 **Verdict: REAL PHOTO** ({100-ai_score:.0f}%)")
-                                 st.write("This looks like a genuine photo.")
-
-                             st.progress(ai_score/100, text=f"AI Probability: {ai_score:.1f}%")
-
-                             # Google Lens Link
-                             lens = f"https://lens.google.com/uploadbyurl?url={urllib.parse.quote(url)}"
-                             st.markdown(f"""
-                             <br><a href="{lens}" target="_blank" style="text-decoration:none;">
-                             <div style="background:#4285F4;color:white;padding:12px;border-radius:10px;text-align:center;font-weight:bold;">
-                             🌍 Check on Google Lens
-                             </div>
-                             </a>""", unsafe_allow_html=True)
-                     else:
-                         st.error("Invalid Image Link.")

  if __name__ == "__main__":
      main()

  """
+ Fake Review Detector - Streamlit Application
+ Optimized for Hugging Face Spaces deployment
  """

  import os
  import io
  import numpy as np
+ from collections import Counter
+ from typing import Dict, Optional, List, Tuple
  import streamlit as st
  from transformers import pipeline, logging as hf_logging
  from PIL import Image

  import math
  import warnings

+ # -------------------------
+ # Log Suppression
+ # -------------------------
+ # 1. Suppress Python Warnings (Deprecation, UserWarning)
+ warnings.filterwarnings("ignore", category=UserWarning, module="transformers")
+ warnings.filterwarnings("ignore", category=FutureWarning, module="transformers")
+
+ # 2. Suppress Hugging Face Informational Logs (Weights initialization, CPU usage)
  hf_logging.set_verbosity_error()
+
+ # 3. Suppress TensorFlow/PyTorch logs if backend triggers them
  os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+
+ # Set matplotlib backend for server environments
  matplotlib.use('Agg')

+ # -------------------------
+ # Configuration
+ # -------------------------
  st.set_page_config(
+     page_title="Review Validator 🔍",
+     page_icon="🔍",
      layout="wide",
      initial_sidebar_state="collapsed"
  )

+ # Constants
+ FAKE_MODEL_NAME = "akshit4857/autotrain-razz4-h7crd"
+ SENTIMENT_MODEL_NAME = "cardiffnlp/twitter-roberta-base-sentiment-latest"
+ EMOTION_MODEL_NAME = "j-hartmann/emotion-english-distilroberta-base"
+ # Primary Image Model (High Precision)
+ IMAGE_MODEL_PRIMARY = "dima806/ai_generated_image_detection"
+ # Backup Image Model (High Reliability)
+ IMAGE_MODEL_BACKUP = "umm-maybe/AI-image-detector"

+ MAX_TEXT_LENGTH = 5000

+ # -------------------------
+ # Secrets & Environment Management
+ # -------------------------
+ def get_secret(key: str, default: str = None) -> Optional[str]:
+     """
+     Robust secret retrieval.
+     Prioritizes Environment Variables (HF Spaces) to avoid Streamlit secrets file errors.
+     """
+     # 1. Priority: Environment Variables (Hugging Face Secrets)
+     if key in os.environ:
+         return os.environ[key]
+
+     # 2. Fallback: Streamlit Secrets (Local .toml file)
+     try:
+         if hasattr(st, 'secrets') and key in st.secrets:
+             return st.secrets[key]
+     except Exception:
+         # Ignore errors if secrets.toml doesn't exist
+         pass
+
+     return default

+ HF_TOKEN = get_secret("HF_TOKEN")
+ OPENAI_API_KEY = get_secret("OPENAI_API_KEY")

+ # -------------------------
+ # Custom CSS
+ # -------------------------
  def inject_custom_css():
      st.markdown("""
      <style>
+     .stApp { background: linear-gradient(to bottom, #ffffff, #f8f9fa); }
+     h1 { font-family: 'Source Sans Pro', sans-serif; color: #FF4B4B; text-align: center; font-weight: 800; }
+     .stButton>button { border-radius: 50px; border: 2px solid #FF4B4B; background-color: white; color: #FF4B4B; font-weight: bold; transition: all 0.3s ease; }
+     .stButton>button:hover { background-color: #FF4B4B; color: white; transform: scale(1.02); }
+     .stat-card { background-color: white; border-radius: 20px; padding: 15px; box-shadow: 0 4px 15px rgba(0,0,0,0.05); text-align: center; border: 1px solid #f0f0f0; height: 100%; }
+     .stat-value { font-size: 1.8em; font-weight: 900; color: #333; }
+     .stat-label { font-size: 0.8em; color: #888; text-transform: uppercase; letter-spacing: 1px; }
+     .report-box { background-color: #ffffff; padding: 25px; border-radius: 20px; border: 2px dashed #e0e0e0; }
      </style>
      """, unsafe_allow_html=True)

+ # -------------------------
+ # Model Loading (Ensemble)
+ # -------------------------
  @st.cache_resource(show_spinner=False)
+ def load_models() -> Tuple[Dict, List[str]]:
+     """
+     Load all models for the ensemble with individual error handling.
+     Returns: (models_dictionary, list_of_error_messages)
+     """
+     models = {}
+     errors = []
+
+     # 1. Fake Detector (Critical)
      try:
+         models['fake'] = pipeline("text-classification", model=FAKE_MODEL_NAME, token=HF_TOKEN)
      except Exception as e:
+         errors.append(f"Fake Detector: {str(e)}")
+
+     # 2. Sentiment
+     try:
+         models['sentiment'] = pipeline("sentiment-analysis", model=SENTIMENT_MODEL_NAME, tokenizer=SENTIMENT_MODEL_NAME, token=HF_TOKEN)
+     except Exception as e:
+         errors.append(f"Sentiment Model: {str(e)}")

+     # 3. Emotion
+     try:
+         # top_k=None replaces deprecated return_all_scores=True
+         models['emotion'] = pipeline("text-classification", model=EMOTION_MODEL_NAME, top_k=None, token=HF_TOKEN)
+     except Exception as e:
+         errors.append(f"Emotion Model: {str(e)}")
+
+     # 4. Image (With Failover Strategy)
+     models['image_engine'] = "Offline"
+     try:
+         # Try primary precision model first
+         models['img_a'] = pipeline("image-classification", model=IMAGE_MODEL_PRIMARY, token=HF_TOKEN)
+         # Try backup model
+         models['img_b'] = pipeline("image-classification", model=IMAGE_MODEL_BACKUP, token=HF_TOKEN)
+         models['image_engine'] = "Dual-Core"
+     except Exception as e:
+         print(f"Dual engine load failed: {e}")
+         try:
+             if 'img_a' not in models:
+                 models['img_a'] = pipeline("image-classification", model=IMAGE_MODEL_PRIMARY, token=HF_TOKEN)
+             models['image_engine'] = "Single-Core"
+             errors.append("Note: Running in reduced precision mode (one image model failed).")
+         except Exception as e2:
+             models['image_engine'] = "Failed"
+             errors.append(f"Image Checker failed completely: {str(e2)}")
+
+     return models, errors
+
+ # Initialize models
+ inject_custom_css()
+ with st.spinner("🍳 Prepping the Kitchen..."):
+     ensemble, load_errors = load_models()
+
+ # Handle Critical Errors
+ if 'fake' not in ensemble:
+     st.error("❌ Critical Error: Failed to load the core Fake Detection model.")
+     if load_errors:
+         st.error(f"Details: {load_errors}")
+     st.stop()
+
+ if load_errors:
+     with st.expander("⚠️ System Warnings", expanded=False):
+         for err in load_errors:
+             st.warning(err)
+
+ # -------------------------
+ # Advanced Feature Extraction
+ # -------------------------
+ def calculate_complexity_score(text: str) -> float:
+     """Calculate linguistic complexity (0-100)"""
+     words = text.split()
+     if not words: return 0
+     avg_len = sum(len(w) for w in words) / len(words)
+     ttr = len(set(words)) / len(words)
+     score = (avg_len * 5) + (ttr * 50)
+     return min(100, max(0, score))
+
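An illustrative trace of the complexity heuristic added above (not part of the commit; the sample review text is invented):

# Illustration only: calculate_complexity_score on a made-up review.
words = "The pizza was cold".split()               # 4 words
avg_len = sum(len(w) for w in words) / len(words)  # (3+5+3+4)/4 = 3.75
ttr = len(set(words)) / len(words)                 # 4 unique / 4 total = 1.0
score = (avg_len * 5) + (ttr * 50)                 # 18.75 + 50 = 68.75, inside the 0-100 clamp
# The metrics row rounds this to "Vocab 69/100".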
+ def extract_deep_features(text: str, models: dict) -> Dict:
+     """Run multi-model analysis"""
+     sent_label = "Unknown"
+     sent_score = 0.0
+     top_emo = {'label': 'Unknown', 'score': 0.0}
+
+     # 1. Fake Detection
+     fake_res = models['fake'](text[:512])[0]
+     is_fake_prob = fake_res['score'] if fake_res['label'] == 'Fake' else (1 - fake_res['score'])

+     # 2. Sentiment
+     if 'sentiment' in models:
+         try:
+             sent_res = models['sentiment'](text[:512])[0]
+             sent_score = sent_res['score']
+             sent_label = sent_res['label']
+         except Exception: pass
+
+     # 3. Emotion
+     if 'emotion' in models:
+         try:
+             emo_res = models['emotion'](text[:512])[0]
+             top_emo = max(emo_res, key=lambda x: x['score'])
+         except Exception: pass
+
+     complexity = calculate_complexity_score(text)

      return {
+         "fake_probability": is_fake_prob * 100,
+         "sentiment_label": sent_label,
+         "sentiment_confidence": sent_score * 100,
+         "primary_emotion": top_emo['label'],
+         "emotion_confidence": top_emo['score'] * 100,
+         "complexity_score": complexity
      }

+ # -------------------------
+ # AI-Powered Dynamic Explanation
+ # -------------------------
+ def generate_friendly_report(text: str, features: Dict) -> str:
+     """Generates a simple, friendly explanation"""
+     if not OPENAI_API_KEY:
+         return generate_fallback_report(features)
+
+     prompt = (
+         f"Analyze this product/food review. Fake Score: {features['fake_probability']:.1f}%. "
+         f"Vibe: {features['primary_emotion']}. Text: {text[:600]}... "
+         "Explain if it sounds like a real customer or a bot. Be simple and helpful."
+     )

+     try:
+         headers = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"}
+         payload = {
+             "model": "gpt-4o-mini",
+             "messages": [{"role": "user", "content": prompt}],
+             "temperature": 0.5
+         }
+         response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload, timeout=10)
+         return response.json()["choices"][0]["message"]["content"]
+     except:
+         return generate_fallback_report(features)
+
+ def generate_fallback_report(features: Dict) -> str:
+     """Simple rule-based report"""
+     f_prob = features['fake_probability']

+     if f_prob > 50:
+         return f"⚠️ **Suspicious!** There is a {f_prob:.0f}% chance this review was written by a bot or paid service."
      else:
+         return "✅ **Looks Good!** This review reads like a genuine customer experience."
+
+ # -------------------------
+ # Visualization: Radar Chart
+ # -------------------------
+ def create_radar_chart(features: Dict) -> plt.Figure:
+     """Creates a multi-aspect radar chart"""
+     categories = ['Suspiciousness', 'Feelings', 'Drama', 'Simplicity']
+     inv_comp = 100 - features['complexity_score']
+
+     values = [
+         features['fake_probability'],
+         features['sentiment_confidence'],
+         features['emotion_confidence'],
+         inv_comp
+     ]
+
+     # Close the loop
+     values += values[:1]
+     angles = [n / 4 * 2 * math.pi for n in range(4)] + [0]
+
+     fig, ax = plt.subplots(figsize=(4, 4), subplot_kw=dict(polar=True))
+
+     # Styling
+     ax.set_facecolor('#ffffff')
+     plt.gcf().patch.set_facecolor('#ffffff')

+     ax.plot(angles, values, linewidth=2, linestyle='solid', color='#FF4B4B')
+     ax.fill(angles, values, '#FF4B4B', alpha=0.2)
+
+     ax.set_xticks(angles[:-1])
+     ax.set_xticklabels(categories, size=9)
+     ax.set_yticks([])
+     ax.spines['polar'].set_visible(False)
+
+     return fig
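A brief illustration of the radar geometry above (not part of the commit): with four categories, the axes land at 0, π/2, π and 3π/2 radians, and both lists repeat their first element so the polygon closes.

# Illustration only: the four radar axes plus the closing point.
import math
angles = [n / 4 * 2 * math.pi for n in range(4)] + [0]
# -> [0.0, 1.571, 3.142, 4.712, 0.0], i.e. 0°, 90°, 180°, 270°, back to 0°.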

+ # -------------------------
+ # Image Functions
+ # -------------------------
+ def get_image_from_url(url: str) -> Optional[Image.Image]:
      try:
          headers = {'User-Agent': 'Mozilla/5.0'}
+         response = requests.get(url, headers=headers, timeout=10, stream=True)
+         response.raise_for_status()
+         return Image.open(io.BytesIO(response.content)).convert("RGB")
+     except Exception:
+         return None

+ def analyze_image_dual_engine(img, models) -> Dict:
+     score_a_ai = 0.0
+     score_b_ai = 0.0

+     # Engine A
+     if 'img_a' in models:
+         res_a = models['img_a'](img)
+         for r in res_a:
+             if r['label'].lower() in ['fake', 'artificial', 'ai', 'generated']:
+                 score_a_ai = r['score']

+     # Engine B
+     if 'img_b' in models:
+         res_b = models['img_b'](img)
+         for r in res_b:
+             if r['label'].lower() in ['fake', 'artificial', 'ai', 'generated']:
+                 score_b_ai = r['score']
      else:
+         score_b_ai = score_a_ai

+     avg_ai_score = (score_a_ai + score_b_ai) / 2
+     agreement = 1.0 - abs(score_a_ai - score_b_ai)
+
+     return {
+         "avg_ai": avg_ai_score,
+         "avg_real": 1.0 - avg_ai_score,
+         "score_a": score_a_ai,
+         "score_b": score_b_ai,
+         "agreement": agreement
+     }
 
330
+ # -------------------------
331
+ # Main UI
332
+ # -------------------------
333
+ def main():
334
+ st.markdown("""
335
+ <div style='text-align: center; padding-bottom: 20px;'>
336
+ <h1 style='margin:0;'>πŸ” Review & Food Validator</h1>
337
+ <p style='color: #888;'>Is that burger real? Is that review a bot?</p>
338
+ </div>
339
+ """, unsafe_allow_html=True)
340
+
341
+ tab1, tab2 = st.tabs(["πŸ“ Review Text", "🍟 Food/Product Image"])
342
 
343
+ # --- TAB 1: TEXT ---
344
  with tab1:
345
+ col1, col2 = st.columns([3, 1])
346
  with col1:
347
+ txt = st.text_area("Paste Review:", height=120, placeholder="e.g. 'The pizza was cold and...'")
348
  with col2:
349
+ st.info("πŸ’‘ **Tip:** Paste the full text for best results.")
350
+ if st.button("Scan Text", type="primary", use_container_width=True):
351
+ if not txt:
352
+ st.toast("Please paste some text first!")
353
+ st.stop()
354
+
355
+ with st.spinner("Analysing linguistics..."):
356
+ feats = extract_deep_features(txt, ensemble)
357
+ rep = generate_friendly_report(txt, feats)
358
+
359
+ st.markdown("---")
360
+
361
+ # Metrics
362
+ c1, c2, c3, c4 = st.columns(4)
363
+ c1.metric("Fake-o-Meter", f"{feats['fake_probability']:.0f}%")
364
+ c2.metric("Mood", feats['sentiment_label'])
365
+ c3.metric("Vibe", feats['primary_emotion'].title())
366
+ c4.metric("Vocab", f"{feats['complexity_score']:.0f}/100")
367
+
368
+ # Chart & Report
369
+ col_c, col_r = st.columns([1, 2])
370
+ with col_c:
371
+ st.pyplot(create_radar_chart(feats))
372
+ with col_r:
373
+ st.markdown(f"<div class='report-box'>{rep}</div>", unsafe_allow_html=True)
374
+
375
+ # --- TAB 2: IMAGE ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
376
  with tab2:
377
+ st.markdown("### πŸ“Έ Product & Food Verification")
378
+ st.caption("Step 1: Check for AI Generation. Step 2: Check if it's stolen from the web.")
379
+
380
+ url = st.text_input("Paste Image URL (Right click image -> Copy Image Address):")
381
 
382
+ if st.button("πŸ” Validate Image", type="primary"):
383
+ if not url: st.stop()
384
+
385
+ if ensemble['image_engine'] == "Failed":
386
+ st.error("System Offline")
387
+ st.stop()
388
+
389
+ with st.spinner("Forensic Scan in progress..."):
390
+ img = get_image_from_url(url)
391
+ if img:
392
+ c_img, c_data = st.columns([1, 1])
393
+ with c_img:
394
+ st.image(img, use_column_width=True)
395
+
396
+ with c_data:
397
+ # Dual Engine Analysis
398
+ res = analyze_image_dual_engine(img, ensemble)
399
+ ai_p = res['avg_ai'] * 100
400
+ real_p = res['avg_real'] * 100
401
+
402
+ st.subheader("Step 1: AI Detection")
403
+
404
+ # Verdict Logic
405
+ if res['agreement'] < 0.6:
406
+ st.warning("πŸ€” **Uncertain Result**")
407
+ st.markdown("The models disagree. This might be a heavily filtered real photo.")
408
+ elif ai_p > 60:
409
+ st.error(f"πŸ€– **Likely AI Generated** ({ai_p:.0f}%)")
410
+ st.markdown("Visual patterns suggest this is computer-generated.")
411
+ elif real_p > 60:
412
+ st.success(f"βœ… **Likely Real Camera** ({real_p:.0f}%)")
413
+ st.markdown("Visual noise patterns match a real camera sensor.")
414
+ else:
415
+ st.warning("🀷 **Inconclusive**")
416
+
417
+ st.progress(res['avg_ai'], text=f"AI Probability: {ai_p:.1f}%")
418
+
419
+ st.markdown("---")
420
+ st.subheader("Step 2: Internet Check")
421
+ st.markdown("Check if this photo was **stolen** from another website (Recipe blog, Stock photo, etc).")
422
+
423
+ lens = f"https://lens.google.com/uploadbyurl?url={urllib.parse.quote(url)}"
424
+ st.markdown(f"""
425
+ <a href="{lens}" target="_blank" style="display:block;text-align:center;background:#4285F4;color:white;padding:10px;border-radius:8px;text-decoration:none;font-weight:bold;">
426
+ 🌍 Check if image exists online
427
+ </a>
428
+ """, unsafe_allow_html=True)
429
+ else:
430
+ st.error("Could not load image. Check the link!")
431
 
432
  if __name__ == "__main__":
433
  main()