akshit4857 committed
Commit dab7ab2 · verified · 1 Parent(s): aeb6f67

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +299 -364
src/streamlit_app.py CHANGED
@@ -1,13 +1,11 @@
1
  """
2
- Fake Review Detector - Streamlit Application
3
- Optimized for Hugging Face Spaces deployment
4
  """
5
 
6
  import os
7
  import io
 
8
  import numpy as np
9
- from collections import Counter
10
- from typing import Dict, Optional, List, Tuple
11
  import streamlit as st
12
  from transformers import pipeline, logging as hf_logging
13
  from PIL import Image
@@ -18,416 +16,353 @@ import urllib.parse
18
  import math
19
  import warnings
20
 
21
- # -------------------------
22
- # Log Suppression
23
- # -------------------------
24
- # 1. Suppress Python Warnings (Deprecation, UserWarning)
25
- warnings.filterwarnings("ignore", category=UserWarning, module="transformers")
26
- warnings.filterwarnings("ignore", category=FutureWarning, module="transformers")
27
-
28
- # 2. Suppress Hugging Face Informational Logs (Weights initialization, CPU usage)
29
  hf_logging.set_verbosity_error()
30
-
31
- # 3. Suppress TensorFlow/PyTorch logs if backend triggers them
32
  os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
33
-
34
- # Set matplotlib backend for server environments
35
  matplotlib.use('Agg')
36
 
37
- # -------------------------
38
- # Configuration
39
- # -------------------------
40
  st.set_page_config(
41
- page_title="Review Validator ๐Ÿ”",
42
- page_icon="๐Ÿ”",
43
  layout="wide",
44
  initial_sidebar_state="collapsed"
45
  )
46
 
47
- # Constants
48
- FAKE_MODEL_NAME = "akshit4857/autotrain-razz4-h7crd"
49
- SENTIMENT_MODEL_NAME = "cardiffnlp/twitter-roberta-base-sentiment-latest"
50
- EMOTION_MODEL_NAME = "j-hartmann/emotion-english-distilroberta-base"
51
- # Primary Image Model (High Precision)
52
- IMAGE_MODEL_PRIMARY = "mikedata/real_vs_fake_image_model_vit_base"
53
- # Backup Image Model (High Reliability)
54
- IMAGE_MODEL_BACKUP = "umm-maybe/AI-image-detector"
55
 
56
- MAX_TEXT_LENGTH = 5000
 
57
 
58
- # -------------------------
59
- # Secrets & Environment Management
60
- # -------------------------
61
- def get_secret(key: str, default: str = None) -> Optional[str]:
62
- """
63
- Robust secret retrieval.
64
- Prioritizes Environment Variables (HF Spaces) to avoid Streamlit secrets file errors.
65
- """
66
- # 1. Priority: Environment Variables (Hugging Face Secrets)
67
- if key in os.environ:
68
- return os.environ[key]
69
-
70
- # 2. Fallback: Streamlit Secrets (Local .toml file)
71
- try:
72
- if hasattr(st, 'secrets') and key in st.secrets:
73
- return st.secrets[key]
74
- except Exception:
75
- # Ignore errors if secrets.toml doesn't exist
76
- pass
77
-
78
- return default
79
 
80
- HF_TOKEN = get_secret("HF_TOKEN")
81
- OPENAI_API_KEY = get_secret("OPENAI_API_KEY")
82
 
83
- # -------------------------
84
- # Custom CSS
85
- # -------------------------
86
  def inject_custom_css():
87
  st.markdown("""
88
  <style>
89
- .stApp { background: linear-gradient(to bottom, #ffffff, #f8f9fa); }
90
- h1 { font-family: 'Source Sans Pro', sans-serif; color: #FF4B4B; text-align: center; font-weight: 800; }
91
- .stButton>button { border-radius: 50px; border: 2px solid #FF4B4B; background-color: white; color: #FF4B4B; font-weight: bold; transition: all 0.3s ease; }
92
- .stButton>button:hover { background-color: #FF4B4B; color: white; transform: scale(1.02); }
93
- .stat-card { background-color: white; border-radius: 20px; padding: 15px; box-shadow: 0 4px 15px rgba(0,0,0,0.05); text-align: center; border: 1px solid #f0f0f0; height: 100%; }
94
- .stat-value { font-size: 1.8em; font-weight: 900; color: #333; }
95
- .stat-label { font-size: 0.8em; color: #888; text-transform: uppercase; letter-spacing: 1px; }
96
- .report-box { background-color: #ffffff; padding: 25px; border-radius: 20px; border: 2px dashed #e0e0e0; }
97
  </style>
98
  """, unsafe_allow_html=True)
99
 
100
- # -------------------------
101
- # Model Loading (Ensemble)
102
- # -------------------------
103
  @st.cache_resource(show_spinner=False)
104
- def load_models() -> Tuple[Dict, List[str]]:
105
- """
106
- Load all models for the ensemble with individual error handling.
107
- Returns: (models_dictionary, list_of_error_messages)
108
- """
109
- models = {}
110
- errors = []
111
-
112
- # 1. Fake Detector (Critical)
113
- try:
114
- models['fake'] = pipeline("text-classification", model=FAKE_MODEL_NAME, token=HF_TOKEN)
115
- except Exception as e:
116
- errors.append(f"Fake Detector: {str(e)}")
117
-
118
- # 2. Sentiment
119
  try:
120
- models['sentiment'] = pipeline("sentiment-analysis", model=SENTIMENT_MODEL_NAME, tokenizer=SENTIMENT_MODEL_NAME, token=HF_TOKEN)
121
- except Exception as e:
122
- errors.append(f"Sentiment Model: {str(e)}")
123
-
124
- # 3. Emotion
125
- try:
126
- # top_k=None replaces deprecated return_all_scores=True
127
- models['emotion'] = pipeline("text-classification", model=EMOTION_MODEL_NAME, top_k=None, token=HF_TOKEN)
128
- except Exception as e:
129
- errors.append(f"Emotion Model: {str(e)}")
130
-
131
- # 4. Image (With Failover Strategy)
132
- models['image_engine'] = "Offline"
133
- try:
134
- # Try primary precision model first
135
- models['img_a'] = pipeline("image-classification", model=IMAGE_MODEL_PRIMARY, token=HF_TOKEN)
136
- # Try backup model
137
- models['img_b'] = pipeline("image-classification", model=IMAGE_MODEL_BACKUP, token=HF_TOKEN)
138
- models['image_engine'] = "Dual-Core"
139
- except Exception as e:
140
- print(f"Dual engine load failed: {e}")
141
  try:
142
- if 'img_a' not in models:
143
- models['img_a'] = pipeline("image-classification", model=IMAGE_MODEL_PRIMARY, token=HF_TOKEN)
144
- models['image_engine'] = "Single-Core"
145
- errors.append("Note: Running in reduced precision mode (one image model failed).")
146
- except Exception as e2:
147
- models['image_engine'] = "Failed"
148
- errors.append(f"Image Checker failed completely: {str(e2)}")
149
-
150
- return models, errors
151
-
152
- # Initialize models
153
- inject_custom_css()
154
- with st.spinner("๐Ÿณ Prepping the Kitchen..."):
155
- ensemble, load_errors = load_models()
156
-
157
- # Handle Critical Errors
158
- if 'fake' not in ensemble:
159
- st.error("โŒ Critical Error: Failed to load the core Fake Detection model.")
160
- if load_errors:
161
- st.error(f"Details: {load_errors}")
162
- st.stop()
163
-
164
- if load_errors:
165
- with st.expander("โš ๏ธ System Warnings", expanded=False):
166
- for err in load_errors:
167
- st.warning(err)
168
-
169
- # -------------------------
170
- # Advanced Feature Extraction
171
- # -------------------------
172
- def calculate_complexity_score(text: str) -> float:
173
- """Calculate linguistic complexity (0-100)"""
174
- words = text.split()
175
- if not words: return 0
176
- avg_len = sum(len(w) for w in words) / len(words)
177
- ttr = len(set(words)) / len(words)
178
- score = (avg_len * 5) + (ttr * 50)
179
- return min(100, max(0, score))
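For reference, the heuristic being removed here scores a review by mixing average word length with type-token ratio (vocabulary variety), capped to 0-100. A minimal standalone sketch with a made-up sample sentence (weights copied from the lines above):

```python
# Sketch of the removed complexity heuristic; the weights (5 and 50) are copied from above.
def complexity(text: str) -> float:
    words = text.split()
    if not words:
        return 0.0
    avg_len = sum(len(w) for w in words) / len(words)  # average word length
    ttr = len(set(words)) / len(words)                 # type-token ratio
    return min(100.0, max(0.0, avg_len * 5 + ttr * 50))

# Worked numbers: avg_len = 5.0 and ttr = 0.8 give 5*5 + 0.8*50 = 65.
print(complexity("The pizza arrived cold and the delivery took almost two hours"))
```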
180
 
181
- def extract_deep_features(text: str, models: dict) -> Dict:
182
- """Run multi-model analysis"""
183
- sent_label = "Unknown"
184
- sent_score = 0.0
185
- top_emo = {'label': 'Unknown', 'score': 0.0}
186
 
187
- # 1. Fake Detection
188
- fake_res = models['fake'](text[:512])[0]
189
- is_fake_prob = fake_res['score'] if fake_res['label'] == 'Fake' else (1 - fake_res['score'])
190
-
191
- # 2. Sentiment
192
- if 'sentiment' in models:
193
- try:
194
- sent_res = models['sentiment'](text[:512])[0]
195
- sent_score = sent_res['score']
196
- sent_label = sent_res['label']
197
- except Exception: pass
198
-
199
- # 3. Emotion
200
- if 'emotion' in models:
201
- try:
202
- emo_res = models['emotion'](text[:512])[0]
203
- top_emo = max(emo_res, key=lambda x: x['score'])
204
- except Exception: pass
205
 
206
- complexity = calculate_complexity_score(text)
 
 
207
 
208
  return {
209
- "fake_probability": is_fake_prob * 100,
210
- "sentiment_label": sent_label,
211
- "sentiment_confidence": sent_score * 100,
212
- "primary_emotion": top_emo['label'],
213
- "emotion_confidence": top_emo['score'] * 100,
214
- "complexity_score": complexity
215
  }
216
 
217
- # -------------------------
218
- # AI-Powered Dynamic Explanation
219
- # -------------------------
220
- def generate_friendly_report(text: str, features: Dict) -> str:
221
- """Generates a simple, friendly explanation"""
222
- if not OPENAI_API_KEY:
223
- return generate_fallback_report(features)
224
-
225
- prompt = (
226
- f"Analyze this product/food review. Fake Score: {features['fake_probability']:.1f}%. "
227
- f"Vibe: {features['primary_emotion']}. Text: {text[:600]}... "
228
- "Explain if it sounds like a real customer or a bot. Be simple and helpful."
229
- )
230
-
231
- try:
232
- headers = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"}
233
- payload = {
234
- "model": "gpt-4o-mini",
235
- "messages": [{"role": "user", "content": prompt}],
236
- "temperature": 0.5
237
- }
238
- response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload, timeout=10)
239
- return response.json()["choices"][0]["message"]["content"]
240
- except:
241
- return generate_fallback_report(features)
242
-
243
- def generate_fallback_report(features: Dict) -> str:
244
- """Simple rule-based report"""
245
- f_prob = features['fake_probability']
246
 
247
- if f_prob > 50:
248
- return f"โš ๏ธ **Suspicious!** There is a {f_prob:.0f}% chance this review was written by a bot or paid service."
249
- else:
250
- return "โœ… **Looks Good!** This review reads like a genuine customer experience."
251
-
252
- # -------------------------
253
- # Visualization: Radar Chart
254
- # -------------------------
255
- def create_radar_chart(features: Dict) -> plt.Figure:
256
- """Creates a multi-aspect radar chart"""
257
- categories = ['Suspiciousness', 'Feelings', 'Drama', 'Simplicity']
258
- inv_comp = 100 - features['complexity_score']
259
-
260
- values = [
261
- features['fake_probability'],
262
- features['sentiment_confidence'],
263
- features['emotion_confidence'],
264
- inv_comp
265
- ]
266
-
267
- # Close the loop
268
- values += values[:1]
269
- angles = [n / 4 * 2 * math.pi for n in range(4)] + [0]
270
-
271
- fig, ax = plt.subplots(figsize=(4, 4), subplot_kw=dict(polar=True))
272
 
273
- # Styling
274
- ax.set_facecolor('#ffffff')
275
- plt.gcf().patch.set_facecolor('#ffffff')
276
-
277
- ax.plot(angles, values, linewidth=2, linestyle='solid', color='#FF4B4B')
278
- ax.fill(angles, values, '#FF4B4B', alpha=0.2)
279
-
280
- ax.set_xticks(angles[:-1])
281
- ax.set_xticklabels(categories, size=9)
282
- ax.set_yticks([])
283
- ax.spines['polar'].set_visible(False)
284
 
285
- return fig
286
 
287
- # -------------------------
288
- # Image Functions
289
- # -------------------------
290
- def get_image_from_url(url: str) -> Optional[Image.Image]:
291
  try:
292
  headers = {'User-Agent': 'Mozilla/5.0'}
293
- response = requests.get(url, headers=headers, timeout=10, stream=True)
294
- response.raise_for_status()
295
- return Image.open(io.BytesIO(response.content)).convert("RGB")
296
- except Exception:
297
- return None
298
-
299
- def analyze_image_dual_engine(img, models) -> Dict:
300
- score_a_ai = 0.0
301
- score_b_ai = 0.0
302
-
303
- # Engine A
304
- if 'img_a' in models:
305
- res_a = models['img_a'](img)
306
- for r in res_a:
307
- if r['label'].lower() in ['fake', 'artificial', 'ai', 'generated']:
308
- score_a_ai = r['score']
309
-
310
- # Engine B
311
- if 'img_b' in models:
312
- res_b = models['img_b'](img)
313
- for r in res_b:
314
- if r['label'].lower() in ['fake', 'artificial', 'ai', 'generated']:
315
- score_b_ai = r['score']
316
- else:
317
- score_b_ai = score_a_ai
318
-
319
- avg_ai_score = (score_a_ai + score_b_ai) / 2
320
- agreement = 1.0 - abs(score_a_ai - score_b_ai)
321
 
322
- return {
323
- "avg_ai": avg_ai_score,
324
- "avg_real": 1.0 - avg_ai_score,
325
- "score_a": score_a_ai,
326
- "score_b": score_b_ai,
327
- "agreement": agreement
328
- }
 
329
 
330
- # -------------------------
331
- # Main UI
332
- # -------------------------
333
- def main():
334
  st.markdown("""
335
- <div style='text-align: center; padding-bottom: 20px;'>
336
- <h1 style='margin:0;'>🔍 Review & Food Validator</h1>
337
- <p style='color: #888;'>Is that burger real? Is that review a bot?</p>
338
- </div>
339
  """, unsafe_allow_html=True)
340
 
341
- tab1, tab2 = st.tabs(["๐Ÿ“ Review Text", "๐ŸŸ Food/Product Image"])
342
-
343
- # --- TAB 1: TEXT ---
344
  with tab1:
345
- col1, col2 = st.columns([3, 1])
346
  with col1:
347
- txt = st.text_area("Paste Review:", height=120, placeholder="e.g. 'The pizza was cold and...'")
348
  with col2:
349
- st.info("๐Ÿ’ก **Tip:** Paste the full text for best results.")
350
- if st.button("Scan Text", type="primary", use_container_width=True):
351
- if not txt:
352
- st.toast("Please paste some text first!")
353
- st.stop()
354
-
355
- with st.spinner("Analysing linguistics..."):
356
- feats = extract_deep_features(txt, ensemble)
357
- rep = generate_friendly_report(txt, feats)
358
-
359
- st.markdown("---")
360
-
361
- # Metrics
362
- c1, c2, c3, c4 = st.columns(4)
363
- c1.metric("Fake-o-Meter", f"{feats['fake_probability']:.0f}%")
364
- c2.metric("Mood", feats['sentiment_label'])
365
- c3.metric("Vibe", feats['primary_emotion'].title())
366
- c4.metric("Vocab", f"{feats['complexity_score']:.0f}/100")
367
-
368
- # Chart & Report
369
- col_c, col_r = st.columns([1, 2])
370
- with col_c:
371
- st.pyplot(create_radar_chart(feats))
372
- with col_r:
373
- st.markdown(f"<div class='report-box'>{rep}</div>", unsafe_allow_html=True)
374
 
375
- # --- TAB 2: IMAGE ---
376
  with tab2:
377
- st.markdown("### ๐Ÿ“ธ Product & Food Verification")
378
- st.caption("Step 1: Check for AI Generation. Step 2: Check if it's stolen from the web.")
379
-
380
- url = st.text_input("Paste Image URL (Right click image -> Copy Image Address):")
381
 
382
- if st.button("๐Ÿ” Validate Image", type="primary"):
383
- if not url: st.stop()
 
384
 
385
- if ensemble['image_engine'] == "Failed":
386
- st.error("System Offline")
387
- st.stop()
388
-
389
- with st.spinner("Forensic Scan in progress..."):
390
- img = get_image_from_url(url)
391
  if img:
392
- c_img, c_data = st.columns([1, 1])
393
- with c_img:
394
- st.image(img, use_column_width=True)
395
-
396
- with c_data:
397
- # Dual Engine Analysis
398
- res = analyze_image_dual_engine(img, ensemble)
399
- ai_p = res['avg_ai'] * 100
400
- real_p = res['avg_real'] * 100
401
-
402
- st.subheader("Step 1: AI Detection")
403
-
404
- # Verdict Logic
405
- if res['agreement'] < 0.6:
406
- st.warning("๐Ÿค” **Uncertain Result**")
407
- st.markdown("The models disagree. This might be a heavily filtered real photo.")
408
- elif ai_p > 60:
409
- st.error(f"๐Ÿค– **Likely AI Generated** ({ai_p:.0f}%)")
410
- st.markdown("Visual patterns suggest this is computer-generated.")
411
- elif real_p > 60:
412
- st.success(f"โœ… **Likely Real Camera** ({real_p:.0f}%)")
413
- st.markdown("Visual noise patterns match a real camera sensor.")
414
- else:
415
- st.warning("๐Ÿคท **Inconclusive**")
416
 
417
- st.progress(res['avg_ai'], text=f"AI Probability: {ai_p:.1f}%")
418
-
419
- st.markdown("---")
420
- st.subheader("Step 2: Internet Check")
421
- st.markdown("Check if this photo was **stolen** from another website (Recipe blog, Stock photo, etc).")
422
-
423
- lens = f"https://lens.google.com/uploadbyurl?url={urllib.parse.quote(url)}"
424
- st.markdown(f"""
425
- <a href="{lens}" target="_blank" style="display:block;text-align:center;background:#4285F4;color:white;padding:10px;border-radius:8px;text-decoration:none;font-weight:bold;">
426
- ๐ŸŒ Check if image exists online
427
- </a>
428
- """, unsafe_allow_html=True)
 
 
 
429
  else:
430
- st.error("Could not load image. Check the link!")
431
 
432
  if __name__ == "__main__":
433
  main()
 
1
  """
2
+ Review Validator - Final Professional Edition
 
3
  """
4
 
5
  import os
6
  import io
7
+ import time
8
  import numpy as np
 
 
9
  import streamlit as st
10
  from transformers import pipeline, logging as hf_logging
11
  from PIL import Image
 
16
  import math
17
  import warnings
18
 
19
+ # --- Setup: Silence the technical noise ---
20
+ warnings.filterwarnings("ignore")
21
  hf_logging.set_verbosity_error()
 
 
22
  os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
 
 
23
  matplotlib.use('Agg')
24
 
 
 
 
25
  st.set_page_config(
26
+ page_title="Review Validator",
27
+ page_icon="๐Ÿ›ก๏ธ",
28
  layout="wide",
29
  initial_sidebar_state="collapsed"
30
  )
31
 
32
+ # ==========================================
33
+ # 🧠 THE AI BRAINS (High Precision Models)
34
+ # ==========================================
35
 
36
+ # 1. Bot Detector: The gold standard for catching GPT-written text
37
+ MODEL_FAKE = "fakespot-ai/roberta-base-ai-text-detection-v1"
38
 
39
+ # 2. Mood Scanner: Checks detailed sentiment
40
+ MODEL_MOOD = "cardiffnlp/twitter-roberta-base-sentiment-latest"
41
 
42
+ # 3. Grammar Checker: Catches unnatural "perfect" bot grammar
43
+ MODEL_GRAMMAR = "textattack/roberta-base-CoLA"
44
 
45
+ # 4. Image Checker A: High Precision (Strict)
46
+ MODEL_IMG_A = "dima806/ai_vs_real_image_detection"
47
+
48
+ # 5. Image Checker B: High Reliability (Broad)
49
+ MODEL_IMG_B = "umm-maybe/AI-image-detector"
50
+
51
+ # ==========================================
52
+
53
+ # --- Secrets Management ---
54
+ def get_token():
55
+ if key := os.environ.get("HF_TOKEN"): return key
56
+ if hasattr(st, "secrets") and "HF_TOKEN" in st.secrets: return st.secrets["HF_TOKEN"]
57
+ return None
58
+
59
+ HF_TOKEN = get_token()
60
+ OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
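One caveat with the new get_token: on a deployment without a secrets.toml, even checking "HF_TOKEN" in st.secrets can raise in some Streamlit versions, which is exactly what the removed get_secret guarded against with a try/except. A defensive variant might look like this (a sketch, not the committed implementation):

```python
import os
import streamlit as st

def get_token_safe(key: str = "HF_TOKEN"):
    """Environment first (Hugging Face Spaces secrets), then st.secrets; never raise if secrets.toml is missing."""
    if value := os.environ.get(key):
        return value
    try:
        return st.secrets[key]   # may raise when no secrets file exists or the key is absent
    except Exception:
        return None
```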
61
+
62
+ # --- Custom CSS ---
63
  def inject_custom_css():
64
  st.markdown("""
65
  <style>
66
+ /* Modern Clean Look */
67
+ .stApp { background-color: #FFFFFF; color: #333333; font-family: 'Helvetica Neue', sans-serif; }
68
+
69
+ /* Headings */
70
+ h1 { color: #2C3E50; font-weight: 800; }
71
+ h2 { color: #34495E; font-weight: 600; }
72
+
73
+ /* Hero Section */
74
+ .hero-box {
75
+ padding: 40px;
76
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
77
+ border-radius: 20px;
78
+ color: white;
79
+ text-align: center;
80
+ margin-bottom: 30px;
81
+ }
82
+ .hero-title { font-size: 3rem; font-weight: bold; margin-bottom: 10px; }
83
+ .hero-subtitle { font-size: 1.2rem; opacity: 0.9; }
84
+
85
+ /* Feature Cards */
86
+ .feature-card {
87
+ background: #F8F9FA;
88
+ padding: 20px;
89
+ border-radius: 15px;
90
+ border: 1px solid #EEEEEE;
91
+ text-align: center;
92
+ transition: transform 0.2s;
93
+ }
94
+ .feature-card:hover { transform: translateY(-5px); border-color: #764ba2; }
95
+ .emoji-icon { font-size: 3rem; margin-bottom: 10px; display: block; }
96
+
97
+ /* Result Stats */
98
+ .stat-box {
99
+ text-align: center;
100
+ padding: 15px;
101
+ border-radius: 12px;
102
+ background: white;
103
+ box-shadow: 0 4px 6px rgba(0,0,0,0.05);
104
+ border: 1px solid #EEE;
105
+ }
106
+ .stat-num { font-size: 24px; font-weight: 900; color: #333; }
107
+ .stat-txt { font-size: 12px; text-transform: uppercase; color: #777; letter-spacing: 1px; }
108
+
109
+ /* Custom Button */
110
+ .stButton>button {
111
+ border-radius: 30px;
112
+ font-weight: bold;
113
+ border: none;
114
+ padding: 0.5rem 2rem;
115
+ transition: all 0.3s;
116
+ }
117
  </style>
118
  """, unsafe_allow_html=True)
119
 
120
+ # --- Load Models ---
 
 
121
  @st.cache_resource(show_spinner=False)
122
+ def load_ai_squad():
123
+ squad = {}
124
  try:
125
+ squad['fake'] = pipeline("text-classification", model=MODEL_FAKE, token=HF_TOKEN)
126
+ squad['mood'] = pipeline("sentiment-analysis", model=MODEL_MOOD, tokenizer=MODEL_MOOD, token=HF_TOKEN)
127
+ squad['grammar'] = pipeline("text-classification", model=MODEL_GRAMMAR, token=HF_TOKEN)
128
+
129
  try:
130
+ squad['img_a'] = pipeline("image-classification", model=MODEL_IMG_A, token=HF_TOKEN)
131
+ squad['img_b'] = pipeline("image-classification", model=MODEL_IMG_B, token=HF_TOKEN)
132
+ squad['img_status'] = "Active"
133
+ except:
134
+ if 'img_a' not in squad:
135
+ squad['img_a'] = pipeline("image-classification", model=MODEL_IMG_A, token=HF_TOKEN)
136
+ squad['img_status'] = "Partial"
137
+
138
+ except Exception as e:
139
+ return None, str(e)
140
+ return squad, None
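Note that the image fallback retries MODEL_IMG_A inside the except block, so if that checkpoint itself is the one that failed, the outer except returns None and the already-loaded text models are discarded with it. A helper that tries each candidate in turn keeps the failover intent explicit; a sketch, assuming either checkpoint is acceptable on its own:

```python
from transformers import pipeline

def load_first_available(task, model_ids, token=None):
    """Return a pipeline for the first model id that loads, or None if every candidate fails."""
    for model_id in model_ids:
        try:
            return pipeline(task, model=model_id, token=token)
        except Exception as err:
            print(f"Could not load {model_id}: {err}")
    return None

# e.g. image_checker = load_first_available(
#     "image-classification",
#     ["dima806/ai_vs_real_image_detection", "umm-maybe/AI-image-detector"],
# )
```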
141
 
142
+ # --- Logic: Analyze Text ---
143
+ def check_text(text, squad):
144
+ # 1. Bot Check
145
+ res_fake = squad['fake'](text[:512])[0]
146
+ bot_score = res_fake['score'] if res_fake['label'] == 'Fake' else (1 - res_fake['score'])
147
 
148
+ # 2. Mood Check
149
+ res_mood = squad['mood'](text[:512])[0]
150
 
151
+ # 3. Grammar Check
152
+ res_grammar = squad['grammar'](text[:512])[0]
153
+ grammar_score = res_grammar['score'] if res_grammar['label'] == 'LABEL_1' else (1 - res_grammar['score'])
154
 
155
  return {
156
+ "bot_score": bot_score * 100,
157
+ "mood_label": res_mood['label'],
158
+ "grammar_score": grammar_score * 100
159
  }
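The label strings compared above ('Fake' for the detector, 'LABEL_1' for the CoLA grammar model) come from each checkpoint's id2label mapping, so they silently break if a model is swapped. A hedged sketch of a label-aware lookup (the label set is illustrative; confirm it against id2label first):

```python
from transformers import pipeline

detector = pipeline("text-classification", model="fakespot-ai/roberta-base-ai-text-detection-v1")
print(detector.model.config.id2label)      # exact label names vary per checkpoint

FAKE_LABELS = {"fake", "ai", "generated"}  # adjust after inspecting id2label above

def bot_probability(result: dict) -> float:
    """Fold whichever label the model reports into a single 'written by a bot' probability."""
    label, score = result["label"].lower(), result["score"]
    return score if label in FAKE_LABELS else 1.0 - score

print(bot_probability(detector("Great product, works exactly as described!")[0]))
```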
160
 
161
+ # --- Logic: Analyze Image ---
162
+ def check_image(img, squad):
163
+ score_a = 0.0
164
+ score_b = 0.0
165
+ ai_words = ['fake', 'artificial', 'ai', 'generated']
166
 
167
+ if 'img_a' in squad:
168
+ for r in squad['img_a'](img):
169
+ if any(w in r['label'].lower() for w in ai_words): score_a = r['score']
170
 
171
+ if 'img_b' in squad:
172
+ for r in squad['img_b'](img):
173
+ if any(w in r['label'].lower() for w in ai_words): score_b = r['score']
174
+ else: score_b = score_a
175
+
176
+ avg_ai = (score_a + score_b) / 2
177
+ match_level = 1.0 - abs(score_a - score_b)
178
 
179
+ return {"ai_chance": avg_ai * 100, "match": match_level}
180
 
181
+ def get_image_from_url(url):
182
  try:
183
  headers = {'User-Agent': 'Mozilla/5.0'}
184
+ r = requests.get(url, headers=headers, timeout=5, stream=True)
185
+ return Image.open(io.BytesIO(r.content)).convert("RGB")
186
+ except: return None
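The fetch above swallows every error and no longer checks the HTTP status, so a 404 error page could be handed straight to PIL; the removed version called raise_for_status(). A slightly more defensive sketch with the same 5-second timeout:

```python
import io
import requests
from PIL import Image

def fetch_image(url: str):
    """Download an image URL and return a PIL RGB image, or None on any failure."""
    try:
        resp = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=5)
        resp.raise_for_status()   # surface 404s/403s instead of trying to decode an error page
        return Image.open(io.BytesIO(resp.content)).convert("RGB")
    except Exception:
        return None
```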
187
+
188
+ # --- Plotting ---
189
+ def simple_chart(stats):
190
+ labels = ['Bot Chance', 'Grammar', 'Mood']
191
+ values = [stats['bot_score'], stats['grammar_score'], 50]
192
+ values += values[:1]
193
+ angles = [n / 3 * 2 * math.pi for n in range(3)] + [0]
194
 
195
+ fig, ax = plt.subplots(figsize=(3, 3), subplot_kw=dict(polar=True))
196
+ ax.set_facecolor('white')
197
+ plt.gcf().patch.set_facecolor('white')
198
+ ax.plot(angles, values, color='#764ba2', linewidth=2)
199
+ ax.fill(angles, values, color='#764ba2', alpha=0.2)
200
+ ax.set_xticks(angles[:-1]); ax.set_xticklabels(labels, size=8)
201
+ ax.set_yticks([]); ax.spines['polar'].set_visible(False)
202
+ return fig
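Two details worth noting in this chart: the 'Mood' axis is pinned at 50 because the mood result is a categorical label rather than a score, and the polygon is closed by repeating the first value while appending angle 0 (which equals the first spoke's angle). The closing step in isolation, with hypothetical values:

```python
import math

labels = ['Bot Chance', 'Grammar', 'Mood']
values = [72.0, 88.0, 50.0]                       # hypothetical stats; 'Mood' is fixed at 50 above
angles = [n / len(labels) * 2 * math.pi for n in range(len(labels))]

# Close the polygon so plot()/fill() return to the starting spoke.
values_closed = values + values[:1]
angles_closed = angles + angles[:1]               # same as "+ [0]" above, since angles[0] == 0
print(list(zip(angles_closed, values_closed)))
```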
203
 
204
+ # --- PAGES ---
205
+
206
+ def landing_page():
 
207
  st.markdown("""
208
+ <div class="hero-box">
209
+ <div class="hero-title">๐Ÿ›ก๏ธ Review Validator</div>
210
+ <div class="hero-subtitle">We check if reviews are Real or Fake. We check if product photos are Real or AI.</div>
211
+ </div>
212
  """, unsafe_allow_html=True)
213
+
214
+ c1, c2, c3 = st.columns(3)
215
+ with c1:
216
+ st.markdown("""
217
+ <div class="feature-card">
218
+ <span class="emoji-icon">๐Ÿค–</span>
219
+ <h3>Bot Buster</h3>
220
+ <p>We use AI to catch other AI! If a robot wrote the review, we will know.</p>
221
+ </div>
222
+ """, unsafe_allow_html=True)
223
+ with c2:
224
+ st.markdown("""
225
+ <div class="feature-card">
226
+ <span class="emoji-icon">๐Ÿ“ธ</span>
227
+ <h3>Fake Photo Finder</h3>
228
+ <p>Is that burger real or drawn by a computer? We check the pixels.</p>
229
+ </div>
230
+ """, unsafe_allow_html=True)
231
+ with c3:
232
+ st.markdown("""
233
+ <div class="feature-card">
234
+ <span class="emoji-icon">๐Ÿ‘ฎ</span>
235
+ <h3>Simple & Fast</h3>
236
+ <p>No complex words. Just Red (Bad) or Green (Good). Easy for everyone.</p>
237
+ </div>
238
+ """, unsafe_allow_html=True)
239
+
240
+ st.write("")
241
+ st.write("")
242
+ col1, col2, col3 = st.columns([1, 2, 1])
243
+ with col2:
244
+ if st.button("๐Ÿš€ START CHECKING REVIEWS NOW", type="primary", use_container_width=True):
245
+ st.session_state['page'] = 'detector'
246
+ st.rerun()
247
+
248
+ def detector_page(squad):
249
+ # Header & Selector
250
+ c1, c2 = st.columns([3, 1])
251
+ with c1:
252
+ st.markdown("### ๐Ÿ›’ Select the Website")
253
+ platform = st.selectbox("Where is this review from?", ["Amazon", "Flipkart", "Zomato", "Swiggy", "Myntra", "Other"], label_visibility="collapsed")
254
+ with c2:
255
+ if st.button("โฌ…๏ธ Back Home"):
256
+ st.session_state['page'] = 'landing'
257
+ st.rerun()
258
+
259
+ st.divider()
260
+
261
+ # Main Tabs
262
+ tab1, tab2 = st.tabs(["๐Ÿ“ Check Review Text", "๐Ÿ“ธ Check Product Image"])
263
 
264
+ # --- TEXT ---
 
 
265
  with tab1:
266
+ col1, col2 = st.columns([2, 1])
267
  with col1:
268
+ txt = st.text_area("Paste Review Here:", height=150, placeholder="Example: I ordered this yesterday and it is amazing...")
269
  with col2:
270
+ st.info("๐Ÿ’ก Tip: Paste the full review for the best result.")
271
+ if st.button("Analyze Text", type="primary", use_container_width=True):
272
+ if txt:
273
+ res = check_text(txt, squad)
274
+ st.session_state['text_res'] = res
275
+
276
+ if 'text_res' in st.session_state:
277
+ res = st.session_state['text_res']
278
+ st.markdown("---")
279
+
280
+ k1, k2, k3 = st.columns(3)
281
+ # Bot Score
282
+ color = "red" if res['bot_score'] > 50 else "green"
283
+ k1.markdown(f"""<div class="stat-box"><div class="stat-num" style="color:{color}">{res['bot_score']:.0f}%</div><div class="stat-txt">Bot Chance</div></div>""", unsafe_allow_html=True)
284
+ # Grammar
285
+ k2.markdown(f"""<div class="stat-box"><div class="stat-num">{res['grammar_score']:.0f}%</div><div class="stat-txt">Grammar Quality</div></div>""", unsafe_allow_html=True)
286
+ # Mood
287
+ k3.markdown(f"""<div class="stat-box"><div class="stat-num">{res['mood_label']}</div><div class="stat-txt">Review Mood</div></div>""", unsafe_allow_html=True)
288
+
289
+ st.write("")
290
+ v1, v2 = st.columns([1, 2])
291
+ with v1: st.pyplot(simple_chart(res))
292
+ with v2:
293
+ if res['bot_score'] > 70:
294
+ st.error("๐Ÿšจ **FAKE ALERT:** This review looks like it was written by a robot.")
295
+ elif res['bot_score'] > 40:
296
+ st.warning("๐Ÿค” **UNSURE:** It looks a bit weird, but could be real.")
297
+ else:
298
+ st.success("โœ… **REAL:** This looks like a genuine human review.")
299
 
300
+ # --- IMAGE ---
301
  with tab2:
302
+ col_in, col_view = st.columns([1, 1])
 
 
 
303
 
304
+ with col_in:
305
+ st.markdown("#### Step 1: Provide Image")
306
+ method = st.radio("Input Method", ["Paste URL", "Upload File"], horizontal=True, label_visibility="collapsed")
307
 
308
+ img = None
309
+ if method == "Paste URL":
310
+ url = st.text_input("Paste Image Link:")
311
+ if url: img = get_image_from_url(url)
312
+ else:
313
+ up_file = st.file_uploader("Upload Image", type=['jpg','png','jpeg'])
314
+ if up_file:
315
+ try: img = Image.open(up_file).convert("RGB")
316
+ except: st.error("Error reading file")
317
+
318
+ if st.button("Scan Image", type="primary", use_container_width=True):
319
  if img:
320
+ with st.spinner("Scanning for AI patterns..."):
321
+ data = check_image(img, squad)
322
+ st.session_state['img_res'] = data
323
+ st.session_state['current_img'] = img
324
+ else:
325
+ st.error("Please provide a valid image first.")
326
 
327
+ with col_view:
328
+ if 'current_img' in st.session_state:
329
+ st.image(st.session_state['current_img'], use_column_width=True, caption="Analyzed Image")
330
+
331
+ if 'img_res' in st.session_state:
332
+ data = st.session_state['img_res']
333
+ ai_score = data['ai_chance']
334
+
335
+ st.markdown("#### Step 2: Results")
336
+
337
+ if data['match'] < 0.6:
338
+ st.warning("๐Ÿค” **Confusing Image:** Our models disagree. It might be heavily filtered.")
339
+ elif ai_score > 60:
340
+ st.error(f"๐Ÿค– **AI GENERATED** ({ai_score:.0f}% sure)")
341
+ st.write("We found digital patterns that cameras don't make.")
342
  else:
343
+ st.success(f"๐Ÿ“ธ **REAL PHOTO** ({100-ai_score:.0f}% sure)")
344
+ st.write("This looks like a standard camera photo.")
345
+
346
+ st.progress(ai_score/100, text="AI Probability Bar")
347
+
348
+ # --- MAIN CONTROLLER ---
349
+ def main():
350
+ inject_custom_css()
351
+
352
+ if 'page' not in st.session_state:
353
+ st.session_state['page'] = 'landing'
354
+
355
+ with st.spinner("Loading AI Models..."):
356
+ squad, err = load_ai_squad()
357
+
358
+ if not squad:
359
+ st.error("System Error: Could not connect to AI brains.")
360
+ return
361
+
362
+ if st.session_state['page'] == 'landing':
363
+ landing_page()
364
+ else:
365
+ detector_page(squad)
366
 
367
  if __name__ == "__main__":
368
  main()
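The new two-page flow hinges on st.session_state['page'] plus st.rerun(): the button handlers mutate the state and force an immediate rerun so the other page renders. A stripped-down sketch of that routing pattern (page names follow the commit; the rest is illustrative):

```python
import streamlit as st

if 'page' not in st.session_state:
    st.session_state['page'] = 'landing'

if st.session_state['page'] == 'landing':
    st.title("Landing")
    if st.button("Start checking"):
        st.session_state['page'] = 'detector'
        st.rerun()          # rerun the script so the detector page renders immediately
else:
    st.title("Detector")
    if st.button("Back home"):
        st.session_state['page'] = 'landing'
        st.rerun()
```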