akshit4857 committed
Commit 396bc22 · verified · 1 Parent(s): 2d255cb

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +546 -0
src/streamlit_app.py CHANGED
@@ -2,7 +2,553 @@
  Fake Review Detector - Streamlit Application
  Optimized for Hugging Face Spaces deployment
  """
+ """
6
+ Fake Review Detector - Streamlit Application
7
+ Optimized for Hugging Face Spaces deployment
8
+ """
9
+
10
+ import os
11
+ import io
12
+ import numpy as np
13
+ from collections import Counter
14
+ from typing import Dict, Optional, List, Tuple
15
+ import streamlit as st
16
+ from transformers import pipeline, logging as hf_logging
17
+ from PIL import Image
18
+ import matplotlib.pyplot as plt
19
+ import matplotlib
20
+ import requests
21
+ import urllib.parse
22
+ import math
23
+ import warnings
24
+
25
+ # -------------------------
26
+ # Log Suppression
27
+ # -------------------------
28
+ warnings.filterwarnings("ignore", category=UserWarning, module="transformers")
29
+ warnings.filterwarnings("ignore", category=FutureWarning, module="transformers")
30
+ hf_logging.set_verbosity_error()
31
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
32
+ matplotlib.use('Agg')
33
+
34
+ # -------------------------
35
+ # Configuration
36
+ # -------------------------
37
+ st.set_page_config(
38
+ page_title="Truth Detector πŸ•΅οΈβ€β™€οΈ",
39
+ page_icon="πŸ•΅οΈβ€β™€οΈ",
40
+ layout="wide",
41
+ initial_sidebar_state="collapsed"
42
+ )
43
+
44
+ # Constants
45
+ FAKE_MODEL_NAME = "akshit4857/autotrain-razz4-h7crd"
46
+ SENTIMENT_MODEL_NAME = "cardiffnlp/twitter-roberta-base-sentiment-latest"
47
+ EMOTION_MODEL_NAME = "j-hartmann/emotion-english-distilroberta-base"
48
+
49
+ # --- DUAL ENGINE IMAGE DETECTION ---
50
+ IMAGE_MODEL_A = "dima806/ai_generated_image_detection"
51
+ IMAGE_MODEL_B = "umm-maybe/AI-image-detector"
52
+
53
+ MAX_TEXT_LENGTH = 5000
54
+
55
+ # -------------------------
56
+ # Secrets & Environment Management
57
+ # -------------------------
58
+ def get_secret(key: str, default: str = None) -> Optional[str]:
59
+ try:
60
+ if hasattr(st, 'secrets') and key in st.secrets:
61
+ return st.secrets[key]
62
+ except Exception:
63
+ pass
64
+ return os.environ.get(key, default)
65
+
66
+ HF_TOKEN = get_secret("HF_TOKEN")
67
+ OPENAI_API_KEY = get_secret("OPENAI_API_KEY")
68
+
69
+ # -------------------------
+ # Custom CSS for Playful UI
+ # -------------------------
+ def inject_custom_css():
+     st.markdown("""
+     <style>
+     /* General App Background */
+     .stApp {
+         background: linear-gradient(to bottom, #ffffff, #f8f9fa);
+     }
+
+     /* Fun Headers */
+     h1 {
+         font-family: 'Source Sans Pro', sans-serif;
+         color: #FF4B4B;
+         text-align: center;
+         font-weight: 800;
+         letter-spacing: -1px;
+     }
+
+     /* Rounded Buttons */
+     .stButton>button {
+         border-radius: 50px;
+         border: 2px solid #FF4B4B;
+         background-color: white;
+         color: #FF4B4B;
+         font-weight: bold;
+         transition: all 0.3s ease;
+         padding: 10px 25px;
+     }
+     .stButton>button:hover {
+         background-color: #FF4B4B;
+         color: white;
+         transform: scale(1.02);
+         border-color: #FF4B4B;
+     }
+
+     /* Card Style for Stats */
+     .stat-card {
+         background-color: white;
+         border-radius: 20px;
+         padding: 20px;
+         box-shadow: 0 4px 15px rgba(0,0,0,0.05);
+         text-align: center;
+         border: 1px solid #f0f0f0;
+         height: 100%;
+     }
+     .stat-value {
+         font-size: 2em;
+         font-weight: 900;
+         margin: 0;
+         color: #333;
+     }
+     .stat-label {
+         font-size: 0.9em;
+         color: #888;
+         text-transform: uppercase;
+         letter-spacing: 1px;
+         margin-top: 5px;
+     }
+
+     /* Report Box */
+     .report-box {
+         background-color: #ffffff;
+         padding: 25px;
+         border-radius: 20px;
+         border: 2px dashed #e0e0e0;
+         box-shadow: 0 2px 10px rgba(0,0,0,0.02);
+     }
+     </style>
+     """, unsafe_allow_html=True)
+
+ # -------------------------
+ # Model Loading
+ # -------------------------
+ @st.cache_resource(show_spinner=False)
+ def load_models() -> Tuple[Dict, List[str]]:
+     models = {}
+     errors = []
+
+     # Text Models
+     try:
+         models['fake'] = pipeline("text-classification", model=FAKE_MODEL_NAME, token=HF_TOKEN)
+     except Exception as e:
+         errors.append(f"Fake Detector: {str(e)}")
+
+     try:
+         models['sentiment'] = pipeline("sentiment-analysis", model=SENTIMENT_MODEL_NAME, tokenizer=SENTIMENT_MODEL_NAME, token=HF_TOKEN)
+     except Exception as e:
+         errors.append(f"Sentiment Model: {str(e)}")
+
+     try:
+         models['emotion'] = pipeline("text-classification", model=EMOTION_MODEL_NAME, top_k=None, token=HF_TOKEN)
+     except Exception as e:
+         errors.append(f"Emotion Model: {str(e)}")
+
+     # Image Models (Dual Engine)
+     models['image_engine'] = "Offline"
+     try:
+         # Load Engine A
+         models['img_a'] = pipeline("image-classification", model=IMAGE_MODEL_A, token=HF_TOKEN)
+         # Load Engine B
+         models['img_b'] = pipeline("image-classification", model=IMAGE_MODEL_B, token=HF_TOKEN)
+         models['image_engine'] = "Dual-Core (High Precision)"
+     except Exception as e:
+         print(f"Dual engine load failed: {e}")
+         try:
+             if 'img_a' not in models:
+                 models['img_a'] = pipeline("image-classification", model=IMAGE_MODEL_A, token=HF_TOKEN)
+             models['image_engine'] = "Single-Core (Standard)"
+             errors.append("Note: Running in reduced precision mode (one image model failed).")
+         except Exception as e2:
+             models['image_engine'] = "Failed"
+             errors.append(f"Image Checker failed completely: {str(e2)}")
+
+     return models, errors
+
+ # Initialize
+ inject_custom_css()
+ with st.spinner("🧠 Waking up the AI Brains..."):
+     ensemble, load_errors = load_models()
+
+ if 'fake' not in ensemble:
+     st.error("❌ Oops! The main brain failed to load. Please refresh.")
+     st.stop()
+
+ # -------------------------
+ # Feature Extraction
+ # -------------------------
+ def calculate_complexity_score(text: str) -> float:
+     words = text.split()
+     if not words: return 0
+     avg_len = sum(len(w) for w in words) / len(words)
+     ttr = len(set(words)) / len(words)
+     score = (avg_len * 5) + (ttr * 50)
+     return min(100, max(0, score))
+
+ def extract_deep_features(text: str, models: dict) -> Dict:
+     sent_label = "Unknown"
+     sent_score = 0.0
+     top_emo = {'label': 'Unknown', 'score': 0.0}
+     emo_res = []
+
+     fake_res = models['fake'](text[:512])[0]
+     is_fake_prob = fake_res['score'] if fake_res['label'] == 'Fake' else (1 - fake_res['score'])
+
+     if 'sentiment' in models:
+         try:
+             sent_res = models['sentiment'](text[:512])[0]
+             sent_score = sent_res['score']
+             sent_label = sent_res['label']
+         except Exception: pass
+
+     if 'emotion' in models:
+         try:
+             emo_res = models['emotion'](text[:512])[0]
+             top_emo = max(emo_res, key=lambda x: x['score'])
+         except Exception: pass
+
+     complexity = calculate_complexity_score(text)
+
+     return {
+         "fake_probability": is_fake_prob * 100,
+         "sentiment_label": sent_label,
+         "sentiment_confidence": sent_score * 100,
+         "primary_emotion": top_emo['label'],
+         "emotion_confidence": top_emo['score'] * 100,
+         "complexity_score": complexity,
+         "raw_emotion_scores": emo_res
+     }
+
+ # -------------------------
+ # Friendly Explanation
+ # -------------------------
+ def generate_friendly_report(text: str, features: Dict) -> str:
+     if not OPENAI_API_KEY:
+         return generate_fallback_report(features)
+
+     prompt = (
+         f"Act as a super friendly detective. Analyze this review.\n\n"
+         f"STATS:\n"
+         f"- Fake Score: {features['fake_probability']:.1f}%\n"
+         f"- Mood: {features['sentiment_label']} ({features['sentiment_confidence']:.1f}%)\n"
+         f"- Vibe: {features['primary_emotion']}\n"
+         f"- Brainy Score: {features['complexity_score']:.1f}/100\n"
+         f"- Text: {text[:600]}...\n\n"
+         f"TASK:\n"
+         f"Is this real or fake? Explain why in simple, fun terms. No robot words.\n\n"
+         f"FORMAT:\n"
+         f"3 bullets: '✍️ Style Check', '❤️ Vibe Check', '💡 The Truth'."
+     )
+
+     try:
+         headers = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"}
+         payload = {
+             "model": "gpt-4o-mini",
+             "messages": [{"role": "user", "content": prompt}],
+             "temperature": 0.5
+         }
+         response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload, timeout=15)
+         return response.json()["choices"][0]["message"]["content"]
+     except:
+         return generate_fallback_report(features)
+
+ def generate_fallback_report(features: Dict) -> str:
+     f_prob = features['fake_probability']
+     emo = features['primary_emotion']
+     comp = features['complexity_score']
+
+     report = "### ✍️ Style Check\n"
+     if comp < 40:
+         report += "It uses very simple words over and over. Bots often do this to be fast. Humans usually mix it up more!\n\n"
+     else:
+         report += "The writing is detailed and flowery. This usually means a real person took the time to write it.\n\n"
+
+     report += "### ❤️ Vibe Check\n"
+     if f_prob > 70 and emo in ['joy', 'surprise']:
+         report += f"Whoa, so much '{emo}'! Fake reviews often scream 'THIS IS AMAZING' just to sell stuff.\n\n"
+     elif f_prob > 70 and emo in ['anger', 'disgust']:
+         report += f"Yikes, lots of '{emo}'. Sometimes rivals write nasty reviews to hurt a business.\n\n"
+     else:
+         report += f"The vibe feels like '{emo}', which matches the star rating perfectly.\n\n"
+
+     report += "### 💡 The Truth\n"
+     if f_prob > 50:
+         report += f"I'm **{f_prob:.0f}% sure this is FAKE**. It just doesn't feel right!"
+     else:
+         report += "This looks like a **REAL review** from a real person. You're good!"
+
+     return report
+
+ # -------------------------
+ # Visualization: Clean Radar Chart
+ # -------------------------
+ def create_radar_chart(features: Dict) -> plt.Figure:
+     categories = ['Fake-o-Meter', 'Feelings', 'Drama Level', 'Simple Words']
+     inv_complexity = 100 - features['complexity_score']
+
+     values = [
+         features['fake_probability'],
+         features['sentiment_confidence'],
+         features['emotion_confidence'],
+         inv_complexity
+     ]
+
+     N = len(categories)
+     angles = [n / float(N) * 2 * math.pi for n in range(N)]
+     values += values[:1]
+     angles += angles[:1]
+
+     fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))
+
+     ax.set_facecolor('#ffffff')
+     plt.gcf().patch.set_facecolor('#ffffff')
+
+     ax.plot(angles, values, linewidth=3, linestyle='solid', color='#FF4B4B')
+     ax.fill(angles, values, '#FF4B4B', alpha=0.2)
+
+     ax.set_xticks(angles[:-1])
+     ax.set_xticklabels(categories, size=12, weight='bold', color="#444")
+
+     ax.set_yticks([25, 50, 75, 100])
+     ax.set_yticklabels(["", "", "", ""], color="grey", size=7)
+     ax.set_ylim(0, 100)
+
+     ax.spines['polar'].set_visible(False)
+     ax.grid(color='#eeeeee')
+
+     return fig
+
+ # -------------------------
+ # Image Logic (Dual Engine)
+ # -------------------------
+ def get_image_from_url(url: str) -> Optional[Image.Image]:
+     try:
+         headers = {'User-Agent': 'Mozilla/5.0'}
+         response = requests.get(url, headers=headers, timeout=10, stream=True)
+         response.raise_for_status()
+         return Image.open(io.BytesIO(response.content)).convert("RGB")
+     except Exception:
+         return None
+
+ def get_google_lens_url(image_url: str) -> str:
+     return f"https://lens.google.com/uploadbyurl?url={urllib.parse.quote(image_url)}"
+
+ def analyze_image_dual_engine(img, models) -> Dict:
+     score_a_ai = 0.0
+     score_b_ai = 0.0
+
+     if 'img_a' in models:
+         res_a = models['img_a'](img)
+         for r in res_a:
+             if r['label'].lower() in ['fake', 'artificial', 'ai', 'generated']:
+                 score_a_ai = r['score']
+
+     if 'img_b' in models:
+         res_b = models['img_b'](img)
+         for r in res_b:
+             if r['label'].lower() in ['fake', 'artificial', 'ai', 'generated']:
+                 score_b_ai = r['score']
+
+     if 'img_b' not in models:
+         score_b_ai = score_a_ai
+
+     avg_ai_score = (score_a_ai + score_b_ai) / 2
+
+     # Agreement: How close are the two models?
+     # If one says 90% and other says 10%, agreement is low (0.2)
+     # If both say 90%, agreement is high (1.0)
+     agreement = 1.0 - abs(score_a_ai - score_b_ai)
+
+     return {
+         "avg_ai": avg_ai_score,
+         "avg_real": 1.0 - avg_ai_score,
+         "score_a": score_a_ai,
+         "score_b": score_b_ai,
+         "agreement": agreement
+     }
+
+ # -------------------------
+ # Main UI
+ # -------------------------
+ def main():
+     st.markdown("""
+     <div style='text-align: center; padding-bottom: 30px;'>
+         <h1 style='margin-bottom: 0;'>🕵️‍♀️ Truth Detector</h1>
+         <p style='color: #888; font-size: 1.2em; margin-top: 5px;'>
+             Real or Fake? Let's investigate! 🔍
+         </p>
+     </div>
+     """, unsafe_allow_html=True)
+
+     tab1, tab2 = st.tabs(["📝 Check Text", "📸 Check Photo"])
+
+     # --- TAB 1: TEXT ---
+     with tab1:
+         col_in1, col_in2 = st.columns([3, 1])
+         with col_in1:
+             review_text = st.text_area("Paste review here:", height=150, placeholder="e.g., 'OMG best product ever!!!'")
+         with col_in2:
+             st.markdown("""
+             <div class="stat-card" style="padding: 15px; text-align: left;">
+                 <b>🕵️ Tips:</b><br>
+                 <small>
+                 • Paste full reviews<br>
+                 • Long text is better<br>
+                 • Check English only
+                 </small>
+             </div>
+             """, unsafe_allow_html=True)
+
+         if st.button("✨ Scan for Truth", type="primary", use_container_width=True):
+             if not review_text:
+                 st.toast("Please paste some text first!")
+                 st.stop()
+
+             with st.spinner("🕵️‍♀️ Investigating clues..."):
+                 features = extract_deep_features(review_text, ensemble)
+                 report = generate_friendly_report(review_text, features)
+
+             st.write("") # Spacer
+
+             # --- Fun Stat Cards ---
+             c1, c2, c3, c4 = st.columns(4)
+
+             # 1. Fake-o-Meter
+             risk_color = "#FF4B4B" if features['fake_probability'] > 60 else "#4CAF50"
+             c1.markdown(f"""
+             <div class="stat-card">
+                 <div class="stat-value" style="color: {risk_color}">{features['fake_probability']:.0f}%</div>
+                 <div class="stat-label">🤖 Fake-o-Meter</div>
+             </div>
+             """, unsafe_allow_html=True)
+
+             # 2. Mood
+             c2.markdown(f"""
+             <div class="stat-card">
+                 <div class="stat-value">{features['sentiment_label']}</div>
+                 <div class="stat-label">❤️ Mood</div>
+             </div>
+             """, unsafe_allow_html=True)
+
+             # 3. Vibe
+             c3.markdown(f"""
+             <div class="stat-card">
+                 <div class="stat-value">{features['primary_emotion'].title()}</div>
+                 <div class="stat-label">🎭 Vibe</div>
+             </div>
+             """, unsafe_allow_html=True)
+
+             # 4. Brainy Score
+             c4.markdown(f"""
+             <div class="stat-card">
+                 <div class="stat-value">{features['complexity_score']:.0f}</div>
+                 <div class="stat-label">🧠 Brainy Score</div>
+             </div>
+             """, unsafe_allow_html=True)
+
+             st.markdown("---")
+
+             # Chart & Text
+             col_chart, col_text = st.columns([1, 1.5])
+
+             with col_chart:
+                 st.subheader("🎯 The Shape of Truth")
+                 fig = create_radar_chart(features)
+                 st.pyplot(fig)
+                 plt.close(fig)
+
+             with col_text:
+                 st.subheader("📝 Detective's Notes")
+                 st.markdown(f"""<div class="report-box">{report}</div>""", unsafe_allow_html=True)
+
+     # --- TAB 2: IMAGE ---
+     with tab2:
+         st.markdown("### 📸 Is this photo real?")
+
+         status_text = "Dual Brains Active 🧠🧠" if ensemble['image_engine'].startswith("Dual") else "Single Brain Mode 🧠"
+         st.caption(f"System Status: {status_text}")
+
+         img_url = st.text_input("Paste Image Link (URL):")
+
+         if st.button("🔍 Scan Photo", type="primary"):
+             if not img_url: st.stop()
+
+             if ensemble['image_engine'] == "Failed":
+                 st.error("Sorry, the photo scanner is sleeping right now.")
+                 st.stop()
+
+             with st.spinner("👀 Looking closely at pixels..."):
+                 img = get_image_from_url(img_url)
+                 if img:
+                     c_img, c_res = st.columns([1, 1])
+                     with c_img:
+                         st.image(img, use_column_width=True)
+
+                     with c_res:
+                         res = analyze_image_dual_engine(img, ensemble)
+
+                         ai_percent = res['avg_ai'] * 100
+                         real_percent = res['avg_real'] * 100
+                         agreement = res['agreement']
+
+                         # --- Playful Verdicts ---
+                         if agreement < 0.6:
+                             st.warning(f"🤔 **It's Confusing!**")
+                             st.markdown("Our AI brains disagree! One thinks it's real, the other says fake. It might be heavily edited.")
+                             with st.expander("See Brain Argument"):
+                                 st.write(f"Brain A says: {res['score_a']*100:.0f}% Fake")
+                                 st.write(f"Brain B says: {res['score_b']*100:.0f}% Fake")
+
+                         elif ai_percent > 60:
+                             st.error(f"🤖 **Definitely AI!** ({ai_percent:.0f}% sure)")
+                             st.markdown("This photo has computer-made patterns all over it.")
+
+                         elif real_percent > 60:
+                             st.success(f"📸 **Real Photo!** ({real_percent:.0f}% sure)")
+                             st.markdown("This looks like a genuine snapshot from a camera.")
+
+                         else:
+                             st.warning(f"🤷 **Hard to Tell**")
+                             st.markdown("It's right on the edge. Might be a real photo with lots of filters.")
+
+                         st.write("")
+                         st.markdown("#### Confidence Levels")
+                         st.progress(res['avg_real'], text=f"📸 Real: {real_percent:.1f}%")
+                         st.progress(res['avg_ai'], text=f"🤖 AI: {ai_percent:.1f}%")
+
+                         lens = get_google_lens_url(img_url)
+                         st.markdown(f"""
+                         <br>
+                         <a href="{lens}" target="_blank" style="
+                             display: block; width: 100%; text-align: center;
+                             padding: 12px; color: white; background-color: #4285F4;
+                             border-radius: 10px; text-decoration: none; font-weight: bold;
+                             box-shadow: 0 2px 5px rgba(0,0,0,0.1);
+                         ">🔎 Double-Check on Google</a>
+                         """, unsafe_allow_html=True)
+                 else:
+                     st.error("Couldn't grab that image. Is the link correct?")
+
+ if __name__ == "__main__":
+     main()
  import os
  import io
  import numpy as np