Skier8402 committed on
Commit 8af51e2 · verified · 1 Parent(s): 427ab36

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +655 -34
src/streamlit_app.py CHANGED
@@ -1,40 +1,661 @@
- import altair as alt
- import numpy as np
- import pandas as pd
  import streamlit as st

- """
- # Welcome to Streamlit!

- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).

- In the meantime, below is an example of what you can do with just a few lines of code:
  """

- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))
+ '''
+ streamlit_app.py - Vision Transformer Interpretability Dashboard (Streamlit app)
+
+ This Streamlit app provides interpretability tools for vision transformer and CNN models.
+ Features:
+ - LIME explanations for image classification predictions
+ - Uncertainty analysis via MC Dropout and Test-Time Augmentation (TTA)
+ - Switch between Hugging Face (ViT, Swin, DeiT) and timm (ResNet, EfficientNet, ConvNeXt) models
+ - Support for custom finetuned models and class mappings
+ - Interactive sidebar for model selection and checkpoint upload
+ - Feynman-style explanations and cheat-sheet for interpretability concepts
+
+ Inspired by and reuses code from:
+ - vit_and_captum.py (Integrated Gradients with Captum)
+ - vit_lime_uncertainty.py (LIME explanations and uncertainty)
+ - detr_and_interp.py (Grad-CAM for DETR, logging setup)
+ '''
+
  import streamlit as st
+ import html
+ import numpy as np, torch, matplotlib.pyplot as plt
+ from PIL import Image
+ from transformers import AutoModelForImageClassification, AutoImageProcessor, PreTrainedModel
+ from lime import lime_image
+ import torchvision.transforms as T
+ import timm
+ from skimage.segmentation import slic, mark_boundaries
+ import streamlit.components.v1 as components


+ # Add logging
+ import logging, os
+ from logging.handlers import RotatingFileHandler
+
+ LOG_DIR = os.path.join(os.path.dirname(__file__), "logs")
+ os.makedirs(LOG_DIR, exist_ok=True)
+ logfile = os.path.join(LOG_DIR, "interp.log")
+
+ logger = logging.getLogger("interp")
+ if not logger.handlers:
+     logger.setLevel(logging.INFO)
+     sh = logging.StreamHandler()
+     sh.setLevel(logging.INFO)
+     fh = RotatingFileHandler(logfile, maxBytes=5_000_000, backupCount=3, encoding="utf-8")
+     fh.setLevel(logging.INFO)
+     fmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s")
+     sh.setFormatter(fmt)
+     fh.setFormatter(fmt)
+     logger.addHandler(sh)
+     logger.addHandler(fh)
+
+
+ # ---------------- Setup ----------------
+ MODEL_NAME = "google/vit-base-patch16-224"
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # ---------- Sidebar model selectors ----------
+ # Quick lists you can edit to test other HF / timm models
+ HF_MODELS = [
+     "google/vit-base-patch16-224",
+     "facebook/deit-base-patch16-224",
+     "microsoft/swin-tiny-patch4-window7-224",
+     "google/vit-large-patch16-224",
+ ]
+ TIMM_MODELS = [
+     "convnext_base",
+     "resnet50",
+     "efficientnet_b0",
+ ]
+
+ def model_selector(slot_key: str, default_source="hf"):
+     source = st.sidebar.selectbox(
+         f"{slot_key} source",
+         ["hf", "timm"],
+         index=0 if default_source == "hf" else 1,
+         key=f"{slot_key}_source",
+     )
+     if source == "hf":
+         hf_choice = st.sidebar.selectbox(
+             f"{slot_key} Hugging Face model",
+             HF_MODELS,
+             index=0,
+             key=f"{slot_key}_hf",
+         )
+         return f"hf:{hf_choice}"
+     else:
+         timm_choice = st.sidebar.selectbox(
+             f"{slot_key} timm model",
+             TIMM_MODELS,
+             index=0,
+             key=f"{slot_key}_timm",
+         )
+         return f"timm:{timm_choice}"
+
+ # ---------- Model Loader ----------
+ # Use Streamlit caching when available to avoid repeated downloads
+ try:
+     cache_decorator = st.cache_resource
+ except Exception:
+     from functools import lru_cache
+     cache_decorator = lru_cache(maxsize=8)
+
+ @cache_decorator
+ def load_model(choice, checkpoint=None, class_map=None, num_classes=None):
+     """
+     Load a model from HF, timm, or a custom checkpoint
+     Args:
+         choice: Model identifier ('hf:model_name' or 'timm:model_name')
+         checkpoint: Optional path to custom checkpoint file
+         class_map: Optional dict mapping class indices to labels
+         num_classes: Optional number of classes for custom models
+     """
+     logger.info("Loading model: %s", choice)
+     is_hf = choice.startswith("hf:")
+
+     # Parse model identifier
+     if is_hf:
+         hf_name = choice.split("hf:")[1]
+         if checkpoint:  # Custom checkpoint
+             # For custom HF model, first load the architecture then apply weights
+             try:
+                 if num_classes:
+                     model = AutoModelForImageClassification.from_pretrained(
+                         hf_name, num_labels=num_classes, ignore_mismatched_sizes=True
+                     ).to(device)
+                 else:
+                     model = AutoModelForImageClassification.from_pretrained(hf_name).to(device)
+
+                 # Load checkpoint with error handling
+                 state_dict = torch.load(checkpoint, map_location=device)
+                 # If state_dict is wrapped (common in training checkpoints)
+                 if "model" in state_dict:
+                     state_dict = state_dict["model"]
+                 elif "state_dict" in state_dict:
+                     state_dict = state_dict["state_dict"]
+
+                 # Handle any prefix differences by checking and stripping if needed
+                 if all(k.startswith('model.') for k in state_dict if k != 'config'):
+                     state_dict = {k[6:]: v for k, v in state_dict.items() if k != 'config'}
+
+                 # Load with flexible partial loading (ignore missing/unexpected)
+                 model.load_state_dict(state_dict, strict=False)
+                 logger.info("Custom checkpoint loaded for HF model")
+
+                 # If custom class mapping provided, update config (JSON keys are strings, so cast to int)
+                 if class_map:
+                     model.config.id2label = {int(k): v for k, v in class_map.items()}
+                     model.config.label2id = {v: int(k) for k, v in class_map.items()}
+             except Exception as e:
+                 logger.error(f"Error loading custom HF model: {e}")
+                 st.error(f"Failed to load custom model: {e}")
+                 # Fallback to base model
+                 model = AutoModelForImageClassification.from_pretrained(hf_name).to(device)
+         else:
+             # Standard HF model
+             model = AutoModelForImageClassification.from_pretrained(hf_name).to(device)
+
+         processor = AutoImageProcessor.from_pretrained(hf_name)
+
+     elif choice.startswith("timm:"):
+         name = choice.split("timm:")[1]
+         if checkpoint:  # Custom checkpoint
+             try:
+                 # For timm, specify custom number of classes if provided
+                 if num_classes:
+                     model = timm.create_model(name, pretrained=False, num_classes=num_classes).to(device)
+                 else:
+                     model = timm.create_model(name, pretrained=True).to(device)
+
+                 # Load checkpoint
+                 state_dict = torch.load(checkpoint, map_location=device)
+                 # Handle common checkpoint formats
+                 if "model" in state_dict:
+                     state_dict = state_dict["model"]
+                 elif "state_dict" in state_dict:
+                     state_dict = state_dict["state_dict"]
+
+                 # Handle any prefix differences (e.g. DataParallel's 'module.' prefix)
+                 if all(k.startswith('module.') for k in state_dict):
+                     state_dict = {k[7:]: v for k, v in state_dict.items()}
+
+                 model.load_state_dict(state_dict, strict=False)
+                 logger.info("Custom checkpoint loaded for timm model")
+             except Exception as e:
+                 logger.error(f"Error loading custom timm model: {e}")
+                 st.error(f"Failed to load custom model: {e}")
+                 # Fallback to pretrained
+                 model = timm.create_model(name, pretrained=True).to(device)
+         else:
+             # Standard timm model
+             model = timm.create_model(name, pretrained=True).to(device)
+
+         # Use a standard processor for timm
+         processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
+
+     # Set model to eval mode
+     model.eval()
+     logger.info("Model %s loaded (eval mode)", choice)
+
+     # Return model, processor, flag for HF, and class map
+     return model, processor, is_hf, class_map
+
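Outside the sidebar flow, the `load_model` helper above is driven by the `hf:` / `timm:` prefix convention. A rough usage sketch under that convention (the checkpoint filename and label map are made-up examples, not files from this repository):

    # Pretrained Hugging Face ViT, no custom weights
    model, processor, is_hf, _ = load_model("hf:google/vit-base-patch16-224")

    # timm ResNet-50 with a hypothetical finetuned 2-class checkpoint and label map
    model, processor, is_hf, class_map = load_model(
        "timm:resnet50",
        checkpoint="finetuned_resnet50.pth",     # hypothetical path
        class_map={"0": "cat", "1": "dog"},      # JSON-style string keys
        num_classes=2,
    )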
+ # Add sidebar with clear sections
+ st.sidebar.title("Model Selection")
+
+ # Enhanced sidebar with custom model support
+ with st.sidebar:
+     # Add tabs for standard vs custom models
+     tab1, tab2 = st.tabs(["Standard Models", "Custom Finetuned Models"])
+
+     with tab1:
+         st.markdown("### 📊 Standard Models")
+         st.markdown("Choose from pre-trained models:")
+         m1 = model_selector("Active Model", default_source="hf")
+
+         # Button to apply standard model change
+         if st.button("📋 Set as Active Model", help="Click to use the selected model for analysis", key="std_model_btn"):
+             with st.spinner(f"Loading {m1}..."):
+                 model, processor, is_hf_model, _ = load_model(m1)
+                 st.session_state.model = model
+                 st.session_state.processor = processor
+                 st.session_state.is_hf_model = is_hf_model
+                 st.session_state.active_model = m1
+                 st.session_state.using_custom = False
+                 st.session_state.class_map = None
+                 st.success(f"✅ Model activated: {m1}")
+
+     with tab2:
+         st.markdown("### 🔧 Custom Finetuned Model")
+         st.markdown("Use your own finetuned model:")
+
+         # Select base architecture
+         custom_source = st.selectbox(
+             "Base architecture source",
+             ["hf", "timm"],
+             key="custom_source"
+         )
+
+         if custom_source == "hf":
+             custom_base = st.selectbox(
+                 "Hugging Face base model",
+                 HF_MODELS,
+                 key="custom_hf_base"
+             )
+             base_model = f"hf:{custom_base}"
+         else:
+             custom_base = st.selectbox(
+                 "timm base model",
+                 TIMM_MODELS,
+                 key="custom_timm_base"
+             )
+             base_model = f"timm:{custom_base}"
+
+         # Upload checkpoint file
+         uploaded_checkpoint = st.file_uploader(
+             "Upload model checkpoint (.pth, .bin)",
+             type=["pth", "bin", "pt", "ckpt"],
+             help="Upload your finetuned model weights"
+         )
+
+         # Optional class mapping
+         custom_classes = st.number_input(
+             "Number of classes (if different from base model)",
+             min_value=0, max_value=1000, value=0,
+             help="Leave at 0 to use default classes from base model"
+         )
+
+         uploaded_labels = st.file_uploader(
+             "Upload class labels (optional JSON)",
+             type=["json"],
+             help="JSON file mapping class indices to labels: {\"0\": \"cat\", \"1\": \"dog\"}"
+         )
+
+         # Process label mapping
+         class_map = None
+         if uploaded_labels:
+             try:
+                 import json
+                 class_map = json.loads(uploaded_labels.getvalue().decode("utf-8"))
+                 st.success(f"✓ Loaded {len(class_map)} class labels")
+             except Exception as e:
+                 st.error(f"Error loading class labels: {e}")
+
+         # Store uploaded file in session state if provided
+         if uploaded_checkpoint:
+             # Save to a temporary file
+             import tempfile
+             with tempfile.NamedTemporaryFile(delete=False, suffix='.pth') as tmp_file:
+                 tmp_file.write(uploaded_checkpoint.getvalue())
+                 checkpoint_path = tmp_file.name
+
+             # Store in session state
+             if 'checkpoint_path' not in st.session_state:
+                 st.session_state.checkpoint_path = checkpoint_path
+
+             st.success("✓ Checkpoint ready to use")
+
+         # Button to apply custom model
+         if st.button("🚀 Load Custom Model", help="Click to use your custom model"):
+             with st.spinner(f"Loading custom model based on {base_model}..."):
+                 try:
+                     num_classes = custom_classes if custom_classes > 0 else None
+                     model, processor, is_hf_model, class_map = load_model(
+                         base_model, checkpoint_path, class_map, num_classes
+                     )
+                     st.session_state.model = model
+                     st.session_state.processor = processor
+                     st.session_state.is_hf_model = is_hf_model
+                     st.session_state.active_model = f"Custom {base_model}"
+                     st.session_state.using_custom = True
+                     st.session_state.class_map = class_map
+                     st.success("✅ Custom model activated!")
+                 except Exception as e:
+                     st.error(f"Failed to load custom model: {str(e)}")
+
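The class-labels uploader above expects a JSON object keyed by class index, as its help text shows. Purely as an illustration (the filename labels.json is hypothetical), such a file could be produced like this:

    import json

    # Keys are class indices as strings, values are human-readable names,
    # matching the format in the uploader's help text.
    labels = {"0": "cat", "1": "dog"}
    with open("labels.json", "w") as f:
        json.dump(labels, f, indent=2)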
+     # Explanation section
+     st.markdown("---")
+     st.markdown("### ℹ️ Model Types")
+     st.markdown("""
+ - **HF (Hugging Face)**: Vision Transformer models with standard interpretability
+ - **timm (PyTorch Image Models)**: Classical CNN architectures like ResNet, EfficientNet
+
+ *Custom models must match the base architecture's format.*
+ """)
+
+ # Initialize model and processor from session state
+ if 'active_model' not in st.session_state:
+     # First time loading - use default model
+     m1 = "hf:google/vit-base-patch16-224"
+     st.session_state.active_model = m1
+     model, processor, is_hf_model, _ = load_model(m1)
+     st.session_state.model = model
+     st.session_state.processor = processor
+     st.session_state.is_hf_model = is_hf_model
+     st.session_state.using_custom = False
+     st.session_state.class_map = None
+ else:
+     # Get from session state
+     model = st.session_state.model
+     processor = st.session_state.processor
+     is_hf_model = st.session_state.is_hf_model
+
+ # Initialize explainer
+ explainer = lime_image.LimeImageExplainer()
+
+ st.title("🧠 Vision Transformer Interpretability Dashboard")
+ st.write("Upload an image and explore explanations with **LIME** and **Uncertainty Analysis**.")
+
+ # Add a Feynman-style "How it works" explanation as a collapsible expander
+ with st.expander("How it works - Feynman-style explanations (click to expand)", expanded=False):
+     st.markdown("""
+ ## 🧠 Vision Transformer Interpretability - Feynman-Style Explanations
+
+ ### Why do we care about interpretability & uncertainty?
+
+ Imagine you ask a kid to identify whether a picture is a cat. They point to the fur, ears, maybe whiskers. But what if the kid always focused on shadows, or background trees, instead of the cat itself? We want two things:
+
+ 1. **Why** did the model say "cat"? What parts of the image made it decide so?
+ 2. **How confident** is the model in that decision? Could small changes flip it?
+
+ Interpretable methods show us #1. Uncertainty estimation shows us #2. Together, they help us see not just *what* the model does, but *whether* we should trust it.
+
+ ### Key techniques, in plain analogies
+
+ - **LIME (Local Interpretable Model-agnostic Explanations)**: For a single image & prediction, LIME perturbs (changes) parts of the image, watches how the prediction changes, and fits a simple model locally to understand which parts are most influential.
+   - Analogy: Like shining small spotlights on different parts of a stage during a play: you dim a section, see how the actor's reaction changes. The parts whose dimming changes the reaction most are parts the actor depends on.
+
+ - **Uncertainty in LIME (multiple LIME runs)**: Because LIME uses randomness (perturbing patches), different runs can give different "important" regions. Measuring how much they differ tells you how stable/fragile the explanation is.
+   - Analogy: If you ask several cooks what the dominant spice in a stew is and everyone agrees, you're confident; if opinions vary, your knowledge is shakier.
+
+ - **MC Dropout (Monte Carlo Dropout)**: Leave dropout on at inference time and run the model multiple times. The spread of predictions is a proxy for epistemic uncertainty.
+   - Analogy: Like a jury where each juror occasionally misses a sentence; if the verdict remains the same across many "faulty hearing" runs, trust it more.
+
+ - **Test-Time Augmentation (TTA) Uncertainty**: Apply small transforms (crops, flips) at inference and watch prediction variance. High variance → brittle model.
+   - Analogy: Take photos under slightly different lighting/angles; if the label flips, the model may depend on superficial cues.
+
+ ### How to read the visuals
+
+ - LIME highlights: bright / colored superpixels = influential regions. If background or artifacts light up, that's a red flag.
+ - LIME uncertainty heatmap: high std in a region means attributions are unstable there.
+ - MC Dropout / TTA histograms: narrow/tall peak = confident, wide/multi-modal = uncertain.
+
+ ### Limitations & caveats
+
+ - Stable explanations can still be consistently wrong if the model learned a bias.
+ - MC Dropout is an approximation - it helps but doesn't fully replace calibrated probabilistic methods.
+ - TTA shows input sensitivity, not full distributional shift robustness.
+
+ ### Quick example (walkthrough)
+
+ 1. Upload image → model predicts label with some probability.
+ 2. LIME finds important superpixels; multiple LIME runs give mean + std maps.
+ 3. MC Dropout produces a histogram over runs; use it to judge epistemic uncertainty.
+ 4. TTA shows sensitivity to small input changes.
+
+ ### Practical tips
+
+ - Use explanation + uncertainty to guide active learning: label cases where the model is uncertain or explanations are unstable.
+ - For safety-critical systems, combine these visual signals with human review and stricter failure thresholds.
+
+ ### Where to read more
+
+ - Christoph Molnar, Interpretable Machine Learning (chapter on LIME): https://christophm.github.io/interpretable-ml-book/lime.html
+ - Ribeiro et al., "Why Should I Trust You?" (original LIME paper): https://homes.cs.washington.edu/~marcotcr/blog/lime/
+ - Zhang et al., "Why Should You Trust My Explanation?" (LIME reliability): https://arxiv.org/abs/1904.12991
+ - MC Dropout practical guide & notes: https://medium.com/@ciaranbench/monte-carlo-dropout-a-practical-guide-4b4dc18014b5
+ """)
+
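The expander above suggests judging confidence from the shape of the MC Dropout / TTA histograms. A single number often reported alongside that visual check is the predictive entropy of the mean probability vector; a small, self-contained sketch (the probability arrays below are made up for illustration):

    import numpy as np

    def predictive_entropy(probs: np.ndarray) -> float:
        """Entropy of a probability vector; higher means less confident."""
        p = np.clip(probs, 1e-12, 1.0)  # avoid log(0)
        return float(-(p * np.log(p)).sum())

    # Example: a sharply peaked prediction vs. an uncertain one
    print(predictive_entropy(np.array([0.97, 0.01, 0.01, 0.01])))  # small
    print(predictive_entropy(np.array([0.25, 0.25, 0.25, 0.25])))  # ~log(4), large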
+ # Compact one-page cheat-sheet (quick flags & checks)
+ with st.expander("Cheat-sheet - Quick flags & warnings", expanded=False):
+     cheat_text = """
+ Quick checks when an explanation looks suspicious
+
+ - Red flag: LIME highlights background or repeated dataset artifacts (logos, borders) - model may have learned spurious cues.
+ - Red flag: LIME attribution std is high in key regions - explanation unstable; try different segmentations or more samples.
+ - Red flag: MC Dropout or TTA histograms are multi-modal or very wide - model uncertain; consider human review or abstain.
+ - Quick fixes: increase dataset diversity, add regularization, try different segmentation_fn parameters, or collect more labels for uncertain cases.
+
+ One-line definitions
+ - LIME: perturb + fit simple local model to explain a single prediction.
+ - MC Dropout: enable dropout at inference and sample to estimate epistemic uncertainty.
+ - TTA: apply small input transforms at inference to measure sensitivity / aleatoric uncertainty.
+
+ Pro-tip: Use explanation + uncertainty to drive active learning: pick instances with high prediction uncertainty or unstable explanations for labeling.
  """

+     # Show the cheat-sheet as markdown
+     st.markdown(cheat_text)
+
+     # Download button for the cheat-sheet as plain text
+     try:
+         st.download_button(
+             label="Download cheat-sheet (.txt)",
+             data=cheat_text,
+             file_name="cheat_sheet.txt",
+             mime="text/plain",
+         )
+     except Exception:
+         # Streamlit may raise if download_button isn't available in some environments; ignore gracefully
+         pass
+
+     # Copy-to-clipboard button using a small HTML+JS snippet
+     escaped = html.escape(cheat_text)
+     copy_html = f"""
+ <div>
+ <button id='copy-btn' style='padding:6px 10px;border-radius:4px;'>Copy cheat-sheet</button>
+ <script>
+ const btn = document.getElementById('copy-btn');
+ btn.addEventListener('click', async () => {{
+     try {{
+         await navigator.clipboard.writeText(`{escaped}`);
+         btn.innerText = 'Copied!';
+         setTimeout(() => btn.innerText = 'Copy cheat-sheet', 1500);
+     }} catch (e) {{
+         btn.innerText = 'Copy failed';
+     }}
+ }});
+ </script>
+ </div>
+ """
+     components.html(copy_html, height=70)
+
+ # Display active model clearly in the main panel
+ is_custom = st.session_state.get('using_custom', False)
+ custom_badge = " 🔧 Custom" if is_custom else ""
+ st.markdown(f"### Active Model: `{st.session_state.active_model}{custom_badge}`")
+ model_type = "Hugging Face Transformer" if is_hf_model else "timm CNN Architecture"
+ st.caption(f"Model type: {model_type}")
+
+ # ---------------- Helpers ----------------
+ def classifier_fn(images_batch):
+     # Use current model/processor from session state
+     inputs = processor(images=[Image.fromarray(x.astype(np.uint8)) for x in images_batch],
+                        return_tensors="pt").to(device)
+     with torch.no_grad():
+         if is_hf_model:
+             outputs = model(**inputs)
+             logits = outputs.logits
+         else:
+             x = inputs['pixel_values']
+             logits = model(x)
+     probs = torch.softmax(logits, dim=-1).cpu().numpy()
+     return probs
+
+ def predict_probs(pil_img):
+     # Use current model/processor from session state
+     inputs = processor(images=pil_img, return_tensors="pt").to(device)
+     with torch.no_grad():
+         if is_hf_model:
+             outputs = model(**inputs)
+             logits = outputs.logits
+         else:
+             x = inputs['pixel_values']
+             logits = model(x)
+     probs = torch.softmax(logits, dim=-1).cpu().numpy()[0]
+     return probs
+
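LIME will call `classifier_fn` with a batch of perturbed images as uint8 HxWx3 arrays and expects an array of class probabilities with one row per image. A quick sanity check of that contract, using a dummy batch that is not part of the app:

    import numpy as np

    # Two random 224x224 RGB "images", as LIME would pass them in
    dummy_batch = np.random.randint(0, 256, size=(2, 224, 224, 3), dtype=np.uint8)
    out = classifier_fn(dummy_batch)
    assert out.ndim == 2 and out.shape[0] == 2            # one probability row per image
    assert np.allclose(out.sum(axis=1), 1.0, atol=1e-3)   # rows are softmax distributions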
+ # ---------------- Upload ----------------
+ uploaded = st.file_uploader("Upload an image", type=["png","jpg","jpeg"])
+ if uploaded:
+     img = Image.open(uploaded).convert("RGB").resize((224,224))
+     logger.info("Uploaded image received (size=%s)", img.size)
+     st.image(img, caption="Uploaded image", use_container_width=True)
+
+     # ---------------- Prediction ----------------
+     probs = predict_probs(img)
+     pred_idx = int(np.argmax(probs))
+
+     # Get label - handle models differently based on source
+     if is_hf_model:
+         # Use model's config.id2label if available
+         pred_label = model.config.id2label[pred_idx]
+     elif st.session_state.get('class_map'):
+         # Use custom class map if provided (access defensively)
+         _class_map = st.session_state.get('class_map')
+         pred_label = _class_map.get(str(pred_idx), f"Class {pred_idx}") if _class_map is not None else f"Class {pred_idx}"
+     else:
+         # For timm models without labels
+         pred_label = f"Class {pred_idx}"
+
+     pred_prob = float(probs[pred_idx])
+     logger.info("Prediction: %s (%.3f)", pred_label, pred_prob)
+
+     st.subheader("🔮 Prediction")
+     st.write(f"**Top-1:** {pred_label} ({pred_prob:.3f})")
+
+     if not is_hf_model and not st.session_state.get('class_map'):
+         st.info("ℹ️ Using model without class names. Upload a class mapping in the sidebar for friendly labels.")
+
+     # ---------------- LIME ----------------
+     st.subheader("📏 LIME Attribution")
+     st.markdown("""
+ **Local Interpretable Model-agnostic Explanations (LIME)** is a technique that approximates how a complex model (like ViT or ResNet) makes decisions for a specific input by creating a simpler, interpretable model around it.
+ It splits the image into segments, perturbs them, and sees which ones most influence the prediction, revealing what the model "sees" as important.
+ This is crucial for debugging biases or understanding if the model focuses on relevant features vs. artifacts.
+ """)
+     img_np = np.array(img)
+
+     with st.spinner("Generating LIME explanation..."):
+         exp = explainer.explain_instance(
+             img_np, classifier_fn=classifier_fn, top_labels=1, num_samples=1000,
+             segmentation_fn=lambda x: slic(x, n_segments=60, compactness=9, start_label=0)
+         )
+         temp, mask = exp.get_image_and_mask(pred_idx, positive_only=True,
+                                             num_features=8, hide_rest=False)
+         lime_img = mark_boundaries(temp/255.0, mask)
+
+     st.image(lime_img, caption=f"LIME highlights regions important for '{pred_label}'")
+     st.info("""
+ **How to read:** Bright (or colored) segments show areas the model relied on most for its prediction - these are the "superpixels" that, when altered, change the output the most.
+ Green/red overlays often indicate positive/negative contributions. If irrelevant background or edges light up, it might signal the model learned spurious correlations (e.g., from training data artifacts).
+ Furthermore, this builds trust by showing whether AI decisions align with human intuition.
+ """)
+
+     # ---------------- LIME Uncertainty ----------------
+     st.subheader("📊 LIME Attribution Uncertainty")
+     st.markdown("""
+ Uncertainty in explanations arises because LIME is stochastic - it samples perturbations randomly. By running LIME multiple times, we can measure variability in attributions,
+ highlighting whether the model's reasoning is consistent or fragile for this image. High variability suggests the explanation (and thus model confidence) isn't robust.
+ """)
+     logger.info("Starting LIME uncertainty runs (n=5)")
+     maps = []
+     for i in range(5):
+         logger.debug("LIME run %d", i+1)
+         exp = explainer.explain_instance(
+             img_np, classifier_fn=classifier_fn, top_labels=1, num_samples=500,
+             segmentation_fn=lambda x: slic(x, n_segments=60, compactness=9, start_label=0)
+         )
+         local_exp = dict(exp.local_exp)[pred_idx]
+         segments = exp.segments
+         attr_map = np.zeros(segments.shape)
+         for seg_id, weight in local_exp:
+             attr_map[segments == seg_id] = weight
+         maps.append(attr_map)
+     maps = np.stack(maps)
+     mean_attr, std_attr = maps.mean(0), maps.std(0)
+
+     fig, ax = plt.subplots(1,2, figsize=(8,4))
+     im1 = ax[0].imshow(mean_attr, cmap="jet"); ax[0].set_title("Mean attribution"); ax[0].axis("off")
+     plt.colorbar(im1, ax=ax[0], fraction=0.046)
+     im2 = ax[1].imshow(std_attr, cmap="hot"); ax[1].set_title("Attribution std (uncertainty)"); ax[1].axis("off")
+     plt.colorbar(im2, ax=ax[1], fraction=0.046)
+     st.pyplot(fig)
+     st.info("""
+ **How to read:** The left heatmap shows average importance across runs (hotter = more influential). The right shows standard deviation - high std (yellow/red) means unstable explanations for those regions.
+ If uncertainty is high in key areas, the model might overfit or need more diverse training data. This helps ML practitioners quantify explanation reliability.
+ """)
+     logger.info("Completed LIME uncertainty runs")
+
+     # ---------------- MC Dropout ----------------
+     st.subheader("🎲 MC Dropout Uncertainty")
+     st.markdown("""
+ Monte Carlo (MC) Dropout treats dropout layers (normally off during inference) as a Bayesian approximation to estimate epistemic uncertainty - how much the model "doesn't know" due to limited training.
+ By enabling dropout and sampling predictions multiple times, we see if the model consistently agrees on the class or wavers, indicating potential unreliability.
+ """)
+     logger.info("Starting MC Dropout sampling")
+     model.train()  # enable dropout
+     mc_preds = []
+     with torch.no_grad():
+         for _ in range(30):
+             probs_mc = predict_probs(img)
+             mc_preds.append(probs_mc)
+     model.eval()
+     mc_preds = np.stack(mc_preds)
+     mc_mean = mc_preds.mean(0)
+     mc_top = mc_mean.argmax()
+     if is_hf_model:
+         mc_label = model.config.id2label[mc_top]
+     elif st.session_state.get('class_map'):
+         _class_map = st.session_state.get('class_map')
+         mc_label = _class_map.get(str(mc_top), f"Class {mc_top}") if _class_map is not None else f"Class {mc_top}"
+     else:
+         mc_label = f"Class {mc_top}"
+     p = mc_preds[:, mc_top]
+
+     fig, ax = plt.subplots()
+     ax.hist(p, bins=15, color="C0")
+     ax.set_title(f"MC Dropout: p({mc_label}) across samples")
+     st.pyplot(fig)
+     st.info("""
+ **How to read:** This histogram shows probability distributions for the top class across 30 samples. A narrow, peaked distribution means stable confidence (low uncertainty).
+ A wide spread or multiple modes suggests the model is unsure, possibly due to out-of-distribution inputs. For devs, this flags cases needing human review; it highlights risky predictions.
+ """)
+     logger.info("Completed MC Dropout: top=%s", mc_label)
+
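One caveat with the `model.train()` call above: it also puts layers such as BatchNorm back into training mode, which can distort MC Dropout samples for CNN backbones. A common refinement (a sketch, not something this commit does) is to re-enable only the dropout modules:

    import torch.nn as nn

    def enable_mc_dropout(m: torch.nn.Module):
        """Keep the model in eval mode but switch dropout layers to train for MC sampling."""
        m.eval()
        for module in m.modules():
            if isinstance(module, nn.Dropout):
                module.train()

    # enable_mc_dropout(model)  # then sample predict_probs(img) repeatedly, as above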
+     # ---------------- Test-Time Augmentation (TTA) Uncertainty ----------------
+     st.subheader("🔄 Test-Time Augmentation (TTA) Uncertainty")
+     st.markdown("""
+ Test-Time Augmentation (TTA) applies random transformations (crops, flips) at inference to probe aleatoric uncertainty - noise inherent in the input or model.
+ If predictions vary wildly under small changes, the model relies on brittle features, revealing data-related issues rather than model knowledge gaps.
+ """)
+     logger.info("Starting TTA sampling")
+     tta_tfms = T.Compose([T.Resize(256), T.RandomResizedCrop(224, scale=(0.9,1.0)), T.RandomHorizontalFlip(p=0.5)])
+     tta_preds = []
+     with torch.no_grad():
+         for _ in range(20):
+             aug = tta_tfms(img)
+             probs_tta = predict_probs(aug)
+             tta_preds.append(probs_tta)
+     tta_preds = np.stack(tta_preds)
+     tta_mean = tta_preds.mean(0)
+     tta_top = tta_mean.argmax()
+     if is_hf_model:
+         tta_label = model.config.id2label[tta_top]
+     elif st.session_state.get('class_map'):
+         _class_map = st.session_state.get('class_map')
+         tta_label = _class_map.get(str(tta_top), f"Class {tta_top}") if _class_map is not None else f"Class {tta_top}"
+     else:
+         tta_label = f"Class {tta_top}"
+     p_tta = tta_preds[:, tta_top]
+
+     fig, ax = plt.subplots()
+     ax.hist(p_tta, bins=15, color="C1")
+     ax.set_title(f"TTA: p({tta_label}) across augmentations")
+     st.pyplot(fig)
+     st.info("""
+ **How to read:** Similar to MC Dropout, but focused on input variations. Low variance means the prediction is robust to perturbations (good sign). High variance indicates sensitivity to details like lighting/position,
+ common in overfitted models. Use this to assess if your AI system handles real-world variability well.
+ """)
+     logger.info("Completed TTA: top=%s", tta_label)
+     # ---------------- Summary ----------------