Upload 2 files

- vit_and_captum.py +199 -0
- vit_lime_uncertainty.py +205 -0
vit_and_captum.py
ADDED
@@ -0,0 +1,199 @@
'''
Example of using Captum Integrated Gradients (IG) with a Vision Transformer (ViT)
model to explain image classification predictions.

This example downloads a random image from the web, runs it through a pre-trained
ViT model, and uses Captum to compute and visualize attributions.

IG is like asking the computer not just what is in the image, but which parts
of the picture convinced it to give that answer: like turning up the brightness
on a photo and seeing which regions made the model confident in its prediction.
'''

import torch
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
import requests
import random
from io import BytesIO
import numpy as np

# Logging: console plus a rotating file handler under ./logs
import logging, os
from logging.handlers import RotatingFileHandler

LOG_DIR = os.path.join(os.path.dirname(__file__), "logs")
os.makedirs(LOG_DIR, exist_ok=True)
logfile = os.path.join(LOG_DIR, "interp.log")
logger = logging.getLogger("vit_and_captum")
if not logger.handlers:
    logger.setLevel(logging.INFO)
    sh = logging.StreamHandler()
    fh = RotatingFileHandler(logfile, maxBytes=5_000_000, backupCount=3, encoding="utf-8")
    fmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s")
    sh.setFormatter(fmt); fh.setFormatter(fmt)
    logger.addHandler(sh); logger.addHandler(fh)

# ---- Step 1: Load model ----
# Using a Vision Transformer (ViT) model from Hugging Face Transformers
from transformers import ViTForImageClassification, ViTImageProcessor

# Load the pre-trained model and its image processor
model_name = "google/vit-base-patch16-224"
model = ViTForImageClassification.from_pretrained(model_name)
processor = ViTImageProcessor.from_pretrained(model_name)

# Run in eval mode for inference
model.eval()

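# NOTE (added sketch, not part of the original upload): the processor hides
# the preprocessing it applies (resize + per-channel normalization). A manual
# equivalent looks roughly like this; size/mean/std are read from the
# processor attributes rather than hard-coded, since they are model-specific.
def manual_preprocess(pil_img):
    arr = np.asarray(pil_img.resize((224, 224))).astype(np.float32) / 255.0
    mean = np.array(processor.image_mean, dtype=np.float32)
    std = np.array(processor.image_std, dtype=np.float32)
    arr = (arr - mean) / std  # broadcasts over the channel (last) axis
    return torch.from_numpy(arr).permute(2, 0, 1).unsqueeze(0)  # (1, 3, 224, 224)
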
# ---- Step 2: Load an image ----
# Download a random image from one of several free stock-photo providers
def download_random_image():
    # ImageNet-style subjects so the classifier has a fair chance
    search_terms = ["dog", "cat", "bird", "car", "airplane", "horse", "elephant", "tiger", "lion", "bear"]
    term = random.choice(search_terms)

    # Multiple providers to improve reliability
    providers = [
        f"https://source.unsplash.com/224x224/?{term}",
        f"https://picsum.photos/seed/{term}/224/224",
        f"https://loremflickr.com/224/224/{term}",
        # placekitten is a good fallback for cat-like images (serves an image for any request)
        f"https://placekitten.com/224/224"
    ]

    headers = {"User-Agent": "Mozilla/5.0 (compatible; ImageFetcher/1.0)"}
    for url in providers:
        try:
            response = requests.get(url, timeout=10, headers=headers, allow_redirects=True)
            if response.status_code != 200:
                logger.warning("Provider %s returned status %d", url, response.status_code)
                continue

            # Try to identify and open the image content
            try:
                img = Image.open(BytesIO(response.content)).convert("RGB")
            except Exception as img_err:
                logger.warning("Failed to parse image from %s: %s", url, img_err)
                continue

            # Ensure it's exactly 224x224
            try:
                img = img.resize((224, 224), Image.Resampling.LANCZOS)
            except AttributeError:
                # Fallback for PIL versions without Image.Resampling
                img = img.resize((224, 224), Image.LANCZOS)
            logger.info("Downloaded random image from %s for term=%s", url, term)
            return img
        except requests.RequestException as e:
            logger.warning("Request failed for %s: %s", url, e)
            continue

    logger.error("All providers failed; using fallback solid-color image.")
    img = Image.new("RGB", (224, 224), color=(128, 128, 128))
    return img

# Download and use a random image
img = download_random_image()
# Preprocess the image into a PyTorch tensor dict
inputs = processor(images=img, return_tensors="pt")

# ---- Step 3: Run prediction ----
with torch.no_grad():               # no gradients needed for inference
    outputs = model(**inputs)       # inputs is a dict
probs = outputs.logits.softmax(-1)  # class probabilities
pred_idx = probs.argmax(-1).item()  # index of the predicted class
logger.info("Predicted %s (idx=%d)", model.config.id2label[pred_idx], pred_idx)

# Show top-k predictions to give context
topk = 5
topk_vals, topk_idx = torch.topk(probs, k=topk)
topk_vals = topk_vals.squeeze().cpu().numpy()
topk_idx = topk_idx.squeeze().cpu().numpy()
print(f"Top-{topk} predictions:")
for v, i in zip(topk_vals, topk_idx):
    print(f"  {model.config.id2label[int(i)]:30s} {float(v):.4f}")
print("Chosen prediction:", model.config.id2label[pred_idx])

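# NOTE (added sketch, not part of the original upload): the quick guide printed
# at the end of this script mentions predictive entropy but never computes it.
# For a single softmax distribution it is a one-liner:
p = probs.squeeze()
pred_entropy = float(-(p * (p + 1e-12).log()).sum())  # in nats; higher = more uncertain
logger.info("Predictive entropy: %.4f nats", pred_entropy)
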
# ---- Step 4: Captum Integrated Gradients ----
from captum.attr import IntegratedGradients

# Captum expects a forward function that returns a tensor (not a ModelOutput dataclass)
def forward_func(pixel_values):
    # Call the model and return the raw logits as a Tensor
    outputs = model(pixel_values=pixel_values)
    return outputs.logits

# IntegratedGradients wraps the forward function
ig = IntegratedGradients(forward_func)

# Captum needs the inputs to require gradients
input_tensor = inputs["pixel_values"].clone().detach()
input_tensor.requires_grad_(True)

# Compute attributions for the predicted class index
# (100 integration steps; also return the convergence delta)
attributions, convergence_delta = ig.attribute(
    input_tensor,
    target=pred_idx,
    n_steps=100,
    return_convergence_delta=True,
)
logger.info("IG convergence delta: %s", convergence_delta)

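# NOTE (added sketch, not part of the original upload): ig.attribute defaults
# to an all-zeros baseline, and IG satisfies a completeness axiom: the
# attributions should sum approximately to forward(input) - forward(baseline)
# for the target class. The convergence delta logged above is exactly that
# gap; this makes the check explicit, assuming the default zero baseline:
with torch.no_grad():
    baseline = torch.zeros_like(input_tensor)
    f_input = forward_func(input_tensor)[0, pred_idx]
    f_base = forward_func(baseline)[0, pred_idx]
attr_sum = float(attributions.sum())
logger.info("Completeness check: sum(attr)=%.4f vs f(x)-f(x0)=%.4f",
            attr_sum, float(f_input - f_base))
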
# ---- Step 5: Visualize attribution heatmap (normalized + overlay) ----

# Aggregate over channels (signed mean keeps the sign of contributions)
attr = attributions.squeeze().mean(dim=0).detach().cpu().numpy()

# Normalize to [-1, 1] so a diverging colormap shows positive vs negative contributions
min_v, max_v = float(attr.min()), float(attr.max())
norm_denom = max(abs(min_v), abs(max_v)) + 1e-8
attr_signed = attr / norm_denom  # now in approximately [-1, 1]

# Optional: smooth the heatmap slightly to make the overlay more intuitive
try:
    heat_pil = Image.fromarray(np.uint8((attr_signed + 1) * 127.5))
    heat_pil = heat_pil.filter(ImageFilter.GaussianBlur(radius=1.5))
    attr_signed = (np.array(heat_pil).astype(float) / 127.5) - 1.0
except Exception:
    # If the PIL filter is unavailable, continue without smoothing
    pass

# Overlay using a diverging colormap (positive = warm, negative = cool)
plt.figure(figsize=(6, 6))
plt.imshow(img)
plt.imshow(attr_signed, cmap="seismic", alpha=0.45, vmin=-1, vmax=1)
cb = plt.colorbar(fraction=0.046, pad=0.04)
cb.set_label("Signed attribution (normalized)")
plt.title(f"IG overlay - pred: {model.config.id2label[pred_idx]} ({float(probs.squeeze()[pred_idx]):.3f})")
plt.axis("off")

# Standalone signed heatmap for clearer inspection
plt.figure(figsize=(4, 4))
plt.imshow(attr_signed, cmap="seismic", vmin=-1, vmax=1)
plt.colorbar()
plt.title("Signed IG Attribution (neg=blue, pos=red)")
plt.axis("off")

plt.show()

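# NOTE (added sketch, not part of the original upload): ViT reasons over
# 16x16 patches, so pixel-level IG maps can look noisy. Averaging the
# attribution within each patch often gives a cleaner picture of what the
# model used. A minimal version, assuming this checkpoint's 224x224 input
# and 16-pixel patches:
patch = 16
grid = attr_signed.reshape(224 // patch, patch, 224 // patch, patch).mean(axis=(1, 3))
plt.figure(figsize=(4, 4))
plt.imshow(grid, cmap="seismic", vmin=-1, vmax=1)
plt.title("Patch-averaged IG (14x14 grid)")
plt.axis("off")
plt.show()
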
# Concise runtime interpretability guidance
def print_interpretability_summary():
    print("\nHow to read the results (quick guide):")
    print("- IG signed heatmap: red/warm = supports the predicted class; blue/cool = opposes it.")
    print("- Normalize by max-abs when comparing images. Check the IG 'convergence delta': large values mean the attributions should be treated cautiously.")
    print("- LIME panel (if used): green/highlighted superpixels indicate locally important regions; background-dominated explanations are a red flag.")
    print("- MC Dropout histogram: narrow peak → stable belief; wide/multi-modal → epistemic uncertainty.")
    print("- TTA histogram: many flips under small augmentations → fragile/aleatoric sensitivity.")
    print("- Predictive entropy: higher → more uncertainty in the full distribution.")
    print("- Variation ratio: fraction of samples not matching the majority; higher → more disagreement.\n")

print_interpretability_summary()
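
# NOTE (added sketch, not part of the original upload): the guide above
# mentions MC Dropout and variation ratio, which this script does not
# implement. A minimal version follows. Caveat: this checkpoint's config may
# set its dropout probabilities to 0.0, in which case all samples are
# identical and the dropout rate would need to be raised to produce spread.
def mc_dropout_predictions(n_samples=20):
    model.train()  # enable dropout layers
    preds = []
    with torch.no_grad():
        for _ in range(n_samples):
            logits = model(**inputs).logits
            preds.append(int(logits.argmax(-1).item()))
    model.eval()
    return preds

mc_preds = mc_dropout_predictions()
majority = max(set(mc_preds), key=mc_preds.count)
variation_ratio = 1.0 - mc_preds.count(majority) / len(mc_preds)
logger.info("MC Dropout majority=%s variation ratio=%.3f",
            model.config.id2label[majority], variation_ratio)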
vit_lime_uncertainty.py
ADDED
@@ -0,0 +1,205 @@
import torch
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import requests
import random
from io import BytesIO

from transformers import ViTForImageClassification, ViTImageProcessor
from lime import lime_image
from skimage.segmentation import slic, mark_boundaries

# Logging: console plus a rotating file handler under ./logs
import logging, os
from logging.handlers import RotatingFileHandler

LOG_DIR = os.path.join(os.path.dirname(__file__), "logs")
os.makedirs(LOG_DIR, exist_ok=True)
logfile = os.path.join(LOG_DIR, "interp.log")
logger = logging.getLogger("vit_lime_uncertainty")
if not logger.handlers:
    logger.setLevel(logging.INFO)
    sh = logging.StreamHandler()
    fh = RotatingFileHandler(logfile, maxBytes=5_000_000, backupCount=3, encoding="utf-8")
    fmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s")
    sh.setFormatter(fmt); fh.setFormatter(fmt)
    logger.addHandler(sh); logger.addHandler(fh)

# ---- Step 1: Load model & processor ----
model_name = "google/vit-base-patch16-224"
model = ViTForImageClassification.from_pretrained(model_name)
processor = ViTImageProcessor.from_pretrained(model_name)
model.eval()

# ---- Step 2: Robust random image downloader (multiple providers + fallback) ----
def download_random_image(size=(224, 224)):
    search_terms = ["dog", "cat", "bird", "car", "airplane", "horse", "elephant", "tiger", "lion", "bear"]
    term = random.choice(search_terms)
    providers = [
        f"https://source.unsplash.com/{size[0]}x{size[1]}/?{term}",
        f"https://picsum.photos/seed/{term}/{size[0]}/{size[1]}",
        f"https://loremflickr.com/{size[0]}/{size[1]}/{term}",
        f"https://placekitten.com/{size[0]}/{size[1]}"
    ]
    headers = {"User-Agent": "Mozilla/5.0 (compatible; ImageFetcher/1.0)"}
    for url in providers:
        try:
            r = requests.get(url, timeout=10, headers=headers, allow_redirects=True)
            if r.status_code != 200:
                logger.warning("Provider %s returned status %d", url, r.status_code)
                continue
            try:
                img = Image.open(BytesIO(r.content)).convert("RGB")
            except Exception as e:
                logger.warning("Failed to open image from %s: %s", url, e)
                continue
            try:
                img = img.resize(size, Image.Resampling.LANCZOS)
            except AttributeError:
                # Fallback for PIL versions without Image.Resampling
                img = img.resize(size, Image.LANCZOS)
            logger.info("Downloaded image for '%s' from %s", term, url)
            return img
        except requests.RequestException as e:
            logger.warning("Request exception %s for %s", e, url)
            continue
    logger.error("All providers failed; using fallback solid-color image.")
    return Image.new("RGB", size, color=(128, 128, 128))

# ---- Step 3: Classifier function for LIME ----
def classifier_fn(images_batch):
    """
    images_batch: list or numpy array of images with shape (N, H, W, 3),
    values in [0, 255] or uint8. Returns a numpy array (N, num_classes) of probabilities.
    """
    # The transformers processor accepts numpy arrays directly
    if isinstance(images_batch, np.ndarray):
        imgs = [img.astype(np.uint8) for img in images_batch]
    else:
        imgs = images_batch
    inputs = processor(images=imgs, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.softmax(outputs.logits, dim=-1).cpu().numpy()
    return probs

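# NOTE (added sketch, not part of the original upload): LIME silently assumes
# classifier_fn honors the (N, H, W, 3) -> (N, num_classes) contract. A cheap
# self-check on a dummy batch catches mismatches before a long LIME run:
_test = classifier_fn(np.zeros((2, 224, 224, 3), dtype=np.uint8))
assert _test.shape == (2, model.config.num_labels), _test.shape
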
# ---- Step 4: Run LIME multiple times to estimate uncertainty ----
def lime_explanations_with_uncertainty(img_pil, n_runs=6, num_samples=1000, segments_kwargs=None):
    if segments_kwargs is None:
        segments_kwargs = {"n_segments": 50, "compactness": 10}

    explainer = lime_image.LimeImageExplainer()
    img_np = np.array(img_pil)  # (H, W, 3) uint8

    run_maps = []
    for run in range(n_runs):
        logger.info("LIME run %d/%d (num_samples=%d)", run + 1, n_runs, num_samples)
        # Segmentation function so each run uses the same SLIC parameters
        segmentation_fn = lambda x: slic(x, start_label=0, **segments_kwargs)

        explanation = explainer.explain_instance(
            img_np,
            classifier_fn=classifier_fn,
            top_labels=5,
            hide_color=0,
            num_samples=num_samples,
            segmentation_fn=segmentation_fn
        )

        preds = classifier_fn(np.expand_dims(img_np, 0))
        pred_label = int(preds[0].argmax())

        local_exp = dict(explanation.local_exp)[pred_label]
        segments = explanation.segments  # (H, W) array of segment ids

        # Scatter per-superpixel weights back onto the pixel grid
        attr_map = np.zeros(segments.shape, dtype=float)
        for seg_id, weight in local_exp:
            attr_map[segments == seg_id] = weight

        run_maps.append(attr_map)
    runs_stack = np.stack(run_maps, axis=0)
    mean_attr = runs_stack.mean(axis=0)
    std_attr = runs_stack.std(axis=0)
    logger.info("Completed %d LIME runs, mean/std shapes: %s / %s", n_runs, mean_attr.shape, std_attr.shape)
    # Compute segments once more for the overlay (same segmentation kwargs)
    segments_final = slic(img_np, start_label=0, **segments_kwargs)
    return img_np, mean_attr, std_attr, segments_final, pred_label, preds.squeeze()

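# NOTE (added sketch, not part of the original upload): beyond the per-pixel
# std, a single stability number is handy for comparing settings. Mean
# pairwise Pearson correlation between run maps is a simple choice: near 1.0
# means the runs agree; near 0 means the explanation is mostly sampling noise.
# Call it on the runs_stack built inside the function above if desired.
def explanation_stability(runs_stack):
    flat = runs_stack.reshape(runs_stack.shape[0], -1)
    corr = np.corrcoef(flat)              # (n_runs, n_runs) correlation matrix
    iu = np.triu_indices_from(corr, k=1)  # upper triangle, excluding diagonal
    return float(corr[iu].mean())
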
# ---- Step 5: Visualize results ----
def plot_mean_and_uncertainty(img_np, mean_attr, std_attr, segments, pred_label, probs, cmap_mean="jet", cmap_unc="hot"):
    # Min-max normalize to [0, 1] for display
    def normalize(x):
        mn, mx = x.min(), x.max()
        return (x - mn) / (mx - mn + 1e-8)

    mean_norm = normalize(mean_attr)
    std_norm = normalize(std_attr)

    # Show label + probability in the title
    pred_name = model.config.id2label[int(pred_label)]
    pred_prob = float(probs[int(pred_label)])

    fig, axes = plt.subplots(2, 3, figsize=(15, 9))
    axes = axes.flatten()

    axes[0].imshow(img_np)
    axes[0].set_title("Original image")
    axes[0].axis("off")

    # Overlay mean attribution with segment boundaries
    overlay = img_np.copy().astype(float) / 255.0
    axes[1].imshow(mark_boundaries(overlay, segments, color=(1, 1, 0)))
    im1 = axes[1].imshow(mean_norm, cmap=cmap_mean, alpha=0.5)
    axes[1].set_title(f"Mean attribution (overlay)\npred: {pred_name} ({pred_prob:.3f})")
    axes[1].axis("off")
    fig.colorbar(im1, ax=axes[1], fraction=0.046, pad=0.04)

    # Uncertainty map
    im2 = axes[2].imshow(std_norm, cmap=cmap_unc)
    axes[2].set_title("Uncertainty (std)")
    axes[2].axis("off")
    fig.colorbar(im2, ax=axes[2], fraction=0.046, pad=0.04)

    # Histogram of mean attribution values
    axes[3].hist(mean_attr.ravel(), bins=50, color="C0")
    axes[3].set_title("Distribution of mean attribution")

    # Histogram of uncertainty values
    axes[4].hist(std_attr.ravel(), bins=50, color="C1")
    axes[4].set_title("Distribution of attribution std (uncertainty)")

    # Mask of high-uncertainty regions over the image
    thresh = np.percentile(std_attr, 90)
    contour_mask = std_attr >= thresh
    axes[5].imshow(img_np)
    axes[5].imshow(np.ma.masked_where(~contour_mask, contour_mask), cmap="Reds", alpha=0.45)
    axes[5].set_title(f"Top-10% uncertainty (threshold={thresh:.3f})")
    axes[5].axis("off")

    plt.tight_layout()
    plt.show()

# ---- Main: run example ----
if __name__ == "__main__":
    logger.info("Script started")
    img = download_random_image()
    img_np, mean_attr, std_attr, segments, pred_label, probs = lime_explanations_with_uncertainty(
        img_pil=img,
        n_runs=6,           # increase for better uncertainty estimates (slower)
        num_samples=1000,   # LIME samples per run
        segments_kwargs={"n_segments": 60, "compactness": 9}
    )
    logger.info("Plotting results and finishing")
    plot_mean_and_uncertainty(img_np, mean_attr, std_attr, segments, pred_label, probs)

    # Concise runtime interpretability guidance
    def print_interpretability_summary():
        print("\nHow to read the results (quick guide):")
        print("- LIME panel: green/highlighted superpixels are locally important for the predicted class; if background dominates, that's a red flag.")
        print("- LIME uncertainty (std): high-std regions indicate unstable explanations across runs.")
        print("- MC Dropout histogram: narrow peak → stable belief; wide/multi-modal → epistemic uncertainty.")
        print("- TTA histogram: if small flips/crops cause big swings, the prediction depends on fragile cues (aleatoric-ish sensitivity).")
        print("- Predictive entropy: higher means more uncertainty in the class distribution.")
        print("- Variation ratio: fraction of samples not in the majority class; higher → more disagreement.\n")

    print_interpretability_summary()
    logger.info("Script finished")
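
# NOTE (added sketch, not part of the original upload): the guide mentions
# test-time augmentation (TTA), which this script does not implement. A
# minimal version counts prediction flips under a horizontal mirror and a few
# random crops; call it from the __main__ block above if desired, e.g.
# tta_flip_rate(img).
from PIL import ImageOps

def tta_flip_rate(img_pil, n_crops=8, crop_frac=0.9):
    variants = [img_pil, ImageOps.mirror(img_pil)]
    w, h = img_pil.size
    cw, ch = int(w * crop_frac), int(h * crop_frac)
    for _ in range(n_crops):
        x0 = random.randint(0, w - cw)
        y0 = random.randint(0, h - ch)
        variants.append(img_pil.crop((x0, y0, x0 + cw, y0 + ch)).resize((w, h)))
    preds = [int(classifier_fn(np.expand_dims(np.array(v), 0))[0].argmax()) for v in variants]
    base = preds[0]
    flip_rate = sum(p != base for p in preds[1:]) / (len(preds) - 1)
    return flip_rate, preds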