peggy30 committed
Commit fe54272 · 1 Parent(s): 5963f5d

add ice pdp

pages/Anchors.py CHANGED
@@ -86,7 +86,7 @@ def explain_example(anchors_threshold, example_idx):
  class_names,
  feature_names,
  X_train.values,
- categorical_names, seed=42)
+ categorical_names)

  # Explain the selected sample
  exp = explainer.explain_instance(X_test.values[example_idx], global_model.predict, threshold=anchors_threshold)
pages/ICE_and_PDP.py ADDED
@@ -0,0 +1,68 @@
+ # Import Libraries
+ import matplotlib.pyplot as plt
+ import streamlit as st
+ import src.prompt_config as prompt_params
+ # Models
+ import xgboost
+ from sklearn.model_selection import train_test_split
+ from sklearn.inspection import PartialDependenceDisplay
+ # XAI (Explainability)
+ import shap
+
+ # Global Variables to Store Model & Data
+ global_model = None
+ X_train, X_test, y_train, y_test = None, None, None, None
+
+
+ def train_model():
+     """ Train the XGBoost model only once and store it globally. """
+     global global_model, X_train, X_test, y_train, y_test
+
+     if global_model is None:
+         # Load Data from SHAP library
+         X, y = shap.datasets.adult()
+
+         # Split data
+         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)
+
+         # Train XGBoost model
+         global_model = xgboost.XGBClassifier()
+         global_model.fit(X_train, y_train)
+
+         print("XGBoost Model training completed!")
+
+ def explain_example(features, kind):
+     """ Plot ICE/PDP curves for the selected feature without retraining the model. """
+     global global_model, X_train, X_test, y_train, y_test
+
+     if global_model is None:
+         train_model()
+
+     fig, ax = plt.subplots(figsize=(10, 5))
+     PartialDependenceDisplay.from_estimator(global_model, X_test, [features], kind=kind, ax=ax)
+
+     st.pyplot(fig)
+
+ def main():
+     global global_model
+
+     # Ensure the model is trained only once
+     if global_model is None:
+         train_model()
+     # Define feature names
+     feature_names = ["Age", "Workclass", "Education-Num", "Marital Status", "Occupation",
+                      "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss", "Hours per week", "Country"]
+
+     selected_feature = st.sidebar.selectbox("Select a feature for PDP/ICE analysis:", feature_names)
+
+     kind = st.sidebar.selectbox("Select plot type:", ["average", "both", "individual"])
+
+     st.title("ICE (Individual Conditional Expectation) and PDP (Partial Dependence Plot)")
+     st.write(prompt_params.ICE_INTRODUCTION)
+     # Explain the selected sample
+     if st.button("Explain Sample"):
+         explain_example(selected_feature, kind)
+
+
+ if __name__ == '__main__':
+     main()
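
The new page leans on scikit-learn's `PartialDependenceDisplay.from_estimator` and its `kind` argument. As a sanity check, a minimal standalone sketch (no Streamlit) that produces the same kind of plot is shown below; the "Age" feature, the 50/50 split, and the figure size are illustrative choices, and it assumes `shap`, `xgboost`, `scikit-learn`, and `matplotlib` are installed.

```python
# Standalone PDP/ICE sketch mirroring the training code in pages/ICE_and_PDP.py.
import matplotlib.pyplot as plt
import shap
import xgboost
from sklearn.inspection import PartialDependenceDisplay
from sklearn.model_selection import train_test_split

# Same data and model setup as the page (assumed, for illustration)
X, y = shap.datasets.adult()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)
model = xgboost.XGBClassifier()
model.fit(X_train, y_train)

fig, ax = plt.subplots(figsize=(10, 5))
# kind="both" overlays the per-instance ICE curves with the averaged PDP curve;
# "individual" shows only ICE, "average" only PDP.
PartialDependenceDisplay.from_estimator(model, X_test, ["Age"], kind="both", ax=ax)
plt.show()
```

Passing `ax=ax` keeps the curves on the figure handed to `st.pyplot(fig)` in the Streamlit page; without it, `from_estimator` draws on its own figure and the displayed one stays empty.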
pages/SHAP.py CHANGED
@@ -92,7 +92,7 @@ def main():
  label="Select the sample index to explain:",
  min_value=0,
  max_value=len(X_test) - 1, # Ensures the index is within range
- value=1, # Default value
+ value=100, # Default value
  step=1, # Step size
  help=prompt_params.EXAMPLE_BE_EXPLAINED_IDX,
  )
src/prompt_config.py CHANGED
@@ -77,4 +77,32 @@ The process of SHAP includes the following steps:
  4. **Ensure Additivity**: The sum of SHAP values should match the model's prediction difference from the baseline.

  By using SHAP, **interpretability** improves by generating stable and mathematically sound explanations, making models more transparent and trustworthy.
+ """
+
+ ICE_INTRODUCTION = """
+ Individual Conditional Expectation (ICE) plots provide a more granular view of feature influence by displaying the response of each individual instance to changes in a selected feature, rather than averaging across all instances as in Partial Dependence Plots (PDP).
+
+ The process of ICE includes the following steps:
+ 1. **Select the Feature of Interest**: Choose a variable to analyze, such as Age.
+ 2. **Create a Feature Grid**: Define a range of values for the chosen feature (e.g., Age from 20 to 80).
+ 3. **Iterate Over Each Instance**: For each sample, replace the feature with values from the grid while keeping all other features unchanged.
+ 4. **Compute Predictions**: Use the trained model to predict outcomes for each modified sample and store the predictions.
+ 5. **Plot Individual Curves**: Each sample produces a separate curve, representing how its prediction evolves as the feature changes.
+
+ By using ICE, **interpretability** improves by showing how a feature influences predictions at an individual level, capturing heterogeneous effects that PDP might average out.
+
+ A Partial Dependence Plot (PDP) illustrates the marginal effect of a selected feature on the model’s predictions while averaging out the influence of all other features.
+
+ The process of PDP includes the following steps:
+ 1. **Select the Feature of Interest**: Choose a variable for PDP analysis, such as Age.
+ 2. **Create a Feature Grid**: Define a range of values for the selected feature (e.g., Age from 20 to 80).
+ 3. **Modify the Dataset and Compute Predictions**: For each value in the grid, replace the feature value in all instances while keeping other features unchanged, then use the trained model to predict outcomes for the modified dataset.
+ 4. **Compute the Average Prediction**: Aggregate predictions across all instances and calculate the mean for each feature value in the grid.
+
+ By using PDP, **interpretability** improves by showing the average effect of a feature on model predictions, making complex models more explainable.
+
+ The `kind` option controls what is displayed:
+ - **both**: Displays both ICE and PDP.
+ - **individual**: Displays only ICE.
+ - **average**: Displays only PDP.
  """