peggy30 committed
Commit 1bd4e77 · 1 Parent(s): 40976c7

add introduction

pages/ALE.py CHANGED
@@ -52,10 +52,10 @@ def explain_example():

     st.pyplot(fig)

-    fig, ax = plt.subplots(figsize=(10, 5))
+    fig1, ax1 = plt.subplots(figsize=(10, 5))
     st.write("2D Second-Order ALE Plot")
     ale_plot(global_model, X_test, X_train.columns[:2], bins=10)
-    st.pyplot(fig)
+    st.pyplot(fig1)

 def main():
     global global_model
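Whether `st.pyplot(fig1)` actually shows the ALE curves depends on where `ale_plot` draws. A minimal alternative sketch, assuming `ale_plot` comes from the `alepython` package (the import is not shown in this hunk) and renders onto matplotlib's current figure rather than onto a caller-supplied `Axes`:

```python
import matplotlib.pyplot as plt
import streamlit as st
from alepython import ale_plot  # assumption: ale_plot is imported from alepython

# global_model, X_test, X_train are defined elsewhere in ALE.py (see the surrounding file).
st.write("2D Second-Order ALE Plot")
ale_plot(global_model, X_test, X_train.columns[:2], bins=10)
# Assumption: ale_plot drew on matplotlib's current figure, so capture that
# figure instead of creating a separate (and possibly empty) one.
st.pyplot(plt.gcf())
```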
pages/PermutationFeatureImportance.py CHANGED
@@ -63,8 +63,8 @@ def main():
     if global_model is None:
         train_model()

-    st.title("ALE (Accumulated Local Effects)")
-    st.write(prompt_params.PERMUTTATION_INTRODUCTION)
+    st.title("Permutation Feature Importance")
+    st.write(prompt_params.PERMUTATION_INTRODUCTION)
     # Explain the selected sample
     if st.button("Explain Sample"):
         explain_example()
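For reference, the computation this page describes can be done with scikit-learn's `permutation_importance`. A minimal sketch, assuming `global_model` is the app's fitted classifier and that a held-out `X_test` / `y_test` split exists (`y_test` does not appear in this hunk and is an assumption):

```python
from sklearn.inspection import permutation_importance

# Assumption: global_model is the fitted classifier trained elsewhere in the app,
# and X_test / y_test are the held-out features (a DataFrame) and labels.
result = permutation_importance(
    global_model, X_test, y_test, n_repeats=10, random_state=42
)

# Shuffling a feature breaks its link to the target; the larger the score drop,
# the more important the feature.
for name, mean, std in sorted(
    zip(X_test.columns, result.importances_mean, result.importances_std),
    key=lambda item: item[1],
    reverse=True,
):
    print(f"{name}: importance {mean:.4f} +/- {std:.4f}")
```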
src/prompt_config.py CHANGED
@@ -1,7 +1,24 @@
 APP_INTRODUCTION = """
- This application provides explainability for machine learning models using LIME and SHAP.
- It allows users to explore how different features influence model predictions by selecting
- specific samples and visualizing their explanations interactively.
+ # **Explanatory AI for Income Prediction - App Overview**
+
+ This application provides an **Explainable AI (XAI)** framework to analyze a machine learning model trained on the **UCI Adult Income Dataset** ([link](https://archive.ics.uci.edu/dataset/2/adult)). The model predicts whether an individual earns more than **$50,000 per year** based on key demographic and employment-related features, including:
+
+ - **Age**
+ - **Education Level**
+ - **Marital Status**
+ - **Sex (Gender)**
+ - **Occupation, Work Hours, Country**, etc.
+
+ To ensure transparency and interpretability, the app utilizes multiple **XAI techniques** to explain model predictions:
+
+ ### **Explainability Methods**
+ 1. **ALE (Accumulated Local Effects)** – Measures feature influence while considering feature dependencies.
+ 2. **Anchors** – Provides high-precision rule-based explanations for individual predictions.
+ 3. **ICE & PDP (Individual Conditional Expectation & Partial Dependence Plots)** – Visualizes how a feature affects the model prediction globally and individually.
+ 4. **LIME (Local Interpretable Model-agnostic Explanations)** – Generates local approximations to explain specific predictions.
+ 5. **Permutation Feature Importance** – Assesses the importance of features by measuring the change in prediction error after shuffling feature values.
+ 6. **SHAP (SHapley Additive exPlanations)** – Computes fair feature attributions based on cooperative game theory.
+
 """
 LIME_INTRODUCTION = """
 LIME (Local Interpretable Model-agnostic Explanations) is a technique used to interpret the predictions of black-box machine learning models.
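As one concrete illustration of the local-explanation methods listed in the new introduction, here is a minimal sketch using the `lime` package on a tabular classifier. The names `global_model`, `X_train`, and `X_test` mirror the variables used in the app's pages and are assumptions here, as are the class labels:

```python
from lime.lime_tabular import LimeTabularExplainer

# Assumption: X_train / X_test are pandas DataFrames of encoded features and
# global_model is the fitted classifier used by the app's other pages.
explainer = LimeTabularExplainer(
    training_data=X_train.values,
    feature_names=X_train.columns.tolist(),
    class_names=["<=50K", ">50K"],  # assumed label names for the income task
    mode="classification",
)

# Fit a local surrogate around one test row and list the top feature weights.
explanation = explainer.explain_instance(
    X_test.values[0], global_model.predict_proba, num_features=5
)
# Each pair is (feature condition, local weight toward the explained class).
print(explanation.as_list())
```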