Commit: add introduction

Files changed:
- pages/ALE.py +2 -2
- pages/PermutationFeatureImportance.py +2 -2
- src/prompt_config.py +20 -3
pages/ALE.py (CHANGED)

```diff
@@ -52,10 +52,10 @@ def explain_example():
 
     st.pyplot(fig)
 
-
+    fig1, ax1 = plt.subplots(figsize=(10, 5))
    st.write("2D Second-Order ALE Plot")
    ale_plot(global_model, X_test, X_train.columns[:2], bins=10)
-    st.pyplot(
+    st.pyplot(fig1)
 
 def main():
     global global_model
```
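The change creates the matplotlib figure explicitly (`fig1`) before drawing and then hands it to `st.pyplot(fig1)`, replacing the previous bare `st.pyplot(` call. Below is a minimal sketch of that Streamlit pattern, assuming a hypothetical `draw_ale_curve` helper that stands in for the repo's `ale_plot` and is treated here as drawing onto the supplied axes:

```python
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st


def draw_ale_curve(ax):
    # Hypothetical stand-in for ale_plot: draws an effect curve on the given axes.
    grid = np.linspace(0.0, 1.0, 50)
    ax.plot(grid, np.sin(2 * np.pi * grid), label="accumulated local effect")
    ax.set_xlabel("feature value")
    ax.set_ylabel("ALE")
    ax.legend()


# Create the figure up front so it can be passed to st.pyplot,
# mirroring the fig1 / st.pyplot(fig1) change in this commit.
fig1, ax1 = plt.subplots(figsize=(10, 5))
draw_ale_curve(ax1)
st.write("ALE Plot")
st.pyplot(fig1)
```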
pages/PermutationFeatureImportance.py (CHANGED)

```diff
@@ -63,8 +63,8 @@ def main():
     if global_model is None:
         train_model()
 
-    st.title("
-    st.write(prompt_params.
+    st.title("Permutation Feature Importance")
+    st.write(prompt_params.PERMUTATION_INTRODUCTION)
     # Explain the selected sample
     if st.button("Explain Sample"):
         explain_example()
```
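The page now shows a title and the `PERMUTATION_INTRODUCTION` text from `prompt_params`. For context on the method itself, permutation feature importance can be computed with scikit-learn's `permutation_importance`; the sketch below is a stand-alone illustration on synthetic placeholder data rather than the app's Adult Income pipeline, and the random-forest model is an assumption:

```python
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split

# Synthetic placeholder data standing in for the Adult Income features.
X, y = make_classification(n_samples=500, n_features=6, random_state=0)
X = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(6)])
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = RandomForestClassifier(random_state=0).fit(X_train, y_train)

# Shuffle each feature several times on the held-out set and measure the
# drop in the model's score; a larger drop indicates a more important feature.
result = permutation_importance(model, X_test, y_test, n_repeats=10, random_state=0)
importances = pd.Series(result.importances_mean, index=X.columns).sort_values(ascending=False)
print(importances)
```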
src/prompt_config.py (CHANGED)

```diff
@@ -1,7 +1,24 @@
 APP_INTRODUCTION = """
-
-
-
+# **Explanatory AI for Income Prediction - App Overview**
+
+This application provides an **Explainable AI (XAI)** framework to analyze a machine learning model trained on the **UCI Adult Income Dataset** ([link](https://archive.ics.uci.edu/dataset/2/adult)). The model predicts whether an individual earns more than **$50,000 per year** based on key demographic and employment-related features, including:
+
+- **Age**
+- **Education Level**
+- **Marital Status**
+- **Sex (Gender)**
+- **Occupation, Work Hours, Country**, etc.
+
+To ensure transparency and interpretability, the app utilizes multiple **XAI techniques** to explain model predictions:
+
+### **Explainability Methods**
+1. **ALE (Accumulated Local Effects)** – Measures feature influence while considering feature dependencies.
+2. **Anchors** – Provides high-precision rule-based explanations for individual predictions.
+3. **ICE & PDP (Individual Conditional Expectation & Partial Dependence Plots)** – Visualizes how a feature affects the model prediction globally and individually.
+4. **LIME (Local Interpretable Model-agnostic Explanations)** – Generates local approximations to explain specific predictions.
+5. **Permutation Feature Importance** – Assesses the importance of features by measuring the change in prediction error after shuffling feature values.
+6. **SHAP (SHapley Additive exPlanations)** – Computes fair feature attributions based on cooperative game theory.
+
 """
 LIME_INTRODUCTION = """
 LIME (Local Interpretable Model-agnostic Explanations) is a technique used to interpret the predictions of black-box machine learning models.
```
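The new APP_INTRODUCTION enumerates six explainability methods. As an illustration of item 3 (ICE & PDP), the sketch below uses scikit-learn's `PartialDependenceDisplay` on synthetic placeholder data; the pages in this Space may rely on different libraries and data handling:

```python
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.inspection import PartialDependenceDisplay

# Synthetic placeholder data standing in for the Adult Income features.
X, y = make_classification(n_samples=500, n_features=5, random_state=0)
model = GradientBoostingClassifier(random_state=0).fit(X, y)

# kind="both" overlays individual conditional expectation (ICE) curves on
# top of the averaged partial dependence (PDP) curve for a single feature.
fig, ax = plt.subplots(figsize=(6, 4))
PartialDependenceDisplay.from_estimator(model, X, features=[0], kind="both", ax=ax)
plt.show()
```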