Update src/streamlit_app.py
src/streamlit_app.py (CHANGED, +51 -38)
@@ -1,40 +1,53 @@
-import altair as alt
-import numpy as np
-import pandas as pd
 import streamlit as st
+from tensorflow.keras.models import load_model
+from tensorflow.keras.layers import DepthwiseConv2D
+from PIL import Image, ImageOps
+import numpy as np
+
+# Optional: Patch DepthwiseConv2D if needed
+class PatchedDepthwiseConv2D(DepthwiseConv2D):
+    def __init__(self, *args, groups=1, **kwargs):
+        super().__init__(*args, **kwargs)
+
+# Load model
+model = load_model(r"keras_model.h5", compile=False, custom_objects={"DepthwiseConv2D": PatchedDepthwiseConv2D})
+
+# Load class labels
+with open(r"labels.txt", "r") as f:
+    class_names = f.readlines()
+
+st.title("♻️ Garbage Classification Predictor")
+
+# Upload image
+uploaded_file = st.file_uploader("Upload a waste image (jpg, png)", type=["jpg", "jpeg", "png"])
+
+if st.button("🧪 Predict Waste Type"):
+    if uploaded_file is not None:
+        image = Image.open(uploaded_file)
+        st.image(image, use_container_width=True)
+
+
+        # Preprocess image
+        image = image.convert("RGB")
+        image = ImageOps.fit(image, (224, 224), Image.Resampling.LANCZOS)
+        image_array = np.asarray(image)
+        normalized_image_array = (image_array.astype(np.float32) / 127.5) - 1
+        data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
+        data[0] = normalized_image_array
+
+        # Make prediction
+        prediction = model.predict(data)
+        index = np.argmax(prediction)
+        predicted_label = class_names[index].strip()
+        confidence = prediction[0][index]
+
+        # Display result
+        st.success(f"Predicted Waste Type: **{predicted_label.upper()}**")
+        st.write(f"Confidence Score: **{confidence:.2f}**")
+        st.write("♻️ Dispose responsibly!")
+    else:
+        st.warning("⚠️ Please upload an image before predicting.")
+# 🔚 Footer
+st.markdown("---")
+st.markdown("<p style='text-align: center; font-size: 18px;'>Developed with ❤️ By Twinkle Ghangare for EDUNET FOUNDATION </p>", unsafe_allow_html=True)
 
-"""
-# Welcome to Streamlit!
-
-Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
-If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
-forums](https://discuss.streamlit.io).
-
-In the meantime, below is an example of what you can do with just a few lines of code:
-"""
-
-num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
-num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
-indices = np.linspace(0, 1, num_points)
-theta = 2 * np.pi * num_turns * indices
-radius = indices
-
-x = radius * np.cos(theta)
-y = radius * np.sin(theta)
-
-df = pd.DataFrame({
-    "x": x,
-    "y": y,
-    "idx": indices,
-    "rand": np.random.randn(num_points),
-})
-
-st.altair_chart(alt.Chart(df, height=700, width=700)
-    .mark_point(filled=True)
-    .encode(
-        x=alt.X("x", axis=None),
-        y=alt.Y("y", axis=None),
-        color=alt.Color("idx", legend=None, scale=alt.Scale()),
-        size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-    ))
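A note on the PatchedDepthwiseConv2D shim: Keras .h5 files exported by older tooling (Teachable Machine exports are a common case) can carry a `groups` entry in each DepthwiseConv2D layer config, and newer Keras versions raise a TypeError when deserializing it. The subclass loads such files by accepting and discarding that keyword. A minimal sketch of the mechanism, assuming a legacy keras_model.h5 of that kind:

from tensorflow.keras.layers import DepthwiseConv2D
from tensorflow.keras.models import load_model

class PatchedDepthwiseConv2D(DepthwiseConv2D):
    def __init__(self, *args, groups=1, **kwargs):
        # Accept and intentionally drop `groups`: a depthwise convolution
        # derives its grouping from the input channels, so it is redundant.
        super().__init__(*args, **kwargs)

# Registering the shim under the original layer name makes the loader
# instantiate it wherever the saved config says "DepthwiseConv2D".
model = load_model("keras_model.h5", compile=False,
                   custom_objects={"DepthwiseConv2D": PatchedDepthwiseConv2D})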
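The preprocessing arithmetic is worth spelling out: (x / 127.5) - 1 rescales 8-bit pixel values from [0, 255] into [-1, 1], presumably matching the normalization used when the model was trained. A quick sanity check of the mapping:

import numpy as np

# 0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0
pixels = np.array([0, 127.5, 255], dtype=np.float32)
print(pixels / 127.5 - 1)  # [-1.  0.  1.]

# The preallocated np.ndarray buffer in the app just adds a batch axis;
# normalized_image_array[np.newaxis, ...] gives the same (1, 224, 224, 3).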
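One possible caveat: class_names[index].strip() shows each labels.txt line verbatim. If the file uses the common "<index> <name>" line format (an assumption here; labels.txt is not part of this diff), the numeric prefix would appear in the UI. A hedged sketch that tolerates both formats:

# Hypothetical labels.txt lines: "0 cardboard", "1 glass", ... or bare names.
# split(" ", 1)[-1] keeps only the name in either case.
with open("labels.txt", "r") as f:
    class_names = [line.strip().split(" ", 1)[-1] for line in f]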