first test huggingface
Browse files- .gitignore +207 -0
- LICENSE +21 -0
- __init__.py +1 -0
- api_call.py +145 -0
- api_tests.ipynb +1097 -0
- data/kit_1001_2025-09-22.csv +0 -0
- genai.py +87 -0
- gradio_app.py +97 -0
- requirements.txt +7 -0
- utils.py +149 -0
- weather_data_visualisation.ipynb +127 -0
- weather_data_visualisation.py +100 -0
.gitignore
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[codz]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
|
| 29 |
+
# PyInstaller
|
| 30 |
+
# Usually these files are written by a python script from a template
|
| 31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 32 |
+
*.manifest
|
| 33 |
+
*.spec
|
| 34 |
+
|
| 35 |
+
# Installer logs
|
| 36 |
+
pip-log.txt
|
| 37 |
+
pip-delete-this-directory.txt
|
| 38 |
+
|
| 39 |
+
# Unit test / coverage reports
|
| 40 |
+
htmlcov/
|
| 41 |
+
.tox/
|
| 42 |
+
.nox/
|
| 43 |
+
.coverage
|
| 44 |
+
.coverage.*
|
| 45 |
+
.cache
|
| 46 |
+
nosetests.xml
|
| 47 |
+
coverage.xml
|
| 48 |
+
*.cover
|
| 49 |
+
*.py.cover
|
| 50 |
+
.hypothesis/
|
| 51 |
+
.pytest_cache/
|
| 52 |
+
cover/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
.pybuilder/
|
| 76 |
+
target/
|
| 77 |
+
|
| 78 |
+
# Jupyter Notebook
|
| 79 |
+
.ipynb_checkpoints
|
| 80 |
+
|
| 81 |
+
# IPython
|
| 82 |
+
profile_default/
|
| 83 |
+
ipython_config.py
|
| 84 |
+
|
| 85 |
+
# pyenv
|
| 86 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 88 |
+
# .python-version
|
| 89 |
+
|
| 90 |
+
# pipenv
|
| 91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 94 |
+
# install all needed dependencies.
|
| 95 |
+
#Pipfile.lock
|
| 96 |
+
|
| 97 |
+
# UV
|
| 98 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
| 99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 100 |
+
# commonly ignored for libraries.
|
| 101 |
+
#uv.lock
|
| 102 |
+
|
| 103 |
+
# poetry
|
| 104 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 105 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 106 |
+
# commonly ignored for libraries.
|
| 107 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 108 |
+
#poetry.lock
|
| 109 |
+
#poetry.toml
|
| 110 |
+
|
| 111 |
+
# pdm
|
| 112 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 113 |
+
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
| 114 |
+
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
| 115 |
+
#pdm.lock
|
| 116 |
+
#pdm.toml
|
| 117 |
+
.pdm-python
|
| 118 |
+
.pdm-build/
|
| 119 |
+
|
| 120 |
+
# pixi
|
| 121 |
+
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
| 122 |
+
#pixi.lock
|
| 123 |
+
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
| 124 |
+
# in the .venv directory. It is recommended not to include this directory in version control.
|
| 125 |
+
.pixi
|
| 126 |
+
|
| 127 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 128 |
+
__pypackages__/
|
| 129 |
+
|
| 130 |
+
# Celery stuff
|
| 131 |
+
celerybeat-schedule
|
| 132 |
+
celerybeat.pid
|
| 133 |
+
|
| 134 |
+
# SageMath parsed files
|
| 135 |
+
*.sage.py
|
| 136 |
+
|
| 137 |
+
# Environments
|
| 138 |
+
.env
|
| 139 |
+
.envrc
|
| 140 |
+
.venv
|
| 141 |
+
env/
|
| 142 |
+
venv/
|
| 143 |
+
ENV/
|
| 144 |
+
env.bak/
|
| 145 |
+
venv.bak/
|
| 146 |
+
|
| 147 |
+
# Spyder project settings
|
| 148 |
+
.spyderproject
|
| 149 |
+
.spyproject
|
| 150 |
+
|
| 151 |
+
# Rope project settings
|
| 152 |
+
.ropeproject
|
| 153 |
+
|
| 154 |
+
# mkdocs documentation
|
| 155 |
+
/site
|
| 156 |
+
|
| 157 |
+
# mypy
|
| 158 |
+
.mypy_cache/
|
| 159 |
+
.dmypy.json
|
| 160 |
+
dmypy.json
|
| 161 |
+
|
| 162 |
+
# Pyre type checker
|
| 163 |
+
.pyre/
|
| 164 |
+
|
| 165 |
+
# pytype static type analyzer
|
| 166 |
+
.pytype/
|
| 167 |
+
|
| 168 |
+
# Cython debug symbols
|
| 169 |
+
cython_debug/
|
| 170 |
+
|
| 171 |
+
# PyCharm
|
| 172 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 173 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 174 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 175 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 176 |
+
#.idea/
|
| 177 |
+
|
| 178 |
+
# Abstra
|
| 179 |
+
# Abstra is an AI-powered process automation framework.
|
| 180 |
+
# Ignore directories containing user credentials, local state, and settings.
|
| 181 |
+
# Learn more at https://abstra.io/docs
|
| 182 |
+
.abstra/
|
| 183 |
+
|
| 184 |
+
# Visual Studio Code
|
| 185 |
+
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
| 186 |
+
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
| 187 |
+
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
| 188 |
+
# you could uncomment the following to ignore the entire vscode folder
|
| 189 |
+
# .vscode/
|
| 190 |
+
|
| 191 |
+
# Ruff stuff:
|
| 192 |
+
.ruff_cache/
|
| 193 |
+
|
| 194 |
+
# PyPI configuration file
|
| 195 |
+
.pypirc
|
| 196 |
+
|
| 197 |
+
# Cursor
|
| 198 |
+
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
|
| 199 |
+
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
|
| 200 |
+
# refer to https://docs.cursor.com/context/ignore-files
|
| 201 |
+
.cursorignore
|
| 202 |
+
.cursorindexingignore
|
| 203 |
+
|
| 204 |
+
# Marimo
|
| 205 |
+
marimo/_static/
|
| 206 |
+
marimo/_lsp/
|
| 207 |
+
__marimo__/
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Karim Hamdi
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Makes teleagriculture a Python package
|
api_call.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Daily data fetch for Teleagriculture kits.
|
| 2 |
+
|
| 3 |
+
Usage:
|
| 4 |
+
python api_call.py --kit-id 1001 --format csv
|
| 5 |
+
|
| 6 |
+
Env:
|
| 7 |
+
- KIT_API_KEY: optional Bearer token for the API
|
| 8 |
+
- KITS_API_BASE: override base URL (default https://kits.teleagriculture.org/api)
|
| 9 |
+
"""
|
| 10 |
+
from __future__ import annotations
|
| 11 |
+
|
| 12 |
+
import argparse
|
| 13 |
+
import os
|
| 14 |
+
from datetime import datetime, timedelta
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
from typing import List, Optional
|
| 17 |
+
|
| 18 |
+
import pandas as pd
|
| 19 |
+
|
| 20 |
+
# Import utility function and config
|
| 21 |
+
from utils import get_kit_measurements_df, BASE_URL
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def get_last_day_data(kit_id: int) -> pd.DataFrame:
    """Fetch all sensor data for a kit and keep only the last 24 hours.

    Parameters
    ----------
    kit_id : int
        Numeric kit id to fetch (e.g. 1001).

    Returns
    -------
    pd.DataFrame
        Rows whose ``timestamp`` falls within the last 24 hours, with the
        ``value`` column coerced to numeric. An empty DataFrame is returned
        when no data (or no ``timestamp`` column) is available.
    """
    print(f"API base: {BASE_URL}")
    print(f"Fetching last day's measurements for kit {kit_id}...\n")

    # Fetch all data; sensors are discovered automatically by the helper.
    df = get_kit_measurements_df(kit_id)

    if df.empty or 'timestamp' not in df.columns:
        print("No data or timestamp column found.")
        return pd.DataFrame()

    # The timestamp column is already timezone-aware (converted in
    # get_kit_measurements_df), so the cutoff must be tz-aware as well.
    # pd.Timestamp.utcnow() is deprecated in pandas 2.x; use now("UTC").
    one_day_ago = pd.Timestamp.now(tz="UTC") - timedelta(days=1)
    last_day_df = df[df['timestamp'] >= one_day_ago].copy()

    print(f"Fetched rows from the last day: {len(last_day_df)}")
    if not last_day_df.empty:
        try:
            # Coerce non-numeric readings to NaN so aggregation cannot fail
            # on stray string values.
            last_day_df['value'] = pd.to_numeric(last_day_df['value'], errors='coerce')

            print("Summary statistics for the last day:")
            # Per-sensor mean/min/max/count, rounded for readable output.
            summary = last_day_df.groupby('sensor')['value'].agg(['mean', 'min', 'max', 'count']).round(2)
            print(summary)

        except Exception as e:
            # Best effort only: a failed summary must not lose the data.
            print(f"Could not generate summary statistics: {e}")

    return last_day_df
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
    """Parse command-line options for the fetch script.

    Parameters
    ----------
    argv : list[str] | None
        Argument vector to parse. Defaults to ``None``, which makes argparse
        read ``sys.argv[1:]`` — the original CLI behaviour — so existing
        callers are unaffected; passing an explicit list makes the function
        unit-testable.

    Returns
    -------
    argparse.Namespace
        Parsed options: ``kit_id``, ``sensors``, ``page_size``, ``format``,
        ``out``.
    """
    p = argparse.ArgumentParser(description="Fetch all measurements for a Teleagriculture kit and save to disk.")
    p.add_argument("--kit-id", type=int, required=True, help="Numeric kit id to fetch (e.g., 1001)")
    p.add_argument(
        "--sensors",
        type=str,
        default=None,
        help="Comma-separated sensor names to limit (default: discover all sensors on the kit)",
    )
    p.add_argument("--page-size", type=int, default=100, help="Page size for pagination (default: 100)")
    p.add_argument(
        "--format",
        choices=["csv", "parquet"],
        default="csv",
        help="Output format (default: csv)",
    )
    p.add_argument(
        "--out",
        type=str,
        default=None,
        help="Output file path. If not provided, saves under teleagriculture/data/kit_<id>_<YYYY-MM-DD>.<ext>",
    )
    return p.parse_args(argv)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def main() -> int:
    """CLI entry point: fetch kit measurements and save them to disk.

    Returns
    -------
    int
        Process exit code: 0 on success, 1 when the output file could not
        be written.
    """
    # Local import: datetime.utcnow() is deprecated since Python 3.12 in
    # favour of an explicitly timezone-aware "now".
    from datetime import timezone

    args = parse_args()

    sensors: Optional[List[str]] = None
    if args.sensors:
        # Split "a, b ,c" into ["a", "b", "c"], dropping empty entries.
        sensors = [s.strip() for s in args.sensors.split(",") if s.strip()]

    print(f"API base: {BASE_URL}")
    print(f"Fetching kit {args.kit_id} measurements...\n")
    df = get_kit_measurements_df(args.kit_id, sensors=sensors, page_size=args.page_size)

    print(f"Fetched rows: {len(df)}")
    if not df.empty:
        try:
            per_sensor = df.groupby("sensor").size().sort_values(ascending=False)
            print("Rows per sensor:")
            for s, n in per_sensor.items():
                print(f"  - {s}: {n}")
        except Exception:
            # Informational only; never fail the run over a progress report.
            pass

    # Determine output path: explicit --out wins, otherwise save under
    # <script dir>/data/kit_<id>_<YYYY-MM-DD>.<ext>.
    ext = args.format
    if args.out:
        out_path = Path(args.out)
    else:
        dt = datetime.now(timezone.utc).strftime("%Y-%m-%d")
        out_dir = Path(__file__).parent / "data"
        out_path = out_dir / f"kit_{args.kit_id}_{dt}.{ext}"

    out_path.parent.mkdir(parents=True, exist_ok=True)

    if args.format == "csv":
        df.to_csv(out_path, index=False)
        print(f"\nSaved CSV -> {out_path.resolve()}")
    elif args.format == "parquet":
        try:
            df.to_parquet(out_path, index=False)
            print(f"\nSaved Parquet -> {out_path.resolve()}")
        except ImportError:
            # pandas raises ImportError when no parquet engine is installed.
            print("\nParquet write failed. Please install pyarrow or fastparquet.")
            return 1
        except Exception as e:
            print(f"\nAn error occurred while saving the Parquet file: {e}")
            return 1

    return 0
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
if __name__ == "__main__":
    # Exit with main()'s return code so shell callers can detect failures.
    # (A previous commented-out example of get_last_day_data() was removed;
    # call get_last_day_data(<kit_id>) directly if an ad-hoc check is needed.)
    raise SystemExit(main())
|
api_tests.ipynb
ADDED
|
@@ -0,0 +1,1097 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"id": "8d8da681",
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"source": [
|
| 8 |
+
"# Teleagriculture API Tests\n",
|
| 9 |
+
"\n",
|
| 10 |
+
"This notebook tests API endpoints to find the board with the most data points."
|
| 11 |
+
]
|
| 12 |
+
},
|
| 13 |
+
{
|
| 14 |
+
"cell_type": "code",
|
| 15 |
+
"execution_count": 25,
|
| 16 |
+
"id": "45dc5eca",
|
| 17 |
+
"metadata": {},
|
| 18 |
+
"outputs": [],
|
| 19 |
+
"source": [
|
| 20 |
+
"# Import required libraries\n",
|
| 21 |
+
"import requests\n",
|
| 22 |
+
"import json\n",
|
| 23 |
+
"import pandas as pd\n",
|
| 24 |
+
"import matplotlib.pyplot as plt\n",
|
| 25 |
+
"from typing import List, Dict, Optional\n",
|
| 26 |
+
"from datetime import datetime"
|
| 27 |
+
]
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"cell_type": "markdown",
|
| 31 |
+
"id": "f61e398c",
|
| 32 |
+
"metadata": {},
|
| 33 |
+
"source": [
|
| 34 |
+
"## API Configuration\n",
|
| 35 |
+
"\n",
|
| 36 |
+
"Based on the teleagriculture project documentation, these are IoT hardware boards that send data to cloud platforms. This notebook demonstrates how to query a data platform that collects data from multiple teleagriculture boards."
|
| 37 |
+
]
|
| 38 |
+
},
|
| 39 |
+
{
|
| 40 |
+
"cell_type": "code",
|
| 41 |
+
"execution_count": 26,
|
| 42 |
+
"id": "0f5ac5fe",
|
| 43 |
+
"metadata": {},
|
| 44 |
+
"outputs": [
|
| 45 |
+
{
|
| 46 |
+
"name": "stdout",
|
| 47 |
+
"output_type": "stream",
|
| 48 |
+
"text": [
|
| 49 |
+
"API: https://kits.teleagriculture.org/api\n",
|
| 50 |
+
"Auth: none\n"
|
| 51 |
+
]
|
| 52 |
+
}
|
| 53 |
+
],
|
| 54 |
+
"source": [
|
| 55 |
+
"# API Configuration for Teleagriculture Kits API (minimal)\n",
|
| 56 |
+
"BASE_URL = \"https://kits.teleagriculture.org/api\" # official kits API base\n",
|
| 57 |
+
"\n",
|
| 58 |
+
"# Optional: put KIT_API_KEY in env to POST; GETs are public per docs (but docs also mention bearer header; we support both)\n",
|
| 59 |
+
"import os\n",
|
| 60 |
+
"KIT_API_KEY = os.getenv(\"KIT_API_KEY\")\n",
|
| 61 |
+
"\n",
|
| 62 |
+
"HEADERS = {\n",
|
| 63 |
+
" \"Accept\": \"application/json\",\n",
|
| 64 |
+
"}\n",
|
| 65 |
+
"if KIT_API_KEY:\n",
|
| 66 |
+
" HEADERS[\"Authorization\"] = f\"Bearer {KIT_API_KEY}\"\n",
|
| 67 |
+
"\n",
|
| 68 |
+
"print(\"API:\", BASE_URL)\n",
|
| 69 |
+
"print(\"Auth:\", \"Bearer set\" if \"Authorization\" in HEADERS else \"none\")"
|
| 70 |
+
]
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"cell_type": "code",
|
| 74 |
+
"execution_count": 27,
|
| 75 |
+
"id": "9e43c541",
|
| 76 |
+
"metadata": {},
|
| 77 |
+
"outputs": [],
|
| 78 |
+
"source": [
|
| 79 |
+
"# Minimal helpers per official docs\n",
|
| 80 |
+
"from typing import Tuple, Optional\n",
|
| 81 |
+
"\n",
|
| 82 |
+
"def get_kit_info(kit_id: int) -> Optional[dict]:\n",
|
| 83 |
+
" url = f\"{BASE_URL}/kits/{kit_id}\"\n",
|
| 84 |
+
" try:\n",
|
| 85 |
+
" r = requests.get(url, headers=HEADERS, timeout=30)\n",
|
| 86 |
+
" if r.status_code == 200:\n",
|
| 87 |
+
" return r.json().get(\"data\")\n",
|
| 88 |
+
" return None\n",
|
| 89 |
+
" except requests.RequestException:\n",
|
| 90 |
+
" return None\n",
|
| 91 |
+
"\n",
|
| 92 |
+
"\n",
|
| 93 |
+
"def count_sensor_measurements(kit_id: int, sensor_name: str, page_size: int = 50, max_pages: int = 200) -> int:\n",
|
| 94 |
+
" \"\"\"Count all measurements for a kit sensor using cursor pagination.\n",
|
| 95 |
+
" Limits pages to avoid unbounded runs.\n",
|
| 96 |
+
" \"\"\"\n",
|
| 97 |
+
" total = 0\n",
|
| 98 |
+
" cursor = None\n",
|
| 99 |
+
" pages = 0\n",
|
| 100 |
+
" while pages < max_pages:\n",
|
| 101 |
+
" params = {\"page[size]\": str(page_size)}\n",
|
| 102 |
+
" if cursor:\n",
|
| 103 |
+
" params[\"page[cursor]\"] = cursor\n",
|
| 104 |
+
" url = f\"{BASE_URL}/kits/{kit_id}/{sensor_name}/measurements\"\n",
|
| 105 |
+
" try:\n",
|
| 106 |
+
" r = requests.get(url, headers=HEADERS, params=params, timeout=30)\n",
|
| 107 |
+
" except requests.RequestException:\n",
|
| 108 |
+
" break\n",
|
| 109 |
+
" if r.status_code == 404:\n",
|
| 110 |
+
" break\n",
|
| 111 |
+
" if r.status_code != 200:\n",
|
| 112 |
+
" break\n",
|
| 113 |
+
" try:\n",
|
| 114 |
+
" body = r.json()\n",
|
| 115 |
+
" except Exception:\n",
|
| 116 |
+
" break\n",
|
| 117 |
+
" data = body.get(\"data\")\n",
|
| 118 |
+
" if isinstance(data, list):\n",
|
| 119 |
+
" total += len(data)\n",
|
| 120 |
+
" else:\n",
|
| 121 |
+
" break\n",
|
| 122 |
+
" meta = body.get(\"meta\", {})\n",
|
| 123 |
+
" cursor = meta.get(\"next_cursor\")\n",
|
| 124 |
+
" pages += 1\n",
|
| 125 |
+
" if not cursor:\n",
|
| 126 |
+
" break\n",
|
| 127 |
+
" return total"
|
| 128 |
+
]
|
| 129 |
+
},
|
| 130 |
+
{
|
| 131 |
+
"cell_type": "markdown",
|
| 132 |
+
"id": "3b944747",
|
| 133 |
+
"metadata": {},
|
| 134 |
+
"source": [
|
| 135 |
+
"## Fetch Boards Function\n",
|
| 136 |
+
"\n",
|
| 137 |
+
"Function to retrieve all registered teleagriculture boards from the data platform API. Each \"board\" represents a deployed IoT device collecting agricultural data."
|
| 138 |
+
]
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"cell_type": "markdown",
|
| 142 |
+
"id": "43460b20",
|
| 143 |
+
"metadata": {},
|
| 144 |
+
"source": [
|
| 145 |
+
"## Fetch all sensors for a kit and count in parallel\n",
|
| 146 |
+
"\n",
|
| 147 |
+
"Minimal helpers to grab all sensors from one kit and count each sensor’s datapoints concurrently."
|
| 148 |
+
]
|
| 149 |
+
},
|
| 150 |
+
{
|
| 151 |
+
"cell_type": "code",
|
| 152 |
+
"execution_count": 28,
|
| 153 |
+
"id": "bde9a436",
|
| 154 |
+
"metadata": {},
|
| 155 |
+
"outputs": [
|
| 156 |
+
{
|
| 157 |
+
"name": "stdout",
|
| 158 |
+
"output_type": "stream",
|
| 159 |
+
"text": [
|
| 160 |
+
"KIT 1001 BEST {'sensor': 'NH3', 'count': 1221}\n"
|
| 161 |
+
]
|
| 162 |
+
}
|
| 163 |
+
],
|
| 164 |
+
"source": [
|
| 165 |
+
"from concurrent.futures import ThreadPoolExecutor, as_completed\n",
|
| 166 |
+
"\n",
|
| 167 |
+
"\n",
|
| 168 |
+
"def get_kit_sensors(kit_id: int) -> list[dict]:\n",
|
| 169 |
+
" kit = get_kit_info(kit_id)\n",
|
| 170 |
+
" if not kit:\n",
|
| 171 |
+
" return []\n",
|
| 172 |
+
" sensors = kit.get(\"sensors\") or []\n",
|
| 173 |
+
" # normalize: keep only id and name if present\n",
|
| 174 |
+
" out = []\n",
|
| 175 |
+
" for s in sensors:\n",
|
| 176 |
+
" if isinstance(s, dict) and s.get(\"name\"):\n",
|
| 177 |
+
" out.append({\"id\": s.get(\"id\"), \"name\": s.get(\"name\")})\n",
|
| 178 |
+
" return out\n",
|
| 179 |
+
"\n",
|
| 180 |
+
"\n",
|
| 181 |
+
"def count_all_sensors_for_kit(kit_id: int, page_size: int = 50, max_workers: int = 8) -> dict:\n",
|
| 182 |
+
" sensors = get_kit_sensors(kit_id)\n",
|
| 183 |
+
" if not sensors:\n",
|
| 184 |
+
" return {\"kit_id\": kit_id, \"counts\": {}, \"best\": None}\n",
|
| 185 |
+
"\n",
|
| 186 |
+
" counts: dict[str, int] = {}\n",
|
| 187 |
+
" best = {\"sensor\": None, \"count\": -1}\n",
|
| 188 |
+
"\n",
|
| 189 |
+
" def _worker(sname: str) -> tuple[str, int]:\n",
|
| 190 |
+
" c = count_sensor_measurements(kit_id, sname, page_size=page_size)\n",
|
| 191 |
+
" return sname, c\n",
|
| 192 |
+
"\n",
|
| 193 |
+
" with ThreadPoolExecutor(max_workers=max_workers) as ex:\n",
|
| 194 |
+
" futures = {ex.submit(_worker, s[\"name\"]): s[\"name\"] for s in sensors}\n",
|
| 195 |
+
" for fut in as_completed(futures):\n",
|
| 196 |
+
" sname = futures[fut]\n",
|
| 197 |
+
" try:\n",
|
| 198 |
+
" sname, c = fut.result()\n",
|
| 199 |
+
" counts[sname] = c\n",
|
| 200 |
+
" if c > best[\"count\"]:\n",
|
| 201 |
+
" best = {\"sensor\": sname, \"count\": c}\n",
|
| 202 |
+
" except Exception:\n",
|
| 203 |
+
" counts[sname] = 0\n",
|
| 204 |
+
" return {\"kit_id\": kit_id, \"counts\": counts, \"best\": best}\n",
|
| 205 |
+
"\n",
|
| 206 |
+
"# minimal run example (change the kit id here)\n",
|
| 207 |
+
"one_kit_result = count_all_sensors_for_kit(1001, page_size=50)\n",
|
| 208 |
+
"print(\"KIT\", one_kit_result[\"kit_id\"], \"BEST\", one_kit_result[\"best\"])"
|
| 209 |
+
]
|
| 210 |
+
},
|
| 211 |
+
{
|
| 212 |
+
"cell_type": "code",
|
| 213 |
+
"execution_count": 29,
|
| 214 |
+
"id": "76457d0a",
|
| 215 |
+
"metadata": {},
|
| 216 |
+
"outputs": [
|
| 217 |
+
{
|
| 218 |
+
"name": "stdout",
|
| 219 |
+
"output_type": "stream",
|
| 220 |
+
"text": [
|
| 221 |
+
"📡 Board fetching function defined successfully!\n",
|
| 222 |
+
"🌿 Ready to query teleagriculture board data from platform API.\n"
|
| 223 |
+
]
|
| 224 |
+
}
|
| 225 |
+
],
|
| 226 |
+
"source": [
|
| 227 |
+
"def fetch_all_boards() -> List[Dict]:\n",
|
| 228 |
+
" \"\"\"\n",
|
| 229 |
+
" Fetch all registered teleagriculture boards from the data platform.\n",
|
| 230 |
+
" \n",
|
| 231 |
+
" Returns:\n",
|
| 232 |
+
" List[Dict]: List of board objects with metadata, or empty list if error occurs\n",
|
| 233 |
+
" \"\"\"\n",
|
| 234 |
+
" try:\n",
|
| 235 |
+
" # Common API endpoints for IoT platforms that might host teleagriculture data\n",
|
| 236 |
+
" possible_endpoints = [\n",
|
| 237 |
+
" \"/devices\", # Common IoT platform endpoint\n",
|
| 238 |
+
" \"/boards\", # Board-specific endpoint\n",
|
| 239 |
+
" \"/nodes\", # LoRaWAN nodes\n",
|
| 240 |
+
" \"/sensors\", # Sensor networks\n",
|
| 241 |
+
" \"/stations\" # Weather/agri stations\n",
|
| 242 |
+
" ]\n",
|
| 243 |
+
" \n",
|
| 244 |
+
" for endpoint in possible_endpoints:\n",
|
| 245 |
+
" try:\n",
|
| 246 |
+
" url = f\"{BASE_URL}{endpoint}\"\n",
|
| 247 |
+
" response = requests.get(url, headers=HEADERS, timeout=30)\n",
|
| 248 |
+
" \n",
|
| 249 |
+
" if response.status_code == 200:\n",
|
| 250 |
+
" data = response.json()\n",
|
| 251 |
+
" \n",
|
| 252 |
+
" # Handle different response formats\n",
|
| 253 |
+
" if isinstance(data, list):\n",
|
| 254 |
+
" boards = data\n",
|
| 255 |
+
" elif isinstance(data, dict):\n",
|
| 256 |
+
" # Try common keys for device arrays\n",
|
| 257 |
+
" for key in ['devices', 'boards', 'nodes', 'sensors', 'stations', 'data', 'results']:\n",
|
| 258 |
+
" if key in data and isinstance(data[key], list):\n",
|
| 259 |
+
" boards = data[key]\n",
|
| 260 |
+
" break\n",
|
| 261 |
+
" else:\n",
|
| 262 |
+
" boards = []\n",
|
| 263 |
+
" else:\n",
|
| 264 |
+
" boards = []\n",
|
| 265 |
+
" \n",
|
| 266 |
+
" if boards:\n",
|
| 267 |
+
" print(f\"✅ Successfully fetched {len(boards)} boards from {endpoint}\")\n",
|
| 268 |
+
" return boards\n",
|
| 269 |
+
" \n",
|
| 270 |
+
" except Exception as e:\n",
|
| 271 |
+
" continue # Try next endpoint\n",
|
| 272 |
+
" \n",
|
| 273 |
+
" print(\"❌ Could not find boards at any common endpoint\")\n",
|
| 274 |
+
" return []\n",
|
| 275 |
+
" \n",
|
| 276 |
+
" except requests.exceptions.RequestException as e:\n",
|
| 277 |
+
" print(f\"❌ Network error: {e}\")\n",
|
| 278 |
+
" return []\n",
|
| 279 |
+
" except json.JSONDecodeError as e:\n",
|
| 280 |
+
" print(f\"❌ JSON decode error: {e}\")\n",
|
| 281 |
+
" return []\n",
|
| 282 |
+
" except Exception as e:\n",
|
| 283 |
+
" print(f\"❌ Unexpected error: {e}\")\n",
|
| 284 |
+
" return []\n",
|
| 285 |
+
"\n",
|
| 286 |
+
"# Test the function (will be used later)\n",
|
| 287 |
+
"print(\"📡 Board fetching function defined successfully!\")\n",
|
| 288 |
+
"print(\"🌿 Ready to query teleagriculture board data from platform API.\")"
|
| 289 |
+
]
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"cell_type": "markdown",
|
| 293 |
+
"id": "ec646dfe",
|
| 294 |
+
"metadata": {},
|
| 295 |
+
"source": [
|
| 296 |
+
"## Data Point Counting Function\n",
|
| 297 |
+
"\n",
|
| 298 |
+
"Function to count sensor data points collected by each teleagriculture board. This could include temperature readings, soil moisture, humidity, light levels, etc."
|
| 299 |
+
]
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"cell_type": "code",
|
| 303 |
+
"execution_count": 30,
|
| 304 |
+
"id": "875bf5fc",
|
| 305 |
+
"metadata": {},
|
| 306 |
+
"outputs": [
|
| 307 |
+
{
|
| 308 |
+
"name": "stdout",
|
| 309 |
+
"output_type": "stream",
|
| 310 |
+
"text": [
|
| 311 |
+
"📡 Sensor data counting functions defined successfully!\n",
|
| 312 |
+
"🌱 Ready to analyze agricultural sensor data from teleagriculture boards.\n"
|
| 313 |
+
]
|
| 314 |
+
}
|
| 315 |
+
],
|
| 316 |
+
"source": [
|
| 317 |
+
"def count_board_data_points(board_id: str) -> int:\n",
|
| 318 |
+
" \"\"\"\n",
|
| 319 |
+
" Count sensor data points collected by a specific teleagriculture board.\n",
|
| 320 |
+
" \n",
|
| 321 |
+
" Args:\n",
|
| 322 |
+
" board_id (str): The ID of the teleagriculture board\n",
|
| 323 |
+
" \n",
|
| 324 |
+
" Returns:\n",
|
| 325 |
+
" int: Number of data points (sensor readings) collected by the board\n",
|
| 326 |
+
" \"\"\"\n",
|
| 327 |
+
" try:\n",
|
| 328 |
+
" # Teleagriculture boards typically send sensor data to these types of endpoints\n",
|
| 329 |
+
" possible_endpoints = [\n",
|
| 330 |
+
" f\"/devices/{board_id}/data\", # Device data endpoint\n",
|
| 331 |
+
" f\"/devices/{board_id}/measurements\", # Measurement endpoint \n",
|
| 332 |
+
" f\"/devices/{board_id}/sensors\", # Sensor readings\n",
|
| 333 |
+
" f\"/boards/{board_id}/readings\", # Board readings\n",
|
| 334 |
+
" f\"/nodes/{board_id}/uplinks\", # LoRaWAN uplink messages\n",
|
| 335 |
+
" f\"/stations/{board_id}/observations\" # Weather station observations\n",
|
| 336 |
+
" ]\n",
|
| 337 |
+
" \n",
|
| 338 |
+
" for endpoint in possible_endpoints:\n",
|
| 339 |
+
" try:\n",
|
| 340 |
+
" url = f\"{BASE_URL}{endpoint}\"\n",
|
| 341 |
+
" response = requests.get(url, headers=HEADERS, timeout=30)\n",
|
| 342 |
+
" \n",
|
| 343 |
+
" if response.status_code == 200:\n",
|
| 344 |
+
" data = response.json()\n",
|
| 345 |
+
" \n",
|
| 346 |
+
" # Handle different data formats from IoT platforms\n",
|
| 347 |
+
" if isinstance(data, list):\n",
|
| 348 |
+
" count = len(data)\n",
|
| 349 |
+
" elif isinstance(data, dict):\n",
|
| 350 |
+
" # Try common keys for sensor data arrays\n",
|
| 351 |
+
" for key in ['measurements', 'readings', 'data', 'sensors', 'uplinks', 'observations', 'records']:\n",
|
| 352 |
+
" if key in data and isinstance(data[key], list):\n",
|
| 353 |
+
" count = len(data[key])\n",
|
| 354 |
+
" break\n",
|
| 355 |
+
" else:\n",
|
| 356 |
+
" # Count sensor types if structured differently\n",
|
| 357 |
+
" sensor_keys = ['temperature', 'humidity', 'pressure', 'soil_moisture', 'light', 'ph', 'nitrogen']\n",
|
| 358 |
+
" count = sum(1 for key in sensor_keys if key in data and data[key] is not None)\n",
|
| 359 |
+
" \n",
|
| 360 |
+
" if count == 0:\n",
|
| 361 |
+
" count = len(data) # Fallback to total keys\n",
|
| 362 |
+
" else:\n",
|
| 363 |
+
" count = 0\n",
|
| 364 |
+
" \n",
|
| 365 |
+
" print(f\"📊 Board {board_id}: {count} data points found via {endpoint}\")\n",
|
| 366 |
+
" return count\n",
|
| 367 |
+
" \n",
|
| 368 |
+
" except Exception as e:\n",
|
| 369 |
+
" continue # Try next endpoint\n",
|
| 370 |
+
" \n",
|
| 371 |
+
" print(f\"⚠️ Could not fetch sensor data for board {board_id}\")\n",
|
| 372 |
+
" return 0\n",
|
| 373 |
+
" \n",
|
| 374 |
+
" except Exception as e:\n",
|
| 375 |
+
" print(f\"❌ Error counting data points for board {board_id}: {e}\")\n",
|
| 376 |
+
" return 0\n",
|
| 377 |
+
"\n",
|
| 378 |
+
"def get_board_data_counts(boards: List[Dict]) -> Dict[str, Dict]:\n",
|
| 379 |
+
" \"\"\"\n",
|
| 380 |
+
" Get sensor data counts for all teleagriculture boards.\n",
|
| 381 |
+
" \n",
|
| 382 |
+
" Args:\n",
|
| 383 |
+
" boards (List[Dict]): List of board/device objects from the platform\n",
|
| 384 |
+
" \n",
|
| 385 |
+
" Returns:\n",
|
| 386 |
+
" Dict[str, Dict]: Dictionary with board info and data counts\n",
|
| 387 |
+
" \"\"\"\n",
|
| 388 |
+
" board_stats = {}\n",
|
| 389 |
+
" \n",
|
| 390 |
+
" for board in boards:\n",
|
| 391 |
+
" # Handle different IoT platform object structures\n",
|
| 392 |
+
" board_id = (board.get('id') or board.get('device_id') or board.get('node_id') or \n",
|
| 393 |
+
" board.get('sensor_id') or board.get('station_id') or board.get('_id'))\n",
|
| 394 |
+
" \n",
|
| 395 |
+
" board_name = (board.get('name') or board.get('device_name') or board.get('label') or \n",
|
| 396 |
+
" board.get('title') or board.get('station_name') or f\"Board {board_id}\")\n",
|
| 397 |
+
" \n",
|
| 398 |
+
" # Get location info if available (common in agricultural IoT)\n",
|
| 399 |
+
" location = board.get('location') or board.get('coordinates') or board.get('position')\n",
|
| 400 |
+
" \n",
|
| 401 |
+
" if board_id:\n",
|
| 402 |
+
" data_count = count_board_data_points(str(board_id))\n",
|
| 403 |
+
" board_stats[board_id] = {\n",
|
| 404 |
+
" 'name': board_name,\n",
|
| 405 |
+
" 'data_count': data_count,\n",
|
| 406 |
+
" 'location': location,\n",
|
| 407 |
+
" 'board_info': board\n",
|
| 408 |
+
" }\n",
|
| 409 |
+
" else:\n",
|
| 410 |
+
" print(f\"⚠️ Skipping board without ID: {board}\")\n",
|
| 411 |
+
" \n",
|
| 412 |
+
" return board_stats\n",
|
| 413 |
+
"\n",
|
| 414 |
+
"print(\"📡 Sensor data counting functions defined successfully!\")\n",
|
| 415 |
+
"print(\"🌱 Ready to analyze agricultural sensor data from teleagriculture boards.\")"
|
| 416 |
+
]
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"cell_type": "markdown",
|
| 420 |
+
"id": "e101dd72",
|
| 421 |
+
"metadata": {},
|
| 422 |
+
"source": [
|
| 423 |
+
"## Find Board with Most Data Points\n",
|
| 424 |
+
"\n",
|
| 425 |
+
"Main execution logic to analyze all boards and identify the one with the most data points."
|
| 426 |
+
]
|
| 427 |
+
},
|
| 428 |
+
{
|
| 429 |
+
"cell_type": "code",
|
| 430 |
+
"execution_count": 31,
|
| 431 |
+
"id": "2d6d95de",
|
| 432 |
+
"metadata": {},
|
| 433 |
+
"outputs": [
|
| 434 |
+
{
|
| 435 |
+
"name": "stdout",
|
| 436 |
+
"output_type": "stream",
|
| 437 |
+
"text": [
|
| 438 |
+
"🚀 Starting board analysis...\n",
|
| 439 |
+
"==================================================\n",
|
| 440 |
+
"📋 Fetching all boards...\n",
|
| 441 |
+
"❌ Could not find boards at any common endpoint\n",
|
| 442 |
+
"❌ No boards found or error occurred. Check your API configuration.\n",
|
| 443 |
+
"❌ Could not find boards at any common endpoint\n",
|
| 444 |
+
"❌ No boards found or error occurred. Check your API configuration.\n"
|
| 445 |
+
]
|
| 446 |
+
}
|
| 447 |
+
],
|
| 448 |
+
"source": [
|
| 449 |
+
"def find_board_with_most_data():\n",
|
| 450 |
+
" \"\"\"\n",
|
| 451 |
+
" Main function to find the board with the most data points.\n",
|
| 452 |
+
" \"\"\"\n",
|
| 453 |
+
" print(\"🚀 Starting board analysis...\")\n",
|
| 454 |
+
" print(\"=\" * 50)\n",
|
| 455 |
+
" \n",
|
| 456 |
+
" # Step 1: Fetch all boards\n",
|
| 457 |
+
" print(\"📋 Fetching all boards...\")\n",
|
| 458 |
+
" boards = fetch_all_boards()\n",
|
| 459 |
+
" \n",
|
| 460 |
+
" if not boards:\n",
|
| 461 |
+
" print(\"❌ No boards found or error occurred. Check your API configuration.\")\n",
|
| 462 |
+
" return None\n",
|
| 463 |
+
" \n",
|
| 464 |
+
" print(f\"✅ Found {len(boards)} boards\")\n",
|
| 465 |
+
" print()\n",
|
| 466 |
+
" \n",
|
| 467 |
+
" # Step 2: Count data points for each board\n",
|
| 468 |
+
" print(\"📊 Counting data points for each board...\")\n",
|
| 469 |
+
" board_stats = get_board_data_counts(boards)\n",
|
| 470 |
+
" \n",
|
| 471 |
+
" if not board_stats:\n",
|
| 472 |
+
" print(\"❌ Could not get data counts for any boards.\")\n",
|
| 473 |
+
" return None\n",
|
| 474 |
+
" \n",
|
| 475 |
+
" # Step 3: Find the board with the most data points\n",
|
| 476 |
+
" max_board_id = max(board_stats.keys(), key=lambda k: board_stats[k]['data_count'])\n",
|
| 477 |
+
" max_board = board_stats[max_board_id]\n",
|
| 478 |
+
" \n",
|
| 479 |
+
" print()\n",
|
| 480 |
+
" print(\"🏆 RESULTS\")\n",
|
| 481 |
+
" print(\"=\" * 50)\n",
|
| 482 |
+
" print(f\"Board with most data points:\")\n",
|
| 483 |
+
" print(f\" 📋 Name: {max_board['name']}\")\n",
|
| 484 |
+
" print(f\" 🆔 ID: {max_board_id}\")\n",
|
| 485 |
+
" print(f\" 📊 Data Points: {max_board['data_count']}\")\n",
|
| 486 |
+
" print()\n",
|
| 487 |
+
" \n",
|
| 488 |
+
" # Summary of all boards\n",
|
| 489 |
+
" print(\"📋 All Boards Summary:\")\n",
|
| 490 |
+
" print(\"-\" * 30)\n",
|
| 491 |
+
" sorted_boards = sorted(board_stats.items(), key=lambda x: x[1]['data_count'], reverse=True)\n",
|
| 492 |
+
" \n",
|
| 493 |
+
" for i, (board_id, stats) in enumerate(sorted_boards, 1):\n",
|
| 494 |
+
" emoji = \"🥇\" if i == 1 else \"🥈\" if i == 2 else \"🥉\" if i == 3 else \"📋\"\n",
|
| 495 |
+
" print(f\"{emoji} {stats['name']}: {stats['data_count']} data points\")\n",
|
| 496 |
+
" \n",
|
| 497 |
+
" return {\n",
|
| 498 |
+
" 'winner': max_board,\n",
|
| 499 |
+
" 'winner_id': max_board_id,\n",
|
| 500 |
+
" 'all_stats': board_stats\n",
|
| 501 |
+
" }\n",
|
| 502 |
+
"\n",
|
| 503 |
+
"# Execute the analysis\n",
|
| 504 |
+
"result = find_board_with_most_data()"
|
| 505 |
+
]
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"cell_type": "markdown",
|
| 509 |
+
"id": "b191ccdf",
|
| 510 |
+
"metadata": {},
|
| 511 |
+
"source": [
|
| 512 |
+
"## Data Visualization\n",
|
| 513 |
+
"\n",
|
| 514 |
+
"Create charts and detailed analysis of the board data points."
|
| 515 |
+
]
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"cell_type": "markdown",
|
| 519 |
+
"id": "30486ada",
|
| 520 |
+
"metadata": {},
|
| 521 |
+
"source": [
|
| 522 |
+
"## Sensor Scan: IDs 1001–1060\n",
|
| 523 |
+
"\n",
|
| 524 |
+
"Iterate over sensor IDs 1001 to 1060, query the platform API, and find the sensor with the most datapoints."
|
| 525 |
+
]
|
| 526 |
+
},
|
| 527 |
+
{
|
| 528 |
+
"cell_type": "code",
|
| 529 |
+
"execution_count": 32,
|
| 530 |
+
"id": "e860c05e",
|
| 531 |
+
"metadata": {},
|
| 532 |
+
"outputs": [
|
| 533 |
+
{
|
| 534 |
+
"name": "stdout",
|
| 535 |
+
"output_type": "stream",
|
| 536 |
+
"text": [
|
| 537 |
+
"kit 1001 sensor ftTemp: 1219\n",
|
| 538 |
+
"kit 1001 sensor gbHum: 1219\n",
|
| 539 |
+
"kit 1001 sensor gbHum: 1219\n",
|
| 540 |
+
"kit 1001 sensor gbTemp: 1219\n",
|
| 541 |
+
"kit 1001 sensor gbTemp: 1219\n",
|
| 542 |
+
"kit 1001 sensor Moist: 1219\n",
|
| 543 |
+
"kit 1001 sensor Moist: 1219\n",
|
| 544 |
+
"kit 1001 sensor CO: 1221\n",
|
| 545 |
+
"kit 1001 sensor CO: 1221\n",
|
| 546 |
+
"kit 1001 sensor NO2: 1221\n",
|
| 547 |
+
"kit 1001 sensor NO2: 1221\n",
|
| 548 |
+
"kit 1001 sensor NH3: 1221\n",
|
| 549 |
+
"kit 1001 sensor NH3: 1221\n",
|
| 550 |
+
"kit 1001 sensor C3H8: 1221\n",
|
| 551 |
+
"kit 1001 sensor C3H8: 1221\n",
|
| 552 |
+
"kit 1001 sensor C4H10: 1221\n",
|
| 553 |
+
"kit 1001 sensor C4H10: 1221\n",
|
| 554 |
+
"kit 1001 sensor CH4: 1221\n",
|
| 555 |
+
"kit 1001 sensor CH4: 1221\n",
|
| 556 |
+
"kit 1001 sensor H2: 1221\n",
|
| 557 |
+
"kit 1001 sensor H2: 1221\n",
|
| 558 |
+
"kit 1001 sensor C2H5OH: 1221\n",
|
| 559 |
+
"kit 1001 sensor pH: 1219\n",
|
| 560 |
+
"kit 1001 sensor NO3: 0\n",
|
| 561 |
+
"kit 1001 sensor NO2_aq: 0\n",
|
| 562 |
+
"kit 1001 sensor GH: 0\n",
|
| 563 |
+
"kit 1001 sensor KH: 0\n",
|
| 564 |
+
"kit 1001 sensor pH_strip: 0\n",
|
| 565 |
+
"kit 1001 sensor Cl2: 0\n",
|
| 566 |
+
"kit 1002 sensor ftTemp: 1218\n",
|
| 567 |
+
"kit 1002 sensor gbHum: 1218\n",
|
| 568 |
+
"kit 1002 sensor gbTemp: 1218\n",
|
| 569 |
+
"kit 1002 sensor Moist: 1218\n",
|
| 570 |
+
"kit 1002 sensor CO: 1218\n",
|
| 571 |
+
"kit 1002 sensor NO2: 1218\n",
|
| 572 |
+
"kit 1002 sensor NH3: 1218\n",
|
| 573 |
+
"kit 1002 sensor C3H8: 1218\n",
|
| 574 |
+
"kit 1002 sensor C4H10: 1218\n",
|
| 575 |
+
"kit 1002 sensor CH4: 1218\n",
|
| 576 |
+
"kit 1002 sensor H2: 1218\n",
|
| 577 |
+
"kit 1002 sensor C2H5OH: 1218\n",
|
| 578 |
+
"kit 1002 sensor pH: 1218\n",
|
| 579 |
+
"kit 1002 sensor NO3: 0\n",
|
| 580 |
+
"kit 1002 sensor NO2_aq: 0\n",
|
| 581 |
+
"kit 1002 sensor GH: 0\n",
|
| 582 |
+
"kit 1002 sensor KH: 0\n",
|
| 583 |
+
"kit 1002 sensor pH_strip: 0\n",
|
| 584 |
+
"kit 1002 sensor Cl2: 0\n",
|
| 585 |
+
"kit 1002 sensor Battery: 663\n",
|
| 586 |
+
"kit 1002 sensor temp: 1074\n",
|
| 587 |
+
"kit 1003 sensor pH_strip: 2\n",
|
| 588 |
+
"kit 1003 sensor temp2: 83\n",
|
| 589 |
+
"kit 1003 sensor hum: 6000\n",
|
| 590 |
+
"kit 1003 sensor temp: 4980\n",
|
| 591 |
+
"kit 1003 sensor mois: 30\n",
|
| 592 |
+
"kit 1003 sensor Battery: 210\n",
|
| 593 |
+
"kit 1004 sensor ftTemp: 60\n",
|
| 594 |
+
"kit 1004 sensor gbHum: 1548\n",
|
| 595 |
+
"kit 1004 sensor gbTemp: 1547\n",
|
| 596 |
+
"kit 1004 sensor Moist: 4658\n",
|
| 597 |
+
"kit 1004 sensor Soil Moisture: 1210\n",
|
| 598 |
+
"kit 1004 sensor NO2: 4658\n",
|
| 599 |
+
"kit 1004 sensor NH3: 4658\n",
|
| 600 |
+
"kit 1004 sensor C3H8: 4658\n",
|
| 601 |
+
"kit 1004 sensor C4H10: 4658\n",
|
| 602 |
+
"kit 1004 sensor CH4: 2880\n",
|
| 603 |
+
"kit 1004 sensor H2: 4658\n",
|
| 604 |
+
"kit 1004 sensor C2H5OH: 4658\n"
|
| 605 |
+
]
|
| 606 |
+
},
|
| 607 |
+
{
|
| 608 |
+
"ename": "KeyboardInterrupt",
|
| 609 |
+
"evalue": "",
|
| 610 |
+
"output_type": "error",
|
| 611 |
+
"traceback": [
|
| 612 |
+
"\u001b[31m---------------------------------------------------------------------------\u001b[39m",
|
| 613 |
+
"\u001b[31mKeyboardInterrupt\u001b[39m Traceback (most recent call last)",
|
| 614 |
+
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[32]\u001b[39m\u001b[32m, line 24\u001b[39m\n\u001b[32m 21\u001b[39m best = {\u001b[33m\"\u001b[39m\u001b[33mkit_id\u001b[39m\u001b[33m\"\u001b[39m: kit_id, \u001b[33m\"\u001b[39m\u001b[33msensor\u001b[39m\u001b[33m\"\u001b[39m: name, \u001b[33m\"\u001b[39m\u001b[33mcount\u001b[39m\u001b[33m\"\u001b[39m: cnt}\n\u001b[32m 22\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m best\n\u001b[32m---> \u001b[39m\u001b[32m24\u001b[39m best = \u001b[43mfind_max_sensor_in_range\u001b[49m\u001b[43m(\u001b[49m\u001b[32;43m1001\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[32;43m1060\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpage_size\u001b[49m\u001b[43m=\u001b[49m\u001b[32;43m50\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[32m 25\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[33mRESULT\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 26\u001b[39m \u001b[38;5;28mprint\u001b[39m(best)\n",
|
| 615 |
+
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[32]\u001b[39m\u001b[32m, line 18\u001b[39m, in \u001b[36mfind_max_sensor_in_range\u001b[39m\u001b[34m(start_kit, end_kit, page_size)\u001b[39m\n\u001b[32m 16\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m name:\n\u001b[32m 17\u001b[39m \u001b[38;5;28;01mcontinue\u001b[39;00m\n\u001b[32m---> \u001b[39m\u001b[32m18\u001b[39m cnt = \u001b[43mcount_sensor_measurements\u001b[49m\u001b[43m(\u001b[49m\u001b[43mkit_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpage_size\u001b[49m\u001b[43m=\u001b[49m\u001b[43mpage_size\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 19\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mkit \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mkit_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m sensor \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mcnt\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 20\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m cnt > best[\u001b[33m\"\u001b[39m\u001b[33mcount\u001b[39m\u001b[33m\"\u001b[39m]:\n",
|
| 616 |
+
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[27]\u001b[39m\u001b[32m, line 28\u001b[39m, in \u001b[36mcount_sensor_measurements\u001b[39m\u001b[34m(kit_id, sensor_name, page_size, max_pages)\u001b[39m\n\u001b[32m 26\u001b[39m url = \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mBASE_URL\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m/kits/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mkit_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00msensor_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m/measurements\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 27\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m---> \u001b[39m\u001b[32m28\u001b[39m r = \u001b[43mrequests\u001b[49m\u001b[43m.\u001b[49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[43m=\u001b[49m\u001b[43mHEADERS\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mparams\u001b[49m\u001b[43m=\u001b[49m\u001b[43mparams\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[32;43m30\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[32m 29\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m requests.RequestException:\n\u001b[32m 30\u001b[39m \u001b[38;5;28;01mbreak\u001b[39;00m\n",
|
| 617 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/site-packages/requests/api.py:73\u001b[39m, in \u001b[36mget\u001b[39m\u001b[34m(url, params, **kwargs)\u001b[39m\n\u001b[32m 62\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mget\u001b[39m(url, params=\u001b[38;5;28;01mNone\u001b[39;00m, **kwargs):\n\u001b[32m 63\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33mr\u001b[39m\u001b[33;03m\"\"\"Sends a GET request.\u001b[39;00m\n\u001b[32m 64\u001b[39m \n\u001b[32m 65\u001b[39m \u001b[33;03m :param url: URL for the new :class:`Request` object.\u001b[39;00m\n\u001b[32m (...)\u001b[39m\u001b[32m 70\u001b[39m \u001b[33;03m :rtype: requests.Response\u001b[39;00m\n\u001b[32m 71\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m---> \u001b[39m\u001b[32m73\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mget\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mparams\u001b[49m\u001b[43m=\u001b[49m\u001b[43mparams\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
|
| 618 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/site-packages/requests/api.py:59\u001b[39m, in \u001b[36mrequest\u001b[39m\u001b[34m(method, url, **kwargs)\u001b[39m\n\u001b[32m 55\u001b[39m \u001b[38;5;66;03m# By using the 'with' statement we are sure the session is closed, thus we\u001b[39;00m\n\u001b[32m 56\u001b[39m \u001b[38;5;66;03m# avoid leaving sockets open which can trigger a ResourceWarning in some\u001b[39;00m\n\u001b[32m 57\u001b[39m \u001b[38;5;66;03m# cases, and look like a memory leak in others.\u001b[39;00m\n\u001b[32m 58\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m sessions.Session() \u001b[38;5;28;01mas\u001b[39;00m session:\n\u001b[32m---> \u001b[39m\u001b[32m59\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43msession\u001b[49m\u001b[43m.\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m=\u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m=\u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
|
| 619 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/site-packages/requests/sessions.py:589\u001b[39m, in \u001b[36mSession.request\u001b[39m\u001b[34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001b[39m\n\u001b[32m 584\u001b[39m send_kwargs = {\n\u001b[32m 585\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mtimeout\u001b[39m\u001b[33m\"\u001b[39m: timeout,\n\u001b[32m 586\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mallow_redirects\u001b[39m\u001b[33m\"\u001b[39m: allow_redirects,\n\u001b[32m 587\u001b[39m }\n\u001b[32m 588\u001b[39m send_kwargs.update(settings)\n\u001b[32m--> \u001b[39m\u001b[32m589\u001b[39m resp = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43msend\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprep\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43msend_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 591\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m resp\n",
|
| 620 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/site-packages/requests/sessions.py:703\u001b[39m, in \u001b[36mSession.send\u001b[39m\u001b[34m(self, request, **kwargs)\u001b[39m\n\u001b[32m 700\u001b[39m start = preferred_clock()\n\u001b[32m 702\u001b[39m \u001b[38;5;66;03m# Send the request\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m703\u001b[39m r = \u001b[43madapter\u001b[49m\u001b[43m.\u001b[49m\u001b[43msend\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 705\u001b[39m \u001b[38;5;66;03m# Total elapsed time of the request (approximately)\u001b[39;00m\n\u001b[32m 706\u001b[39m elapsed = preferred_clock() - start\n",
|
| 621 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/site-packages/requests/adapters.py:667\u001b[39m, in \u001b[36mHTTPAdapter.send\u001b[39m\u001b[34m(self, request, stream, timeout, verify, cert, proxies)\u001b[39m\n\u001b[32m 664\u001b[39m timeout = TimeoutSauce(connect=timeout, read=timeout)\n\u001b[32m 666\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m667\u001b[39m resp = \u001b[43mconn\u001b[49m\u001b[43m.\u001b[49m\u001b[43murlopen\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 668\u001b[39m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m=\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m.\u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 669\u001b[39m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m=\u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 670\u001b[39m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[43m=\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m.\u001b[49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 671\u001b[39m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[43m=\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m.\u001b[49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 672\u001b[39m \u001b[43m \u001b[49m\u001b[43mredirect\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 673\u001b[39m \u001b[43m \u001b[49m\u001b[43massert_same_host\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 674\u001b[39m \u001b[43m \u001b[49m\u001b[43mpreload_content\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 675\u001b[39m \u001b[43m \u001b[49m\u001b[43mdecode_content\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 676\u001b[39m \u001b[43m 
\u001b[49m\u001b[43mretries\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mmax_retries\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 677\u001b[39m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 678\u001b[39m \u001b[43m \u001b[49m\u001b[43mchunked\u001b[49m\u001b[43m=\u001b[49m\u001b[43mchunked\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 679\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 681\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m (ProtocolError, \u001b[38;5;167;01mOSError\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m err:\n\u001b[32m 682\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mConnectionError\u001b[39;00m(err, request=request)\n",
|
| 622 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/site-packages/urllib3/connectionpool.py:787\u001b[39m, in \u001b[36mHTTPConnectionPool.urlopen\u001b[39m\u001b[34m(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)\u001b[39m\n\u001b[32m 784\u001b[39m response_conn = conn \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m release_conn \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 786\u001b[39m \u001b[38;5;66;03m# Make the request on the HTTPConnection object\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m787\u001b[39m response = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_make_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 788\u001b[39m \u001b[43m \u001b[49m\u001b[43mconn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 789\u001b[39m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 790\u001b[39m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 791\u001b[39m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtimeout_obj\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 792\u001b[39m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[43m=\u001b[49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 793\u001b[39m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[43m=\u001b[49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 794\u001b[39m \u001b[43m \u001b[49m\u001b[43mchunked\u001b[49m\u001b[43m=\u001b[49m\u001b[43mchunked\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 795\u001b[39m \u001b[43m \u001b[49m\u001b[43mretries\u001b[49m\u001b[43m=\u001b[49m\u001b[43mretries\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 796\u001b[39m \u001b[43m 
\u001b[49m\u001b[43mresponse_conn\u001b[49m\u001b[43m=\u001b[49m\u001b[43mresponse_conn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 797\u001b[39m \u001b[43m \u001b[49m\u001b[43mpreload_content\u001b[49m\u001b[43m=\u001b[49m\u001b[43mpreload_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 798\u001b[39m \u001b[43m \u001b[49m\u001b[43mdecode_content\u001b[49m\u001b[43m=\u001b[49m\u001b[43mdecode_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 799\u001b[39m \u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mresponse_kw\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 800\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 802\u001b[39m \u001b[38;5;66;03m# Everything went great!\u001b[39;00m\n\u001b[32m 803\u001b[39m clean_exit = \u001b[38;5;28;01mTrue\u001b[39;00m\n",
|
| 623 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/site-packages/urllib3/connectionpool.py:534\u001b[39m, in \u001b[36mHTTPConnectionPool._make_request\u001b[39m\u001b[34m(self, conn, method, url, body, headers, retries, timeout, chunked, response_conn, preload_content, decode_content, enforce_content_length)\u001b[39m\n\u001b[32m 532\u001b[39m \u001b[38;5;66;03m# Receive the response from the server\u001b[39;00m\n\u001b[32m 533\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m534\u001b[39m response = \u001b[43mconn\u001b[49m\u001b[43m.\u001b[49m\u001b[43mgetresponse\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 535\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m (BaseSSLError, \u001b[38;5;167;01mOSError\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[32m 536\u001b[39m \u001b[38;5;28mself\u001b[39m._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n",
|
| 624 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/site-packages/urllib3/connection.py:516\u001b[39m, in \u001b[36mHTTPConnection.getresponse\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 513\u001b[39m _shutdown = \u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mself\u001b[39m.sock, \u001b[33m\"\u001b[39m\u001b[33mshutdown\u001b[39m\u001b[33m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[32m 515\u001b[39m \u001b[38;5;66;03m# Get the response from http.client.HTTPConnection\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m516\u001b[39m httplib_response = \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m.\u001b[49m\u001b[43mgetresponse\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 518\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m 519\u001b[39m assert_header_parsing(httplib_response.msg)\n",
|
| 625 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/http/client.py:1430\u001b[39m, in \u001b[36mHTTPConnection.getresponse\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 1428\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m 1429\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1430\u001b[39m \u001b[43mresponse\u001b[49m\u001b[43m.\u001b[49m\u001b[43mbegin\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1431\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mConnectionError\u001b[39;00m:\n\u001b[32m 1432\u001b[39m \u001b[38;5;28mself\u001b[39m.close()\n",
|
| 626 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/http/client.py:331\u001b[39m, in \u001b[36mHTTPResponse.begin\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 329\u001b[39m \u001b[38;5;66;03m# read until we get a non-100 response\u001b[39;00m\n\u001b[32m 330\u001b[39m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m331\u001b[39m version, status, reason = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_read_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 332\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m status != CONTINUE:\n\u001b[32m 333\u001b[39m \u001b[38;5;28;01mbreak\u001b[39;00m\n",
|
| 627 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/http/client.py:292\u001b[39m, in \u001b[36mHTTPResponse._read_status\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 291\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m_read_status\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[32m--> \u001b[39m\u001b[32m292\u001b[39m line = \u001b[38;5;28mstr\u001b[39m(\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mfp\u001b[49m\u001b[43m.\u001b[49m\u001b[43mreadline\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_MAXLINE\u001b[49m\u001b[43m \u001b[49m\u001b[43m+\u001b[49m\u001b[43m \u001b[49m\u001b[32;43m1\u001b[39;49m\u001b[43m)\u001b[49m, \u001b[33m\"\u001b[39m\u001b[33miso-8859-1\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 293\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(line) > _MAXLINE:\n\u001b[32m 294\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m LineTooLong(\u001b[33m\"\u001b[39m\u001b[33mstatus line\u001b[39m\u001b[33m\"\u001b[39m)\n",
|
| 628 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/socket.py:719\u001b[39m, in \u001b[36mSocketIO.readinto\u001b[39m\u001b[34m(self, b)\u001b[39m\n\u001b[32m 717\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m(\u001b[33m\"\u001b[39m\u001b[33mcannot read from timed out object\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 718\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m719\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_sock\u001b[49m\u001b[43m.\u001b[49m\u001b[43mrecv_into\u001b[49m\u001b[43m(\u001b[49m\u001b[43mb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 720\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m timeout:\n\u001b[32m 721\u001b[39m \u001b[38;5;28mself\u001b[39m._timeout_occurred = \u001b[38;5;28;01mTrue\u001b[39;00m\n",
|
| 629 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/ssl.py:1304\u001b[39m, in \u001b[36mSSLSocket.recv_into\u001b[39m\u001b[34m(self, buffer, nbytes, flags)\u001b[39m\n\u001b[32m 1300\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m flags != \u001b[32m0\u001b[39m:\n\u001b[32m 1301\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[32m 1302\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mnon-zero flags not allowed in calls to recv_into() on \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[33m\"\u001b[39m %\n\u001b[32m 1303\u001b[39m \u001b[38;5;28mself\u001b[39m.\u001b[34m__class__\u001b[39m)\n\u001b[32m-> \u001b[39m\u001b[32m1304\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnbytes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1305\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 1306\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m().recv_into(buffer, nbytes, flags)\n",
|
| 630 |
+
"\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/random/lib/python3.13/ssl.py:1138\u001b[39m, in \u001b[36mSSLSocket.read\u001b[39m\u001b[34m(self, len, buffer)\u001b[39m\n\u001b[32m 1136\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m 1137\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m buffer \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1138\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_sslobj\u001b[49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1139\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 1140\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._sslobj.read(\u001b[38;5;28mlen\u001b[39m)\n",
|
| 631 |
+
"\u001b[31mKeyboardInterrupt\u001b[39m: "
|
| 632 |
+
]
|
| 633 |
+
}
|
| 634 |
+
],
|
| 635 |
+
"source": [
|
| 636 |
+
"# Minimal scan: kits 1001..1060 — find sensor with most datapoints\n",
|
| 637 |
+
"\n",
|
| 638 |
+
def find_max_sensor_in_range(start_kit: int = 1015, end_kit: int = 1060, page_size: int = 50) -> dict:
    """Scan kits in [start_kit, end_kit] and return the sensor with the most datapoints.

    Returns a dict with keys: kit_id, sensor, count.
    """
    winner = {"kit_id": None, "sensor": None, "count": -1}
    for kid in range(start_kit, end_kit + 1):
        info = get_kit_info(kid)
        if not (info and isinstance(info, dict)):
            print(f"kit {kid}: not found")
            continue
        sensor_entries = info.get("sensors") or []
        if not sensor_entries:
            print(f"kit {kid}: no sensors")
            continue
        for entry in sensor_entries:
            sensor_name = entry.get("name")
            if not sensor_name:
                continue
            total = count_sensor_measurements(kid, sensor_name, page_size=page_size)
            print(f"kit {kid} sensor {sensor_name}: {total}")
            if total > winner["count"]:
                winner = {"kit_id": kid, "sensor": sensor_name, "count": total}
    return winner
|
| 658 |
+
"\n",
|
| 659 |
+
"best = find_max_sensor_in_range(1001, 1060, page_size=50)\n",
|
| 660 |
+
"print(\"\\nRESULT\")\n",
|
| 661 |
+
"print(best)"
|
| 662 |
+
]
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"cell_type": "code",
|
| 666 |
+
"execution_count": 18,
|
| 667 |
+
"id": "3216b9fb",
|
| 668 |
+
"metadata": {},
|
| 669 |
+
"outputs": [
|
| 670 |
+
{
|
| 671 |
+
"name": "stdout",
|
| 672 |
+
"output_type": "stream",
|
| 673 |
+
"text": [
|
| 674 |
+
"🔎 Scanning sensors from 1001 to 1060...\n",
|
| 675 |
+
"Sensor 1001: 0 datapoints (via /sensors/1001)\n",
|
| 676 |
+
"Sensor 1002: 0 datapoints (via /sensors/1002)\n",
|
| 677 |
+
"Sensor 1003: 0 datapoints (via /sensors/1003)\n",
|
| 678 |
+
"Sensor 1004: 0 datapoints (via /sensors/1004)\n",
|
| 679 |
+
"Sensor 1005: 0 datapoints (via /sensors/1005)\n",
|
| 680 |
+
"Sensor 1006: 0 datapoints (via /sensors/1006)\n",
|
| 681 |
+
"Sensor 1007: 0 datapoints (via /sensors/1007)\n",
|
| 682 |
+
"Sensor 1008: 0 datapoints (via /sensors/1008)\n",
|
| 683 |
+
"Sensor 1009: 0 datapoints (via /sensors/1009)\n",
|
| 684 |
+
"Sensor 1010: 0 datapoints (via /sensors/1010)\n",
|
| 685 |
+
"Sensor 1011: 0 datapoints (via /sensors/1011)\n",
|
| 686 |
+
"Sensor 1012: 0 datapoints (via /sensors/1012)\n",
|
| 687 |
+
"Sensor 1013: 0 datapoints (via /sensors/1013)\n",
|
| 688 |
+
"Sensor 1014: 0 datapoints (via /sensors/1014)\n",
|
| 689 |
+
"Sensor 1015: 0 datapoints (via /sensors/1015)\n",
|
| 690 |
+
"Sensor 1016: 0 datapoints (via /sensors/1016)\n",
|
| 691 |
+
"Sensor 1017: 0 datapoints (via /sensors/1017)\n",
|
| 692 |
+
"Sensor 1018: 0 datapoints (via /sensors/1018)\n",
|
| 693 |
+
"Sensor 1019: 0 datapoints (via /sensors/1019)\n",
|
| 694 |
+
"Sensor 1020: 0 datapoints (via /sensors/1020)\n",
|
| 695 |
+
"Sensor 1021: 0 datapoints (via /sensors/1021)\n",
|
| 696 |
+
"Sensor 1022: 0 datapoints (via /sensors/1022)\n",
|
| 697 |
+
"Sensor 1023: 0 datapoints (via /sensors/1023)\n",
|
| 698 |
+
"Sensor 1024: 0 datapoints (via /sensors/1024)\n",
|
| 699 |
+
"Sensor 1025: 0 datapoints (via /sensors/1025)\n",
|
| 700 |
+
"Sensor 1026: 0 datapoints (via /sensors/1026)\n",
|
| 701 |
+
"Sensor 1027: 0 datapoints (via /sensors/1027)\n",
|
| 702 |
+
"Sensor 1028: 0 datapoints (via /sensors/1028)\n",
|
| 703 |
+
"Sensor 1029: 0 datapoints (via /sensors/1029)\n",
|
| 704 |
+
"Sensor 1030: 0 datapoints (via /sensors/1030)\n",
|
| 705 |
+
"Sensor 1031: 0 datapoints (via /sensors/1031)\n",
|
| 706 |
+
"Sensor 1032: 0 datapoints (via /sensors/1032)\n",
|
| 707 |
+
"Sensor 1033: 0 datapoints (via /sensors/1033)\n",
|
| 708 |
+
"Sensor 1034: 0 datapoints (via /sensors/1034)\n",
|
| 709 |
+
"Sensor 1035: 0 datapoints (via /sensors/1035)\n",
|
| 710 |
+
"Sensor 1036: 0 datapoints (via /sensors/1036)\n",
|
| 711 |
+
"Sensor 1037: 0 datapoints (via /sensors/1037)\n",
|
| 712 |
+
"Sensor 1038: 0 datapoints (via /sensors/1038)\n",
|
| 713 |
+
"Sensor 1039: 0 datapoints (via /sensors/1039)\n",
|
| 714 |
+
"Sensor 1040: 0 datapoints (via /sensors/1040)\n",
|
| 715 |
+
"Sensor 1041: 0 datapoints (via /sensors/1041)\n",
|
| 716 |
+
"Sensor 1042: 0 datapoints (via /sensors/1042)\n",
|
| 717 |
+
"Sensor 1043: 0 datapoints (via /sensors/1043)\n",
|
| 718 |
+
"Sensor 1044: 0 datapoints (via /sensors/1044)\n",
|
| 719 |
+
"Sensor 1045: 0 datapoints (via /sensors/1045)\n",
|
| 720 |
+
"Sensor 1046: 0 datapoints (via /sensors/1046)\n",
|
| 721 |
+
"Sensor 1047: 0 datapoints (via /sensors/1047)\n",
|
| 722 |
+
"Sensor 1048: 0 datapoints (via /sensors/1048)\n",
|
| 723 |
+
"Sensor 1049: 0 datapoints (via /sensors/1049)\n",
|
| 724 |
+
"Sensor 1050: 0 datapoints (via /sensors/1050)\n",
|
| 725 |
+
"Sensor 1051: 0 datapoints (via /sensors/1051)\n",
|
| 726 |
+
"Sensor 1052: 0 datapoints (via /sensors/1052)\n",
|
| 727 |
+
"Sensor 1053: 0 datapoints (via /sensors/1053/readings)\n",
|
| 728 |
+
"Sensor 1054: 0 datapoints (via /sensors/1054)\n",
|
| 729 |
+
"Sensor 1055: 0 datapoints (via /sensors/1055)\n",
|
| 730 |
+
"Sensor 1056: 0 datapoints (via /sensors/1056)\n",
|
| 731 |
+
"Sensor 1057: 0 datapoints (via /sensors/1057)\n",
|
| 732 |
+
"Sensor 1058: 0 datapoints (via /sensors/1058)\n",
|
| 733 |
+
"Sensor 1059: 0 datapoints (via /sensors/1059)\n",
|
| 734 |
+
"Sensor 1060: 0 datapoints (via /sensors/1060)\n",
|
| 735 |
+
"\n",
|
| 736 |
+
"🏁 Scan complete.\n",
|
| 737 |
+
"🏆 Sensor with most datapoints:\n",
|
| 738 |
+
" 🆔 ID: 1001\n",
|
| 739 |
+
" 📊 Count: 0\n",
|
| 740 |
+
" 🔗 Endpoint: /sensors/1001\n"
|
| 741 |
+
]
|
| 742 |
+
}
|
| 743 |
+
],
|
| 744 |
+
"source": [
|
| 745 |
+
"from collections import defaultdict\n",
|
| 746 |
+
"\n",
|
| 747 |
+
"def _count_datapoints_from_response(data) -> int:\n",
|
| 748 |
+
" \"\"\"Best-effort count of datapoints from arbitrary API responses.\"\"\"\n",
|
| 749 |
+
" if data is None:\n",
|
| 750 |
+
" return 0\n",
|
| 751 |
+
" if isinstance(data, list):\n",
|
| 752 |
+
" return len(data)\n",
|
| 753 |
+
" if isinstance(data, dict):\n",
|
| 754 |
+
" # Prefer common array keys\n",
|
| 755 |
+
" for key in [\n",
|
| 756 |
+
" 'data', 'results', 'measurements', 'readings', 'entries', 'values',\n",
|
| 757 |
+
" 'observations', 'records', 'points'\n",
|
| 758 |
+
" ]:\n",
|
| 759 |
+
" if key in data and isinstance(data[key], list):\n",
|
| 760 |
+
" return len(data[key])\n",
|
| 761 |
+
" # Fallback: count scalar series\n",
|
| 762 |
+
" return sum(1 for v in data.values() if isinstance(v, (int, float, str, bool)))\n",
|
| 763 |
+
" return 0\n",
|
| 764 |
+
"\n",
|
| 765 |
+
"\n",
|
| 766 |
+
def fetch_sensor_datapoints(sensor_id: int) -> tuple[int, dict]:
    """
    Try multiple likely endpoints for a sensor and return the datapoint count and last successful meta.
    Returns (count, meta) where meta contains endpoint and status.
    """
    candidate_paths = (
        f"/sensors/{sensor_id}",
        f"/sensors/{sensor_id}/data",
        f"/sensors/{sensor_id}/readings",
        f"/sensors/{sensor_id}/measurements",
        f"/devices/{sensor_id}/data",
        f"/nodes/{sensor_id}/uplinks",
    )

    failure = None
    root = BASE_URL.rstrip('/')
    for path in candidate_paths:
        try:
            resp = requests.get(f"{root}{path}", headers=HEADERS, timeout=30)
        except requests.RequestException as exc:
            failure = {"endpoint": path, "error": str(exc)}
            continue
        if resp.status_code != 200:
            failure = {"endpoint": path, "status": resp.status_code, "text": resp.text[:200]}
            continue
        # First 200 wins; a non-JSON body simply counts as zero datapoints.
        try:
            payload = resp.json()
        except Exception:
            payload = None
        return _count_datapoints_from_response(payload), {"endpoint": path, "status": resp.status_code}
    return 0, (failure or {"endpoint": None, "error": "no-endpoint-succeeded"})
|
| 798 |
+
"\n",
|
| 799 |
+
"\n",
|
| 800 |
+
def scan_sensors_and_find_max(start_id: int = 1001, end_id: int = 1060):
    """Scan sensor ids in [start_id, end_id], print per-sensor counts,
    and return {'best': ..., 'results': ...} with the winner and all counts."""
    print(f"🔎 Scanning sensors from {start_id} to {end_id}...")
    best = {"sensor_id": None, "count": -1, "meta": {}}
    results = {}

    for sensor_id in range(start_id, end_id + 1):
        count, meta = fetch_sensor_datapoints(sensor_id)
        results[sensor_id] = {"count": count, "meta": meta}
        print(f"Sensor {sensor_id}: {count} datapoints (via {meta.get('endpoint')})")
        if count > best["count"]:
            best = {"sensor_id": sensor_id, "count": count, "meta": meta}

    print("\n🏁 Scan complete.")
    if best["sensor_id"] is not None:
        print("🏆 Sensor with most datapoints:")
        print(f"  🆔 ID: {best['sensor_id']}")
        print(f"  📊 Count: {best['count']}")
        print(f"  🔗 Endpoint: {best['meta'].get('endpoint')}")
    else:
        print("No sensors returned datapoints in the given range.")

    return {"best": best, "results": results}
|
| 826 |
+
"\n",
|
| 827 |
+
"# Run the scan now\n",
|
| 828 |
+
"scan_result = scan_sensors_and_find_max(1001, 1060)"
|
| 829 |
+
]
|
| 830 |
+
},
|
| 831 |
+
{
|
| 832 |
+
"cell_type": "code",
|
| 833 |
+
"execution_count": 16,
|
| 834 |
+
"id": "46506887",
|
| 835 |
+
"metadata": {},
|
| 836 |
+
"outputs": [
|
| 837 |
+
{
|
| 838 |
+
"name": "stdout",
|
| 839 |
+
"output_type": "stream",
|
| 840 |
+
"text": [
|
| 841 |
+
"⚠️ Run the board analysis first to see visualizations.\n",
|
| 842 |
+
"💡 Make sure to update the API_KEY and BASE_URL in the configuration section.\n"
|
| 843 |
+
]
|
| 844 |
+
}
|
| 845 |
+
],
|
| 846 |
+
"source": [
|
| 847 |
+
def create_board_analysis_chart(board_stats: Dict[str, Dict]):
    """
    Create visualizations for board data analysis.

    Args:
        board_stats (Dict[str, Dict]): Board statistics from get_board_data_counts
    """
    if not board_stats:
        print("❌ No board statistics available for visualization.")
        return

    # Assemble a frame, sorted ascending so the busiest board is drawn on top.
    df = pd.DataFrame({
        'Board ID': list(board_stats.keys()),
        'Board Name': [entry['name'] for entry in board_stats.values()],
        'Data Points': [entry['data_count'] for entry in board_stats.values()],
    }).sort_values('Data Points', ascending=True)

    plt.figure(figsize=(12, 8))
    bars = plt.barh(range(len(df)), df['Data Points'], color='skyblue', alpha=0.7)

    plt.yticks(range(len(df)), df['Board Name'])
    plt.xlabel('Number of Data Points')
    plt.title('Data Points per Board - Teleagriculture API Analysis', fontsize=16, fontweight='bold')
    plt.grid(axis='x', alpha=0.3)

    # Annotate each bar with its count, nudged slightly right of the bar end.
    nudge = max(df['Data Points']) * 0.01
    for bar, value in zip(bars, df['Data Points']):
        plt.text(value + nudge, bar.get_y() + bar.get_height()/2,
                 str(value), va='center', fontweight='bold')

    # Highlight the board with the most data points.
    top_pos = df.index.get_loc(df['Data Points'].idxmax())
    bars[top_pos].set_color('gold')
    bars[top_pos].set_alpha(1.0)

    plt.tight_layout()
    plt.show()

    print("📊 DETAILED STATISTICS")
    print("=" * 50)
    print(f"Total boards analyzed: {len(df)}")
    print(f"Total data points across all boards: {df['Data Points'].sum()}")
    print(f"Average data points per board: {df['Data Points'].mean():.1f}")
    print(f"Median data points per board: {df['Data Points'].median():.1f}")
    print(f"Standard deviation: {df['Data Points'].std():.1f}")
    print()

    top_3 = df.nlargest(3, 'Data Points')
    print("🏆 TOP 3 BOARDS:")
    for rank, (_, row) in enumerate(top_3.iterrows(), 1):
        emoji = "🥇" if rank == 1 else "🥈" if rank == 2 else "🥉"
        print(f"{emoji} {row['Board Name']}: {row['Data Points']} data points")

    return df
|
| 916 |
+
"\n",
|
| 917 |
+
"# Create visualization if we have results\n",
|
| 918 |
+
"if 'result' in locals() and result and result.get('all_stats'):\n",
|
| 919 |
+
" print(\"📈 Creating visualization...\")\n",
|
| 920 |
+
" df_analysis = create_board_analysis_chart(result['all_stats'])\n",
|
| 921 |
+
"else:\n",
|
| 922 |
+
" print(\"⚠️ Run the board analysis first to see visualizations.\")\n",
|
| 923 |
+
" print(\"💡 Make sure to update the API_KEY and BASE_URL in the configuration section.\")"
|
| 924 |
+
]
|
| 925 |
+
},
|
| 926 |
+
{
|
| 927 |
+
"cell_type": "code",
|
| 928 |
+
"execution_count": null,
|
| 929 |
+
"id": "e01c5bf6",
|
| 930 |
+
"metadata": {},
|
| 931 |
+
"outputs": [],
|
| 932 |
+
"source": []
|
| 933 |
+
},
|
| 934 |
+
{
|
| 935 |
+
"cell_type": "markdown",
|
| 936 |
+
"id": "3553a610",
|
| 937 |
+
"metadata": {},
|
| 938 |
+
"source": [
|
| 939 |
+
"## Simple helper: get all sensor data for a kit id\n",
|
| 940 |
+
"\n",
|
| 941 |
+
"This function fetches all available measurements for a given kit (board) id across all its sensors and returns a tidy pandas DataFrame. It uses the same BASE_URL and HEADERS configured above and follows the API's cursor pagination automatically.\n",
|
| 942 |
+
"\n",
|
| 943 |
+
"- Input: kit_id (int)\n",
|
| 944 |
+
"- Optional: sensors (list[str]) to limit which sensors to fetch; defaults to all sensors on the kit\n",
|
| 945 |
+
"- Output: pandas DataFrame with columns like: kit_id, sensor, timestamp/value/..., depending on the API payload"
|
| 946 |
+
]
|
| 947 |
+
},
|
| 948 |
+
{
|
| 949 |
+
"cell_type": "code",
|
| 950 |
+
"execution_count": null,
|
| 951 |
+
"id": "051f0aab",
|
| 952 |
+
"metadata": {},
|
| 953 |
+
"outputs": [],
|
| 954 |
+
"source": [
|
| 955 |
+
"from typing import Iterable, Any\n",
|
| 956 |
+
"\n",
|
| 957 |
+
def _paginate(url: str, params: dict | None = None, headers: dict | None = None, page_size: int = 100, max_pages: int = 500):
    """Yield successive 'data' pages from a cursor-paginated endpoint.

    Expects responses shaped like {'data': [...], 'meta': {'next_cursor': ...}}.
    Stops silently on network errors, non-200 responses, or unparseable JSON.
    """
    query = dict(params or {})
    query["page[size]"] = str(page_size)
    next_cursor = None
    for _ in range(max_pages):
        if next_cursor:
            query["page[cursor]"] = next_cursor
        try:
            resp = requests.get(url, headers=headers, params=query, timeout=30)
        except requests.RequestException:
            return
        if resp.status_code != 200:
            return
        try:
            body = resp.json()
        except Exception:
            return
        rows = body.get("data")
        yield rows if isinstance(rows, list) else []
        next_cursor = body.get("meta", {}).get("next_cursor")
        if not next_cursor:
            return
|
| 983 |
+
"\n",
|
| 984 |
+
"\n",
|
| 985 |
+
"def get_kit_measurements_df(kit_id: int, sensors: Iterable[str] | None = None, page_size: int = 100) -> pd.DataFrame:\n",
|
| 986 |
+
" \"\"\"\n",
|
| 987 |
+
" Fetch all measurements for a given kit across selected sensors and return a tidy DataFrame.\n",
|
| 988 |
+
"\n",
|
| 989 |
+
" - kit_id: numeric id of the kit/board\n",
|
| 990 |
+
" - sensors: optional list of sensor names; if None, will discover sensors via get_kit_info(kit_id)\n",
|
| 991 |
+
" - page_size: page size for cursor pagination\n",
|
| 992 |
+
"\n",
|
| 993 |
+
" Returns a DataFrame with columns: kit_id, sensor, timestamp, value, unit, _raw\n",
|
| 994 |
+
" (Columns may include NaNs if the API doesn't provide those fields.)\n",
|
| 995 |
+
" \"\"\"\n",
|
| 996 |
+
" # Discover sensors if not provided\n",
|
| 997 |
+
" sensor_list: list[str]\n",
|
| 998 |
+
" if sensors is None:\n",
|
| 999 |
+
" kit = get_kit_info(kit_id)\n",
|
| 1000 |
+
" if not kit:\n",
|
| 1001 |
+
" return pd.DataFrame(columns=[\"kit_id\", \"sensor\", \"timestamp\", \"value\", \"unit\", \"_raw\"])\n",
|
| 1002 |
+
" sensor_list = [s.get(\"name\") for s in (kit.get(\"sensors\") or []) if isinstance(s, dict) and s.get(\"name\")]\n",
|
| 1003 |
+
" else:\n",
|
| 1004 |
+
" sensor_list = [s for s in sensors if s]\n",
|
| 1005 |
+
"\n",
|
| 1006 |
+
" rows: list[dict[str, Any]] = []\n",
|
| 1007 |
+
"\n",
|
| 1008 |
+
" for sname in sensor_list:\n",
|
| 1009 |
+
" base = f\"{BASE_URL}/kits/{kit_id}/{sname}/measurements\"\n",
|
| 1010 |
+
" for page in _paginate(base, headers=HEADERS, page_size=page_size):\n",
|
| 1011 |
+
" for item in page:\n",
|
| 1012 |
+
" if not isinstance(item, dict):\n",
|
| 1013 |
+
" continue\n",
|
| 1014 |
+
" rec = item\n",
|
| 1015 |
+
" # Some APIs wrap fields inside 'attributes'\n",
|
| 1016 |
+
" if isinstance(rec.get(\"attributes\"), dict):\n",
|
| 1017 |
+
" # merge attributes shallowly (attributes wins for overlapping keys)\n",
|
| 1018 |
+
" rec = {**{k: v for k, v in rec.items() if k != \"attributes\"}, **rec[\"attributes\"]}\n",
|
| 1019 |
+
" # Normalize common fields\n",
|
| 1020 |
+
" ts = rec.get(\"timestamp\") or rec.get(\"time\") or rec.get(\"created_at\") or rec.get(\"datetime\")\n",
|
| 1021 |
+
" val = rec.get(\"value\") or rec.get(\"reading\") or rec.get(\"measurement\") or rec.get(\"val\")\n",
|
| 1022 |
+
" unit = rec.get(\"unit\") or rec.get(\"units\")\n",
|
| 1023 |
+
" rows.append({\n",
|
| 1024 |
+
" \"kit_id\": kit_id,\n",
|
| 1025 |
+
" \"sensor\": sname,\n",
|
| 1026 |
+
" \"timestamp\": ts,\n",
|
| 1027 |
+
" \"value\": val,\n",
|
| 1028 |
+
" \"unit\": unit,\n",
|
| 1029 |
+
" \"_raw\": item, # keep original\n",
|
| 1030 |
+
" })\n",
|
| 1031 |
+
"\n",
|
| 1032 |
+
" df = pd.DataFrame(rows)\n",
|
| 1033 |
+
" # Coerce timestamp and sort\n",
|
| 1034 |
+
" if not df.empty and \"timestamp\" in df.columns:\n",
|
| 1035 |
+
" try:\n",
|
| 1036 |
+
" df[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"], errors=\"coerce\", utc=True)\n",
|
| 1037 |
+
" df = df.sort_values([\"sensor\", \"timestamp\"], kind=\"stable\")\n",
|
| 1038 |
+
" except Exception:\n",
|
| 1039 |
+
" pass\n",
|
| 1040 |
+
" return df"
|
| 1041 |
+
]
|
| 1042 |
+
},
|
| 1043 |
+
{
|
| 1044 |
+
"cell_type": "code",
|
| 1045 |
+
"execution_count": null,
|
| 1046 |
+
"id": "e5f429f9",
|
| 1047 |
+
"metadata": {},
|
| 1048 |
+
"outputs": [],
|
| 1049 |
+
"source": [
|
| 1050 |
+
"# Demo: fetch all data for a kit id (adjust kit_id)\n",
|
| 1051 |
+
"KIT_DEMO_ID = 1001 # change as needed\n",
|
| 1052 |
+
"\n",
|
| 1053 |
+
"df_all = get_kit_measurements_df(KIT_DEMO_ID)\n",
|
| 1054 |
+
"print(f\"Fetched {len(df_all)} rows for kit {KIT_DEMO_ID}\")\n",
|
| 1055 |
+
"df_all.head()"
|
| 1056 |
+
]
|
| 1057 |
+
},
|
| 1058 |
+
{
|
| 1059 |
+
"cell_type": "code",
|
| 1060 |
+
"execution_count": null,
|
| 1061 |
+
"id": "61c9be14",
|
| 1062 |
+
"metadata": {},
|
| 1063 |
+
"outputs": [],
|
| 1064 |
+
"source": [
|
| 1065 |
+
"# Simplest helper: get a DataFrame for a kit id\n",
|
| 1066 |
+
"\n",
|
| 1067 |
+
def get_kit_df(kit_id: int) -> pd.DataFrame:
    """Convenience wrapper: all measurements for one kit as a tidy DataFrame."""
    frame = get_kit_measurements_df(kit_id)
    return frame
|
| 1069 |
+
"\n",
|
| 1070 |
+
"# Example usage:\n",
|
| 1071 |
+
"# df = get_kit_df(1001)\n",
|
| 1072 |
+
"# df.head()"
|
| 1073 |
+
]
|
| 1074 |
+
}
|
| 1075 |
+
],
|
| 1076 |
+
"metadata": {
|
| 1077 |
+
"kernelspec": {
|
| 1078 |
+
"display_name": "random",
|
| 1079 |
+
"language": "python",
|
| 1080 |
+
"name": "python3"
|
| 1081 |
+
},
|
| 1082 |
+
"language_info": {
|
| 1083 |
+
"codemirror_mode": {
|
| 1084 |
+
"name": "ipython",
|
| 1085 |
+
"version": 3
|
| 1086 |
+
},
|
| 1087 |
+
"file_extension": ".py",
|
| 1088 |
+
"mimetype": "text/x-python",
|
| 1089 |
+
"name": "python",
|
| 1090 |
+
"nbconvert_exporter": "python",
|
| 1091 |
+
"pygments_lexer": "ipython3",
|
| 1092 |
+
"version": "3.13.3"
|
| 1093 |
+
}
|
| 1094 |
+
},
|
| 1095 |
+
"nbformat": 4,
|
| 1096 |
+
"nbformat_minor": 5
|
| 1097 |
+
}
|
data/kit_1001_2025-09-22.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
genai.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dotenv import load_dotenv
|
| 2 |
+
from PIL import Image
|
| 3 |
+
from typing import Optional
|
| 4 |
+
from io import BytesIO
|
| 5 |
+
|
| 6 |
+
load_dotenv() # take environment variables from .env.
|
| 7 |
+
|
| 8 |
+
from google import genai
|
| 9 |
+
|
| 10 |
+
client = genai.Client()
|
| 11 |
+
|
| 12 |
+
# Default creative prompt
|
| 13 |
+
DEFAULT_PROMPT = (
|
| 14 |
+
"Turn the provided data visualization into a painting using an eastern art style."
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def generate_genai_image(
    input_image: Optional[Image.Image] = None,
    prompt: Optional[str] = None,
    model: str = "gemini-2.5-flash-image-preview",
    save_to_disk: bool = False,
) -> Optional[Image.Image]:
    """Generate a stylized image from an input PIL image using Google GenAI.

    Args:
        input_image: Source PIL image. If None, calls weather_data_visualisation() to generate in-memory.
        prompt: Optional text prompt; falls back to DEFAULT_PROMPT.
        model: Model name to use.
        save_to_disk: When True, saves to output/generated_image.png (no disk reads occur).

    Returns:
        A PIL.Image on success, or None if generation failed.
    """

    # Resolve the source image strictly in memory — no disk reads.
    source = input_image
    if source is None:
        try:
            from weather_data_visualisation import weather_data_visualisation

            source = weather_data_visualisation(save_to_disk=False)
        except Exception:
            source = None
    if source is None:
        return None

    # Fall back to the module-level default prompt when none is given.
    text_prompt = prompt if prompt else DEFAULT_PROMPT

    try:
        response = client.models.generate_content(
            model=model,
            contents=[text_prompt, source],
        )
    except Exception as e:
        # No credentials or API issue
        print(f"GenAI request failed: {e}")
        return None

    result = None
    try:
        for part in response.candidates[0].content.parts:
            if getattr(part, "text", None) is not None:
                # Optional: print any textual response
                print(part.text)
            elif getattr(part, "inline_data", None) is not None:
                result = Image.open(BytesIO(part.inline_data.data)).convert("RGB")
    except Exception as e:
        print(f"Failed to parse GenAI response: {e}")
        return None

    # Optional save without using as a future fallback
    if result is not None and save_to_disk:
        try:
            import os

            os.makedirs("output", exist_ok=True)
            result.save("output/generated_image.png")
        except Exception:
            pass

    return result
|
| 85 |
+
|
| 86 |
+
if __name__ == "__main__":
|
| 87 |
+
generate_genai_image()
|
gradio_app.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Tuple
|
| 2 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
# Legacy disk-based fallback paths. Both are kept as None because images are
# now produced and passed around entirely in memory.
WEATHER_PNG_PATH = None  # disk fallback removed
GENERATED_PNG_PATH = None  # disk fallback removed
|
| 7 |
+
|
| 8 |
+
def _placeholder_image(size: Tuple[int, int], text: str, bg=(230, 230, 230)) -> Image.Image:
    """Build a plain solid-colour image with *text* centred on it.

    Used whenever a real image (weather plot or GenAI output) cannot be
    produced; never reads anything from disk.
    """
    canvas = Image.new("RGB", size, color=bg)
    painter = ImageDraw.Draw(canvas)
    try:
        label_font = ImageFont.load_default()
    except Exception:
        label_font = None
    # textbbox returns (left, top, right, bottom); right/bottom serve as the
    # rendered text extent for centring.
    text_w, text_h = painter.textbbox((0, 0), text, font=label_font)[2:]
    position = ((size[0] - text_w) / 2, (size[1] - text_h) / 2)
    painter.text(position, text, fill=(80, 80, 80), font=label_font)
    return canvas
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def load_weather_plot(size: Tuple[int, int] = (1024, 1024)) -> Image.Image:
    """Load the weather plot image produced by weather_data_visualisation.py.

    Calls the visualisation function to obtain a PIL image directly, resizing
    to *size* when needed. Falls back to a placeholder if the module or its
    data is unavailable.
    """
    try:
        # Prefer calling the function to get a PIL image directly.
        from weather_data_visualisation import weather_data_visualisation

        plot = weather_data_visualisation(save_to_disk=False)
        if isinstance(plot, Image.Image):
            if plot.size == size:
                return plot
            return plot.resize(size, Image.LANCZOS)
    except Exception as e:
        print(f"Weather plot generation failed: {e}")

    return _placeholder_image(size, "Weather plot unavailable")
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def load_genai_output(size: Tuple[int, int] = (1024, 1024)) -> Image.Image:
    """Load the GenAI output image if available; otherwise return a placeholder.

    If `genai.py` later exposes a function like `generate_genai_image(size)`,
    it will be used here automatically.
    """
    try:
        from genai import generate_genai_image

        # Provide the latest weather image if possible to guide the GenAI.
        try:
            guide_image = load_weather_plot(size)
        except Exception:
            guide_image = None

        result = generate_genai_image(input_image=guide_image, save_to_disk=False)
        if isinstance(result, Image.Image):
            if result.size == size:
                return result
            return result.resize(size, Image.LANCZOS)
    except Exception as e:
        print(f"genai.py not usable yet: {e}")

    return _placeholder_image(size, "GenAI image pending")
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def get_both_images(size: Tuple[int, int] = (1024, 1024)) -> Tuple[Image.Image, Image.Image]:
    """Return the (weather plot, GenAI output) image pair at the given size."""
    return load_weather_plot(size), load_genai_output(size)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def create_app():
    """Creates and returns the Gradio app with two side-by-side images."""
    import gradio as gr

    with gr.Blocks(title="Weather × GenAI") as app:
        gr.Markdown("# Weather visualization and GenAI output")
        with gr.Row():
            weather_panel = gr.Image(label="Weather plot", type="pil")
            genai_panel = gr.Image(label="GenAI output", type="pil")

        panels = [weather_panel, genai_panel]

        # Populate both panels when the app first loads.
        app.load(fn=get_both_images, inputs=None, outputs=panels)

        # Manual refresh button to re-fetch both images on demand.
        refresh_btn = gr.Button("Refresh")
        refresh_btn.click(fn=get_both_images, inputs=None, outputs=panels)

    return app
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
if __name__ == "__main__":
    # Build the UI and start the local Gradio server.
    app = create_app()
    app.launch()
|
requirements.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
numpy
|
| 2 |
+
pandas
|
| 3 |
+
matplotlib
|
| 4 |
+
pillow
|
| 5 |
+
gradio
|
| 6 |
+
python-dotenv
|
| 7 |
+
google-genai
|
utils.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Utility helpers for Teleagriculture kits API
|
| 3 |
+
|
| 4 |
+
Provides:
|
| 5 |
+
- BASE_URL, HEADERS (with optional Bearer from KIT_API_KEY env)
|
| 6 |
+
- get_kit_info(kit_id)
|
| 7 |
+
- get_kit_measurements_df(kit_id, sensors=None, page_size=100)
|
| 8 |
+
"""
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import os
|
| 12 |
+
from typing import Any, Iterable, Optional
|
| 13 |
+
|
| 14 |
+
import pandas as pd
|
| 15 |
+
import requests
|
| 16 |
+
|
| 17 |
+
# API configuration
# Base URL is overridable via the KITS_API_BASE env var; defaults to the
# public Teleagriculture kits API.
BASE_URL = os.getenv("KITS_API_BASE", "https://kits.teleagriculture.org/api")
# Optional bearer token; when unset, requests go out unauthenticated.
KIT_API_KEY = os.getenv("KIT_API_KEY")

HEADERS: dict[str, str] = {
    "Accept": "application/json",
}
if KIT_API_KEY:
    HEADERS["Authorization"] = f"Bearer {KIT_API_KEY}"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_kit_info(kit_id: int) -> Optional[dict]:
    """Fetch metadata for a kit (board).

    Returns the JSON 'data' object or None if not found / error.
    """
    endpoint = f"{BASE_URL}/kits/{kit_id}"
    try:
        resp = requests.get(endpoint, headers=HEADERS, timeout=30)
        if resp.status_code != 200:
            return None
        return resp.json().get("data")
    except requests.RequestException:
        # Network failure or (modern requests) JSON decode error.
        return None
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _paginate(
    url: str,
    *,
    params: Optional[dict] = None,
    headers: Optional[dict] = None,
    page_size: int = 100,
    max_pages: int = 500,
):
    """Cursor pagination helper yielding lists of items from {'data': [...]} pages.

    Stops when no next_cursor is provided or on any non-200/parse error.
    """
    query = dict(params or {})
    query["page[size]"] = str(page_size)
    cursor = None
    # Hard cap on pages guards against a server that never stops paging.
    for _ in range(max_pages):
        if cursor:
            query["page[cursor]"] = cursor
        try:
            resp = requests.get(url, headers=headers, params=query, timeout=30)
        except requests.RequestException:
            return
        if resp.status_code != 200:
            return
        try:
            payload = resp.json()
        except Exception:
            return
        items = payload.get("data")
        yield items if isinstance(items, list) else []
        cursor = payload.get("meta", {}).get("next_cursor")
        if not cursor:
            return
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def get_kit_measurements_df(
    kit_id: int,
    sensors: Optional[Iterable[str]] = None,
    *,
    page_size: int = 100,
) -> pd.DataFrame:
    """Fetch all measurements for the given kit across its sensors as a DataFrame.

    - If sensors is None, discover sensors via get_kit_info(kit_id).
    - Returns columns: kit_id, sensor, timestamp, value, unit, _raw
      (depending on API, some fields may be None/NaT)
    """
    columns = ["kit_id", "sensor", "timestamp", "value", "unit", "_raw"]

    # Determine sensor list
    if sensors is None:
        kit = get_kit_info(kit_id)
        if not kit:
            return pd.DataFrame(columns=columns)
        sensor_list = [
            s.get("name")
            for s in (kit.get("sensors") or [])
            if isinstance(s, dict) and s.get("name")
        ]
    else:
        sensor_list = [s for s in sensors if s]

    def _first_present(rec: dict, keys: tuple) -> Any:
        # Return the first value that exists and is not None. An `or`-chain
        # would wrongly skip legitimate falsy readings such as 0 or 0.0.
        for key in keys:
            val = rec.get(key)
            if val is not None:
                return val
        return None

    rows: list[dict[str, Any]] = []

    for sname in sensor_list:
        endpoint = f"{BASE_URL}/kits/{kit_id}/{sname}/measurements"
        for page in _paginate(endpoint, headers=HEADERS, page_size=page_size):
            for item in page:
                if not isinstance(item, dict):
                    continue

                # Some APIs nest details under 'attributes'. Copy it so that
                # merging top-level keys does not mutate the nested dict of
                # `item`, which is stored verbatim in '_raw' below.
                rec = dict(item.get("attributes") or {})
                rec.update({k: v for k, v in item.items() if k != "attributes"})

                rows.append(
                    {
                        "kit_id": kit_id,
                        "sensor": sname,
                        "timestamp": _first_present(
                            rec, ("timestamp", "time", "created_at", "datetime")
                        ),
                        "value": _first_present(
                            rec, ("value", "reading", "measurement", "val")
                        ),
                        "unit": _first_present(rec, ("unit", "units")),
                        "_raw": item,  # preserve original, unmutated
                    }
                )

    df = pd.DataFrame(rows)
    if not df.empty and "timestamp" in df.columns:
        try:
            df["timestamp"] = pd.to_datetime(df["timestamp"], errors="coerce", utc=True)
            df = df.sort_values(["sensor", "timestamp"], kind="stable")
        except Exception:
            # Best-effort normalisation; return unsorted/raw timestamps on failure.
            pass
    return df
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def fetch_kit_dataframe(kit_id: int) -> pd.DataFrame:
    """Simplest API: return all measurements for the given kit as a DataFrame.

    Equivalent to get_kit_measurements_df(kit_id) with sensible defaults.
    """
    # Thin convenience wrapper: sensors are auto-discovered from kit metadata
    # and the default pagination (page_size=100) is used.
    return get_kit_measurements_df(kit_id)
|
weather_data_visualisation.ipynb
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": null,
|
| 6 |
+
"id": "e7f2dbbd",
|
| 7 |
+
"metadata": {},
|
| 8 |
+
"outputs": [],
|
| 9 |
+
"source": [
|
| 10 |
+
"# This script generates a sample \"Monsoon Mandala\" artwork using placeholder data. \n",
|
| 11 |
+
"# Replace the synthetic data block with your real pandas DataFrame columns to recreate the piece with your tea farm data.\n",
|
| 12 |
+
"\n",
|
| 13 |
+
"import numpy as np\n",
|
| 14 |
+
"import pandas as pd\n",
|
| 15 |
+
"import matplotlib.pyplot as plt\n",
|
| 16 |
+
"weatherdata_df = pd.read_csv(\"data/kit_1001_2025-09-22.csv\", index_col=2)\n",
|
| 17 |
+
"weatherdata_df.drop(columns=['kit_id', \"unit\",\"_raw\"], inplace=True)\n",
|
| 18 |
+
"weatherdata_df.dropna(inplace=True)\n",
|
| 19 |
+
"weatherdata_df.index = pd.to_datetime(weatherdata_df.index)\n",
|
| 20 |
+
"weatherdata_df = weatherdata_df.pivot(columns='sensor', values='value')\n",
|
| 21 |
+
"weatherdata_df.columns"
|
| 22 |
+
]
|
| 23 |
+
},
|
| 24 |
+
{
|
| 25 |
+
"cell_type": "code",
|
| 26 |
+
"execution_count": null,
|
| 27 |
+
"id": "18a06b7d",
|
| 28 |
+
"metadata": {},
|
| 29 |
+
"outputs": [],
|
| 30 |
+
"source": [
|
| 31 |
+
"weatherdata_df.columns"
|
| 32 |
+
]
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
"cell_type": "code",
|
| 36 |
+
"execution_count": null,
|
| 37 |
+
"id": "4dadce7a",
|
| 38 |
+
"metadata": {},
|
| 39 |
+
"outputs": [],
|
| 40 |
+
"source": [
|
| 41 |
+
"# ---- Mapping to polar \"Monsoon Mandala\" ----\n",
|
| 42 |
+
"# Angles map to time; radii encode a blended metric; thickness & dot size encode other variables.\n",
|
| 43 |
+
"\n",
|
| 44 |
+
"df = weatherdata_df\n",
|
| 45 |
+
"theta = np.linspace(0, 2*np.pi, len(df), endpoint=False)\n",
|
| 46 |
+
"\n",
|
| 47 |
+
"# Normalize helpers (avoid specifying colors, per instructions).\n",
|
| 48 |
+
"def norm(x):\n",
|
| 49 |
+
" x = np.asarray(x)\n",
|
| 50 |
+
" if np.nanmax(x) - np.nanmin(x) == 0:\n",
|
| 51 |
+
" return np.zeros_like(x)\n",
|
| 52 |
+
" return (x - np.nanmin(x)) / (np.nanmax(x) - np.nanmin(x))\n",
|
| 53 |
+
"\n",
|
| 54 |
+
"T = norm(df['ftTemp'].values)\n",
|
| 55 |
+
"H = norm(df['gbHum'].values)\n",
|
| 56 |
+
"R = norm(df['NH3'].values)\n",
|
| 57 |
+
"W = norm(df['C3H8'].values)\n",
|
| 58 |
+
"L = norm(df['CO'].values)\n",
|
| 59 |
+
"\n",
|
| 60 |
+
"# Radius combines temp (outer breathing), humidity (inner swell), light (diurnal bloom)\n",
|
| 61 |
+
"radius = 0.45 + 0.35*(0.5*T + 0.3*H + 0.2*L)\n",
|
| 62 |
+
"\n",
|
| 63 |
+
"# Stroke width from wind; point size from rainfall intensity\n",
|
| 64 |
+
"stroke = 0.3 + 3.2*W\n",
|
| 65 |
+
"dots = 5 + 60*R\n",
|
| 66 |
+
"\n",
|
| 67 |
+
"# Rolling medians for smooth rings\n",
|
| 68 |
+
"def smooth(x, k=21):\n",
|
| 69 |
+
" if k < 3: \n",
|
| 70 |
+
" return x\n",
|
| 71 |
+
" w = np.ones(k)/k\n",
|
| 72 |
+
" return np.convolve(x, w, mode=\"same\")\n",
|
| 73 |
+
"\n",
|
| 74 |
+
"radius_smooth = smooth(radius, k=31)\n",
|
| 75 |
+
"\n",
|
| 76 |
+
"# ---- Plot (no explicit colors; uses matplotlib defaults) ----\n",
|
| 77 |
+
"plt.figure(figsize=(8, 8))\n",
|
| 78 |
+
"ax = plt.subplot(111, projection=\"polar\")\n",
|
| 79 |
+
"ax.set_theta_direction(-1) # clockwise\n",
|
| 80 |
+
"ax.set_theta_offset(np.pi/2.0) # start at top\n",
|
| 81 |
+
"ax.set_axis_off()\n",
|
| 82 |
+
"\n",
|
| 83 |
+
"# Outer ribbon\n",
|
| 84 |
+
"ax.plot(theta, radius_smooth, linewidth=2.0)\n",
|
| 85 |
+
"\n",
|
| 86 |
+
"# Inner filigree rings\n",
|
| 87 |
+
"for k in [3, 7, 13]:\n",
|
| 88 |
+
" ax.plot(theta, smooth(radius * (0.85 + 0.05*np.sin(k*theta)), k=15), linewidth=0.8)\n",
|
| 89 |
+
"\n",
|
| 90 |
+
"# Rainfall pearls\n",
|
| 91 |
+
"ax.scatter(theta[::3], (radius_smooth*0.92)[::3], s=dots[::3], alpha=0.6)\n",
|
| 92 |
+
"\n",
|
| 93 |
+
"# Wind tick marks (radial sticks)\n",
|
| 94 |
+
"for th, rr, sw in zip(theta[::12], radius_smooth[::12], stroke[::12]):\n",
|
| 95 |
+
" ax.plot([th, th], [rr*0.75, rr*0.98], linewidth=sw*0.12, alpha=0.8)\n",
|
| 96 |
+
"\n",
|
| 97 |
+
"plt.tight_layout()\n",
|
| 98 |
+
"png_path = \"output/monsoon_mandala_example.png\"\n",
|
| 99 |
+
"svg_path = \"output/monsoon_mandala_example.svg\"\n",
|
| 100 |
+
"plt.savefig(png_path, dpi=300, bbox_inches=\"tight\", pad_inches=0.05)\n",
|
| 101 |
+
"plt.savefig(svg_path, bbox_inches=\"tight\", pad_inches=0.05)\n",
|
| 102 |
+
"png_path, svg_path\n"
|
| 103 |
+
]
|
| 104 |
+
}
|
| 105 |
+
],
|
| 106 |
+
"metadata": {
|
| 107 |
+
"kernelspec": {
|
| 108 |
+
"display_name": "datascience",
|
| 109 |
+
"language": "python",
|
| 110 |
+
"name": "python3"
|
| 111 |
+
},
|
| 112 |
+
"language_info": {
|
| 113 |
+
"codemirror_mode": {
|
| 114 |
+
"name": "ipython",
|
| 115 |
+
"version": 3
|
| 116 |
+
},
|
| 117 |
+
"file_extension": ".py",
|
| 118 |
+
"mimetype": "text/x-python",
|
| 119 |
+
"name": "python",
|
| 120 |
+
"nbconvert_exporter": "python",
|
| 121 |
+
"pygments_lexer": "ipython3",
|
| 122 |
+
"version": "3.12.4"
|
| 123 |
+
}
|
| 124 |
+
},
|
| 125 |
+
"nbformat": 4,
|
| 126 |
+
"nbformat_minor": 5
|
| 127 |
+
}
|
weather_data_visualisation.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This script generates a sample "Monsoon Mandala" artwork using placeholder data.
|
| 2 |
+
# Replace the synthetic data block with your real pandas DataFrame columns to recreate the piece with your tea farm data.
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import matplotlib.pyplot as plt
|
| 8 |
+
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
|
| 9 |
+
from PIL import Image
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def weather_data_visualisation(
    save_to_disk: bool = False,
    csv_path: str = "data/kit_1001_2025-09-22.csv",
) -> Image.Image:
    """Render the "Monsoon Mandala" polar artwork from kit sensor data.

    Parameters
    ----------
    save_to_disk : bool
        When True, additionally write PNG/SVG copies under ``output/``.
    csv_path : str
        Path to the measurements CSV export (column 2 is the timestamp
        index). Defaults to the bundled sample export for backward
        compatibility.

    Returns
    -------
    PIL.Image.Image
        The rendered artwork as an RGB image.
    """
    # Load and reshape: rows are (sensor, value) pairs indexed by timestamp;
    # pivot to one column per sensor.
    weatherdata_df = pd.read_csv(csv_path, index_col=2)
    weatherdata_df.drop(columns=['kit_id', "unit", "_raw"], inplace=True)
    weatherdata_df.dropna(inplace=True)
    weatherdata_df.index = pd.to_datetime(weatherdata_df.index)
    weatherdata_df = weatherdata_df.pivot(columns='sensor', values='value')

    # ---- Mapping to polar "Monsoon Mandala" ----
    # Angles map to time; radii encode a blended metric; thickness & dot size
    # encode other variables.

    df = weatherdata_df
    theta = np.linspace(0, 2*np.pi, len(df), endpoint=False)

    # Normalize helpers (avoid specifying colors, per instructions).
    def norm(x):
        # Min-max normalise to [0, 1]; constant series collapse to zeros.
        x = np.asarray(x)
        if np.nanmax(x) - np.nanmin(x) == 0:
            return np.zeros_like(x)
        return (x - np.nanmin(x)) / (np.nanmax(x) - np.nanmin(x))

    # NOTE(review): the CSV is assumed to contain these sensor columns after
    # the pivot; a missing sensor raises KeyError here.
    T = norm(df['ftTemp'].values)
    H = norm(df['gbHum'].values)
    R = norm(df['NH3'].values)
    W = norm(df['C3H8'].values)
    L = norm(df['CO'].values)

    # Radius combines temp (outer breathing), humidity (inner swell),
    # light (diurnal bloom).
    radius = 0.45 + 0.35*(0.5*T + 0.3*H + 0.2*L)

    # Stroke width from wind; point size from rainfall intensity.
    stroke = 0.3 + 3.2*W
    dots = 5 + 60*R

    # Rolling means for smooth rings.
    def smooth(x, k=21):
        # Centered moving average; tiny windows return the input unchanged.
        if k < 3:
            return x
        w = np.ones(k)/k
        return np.convolve(x, w, mode="same")

    radius_smooth = smooth(radius, k=31)

    # ---- Plot (no explicit colors; uses matplotlib defaults) ----
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111, projection="polar")
    ax.set_theta_direction(-1)       # clockwise
    ax.set_theta_offset(np.pi/2.0)   # start at top
    ax.set_axis_off()

    # Outer ribbon
    ax.plot(theta, radius_smooth, linewidth=2.0)

    # Inner filigree rings
    for k in [3, 7, 13]:
        ax.plot(theta, smooth(radius * (0.85 + 0.05*np.sin(k*theta)), k=15), linewidth=0.8)

    # Rainfall pearls
    ax.scatter(theta[::3], (radius_smooth*0.92)[::3], s=dots[::3], alpha=0.6)

    # Wind tick marks (radial sticks)
    for th, rr, sw in zip(theta[::12], radius_smooth[::12], stroke[::12]):
        ax.plot([th, th], [rr*0.75, rr*0.98], linewidth=sw*0.12, alpha=0.8)

    plt.tight_layout()

    # Render figure to RGBA buffer and convert to PIL.Image
    canvas = FigureCanvas(fig)
    canvas.draw()
    buf = np.asarray(canvas.buffer_rgba())
    pil_img = Image.fromarray(buf, mode="RGBA").convert("RGB")

    # Optionally also save to disk for compatibility with other tools
    if save_to_disk:
        png_path = "output/monsoon_mandala_example.png"
        svg_path = "output/monsoon_mandala_example.svg"
        try:
            os.makedirs(os.path.dirname(png_path), exist_ok=True)
            fig.savefig(png_path, dpi=300, bbox_inches="tight", pad_inches=0.05)
            fig.savefig(svg_path, bbox_inches="tight", pad_inches=0.05)
        except Exception:
            # Best-effort: keep returning the in-memory image even if saving fails.
            pass

    plt.close(fig)
    return pil_img
|
| 98 |
+
|
| 99 |
+
if __name__ == "__main__":
    # Render once (in memory only; nothing is written to disk by default).
    weather_data_visualisation()
|