kikomiko committed
Commit 96f6720
1 Parent(s): 6b79761
.dockerignore ADDED
@@ -0,0 +1,172 @@
+ # copied verbatim from .gitignore, with changes ##
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ ##.env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ .vscode/
+ codealike.json
+
+ # app specific
+ test_data/
+
+ ## for Docker
+ Dockerfile
+ .gitignore
.env ADDED
@@ -0,0 +1,23 @@
+ ######################################################################################################
+ # SambaNova Hosted Models reference: https://docs.sambanova.ai/cloud/docs/get-started/supported-models
+ ######################################################################################################
+
+ LLAMA_API_BASE_URL=https://api.sambanova.ai/v1
+
+ ## 1. Llama Text LLM
+ LLAMA_TEXT_MODEL=Meta-Llama-3.3-70B-Instruct
+ LLAMA_TEXT_MAX_TOKENS=131072
+
+ ## 2. Llama Vision LLM
+ LLAMA_VISION_MODEL=Llama-3.2-11B-Vision-Instruct
+ LLAMA_VISION_MAX_TOKENS=4096
+
+ ######################################################################################################
+ # Nebius Hosted Models reference: https://studio.nebius.com/models/text2image
+ ######################################################################################################
+
+ IMAGE_GEN_API_BASE_URL=https://api.studio.nebius.ai/v1
+
+ ## 1. Image Gen Model
+ IMAGE_GEN_MODEL=black-forest-labs/flux-schnell
+ IMAGE_GEN_MAX_PROMPT_LEN=2000
.env_sample ADDED
@@ -0,0 +1,25 @@
+ ######################################################################################################
+ # SambaNova Hosted Models reference: https://docs.sambanova.ai/cloud/docs/get-started/supported-models
+ ######################################################################################################
+
+ LLAMA_API_BASE_URL=https://api.sambanova.ai/v1
+ LLAMA_API_KEY=xxx
+
+ ## 1. Llama Text LLM
+ LLAMA_TEXT_MODEL=Meta-Llama-3.3-70B-Instruct
+ LLAMA_TEXT_MAX_TOKENS=131072
+
+ ## 2. Llama Vision LLM
+ LLAMA_VISION_MODEL=Llama-3.2-11B-Vision-Instruct
+ LLAMA_VISION_MAX_TOKENS=4096
+
+ ######################################################################################################
+ # Nebius Hosted Models reference: https://studio.nebius.com/models/text2image
+ ######################################################################################################
+
+ IMAGE_GEN_API_BASE_URL=https://api.studio.nebius.ai/v1
+ IMAGE_GEN_API_KEY=xxx
+
+ ## 1. Image Gen Model
+ IMAGE_GEN_MODEL=black-forest-labs/flux-schnell
+ IMAGE_GEN_MAX_PROMPT_LEN=2000
.gitignore ADDED
@@ -0,0 +1,169 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ env.bak/
+ venv.bak/
+
+ # Environments
+ # .env (allow .env to be committed)
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ .vscode/
+ codealike.json
+
+ # app specific
+ test_data/
Dockerfile ADDED
@@ -0,0 +1,22 @@
+ FROM python:3.12-slim-bookworm
+
+ # Set the working directory in the container
+ WORKDIR /app
+
+ # Copy the requirements file into the container
+ COPY requirements.txt .
+
+ # Install the app dependencies
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the source code into the container
+ COPY . .
+
+ # Set env variables
+ ENV PYTHONUNBUFFERED=1
+
+ # Verify the build-time secrets are available (a bare RUN --mount has no command and fails the build)
+ RUN --mount=type=secret,id=LLAMA_API_KEY,mode=0444,required=true test -f /run/secrets/LLAMA_API_KEY && \
+     --mount=type=secret,id=IMAGE_GEN_API_KEY,mode=0444,required=true test -f /run/secrets/IMAGE_GEN_API_KEY
+
+ # Command to run the app on container startup
+ CMD ["python", "-u", "/app/gradio_ui.py"]
README.md CHANGED
@@ -9,4 +9,21 @@ license: mit
  short_description: An agent which generates a new 💓health-ai💓 recipe for you!
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # HealthAI Chef
+
+ An agent which generates a new 💓health-ai💓 recipe for you from just an image of a meal, based on your dietary restrictions and medical diagnoses! For example, say you loved eating hamburgers, but now you have high cholesterol and need to eat vegetarian. Upload your favorite burger photo and the chef agent will scan it using Llama 3 Vision, figure out the recipe for it, use Llama 3 Text to generate a new recipe based on your health needs, and use an image generation model to produce an appetizing picture of the new dish. The agent shows you the old and new pictures and recipes side by side, and even explains what changes it made. If you don't have a meal photo handy, you can use the agent's internet search feature to find one, thanks to Llama's tool-calling support.
+
+ ## Tech Stack
+
+ The agent is built in Python 3.12 using the following technologies:
+
+ - Llama 3.3 70B Text Instruct hosted on SambaNova
+ - Llama 3.2 11B Vision Instruct hosted on SambaNova
+ - Black Forest Labs Flux Schnell Image Gen hosted on Nebius
+ - LangChain
+ - LangGraph
+ - Gradio
+ - DuckDuckGo Search
+ - Jinja
+
+ _Note: these are all open-source._
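
For orientation, here is a minimal sketch of how the pieces added in this commit fit together, using the functions defined in `recipe_generator.py` below. The image path and the restriction/diagnosis values are illustrative only; the real app drives the same calls from the Gradio UI in `gradio_ui.py`.

```python
# Illustrative sketch only -- mirrors what gradio_ui.py does behind the UI.
import asyncio

from recipe_generator import get_altered_recipe, get_image_from_recipe, get_recipe_from_image

async def main():
    # 1. Vision LLM: turn a meal photo into a recipe (dict parsed from the model's JSON output)
    orig_recipe = await get_recipe_from_image('my_burger.jpg')  # hypothetical image path

    # 2. Text LLM: rewrite the recipe for the user's restrictions and diagnoses
    new_recipe = await get_altered_recipe(orig_recipe, ['Vegetarian'], ['High Cholesterol'])

    # 3. Image generation model: render a picture of the new dish
    image_url = get_image_from_recipe(new_recipe)
    print(new_recipe['name'], image_url)

asyncio.run(main())
```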
data/dietary_restrictions.txt ADDED
@@ -0,0 +1,14 @@
+ Dairy-Free
+ Egg-Free
+ Gluten-Free
+ Halal
+ Kosher
+ Low-Carb
+ Low-FODMAP
+ Low-Sodium
+ Nut-Free
+ Paleo
+ Pescatarian
+ Soy-Free
+ Vegan
+ Vegetarian
data/medical_diagnoses.txt ADDED
@@ -0,0 +1,31 @@
+ Anemia
+ Arthritis
+ Asthma
+ Celiac Disease
+ Diabetes (Type 1)
+ Diabetes (Type 2)
+ Digestive Issues
+ Diverticulitis
+ Egg Allergy
+ Fatty Liver Disease (NAFLD/NASH)
+ Fish Allergy
+ GERD (Gastroesophageal Reflux Disease)
+ Gout
+ Heart Disease
+ High Blood Pressure
+ High Cholesterol
+ Hyperlipidemia
+ Hypertension
+ Hypothyroidism
+ Kidney Stones
+ Lactose Intolerance
+ Milk Allergy
+ Osteoporosis
+ Peanut Allergy
+ Prediabetes
+ Renal Disease
+ Shellfish Allergy
+ Soy Allergy
+ Tree Nut Allergy
+ Underweight
+ Wheat Allergy
gradio_ui.py ADDED
@@ -0,0 +1,123 @@
+ import gradio as gr
+
+ from jinja2 import Environment, FileSystemLoader
+
+ from log_util import logger
+ from meal_image_search import search_meal_image
+ from recipe_generator import get_altered_recipe, get_image_from_recipe, get_recipe_from_image
+ from util import yield_lines_from_file
+
+ BRAND = 'HealthAI'
+ TITLE = f'{BRAND} Chef'
+
+ env = Environment(loader=FileSystemLoader('templates'))
+ template = env.get_template('recipe.html')
+
+ DISCLAIMER = 'This recipe is a healthier option <em>generated by AI</em>. A registered dietitian can provide expert dietary advice 😀'
+
+ async def process_image(file):
+     if not file:
+         yield None, None
+         return
+
+     image_path = file.name
+     yield image_path, None
+
+     try:
+         recipe = await get_recipe_from_image(image_path)
+         html = template.render(recipe=recipe)
+         yield image_path, html
+
+     except Exception as e:
+         logger.error(e)
+         yield None, None
+         if 'not a meal' in str(e).lower():
+             raise gr.Error("This image doesn't contain a meal.")
+         raise gr.Error("Sorry, this image can't be processed.")
+
+ async def get_new_recipe(orig_recipe: str, restrictions: list[str], diagnoses: list[str]):
+     if not orig_recipe or (not restrictions and not diagnoses):
+         yield None, None
+
+     if not orig_recipe:
+         raise gr.Error('Please upload a meal pic first.')
+
+     if not restrictions and not diagnoses:
+         raise gr.Error(
+             'Please select dietary restrictions or medical diagnoses, then try again. I will give you a new recipe based on your selections!')
+
+     try:
+         recipe = await get_altered_recipe(orig_recipe, restrictions, diagnoses)
+         recipe['disclaimer'] = DISCLAIMER
+         html = template.render(recipe=recipe)
+         yield html, None
+
+     except Exception as e:
+         logger.error(e)
+         yield None, None
+         raise gr.Error("Sorry, a new recipe can't be made.")
+
+     try:
+         image_url = get_image_from_recipe(recipe)
+         yield html, image_url
+
+     except Exception as e:
+         logger.error(e)
+         yield html, None
+
+ def search_image(search_text: str) -> str:
+     if not search_text:
+         return None
+
+     try:
+         image_url = search_meal_image(search_text)
+         if image_url:
+             return image_url
+
+     except Exception as e:
+         logger.error(e)
+
+     raise gr.Error(f"Sorry, can't find a meal pic for {search_text}.")
+
+ with gr.Blocks(title=TITLE, theme=gr.themes.Monochrome(), css='''
+ footer {visibility: hidden}
+
+ /* make container full width */
+ .gradio-container {
+     width: 100% !important; /* full width */
+     max-width: 100% !important; /* prevent max-width restriction */
+     margin: 5px 0px 5px 0px !important; /* top, right, bottom, left */
+ }
+ '''.strip()) as demo:
+
+     with gr.Row():
+         gr.Markdown(f'# {TITLE} 🍽️')
+         new_recipe_button = gr.Button(f'Get {BRAND} Recipe 😋', scale=0)
+
+     file_select = gr.File(label='Upload Meal Pic', container=True, file_types=['.jpg', '.jpeg', '.png', '.gif', '.webp'])
+
+     search_text = gr.Textbox(placeholder='Or enter a meal to search for an image of it', submit_btn=True, container=False, interactive=True, max_lines=1)
+
+     with gr.Row():
+         restrictions_dropdown = gr.Dropdown(label='Dietary Restrictions', choices=yield_lines_from_file('dietary_restrictions.txt'), interactive=True, multiselect=True, value=None)
+         diagnoses_dropdown = gr.Dropdown(label='Medical Diagnoses', choices=yield_lines_from_file('medical_diagnoses.txt'), interactive=True, multiselect=True, value=None)
+
+     with gr.Row():
+         orig_meal = gr.Image(label='Original Meal', interactive=False)
+         new_meal = gr.Image(label=f'{BRAND} Meal', interactive=False)
+
+     with gr.Row():
+         orig_recipe = gr.HTML(label='Original Recipe', container=True, show_label=True)
+         new_recipe = gr.HTML(label=f'{BRAND} Recipe', container=True, show_label=True)
+
+     search_text.submit(search_image, inputs=search_text, outputs=file_select)
+
+     file_select.change(
+         process_image,
+         inputs=file_select,
+         outputs=[orig_meal, orig_recipe]
+     )
+
+     new_recipe_button.click(fn=get_new_recipe, inputs=[orig_recipe, restrictions_dropdown, diagnoses_dropdown], outputs=[new_recipe, new_meal])
+
+ demo.launch(server_name='0.0.0.0')
image_generator.py ADDED
@@ -0,0 +1,43 @@
+ import os
+
+ from dotenv import load_dotenv
+ from langchain_core.prompts import PromptTemplate
+ from openai import OpenAI
+
+ from log_util import logger
+ from time_it import time_it
+ from util import load_prompt
+
+ load_dotenv()
+
+ IMAGE_GEN_API_BASE_URL = os.getenv('IMAGE_GEN_API_BASE_URL')
+ IMAGE_GEN_API_KEY = os.getenv('IMAGE_GEN_API_KEY')
+
+ IMAGE_GEN_MODEL = os.getenv('IMAGE_GEN_MODEL')
+ IMAGE_GEN_MAX_PROMPT_LEN = int(os.getenv('IMAGE_GEN_MAX_PROMPT_LEN'))
+
+ IMAGE_GEN_OPTIONS = {
+     'response_extension': 'png',
+     'width': 1024,
+     'height': 1024,
+     'num_inference_steps': int(os.getenv('NUM_INFERENCE_STEPS', '16')),
+     'negative_prompt': '',
+     'seed': -1
+ }
+
+ @time_it
+ def generate_image(prompt_file: str, input: dict) -> str:
+     prompt = load_prompt(prompt_file)
+     prompt_template = PromptTemplate.from_template(prompt)
+     prompt = prompt_template.invoke(input).to_string()
+
+     # truncate the final (formatted) prompt, not the raw template, so placeholders are never cut
+     if len(prompt) > IMAGE_GEN_MAX_PROMPT_LEN:
+         logger.info(f'Prompt length {len(prompt)} exceeds {IMAGE_GEN_MAX_PROMPT_LEN} characters, will be truncated.')
+         prompt = prompt[:IMAGE_GEN_MAX_PROMPT_LEN]
+
+     images_client = OpenAI(base_url=IMAGE_GEN_API_BASE_URL, api_key=IMAGE_GEN_API_KEY).images
+     response = images_client.generate(model=IMAGE_GEN_MODEL, prompt=prompt, response_format='url', extra_body=IMAGE_GEN_OPTIONS)
+     image_url = response.data[0].url
+     logger.info(f'{image_url=}')
+     return image_url
llm.py ADDED
@@ -0,0 +1,38 @@
+ import os
+
+ from dotenv import load_dotenv
+ from langchain_openai import ChatOpenAI
+
+ from log_util import logger
+ from time_it import time_it_async
+
+ load_dotenv()
+
+ LLAMA_API_BASE_URL = os.getenv('LLAMA_API_BASE_URL')
+ LLAMA_API_KEY = os.getenv('LLAMA_API_KEY')
+
+ LLAMA_TEXT_MODEL = os.getenv('LLAMA_TEXT_MODEL')
+ LLAMA_TEXT_MAX_TOKENS = int(os.getenv('LLAMA_TEXT_MAX_TOKENS'))
+
+ LLAMA_VISION_MODEL = os.getenv('LLAMA_VISION_MODEL')
+ LLAMA_VISION_MAX_TOKENS = int(os.getenv('LLAMA_VISION_MAX_TOKENS'))
+
+ TEMPERATURE = float(os.getenv('TEMPERATURE', '0.2'))
+
+ TOKEN_FACTOR_FOR_PROMPT = 0.75
+
+ def get_text_llm() -> ChatOpenAI:
+     return _get_llm(LLAMA_TEXT_MODEL, LLAMA_TEXT_MAX_TOKENS)
+
+ def get_vision_llm() -> ChatOpenAI:
+     return _get_llm(LLAMA_VISION_MODEL, LLAMA_VISION_MAX_TOKENS)
+
+ def _get_llm(model: str, max_tokens: int) -> ChatOpenAI:
+     return ChatOpenAI(base_url=LLAMA_API_BASE_URL, api_key=LLAMA_API_KEY,
+                       model=model, max_tokens=int(max_tokens*TOKEN_FACTOR_FOR_PROMPT), temperature=TEMPERATURE)
+
+ @time_it_async
+ async def invoke_llm_async(chain, input: dict | None = None):  # None instead of a mutable {} default
+     response = await chain.ainvoke(input or {})
+     logger.info(f'{response=}')
+     return response
log_util.py ADDED
@@ -0,0 +1,23 @@
+ import logging
+ import os
+
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ PROJECT_NAME = 'healthai-chef'
+
+ default_log_args = {
+     'level': logging.DEBUG if int(os.getenv('DEBUG', '0')) else logging.INFO,
+     'format': '%(asctime)s [%(levelname)s] %(filename)s:%(lineno)d - %(message)s',
+     'datefmt': '%d-%b-%y %H:%M:%S',
+     'force': True,
+ }
+ logging.basicConfig(**default_log_args)
+ logger = logging.getLogger(PROJECT_NAME)
+
+ # disable 3rd party logs
+ LOGGERS_TO_DISABLE = [
+ ]
+ for logger_name in LOGGERS_TO_DISABLE:
+     logging.getLogger(logger_name).setLevel(logging.CRITICAL + 1)
meal_image_search.py ADDED
@@ -0,0 +1,81 @@
+ import json
+ import os
+
+ from dotenv import load_dotenv
+ from duckduckgo_search import DDGS
+ from langchain_core.messages import BaseMessage, ToolMessage
+ from langchain_core.prompts import PromptTemplate
+ from langchain_core.tools import tool
+ from langgraph.graph import END, MessageGraph
+ from langgraph.prebuilt import ToolNode
+ from typing import TypedDict
+
+ from llm import get_text_llm
+ from log_util import logger
+ from time_it import time_it
+ from util import load_prompt
+
+ load_dotenv()
+
+ MAX_IMAGE_SEARCH_RESULTS = int(os.getenv('MAX_IMAGE_SEARCH_RESULTS', '3'))
+
+ class ImageSearchResult(TypedDict):
+     title: str
+     url: str
+
+ @time_it
+ def search_meal_image(meal: str) -> str:
+     prompt = load_prompt('validate_is_meal.prompt.txt')
+
+     llm = get_text_llm()
+     tools = [search_meal_images]
+
+     def is_meal_router(messages: list[BaseMessage]) -> str:
+         if messages[-1].content.lower() == 'yes':
+             return 'is_meal'
+         return END
+
+     graph = MessageGraph()
+     graph.add_node('validate_is_meal', llm)
+     graph.add_conditional_edges('validate_is_meal', is_meal_router)
+     graph.add_node('is_meal', llm.bind_tools(tools))
+     graph.add_edge('is_meal', 'call_tools')
+     graph.add_node('call_tools', ToolNode(tools))
+     graph.add_edge('call_tools', END)
+     graph.set_entry_point('validate_is_meal')
+
+     prompt_template = PromptTemplate.from_template(prompt)
+     prompt = prompt_template.format(phrase=meal)
+
+     workflow = graph.compile()
+     messages: list = workflow.invoke(prompt)
+     tool_messages = [message for message in messages if isinstance(message, ToolMessage)]
+     if tool_messages and tool_messages[0].content:
+         meal_images: list[ImageSearchResult] = json.loads(tool_messages[0].content)
+         if meal_images:
+             meal_image_url = meal_images[0]['url']
+             logger.info(f'{meal_image_url=}')
+             return meal_image_url
+     return None
+
+ @tool
+ def search_meal_images(meal: str) -> list[ImageSearchResult]:
+     '''Searches for images of the given meal.'''
+     return search_images(meal)
+
+ @time_it
+ def search_images(keywords: str, max_results: int | None = MAX_IMAGE_SEARCH_RESULTS) -> list[ImageSearchResult]:
+     results = DDGS().images(
+         keywords=keywords,
+         region='wt-wt',
+         safesearch='on',
+         size=None,
+         color='color',
+         type_image='photo',
+         layout=None,
+         license_image=None,
+         max_results=max_results,
+     )
+     logger.info(f'{keywords=}: {results=}')
+     results = [ImageSearchResult(title=result['title'], url=result['image']) for result in results]
+     return results
prompts/alter_recipe_for_healthier.prompt.txt ADDED
@@ -0,0 +1,29 @@
+ <system>
+ You are a highly experienced registered dietitian, providing evidence-based nutritional recommendations.
+ </system>
+
+ Human:
+
+ Given the provided recipe, alter it to be appropriate for your patient with the dietary restrictions and medical diagnoses below.
+
+ Return your answer of the altered recipe as a JSON object only (no comments and not an array) with these properties:
+ - name (string)
+ - ingredients (array of string)
+ - instructions (array of string, do NOT number the instructions)
+ - meal_type (string: "lunch", "dinner", or "breakfast")
+ - serves (string: "individual", "couple", or "family")
+ - notes (string: a description of what you changed)
+
+ For each ingredient specify the quantity or amount.
+
+ <recipe>
+ {recipe}
+ </recipe>
+
+ <dietary_restrictions>
+ {dietary_restrictions}
+ </dietary_restrictions>
+
+ <medical_diagnoses>
+ {medical_diagnoses}
+ </medical_diagnoses>
prompts/get_image_from_recipe.prompt.txt ADDED
@@ -0,0 +1,11 @@
+ <system>
+ You are an expert chef and food photographer.
+ </system>
+
+ Human:
+
+ Given the recipe below, generate an appetizing, realistic image of the meal with no text.
+
+ <recipe>
+ {recipe}
+ </recipe>
prompts/get_recipe_from_image.prompt.txt ADDED
@@ -0,0 +1,20 @@
+ <system>
+ You are a highly experienced registered dietitian, providing evidence-based nutritional recommendations.
+ </system>
+
+ Human:
+
+ Generate a recipe from the provided image of a prepared meal.
+
+ Return your answer as a JSON object only (no comments and not an array) with these properties:
+ - name (string)
+ - ingredients (array of string)
+ - instructions (array of string, do NOT number the instructions)
+ - meal_type (string: "lunch", "dinner", or "breakfast")
+ - serves (string: "individual", "couple", or "family")
+
+ For each ingredient specify the quantity or amount.
+
+ **Do not give any instruction beginning with "Serves as" because this is a recurring bug of yours.**
+
+ If the image is not a meal, return the string "Not a meal" (and do not return JSON in this case).
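
Both recipe prompts above request the same JSON shape, which the `JsonOutputParser` chain in `recipe_generator.py` parses into a plain dict for `templates/recipe.html` to render. A minimal illustration of that shape follows; the values are made up for the example, and `notes` only appears in the altered-recipe output.

```python
# Made-up example of the JSON object the recipe prompts ask the LLM to return.
example_recipe = {
    'name': 'Grilled Veggie Burger',
    'ingredients': ['1 whole-grain bun', '1 black bean patty', '2 slices tomato'],
    'instructions': ['Grill the patty for 4 minutes per side.', 'Assemble the burger.'],
    'meal_type': 'dinner',        # "lunch", "dinner", or "breakfast"
    'serves': 'individual',       # "individual", "couple", or "family"
    'notes': 'Swapped the beef patty for black beans to lower saturated fat.',  # altered recipe only
}
```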
prompts/validate_is_meal.prompt.txt ADDED
@@ -0,0 +1,12 @@
+ <system>
+ You are an expert on North American cuisine.
+ </system>
+
+ Human:
+
+ Respond "yes" or "no" whether the phrase below describes a common, well-known meal eaten at home or restaurants (including fast food) in North America.
+ No justification is needed for your answer, so do not respond with any other words except "yes" or "no".
+
+ <phrase>
+ {phrase}
+ </phrase>
recipe_generator.py ADDED
@@ -0,0 +1,76 @@
+ import base64
+
+ from langchain_core.messages import HumanMessage
+ from langchain_core.output_parsers import JsonOutputParser
+ from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
+
+ from image_generator import generate_image
+ from llm import get_text_llm, get_vision_llm, invoke_llm_async
+ from time_it import time_it_async
+ from util import UTF8_ENCODING, load_prompt
+
+ async def get_recipe_from_image(image_path: str) -> dict:
+     recipe: dict = await _analyze_image_from_path(image_path, 'get_recipe_from_image.prompt.txt')
+     return recipe
+
+ async def get_altered_recipe(orig_recipe: dict, restrictions: list[str], diagnoses: list[str]) -> dict:
+     recipe: dict = await _generate_text('alter_recipe_for_healthier.prompt.txt',
+                                         {'recipe': orig_recipe, 'dietary_restrictions': restrictions, 'medical_diagnoses': diagnoses})
+     if 'recipe' in recipe:
+         recipe = recipe['recipe']
+     return recipe
+
+ async def _analyze_image_from_path(image_path: str, prompt_file: str) -> dict | str:
+     with open(image_path, 'rb') as image_file:
+         image_data = image_file.read()
+     image_format = _get_image_format(image_path)
+     return await _analyze_image_from_bytes(image_data, prompt_file, image_format)
+
+ @time_it_async
+ async def _analyze_image_from_bytes(image_data: bytes, prompt_file: str, image_format: str, output_parser_type=JsonOutputParser) -> dict | str:
+     img_base64 = base64.b64encode(image_data).decode(UTF8_ENCODING)
+     prompt = load_prompt(prompt_file)
+     prompt_template = ChatPromptTemplate.from_messages([
+         HumanMessage(
+             content=[
+                 {'type': 'text', 'text': prompt},
+                 {
+                     'type': 'image_url',
+                     'image_url': {
+                         'url': f'data:image/{image_format};base64,{img_base64}',
+                         'detail': 'high'
+                     }
+                 }
+             ]
+         ),
+     ])
+
+     llm = get_vision_llm()
+     chain = prompt_template | llm | output_parser_type()
+     response = await invoke_llm_async(chain)
+
+     return response
+
+ def _get_image_format(image_path: str) -> str:
+     file_ext = image_path.split('.')[-1].lower()
+     match file_ext:
+         case 'jpg' | 'jpeg':
+             return 'jpeg'
+         case 'png' | 'gif' | 'webp':
+             return file_ext
+         case _:
+             raise ValueError(f'Unsupported image format for {image_path=}')
+
+ async def _generate_text(prompt_file: str, input: dict) -> dict | str:
+     prompt = load_prompt(prompt_file)
+     prompt_template = PromptTemplate.from_template(prompt)
+
+     llm = get_text_llm()
+     chain = prompt_template | llm | JsonOutputParser()
+     response = await invoke_llm_async(chain, input)
+     return response
+
+ def get_image_from_recipe(recipe: dict) -> str:
+     recipe_for_image_gen = {k: v for k, v in recipe.items() if k in {'name', 'ingredients', 'instructions', 'meal_type', 'serves'}}
+     image_url = generate_image('get_image_from_recipe.prompt.txt', {'recipe': recipe_for_image_gen})
+     return image_url
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ duckduckgo_search==7.5.2
+ gradio==5.21.0
+ jinja2==3.1.6
+ langchain_core==0.3.45
+ langchain_openai==0.3.8
+ langgraph==0.3.11
+ python-dotenv==1.0.1
templates/recipe.html ADDED
@@ -0,0 +1,93 @@
+ <!DOCTYPE html>
+ <html>
+ <head>
+ <style>
+     body {
+         font-family: 'Arial', sans-serif;
+         line-height: 1.6;
+         margin: 0;
+         padding: 20px;
+     }
+
+     h1 {
+         color: #333;
+         font-size: 2.5em;
+         margin-bottom: 1em;
+         text-align: center;
+     }
+
+     h2 {
+         color: #555;
+         font-size: 1.8em;
+         margin-top: 0.5em;
+         margin-bottom: 1em;
+     }
+
+     h3 {
+         color: #666;
+         font-size: 1.2em;
+         margin-bottom: 0.5em;
+     }
+
+     ul, ol {
+         padding-left: 30px;
+     }
+
+     li {
+         margin-bottom: 0.5em;
+     }
+
+     p {
+         margin-bottom: 1em;
+     }
+
+     strong {
+         font-weight: bold;
+     }
+
+     a {
+         color: #007bff; /* Blue link color */
+         text-decoration: none;
+     }
+
+     a:hover {
+         text-decoration: underline;
+     }
+
+     .recipe-container {
+         border: 1px solid #ddd;
+         padding: 20px;
+         margin-bottom: 2em;
+         box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
+         border-radius: 5px;
+     }
+ </style>
+ </head>
+ <body>
+     <div class="recipe-container">
+         <h2>{{ recipe.name }}</h2>
+         <p><strong>Meal suggestion:</strong> {{ recipe.meal_type }}</p>
+         <h3>Ingredients:</h3>
+         <ul>
+             {% for ingredient in recipe.ingredients %}
+             <li>{{ ingredient }}</li>
+             {% endfor %}
+         </ul>
+         <h3>Instructions:</h3>
+         <ol>
+             {% for instruction in recipe.instructions %}
+             <li>{{ instruction }}</li>
+             {% endfor %}
+         </ol>
+         {% if recipe.serves %}
+         <p><strong>Serves:</strong> {{ recipe.serves }}</p>
+         {% endif %}
+         {% if recipe.notes %}
+         <p><strong>Notes:</strong> {{ recipe.notes }}</p>
+         {% endif %}
+         {% if recipe.disclaimer %}
+         <p><strong>Disclaimer:</strong> {{ recipe.disclaimer }}</p>
+         {% endif %}
+     </div>
+ </body>
+ </html>
time_it.py ADDED
@@ -0,0 +1,55 @@
+ import datetime
+ import inspect
+ import os
+ import time
+
+ from functools import lru_cache, wraps
+
+ def time_it(func):
+     '''
+     Decorator to measure and print the execution time of a synchronous function, including when an error is raised.
+     '''
+     source_file, first_line_number = _get_function_location(func, line_offset=1)  # get line number of function definition AFTER line with decorator
+
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         start_time = time.time()
+         try:
+             return func(*args, **kwargs)
+         finally:
+             _log_execution_time(start_time, func, source_file, first_line_number)
+
+     return wrapper
+
+ def time_it_async(func):
+     '''
+     Decorator to measure and print the execution time of an asynchronous function, including when an error is raised.
+     '''
+     source_file, first_line_number = _get_function_location(func, line_offset=1)  # get line number of function definition AFTER line with decorator
+
+     @wraps(func)
+     async def wrapper(*args, **kwargs):
+         start_time = time.time()
+         try:
+             return await func(*args, **kwargs)
+         finally:
+             _log_execution_time(start_time, func, source_file, first_line_number)
+
+     return wrapper
+
+ @lru_cache(maxsize=None)
+ def _get_function_location(func, line_offset: int) -> tuple[str, int]:
+     try:
+         source_file = os.path.basename(inspect.getsourcefile(func))
+         first_line_number = inspect.getsourcelines(func)[1] + line_offset
+         return source_file, first_line_number
+     except OSError:
+         return None, None
+
+ def _log_execution_time(start_time: float, func, source_file: str | None, first_line_number: int | None) -> None:
+     end_time = time.time()
+     execution_time = end_time - start_time
+     timestamp = datetime.datetime.now().strftime('%d-%b-%y %H:%M:%S')
+     message = f'[{func.__name__}] took {execution_time:.4f} sec'
+     source_line_ref = f'{source_file}:{first_line_number}' if source_file and first_line_number else ''
+     print(f'{timestamp} [TIME] {source_line_ref} - {message}')
util.py ADDED
@@ -0,0 +1,11 @@
+ UTF8_ENCODING = 'utf-8'
+
+ def load_prompt(file_name: str) -> str:
+     with open(f'prompts/{file_name}', 'r', encoding=UTF8_ENCODING) as f:
+         return f.read()
+
+ def yield_lines_from_file(file_path: str):
+     with open(f'data/{file_path}', 'r', encoding=UTF8_ENCODING) as f:
+         for line in f:
+             if line.strip():  # skip blank lines
+                 yield line.strip()