diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..6b75645122782936c4555e3ae974ce9ce48d8029 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,32 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +assets/comfyui_template.png filter=lfs diff=lfs merge=lfs -text +assets/gradio_examples/4subject/ref.jpg filter=lfs diff=lfs merge=lfs -text +assets/gradio_examples/identity1_result.png filter=lfs diff=lfs merge=lfs -text +assets/gradio_examples/style3_style4_result.webp filter=lfs diff=lfs merge=lfs -text +assets/gradio_examples/style3.webp filter=lfs diff=lfs merge=lfs -text +assets/gradio_examples/style4.webp filter=lfs diff=lfs merge=lfs -text +assets/gradio_examples/z1_mix_style/ref1.webp filter=lfs diff=lfs merge=lfs -text +assets/gradio_examples/z1_mix_style/ref2.webp filter=lfs diff=lfs merge=lfs -text +assets/gradio_examples/z3_mix_style/ref1.jpg filter=lfs diff=lfs merge=lfs -text +assets/show_case1.webp filter=lfs diff=lfs merge=lfs -text +assets/show_case2.webp filter=lfs diff=lfs merge=lfs -text +assets/show_case3.webp filter=lfs diff=lfs merge=lfs -text +assets/show_case4.webp filter=lfs diff=lfs merge=lfs -text +assets/show_case5.webp filter=lfs diff=lfs merge=lfs -text +assets/show_case6.webp filter=lfs diff=lfs merge=lfs -text +assets/show_case7.webp filter=lfs diff=lfs merge=lfs -text +assets/show_case8.webp filter=lfs diff=lfs merge=lfs -text +assets/teaser.webp filter=lfs diff=lfs merge=lfs -text +assets/usoxcomfyui_official.jpeg filter=lfs diff=lfs merge=lfs -text +assets/usoxcomfyui.webp filter=lfs diff=lfs merge=lfs -text +workflow/17-17-29.webp.webp filter=lfs diff=lfs merge=lfs -text +workflow/example1.png filter=lfs diff=lfs merge=lfs -text +workflow/example2.png filter=lfs diff=lfs merge=lfs -text +workflow/example3.png filter=lfs diff=lfs merge=lfs -text +workflow/example4.png filter=lfs diff=lfs merge=lfs -text +workflow/example5.png filter=lfs diff=lfs merge=lfs -text +workflow/example6.png filter=lfs diff=lfs merge=lfs -text +workflow/input.png filter=lfs diff=lfs merge=lfs -text +workflow/style5_0.webp.webp filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..4dc185e1008e95d0f2733256031b338bcc3fe2ee --- /dev/null +++ b/.gitignore @@ -0,0 +1,195 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +#uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# User config files +.vscode/ +output/ + +# ckpt +*.bin +*.pt +*.pth + +logs/ +*.safetensors + + + +# FOR EXCLUSION OF DOWNLOADED WEIGHTS +# Ignore everything in weights/ +weights/* + +# But don't ignore this file +!weights/downloader.py \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f49a4e16e68b128803cc2dcea614603632b04eac --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index 8f89ba525676a96096123c4fadcc1263b4980f56..1d79e0c211714a96184de73a88324915c55ea830 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,243 @@ ---- -title: Video Generator -emoji: 🐠 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 5.44.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +

+ Logo +
+ Unified Style and Subject-Driven Generation via Disentangled and Reward Learning +

+ +

+Build +Build + + +

+

+ +>

Shaojin Wu, Mengqi Huang, Yufeng Cheng, Wenxu Wu, Jiahe Tian, Yiming Luo, Fei Ding, Qian He
+>UXO Team
+>Intelligent Creation Lab, Bytedance

+ +### 🚩 Updates +* **2025.09.03** 🎉 USO is now natively supported in ComfyUI, see official tutorial [USO in ComfyUI](https://docs.comfy.org/tutorials/flux/flux-1-uso) and our provided examples in `./workflow`. More tips are available in the [README below](https://github.com/bytedance/USO#%EF%B8%8F-comfyui-examples). +

+ +

+ +* **2025.08.28** 🔥 The [demo](https://huggingface.co/spaces/bytedance-research/USO) of USO is released. Try it Now! ⚡️ +* **2025.08.28** 🔥 Added an fp8 mode as the primary low-VRAM option (please scroll down), a gift for consumer-grade GPU users: peak VRAM usage is now ~16GB. +* **2025.08.27** 🔥 The [inference code](https://github.com/bytedance/USO) and [model](https://huggingface.co/bytedance-research/USO) of USO are released. +* **2025.08.27** 🔥 The [project page](https://bytedance.github.io/USO) of USO is created. +* **2025.08.27** 🔥 The [technical report](https://arxiv.org/abs/2508.18966) of USO is released. + +## 📖 Introduction +Existing literature typically treats style-driven and subject-driven generation as two disjoint tasks: the former prioritizes stylistic similarity, whereas the latter insists on subject consistency, resulting in an apparent antagonism. We argue that both objectives can be unified under a single framework because they ultimately concern the disentanglement and re-composition of “content” and “style”, a long-standing theme in style-driven research. To this end, we present USO, a Unified framework for Style driven and subject-driven GeneratiOn. First, we construct a large-scale triplet dataset consisting of content images, style images, and their corresponding stylized content images. Second, we introduce a disentangled learning scheme that simultaneously aligns style features and disentangles content from style through two complementary objectives, style-alignment training and content–style disentanglement training. Third, we incorporate a style reward-learning paradigm to further enhance the model’s performance. +

+ +

+ +## ⚡️ Quick Start + +### 🔧 Requirements and Installation + +Install the requirements +```bash +## create a virtual environment with python >= 3.10 and <= 3.12, e.g. +python -m venv uso_env +source uso_env/bin/activate +## or +conda create -n uso_env python=3.10 -y +conda activate uso_env + +## install torch +## recommended version: +pip install torch==2.4.0 torchvision==0.19.0 --index-url https://download.pytorch.org/whl/cu124 + +## then install the requirements you need +pip install -r requirements.txt # legacy installation command +``` + +Then download checkpoints: +```bash +# 1. set up .env file +cp example.env .env + +# 2. set your huggingface token in .env (open the file and change this value to your token) +HF_TOKEN=your_huggingface_token_here + +# 3. download the necessary weights (comment out any weights you don't need) +pip install huggingface_hub +python ./weights/downloader.py +``` +- **IF YOU ALREADY HAVE WEIGHTS, COMMENT OUT WHAT YOU DON'T NEED IN ./weights/downloader.py** + +### ✍️ Inference +* Start from the examples below to explore and spark your creativity. ✨ +```bash +# the first image is a content reference, and the rest are style references. + +# for subject-driven generation +python inference.py --prompt "The man in flower shops carefully match bouquets, conveying beautiful emotions and blessings with flowers. " --image_paths "assets/gradio_examples/identity1.jpg" --width 1024 --height 1024 +# for style-driven generation +# please keep the first image path empty +python inference.py --prompt "A cat sleeping on a chair." --image_paths "" "assets/gradio_examples/style1.webp" --width 1024 --height 1024 +# for style-subject driven generation (or set the prompt to empty for layout-preserved generation) +python inference.py --prompt "The woman gave an impassioned speech on the podium." --image_paths "assets/gradio_examples/identity2.webp" "assets/gradio_examples/style2.webp" --width 1024 --height 1024 +# for multi-style generation +# please keep the first image path empty +python inference.py --prompt "A handsome man." --image_paths "" "assets/gradio_examples/style3.webp" "assets/gradio_examples/style4.webp" --width 1024 --height 1024 + +# for low VRAM: +python inference.py --prompt "your prompt" --image_paths "your_image.jpg" --width 1024 --height 1024 --offload --model_type flux-dev-fp8 +``` +* You can also compare your results with the results in the `assets/gradio_examples` folder. + +* For more examples, visit our [project page](https://bytedance.github.io/USO) or try the live [demo](https://huggingface.co/spaces/bytedance-research/USO). + +### 🌟 Gradio Demo + +```bash +python app.py +``` + +**For low-VRAM usage**, please pass the `--offload` and `--name flux-dev-fp8` args. Peak memory usage will be ~16GB (single reference) to ~18GB (multiple references). + +```bash +# please use FLUX_DEV_FP8 instead of FLUX_DEV +export FLUX_DEV_FP8="YOUR_FLUX_DEV_PATH" + +python app.py --offload --name flux-dev-fp8 +``` + +## 🌈 More examples +We provide some prompts and results to help you better understand the model. You can check our [paper](https://arxiv.org/abs/2508.18966) or [project page](https://bytedance.github.io/USO/) for more visualizations. + +#### Subject/Identity-driven generation +
+If you want to place a subject into a new scene, please use natural language like "A dog/man/woman is doing...". If you only want to transfer the style but keep the layout, please use an instructive prompt like "Transform the style into ... style". For portrait-preserving generation, USO excels at producing high skin-detail images. A practical guideline: use half-body close-ups for half-body prompts, and full-body images when the pose or framing changes significantly. A minimal command sketch contrasting these two prompt styles is provided at the end of this subsection. +

+ +

+

+ +

+

+ +

+

+ +

+
+ + +#### Style-driven generation +
+Just upload one or two style images, and use natural language to create what you want. USO will generate images that follow your prompt and match the style you uploaded. +

+ +

+

+ +

+
+ +#### Style-subject driven generation +
+USO can stylize a single content reference with one or two style references. For layout-preserved generation, simply leave the prompt empty; a minimal command sketch is provided at the end of this subsection. +`Layout-preserved generation` +

+ +

+ +`Layout-shifted generation` +

+ +

+
+ +## ⚙️ ComfyUI examples +We’re pleased that USO now has native support in ComfyUI. For a quick start, please refer to the official tutorial [USO in ComfyUI](https://docs.comfy.org/tutorials/flux/flux-1-uso). To help you reproduce and match the results, we’ve provided several examples in `./workflow`, including **workflows** and their **inputs** and outputs, so you can quickly get familiar with what USO can do. With USO now fully compatible with the ComfyUI ecosystem, you can combine it with other plugins like ControlNet and LoRA. **We welcome community contributions of more workflows and examples.** + +Now you can easily run USO in ComfyUI. Just update ComfyUI to the latest version (0.3.57), and you’ll find USO in the official templates. +

+ +

+ +More examples are provided below: +

+ +

+ +**Identity preserved** +

+ +

+ +Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example1.json). Input images can be found in `./workflow` + +**Identity stylized** +

+ +

+ +Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example3.json). Input images can be found in `./workflow` + +**Identity + style reference** +

+ +

+ +Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example2.json). Input images can be found in `./workflow` + +**Single style reference** +

+ +

+ +Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example4.json). Input images can be found in `./workflow` +

+ +

+ +Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example6.json). Input images can be found in `./workflow` + +**Multiple style reference** +

+ +

+ +Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example5.json). Input images can be found in `./workflow` + +## 📄 Disclaimer +

+ We open-source this project for academic research. The vast majority of images + used in this project are either generated or from open-source datasets. If you have any concerns, + please contact us, and we will promptly remove any inappropriate content. + Our project is released under the Apache 2.0 License. If you apply it to other base models, + please ensure that you comply with the original licensing terms. +

This research aims to advance the field of generative AI. Users are free to + create images using this tool, provided they comply with local laws and exercise + responsible usage. The developers are not liable for any misuse of the tool by users.

+ +## 🚀 Updates +For the purpose of fostering research and the open-source community, we plan to open-source the entire project, encompassing training, inference, weights, dataset etc. Thank you for your patience and support! 🌟 +- [x] Release technical report. +- [x] Release github repo. +- [x] Release inference code. +- [x] Release model checkpoints. +- [x] Release huggingface space demo. +- Release training code. +- Release dataset. + +## Citation +If USO is helpful, please help to ⭐ the repo. + +If you find this project useful for your research, please consider citing our paper: +```bibtex +@article{wu2025uso, + title={USO: Unified Style and Subject-Driven Generation via Disentangled and Reward Learning}, + author={Shaojin Wu and Mengqi Huang and Yufeng Cheng and Wenxu Wu and Jiahe Tian and Yiming Luo and Fei Ding and Qian He}, + year={2025}, + eprint={2508.18966}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, +} +``` \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..b16ddc235675f2c757644a0513a08eb769c14857 --- /dev/null +++ b/app.py @@ -0,0 +1,263 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import dataclasses +import json +import os +from pathlib import Path + +from dotenv import load_dotenv +load_dotenv() + + +import gradio as gr +import torch + +from uso.flux.pipeline import USOPipeline +from transformers import SiglipVisionModel, SiglipImageProcessor + + +with open("assets/uso_text.svg", "r", encoding="utf-8") as svg_file: + text_content = svg_file.read() + +with open("assets/uso_logo.svg", "r", encoding="utf-8") as svg_file: + logo_content = svg_file.read() + +title = f""" +
+ {text_content} + by UXO Team + {logo_content} +
+""".strip() + +badges_text = r""" +
+ +Build +Build + +
+""".strip() + +tips = """ +### What is USO and How to use? +🎨 USO is a unified style-subject optimized customization model and the latest addition to the UXO family ( USO and UNO). +It can freely combine any subjects with any styles in any scenarios. + +💡 We provide step-by-step instructions in our Github Repo. +Additionally, try the examples provided below the demo to quickly get familiar with USO and inspire your creativity! + +### Updates +🔥 **2025.09.04** USO now has native support in ComfyUI (see ComfyUI's official documentation for details). For more information, please also check out our GitHub Repo. + +
+The model is trained at 1024x1024 resolution and supports 3 types of usage. Tips: + +* **Only content img**: supports the following types: + * Subject/Identity-driven (supports natural prompts, e.g., *A clock on the table.* *The woman near the sea.*; excels at producing **photorealistic portraits**) + * Style edit (layout-preserved): *Transform the image into Ghibli style/Pixel style/Retro comic style/Watercolor painting style...*. + * Style edit (layout-shift): *Ghibli style, the man on the beach.*. +* **Only style img**: references the input style and generates anything following your prompt. USO excels at this and further supports multiple style references (in beta). +* **Content img + style img**: places the content into the desired style. + * Layout-preserved: set the prompt to **empty**. + * Layout-shift: use a natural prompt.
""" + +star = """ +### If USO is helpful, please help to ⭐ our Github Repo. Thanks a lot!""" + +def get_examples(examples_dir: str = "assets/examples") -> list: + examples = Path(examples_dir) + ans = [] + for example in examples.iterdir(): + if not example.is_dir() or len(os.listdir(example)) == 0: + continue + with open(example / "config.json") as f: + example_dict = json.load(f) + + + example_list = [] + example_list.append(example_dict["prompt"]) # prompt + + for key in ["image_ref1", "image_ref2", "image_ref3"]: + if key in example_dict: + example_list.append(str(example / example_dict[key])) + else: + example_list.append(None) + + example_list.append(example_dict["seed"]) + ans.append(example_list) + return ans + + +def create_demo( + model_type: str, + device: str = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu"), + offload: bool = False, +): + + # hf_download set to false to prevent download of weights + pipeline = USOPipeline( + model_type, device, offload, only_lora=True, lora_rank=128, hf_download=False + ) + print("USOPipeline loaded successfully") + + + + + # ⚠️ Weights now load from local paths via .env instead of downloading + siglip_path = os.getenv("SIGLIP_PATH", "google/siglip-so400m-patch14-384") + siglip_processor = SiglipImageProcessor.from_pretrained(siglip_path) + siglip_model = SiglipVisionModel.from_pretrained(siglip_path) + + + siglip_model.eval() + siglip_model.to(device) + pipeline.model.vision_encoder = siglip_model + pipeline.model.vision_encoder_processor = siglip_processor + print("SigLIP model loaded successfully") + + with gr.Blocks() as demo: + gr.Markdown(title) + gr.Markdown(badges_text) + gr.Markdown(tips) + with gr.Row(): + with gr.Column(): + prompt = gr.Textbox(label="Prompt", value="A beautiful woman.") + with gr.Row(): + image_prompt1 = gr.Image( + label="Content Reference Img", visible=True, interactive=True, type="pil" + ) + image_prompt2 = gr.Image( + label="Style Reference Img", visible=True, interactive=True, type="pil" + ) + image_prompt3 = gr.Image( + label="Extra Style Reference Img (Beta)", visible=True, interactive=True, type="pil" + ) + + with gr.Row(): + with gr.Row(): + width = gr.Slider( + 512, 1536, 1024, step=16, label="Generation Width" + ) + height = gr.Slider( + 512, 1536, 1024, step=16, label="Generation Height" + ) + with gr.Row(): + with gr.Row(): + keep_size = gr.Checkbox( + label="Keep input size", + value=False, + interactive=True + ) + with gr.Column(): + gr.Markdown("Set it to True if you only need style editing or want to keep the layout.") + + with gr.Accordion("Advanced Options", open=True): + with gr.Row(): + num_steps = gr.Slider( + 1, 50, 25, step=1, label="Number of steps" + ) + guidance = gr.Slider( + 1.0, 5.0, 4.0, step=0.1, label="Guidance", interactive=True + ) + content_long_size = gr.Slider( + 0, 1024, 512, step=16, label="Content reference size" + ) + seed = gr.Number(-1, label="Seed (-1 for random)") + + generate_btn = gr.Button("Generate") + gr.Markdown(star) + + with gr.Column(): + output_image = gr.Image(label="Generated Image") + download_btn = gr.File( + label="Download full-resolution", type="filepath", interactive=False + ) + gr.Markdown( + """ + ### ❗️ Important Usage Tips: + - **Input Prompt**: Unless you only need Style Editing ("Transform the style into..."), use natural language ("A dog/man/woman is doing...") instead of instruction descriptions of subject, identity, or style. 
+ - **Input Content Image**: For portrait-preserving generation, USO excels at producing images with high skin detail. A practical guideline: use half-body close-ups when your prompt specifies a half-body subject, and full-body images—especially when the pose changes significantly. + """ + ) + + inputs = [ + prompt, + image_prompt1, + image_prompt2, + image_prompt3, + seed, + width, + height, + guidance, + num_steps, + keep_size, + content_long_size, + ] + generate_btn.click( + fn=pipeline.gradio_generate, + inputs=inputs, + outputs=[output_image, download_btn], + ) + + # example_text = gr.Text("", visible=False, label="Case For:") + examples = get_examples("./assets/gradio_examples") + + gr.Examples( + examples=examples, + inputs=[ + prompt, + image_prompt1, + image_prompt2, + image_prompt3, + seed, + ], + # cache_examples='lazy', + outputs=[output_image, download_btn], + fn=pipeline.gradio_generate, + label='row 1-4: identity/subject-driven; row 5-7: style-subject-driven; row 8-9: style-driven; row 10-12: multi-style-driven task; row 13: txt2img', + examples_per_page=15 + ) + + with gr.Accordion("Local Gradio Demo for Developers", open=False): + gr.Markdown( + 'Please refer to our GitHub repository to [run the USO gradio demo locally](https://github.com/bytedance/USO?tab=readme-ov-file#-gradio-demo).' + ) + return demo + + +if __name__ == "__main__": + from typing import Literal + + from transformers import HfArgumentParser + + @dataclasses.dataclass + class AppArgs: + name: Literal["flux-dev", "flux-dev-fp8", "flux-schnell", "flux-krea-dev"] = "flux-dev" + device: Literal["cuda", "cpu", "mps"] = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu") + offload: bool = dataclasses.field( + default=False, + metadata={ + "help": "If True, sequantial offload the models(ae, dit, text encoder) to CPU if not used." 
+ }, + ) + port: int = 7860 + + parser = HfArgumentParser([AppArgs]) + args_tuple = parser.parse_args_into_dataclasses() # type: tuple[AppArgs] + args = args_tuple[0] + + demo = create_demo(args.name, args.device, args.offload) + demo.launch(server_port=args.port) diff --git a/assets/comfyui_template.png b/assets/comfyui_template.png new file mode 100644 index 0000000000000000000000000000000000000000..1dba88076b2ff692c96745b75aeed37ed9d1b2ec --- /dev/null +++ b/assets/comfyui_template.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77bcc55c997e6f08ddba58f94426152bef87a9b15fa3148194d1c6c401278430 +size 759491 diff --git a/assets/gradio_examples/1identity/config.json b/assets/gradio_examples/1identity/config.json new file mode 100644 index 0000000000000000000000000000000000000000..be3789e3a29e7e4156440a397fa30a265fd5fc42 --- /dev/null +++ b/assets/gradio_examples/1identity/config.json @@ -0,0 +1,6 @@ +{ + "prompt": "The girl is riding a bike in a street.", + "seed": 3407, + "usage": "Identity-driven", + "image_ref1": "./ref.webp" +} \ No newline at end of file diff --git a/assets/gradio_examples/1identity/ref.webp b/assets/gradio_examples/1identity/ref.webp new file mode 100644 index 0000000000000000000000000000000000000000..8be6810a25451b3c5a7a7ee335f3963daaa091eb Binary files /dev/null and b/assets/gradio_examples/1identity/ref.webp differ diff --git a/assets/gradio_examples/2identity/config.json b/assets/gradio_examples/2identity/config.json new file mode 100644 index 0000000000000000000000000000000000000000..5816a5f2b6d590c851698d90aa9ba93da9fc87d6 --- /dev/null +++ b/assets/gradio_examples/2identity/config.json @@ -0,0 +1,6 @@ +{ + "prompt": "The man in flower shops carefully match bouquets, conveying beautiful emotions and blessings with flowers.", + "seed": 3407, + "usage": "Identity-driven", + "image_ref1": "./ref.jpg" +} \ No newline at end of file diff --git a/assets/gradio_examples/2identity/ref.jpg b/assets/gradio_examples/2identity/ref.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a15ae380e5e9224637c70f79cb72f37a91166b6 Binary files /dev/null and b/assets/gradio_examples/2identity/ref.jpg differ diff --git a/assets/gradio_examples/3identity/config.json b/assets/gradio_examples/3identity/config.json new file mode 100644 index 0000000000000000000000000000000000000000..94497918462a8c0e09bc80eb3d9549e89ba6e7d8 --- /dev/null +++ b/assets/gradio_examples/3identity/config.json @@ -0,0 +1,6 @@ +{ + "prompt": "Transform the image into Ghibli style.", + "seed": 3407, + "usage": "Identity-driven", + "image_ref1": "./ref.webp" +} \ No newline at end of file diff --git a/assets/gradio_examples/3identity/ref.webp b/assets/gradio_examples/3identity/ref.webp new file mode 100644 index 0000000000000000000000000000000000000000..a98cb69d271c63d7c76fab77d5fb4eefad73180b Binary files /dev/null and b/assets/gradio_examples/3identity/ref.webp differ diff --git a/assets/gradio_examples/4subject/config.json b/assets/gradio_examples/4subject/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2f470599cdd9382e12bded66467a33c6c4329197 --- /dev/null +++ b/assets/gradio_examples/4subject/config.json @@ -0,0 +1,6 @@ +{ + "prompt": "Wool felt style, a clock in the jungle.", + "seed": 3407, + "usage": "Subject-driven", + "image_ref1": "./ref.jpg" +} \ No newline at end of file diff --git a/assets/gradio_examples/4subject/ref.jpg b/assets/gradio_examples/4subject/ref.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..38615fba19fda54eec4df211606e95ec89cbae4b --- /dev/null +++ b/assets/gradio_examples/4subject/ref.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e1eb6ca2c944f3bfaed3ace56f5f186ed073a477e0333e0237253d98f0c9267 +size 139451 diff --git a/assets/gradio_examples/5style_subject/config.json b/assets/gradio_examples/5style_subject/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6dc3da8233e6a6b5f5a9192fb65227352d2c5cdd --- /dev/null +++ b/assets/gradio_examples/5style_subject/config.json @@ -0,0 +1,7 @@ +{ + "prompt": "", + "seed": 321, + "usage": "Style-subject-driven (layout-preserved)", + "image_ref1": "./ref1.webp", + "image_ref2": "./ref2.webp" +} \ No newline at end of file diff --git a/assets/gradio_examples/5style_subject/ref1.webp b/assets/gradio_examples/5style_subject/ref1.webp new file mode 100644 index 0000000000000000000000000000000000000000..a98cb69d271c63d7c76fab77d5fb4eefad73180b Binary files /dev/null and b/assets/gradio_examples/5style_subject/ref1.webp differ diff --git a/assets/gradio_examples/5style_subject/ref2.webp b/assets/gradio_examples/5style_subject/ref2.webp new file mode 100644 index 0000000000000000000000000000000000000000..2ba23c19ff9a26d2676f8a2006feb7c9d13fca1b Binary files /dev/null and b/assets/gradio_examples/5style_subject/ref2.webp differ diff --git a/assets/gradio_examples/6style_subject/config.json b/assets/gradio_examples/6style_subject/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ef542fde59e71687ee9e536649b8481a880d57a8 --- /dev/null +++ b/assets/gradio_examples/6style_subject/config.json @@ -0,0 +1,7 @@ +{ + "prompt": "The woman gave an impassioned speech on the podium.", + "seed": 321, + "usage": "Style-subject-driven (layout-shifted)", + "image_ref1": "./ref1.webp", + "image_ref2": "./ref2.webp" +} \ No newline at end of file diff --git a/assets/gradio_examples/6style_subject/ref1.webp b/assets/gradio_examples/6style_subject/ref1.webp new file mode 100644 index 0000000000000000000000000000000000000000..a98cb69d271c63d7c76fab77d5fb4eefad73180b Binary files /dev/null and b/assets/gradio_examples/6style_subject/ref1.webp differ diff --git a/assets/gradio_examples/6style_subject/ref2.webp b/assets/gradio_examples/6style_subject/ref2.webp new file mode 100644 index 0000000000000000000000000000000000000000..d11c47b4ac627008c0ad523ddba48956cab0409d Binary files /dev/null and b/assets/gradio_examples/6style_subject/ref2.webp differ diff --git a/assets/gradio_examples/7style_subject/config.json b/assets/gradio_examples/7style_subject/config.json new file mode 100644 index 0000000000000000000000000000000000000000..0800b99f1a83125ce5e71792e71ae9ee3dd126b6 --- /dev/null +++ b/assets/gradio_examples/7style_subject/config.json @@ -0,0 +1,7 @@ +{ + "prompt": "The man gave an impassioned speech on the podium. 
", + "seed": 42, + "usage": "Style-subject-driven (layout-shifted)", + "image_ref1": "./ref1.webp", + "image_ref2": "./ref2.webp" +} \ No newline at end of file diff --git a/assets/gradio_examples/7style_subject/ref1.webp b/assets/gradio_examples/7style_subject/ref1.webp new file mode 100644 index 0000000000000000000000000000000000000000..2bce7c4a787bafdfea93f02413b3d66d4a681bf5 Binary files /dev/null and b/assets/gradio_examples/7style_subject/ref1.webp differ diff --git a/assets/gradio_examples/7style_subject/ref2.webp b/assets/gradio_examples/7style_subject/ref2.webp new file mode 100644 index 0000000000000000000000000000000000000000..fd8a2320f0877255c50c807001ebfde8dfd03621 Binary files /dev/null and b/assets/gradio_examples/7style_subject/ref2.webp differ diff --git a/assets/gradio_examples/8style/config.json b/assets/gradio_examples/8style/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9303fd6dfd485517ff34607b0058dea9cbec2922 --- /dev/null +++ b/assets/gradio_examples/8style/config.json @@ -0,0 +1,6 @@ +{ + "prompt": "A cat sleeping on a chair.", + "seed": 3407, + "usage": "Style-driven", + "image_ref2": "./ref.webp" +} \ No newline at end of file diff --git a/assets/gradio_examples/8style/ref.webp b/assets/gradio_examples/8style/ref.webp new file mode 100644 index 0000000000000000000000000000000000000000..af2ea165d056b7a5e73f046d03d3558b6e55ec29 Binary files /dev/null and b/assets/gradio_examples/8style/ref.webp differ diff --git a/assets/gradio_examples/9style/config.json b/assets/gradio_examples/9style/config.json new file mode 100644 index 0000000000000000000000000000000000000000..172585f3bae2f7e73d25dea3393f46435123f95e --- /dev/null +++ b/assets/gradio_examples/9style/config.json @@ -0,0 +1,6 @@ +{ + "prompt": "A beautiful woman.", + "seed": 3407, + "usage": "Style-driven", + "image_ref2": "./ref.webp" +} \ No newline at end of file diff --git a/assets/gradio_examples/9style/ref.webp b/assets/gradio_examples/9style/ref.webp new file mode 100644 index 0000000000000000000000000000000000000000..33ffb250b33e838e81de3f500a267df304136c6c Binary files /dev/null and b/assets/gradio_examples/9style/ref.webp differ diff --git a/assets/gradio_examples/identity1.jpg b/assets/gradio_examples/identity1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a15ae380e5e9224637c70f79cb72f37a91166b6 Binary files /dev/null and b/assets/gradio_examples/identity1.jpg differ diff --git a/assets/gradio_examples/identity1_result.png b/assets/gradio_examples/identity1_result.png new file mode 100644 index 0000000000000000000000000000000000000000..e4d895db3f534950cbd804e594f450bf55f6507c --- /dev/null +++ b/assets/gradio_examples/identity1_result.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7684256e44ce1bd4ada1e77a12674432eddd95b07fb388673899139afc56d864 +size 1538828 diff --git a/assets/gradio_examples/identity2.webp b/assets/gradio_examples/identity2.webp new file mode 100644 index 0000000000000000000000000000000000000000..a98cb69d271c63d7c76fab77d5fb4eefad73180b Binary files /dev/null and b/assets/gradio_examples/identity2.webp differ diff --git a/assets/gradio_examples/identity2_style2_result.webp b/assets/gradio_examples/identity2_style2_result.webp new file mode 100644 index 0000000000000000000000000000000000000000..c1945870fdda7ffa691f397dd9a9110a0babf79e Binary files /dev/null and b/assets/gradio_examples/identity2_style2_result.webp differ diff --git a/assets/gradio_examples/style1.webp 
b/assets/gradio_examples/style1.webp new file mode 100644 index 0000000000000000000000000000000000000000..af2ea165d056b7a5e73f046d03d3558b6e55ec29 Binary files /dev/null and b/assets/gradio_examples/style1.webp differ diff --git a/assets/gradio_examples/style1_result.webp b/assets/gradio_examples/style1_result.webp new file mode 100644 index 0000000000000000000000000000000000000000..740234c7625298738a8675efc6330e3392227fed Binary files /dev/null and b/assets/gradio_examples/style1_result.webp differ diff --git a/assets/gradio_examples/style2.webp b/assets/gradio_examples/style2.webp new file mode 100644 index 0000000000000000000000000000000000000000..d11c47b4ac627008c0ad523ddba48956cab0409d Binary files /dev/null and b/assets/gradio_examples/style2.webp differ diff --git a/assets/gradio_examples/style3.webp b/assets/gradio_examples/style3.webp new file mode 100644 index 0000000000000000000000000000000000000000..1c02f7fe712a295f858a666f211d994cecaa7ac1 --- /dev/null +++ b/assets/gradio_examples/style3.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d272a0ecb03126503446b00a2152deab2045f89ac2c01f948e1099589d2862 +size 141886 diff --git a/assets/gradio_examples/style3_style4_result.webp b/assets/gradio_examples/style3_style4_result.webp new file mode 100644 index 0000000000000000000000000000000000000000..2bc1bfc2258d5193a300c560563b3b21eaa434d4 --- /dev/null +++ b/assets/gradio_examples/style3_style4_result.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d09a5e429cc1d059aecd041e061868cd8e5b59f4718bb0f926fd84364f3794b0 +size 172716 diff --git a/assets/gradio_examples/style4.webp b/assets/gradio_examples/style4.webp new file mode 100644 index 0000000000000000000000000000000000000000..e99715757eb80c277f42a4f5295251c30f1af45f --- /dev/null +++ b/assets/gradio_examples/style4.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1ce04559726509672ce859d617a08d8dff8b2fe28f503fecbca7a5f66082882 +size 290260 diff --git a/assets/gradio_examples/z1_mix_style/config.json b/assets/gradio_examples/z1_mix_style/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8c9aa236dc8c7bac8dd441aa9a86c786824a9d17 --- /dev/null +++ b/assets/gradio_examples/z1_mix_style/config.json @@ -0,0 +1,7 @@ +{ + "prompt": "A man.", + "seed": 321, + "usage": "Multi-style-driven", + "image_ref2": "./ref1.webp", + "image_ref3": "./ref2.webp" +} \ No newline at end of file diff --git a/assets/gradio_examples/z1_mix_style/ref1.webp b/assets/gradio_examples/z1_mix_style/ref1.webp new file mode 100644 index 0000000000000000000000000000000000000000..1c02f7fe712a295f858a666f211d994cecaa7ac1 --- /dev/null +++ b/assets/gradio_examples/z1_mix_style/ref1.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d272a0ecb03126503446b00a2152deab2045f89ac2c01f948e1099589d2862 +size 141886 diff --git a/assets/gradio_examples/z1_mix_style/ref2.webp b/assets/gradio_examples/z1_mix_style/ref2.webp new file mode 100644 index 0000000000000000000000000000000000000000..e99715757eb80c277f42a4f5295251c30f1af45f --- /dev/null +++ b/assets/gradio_examples/z1_mix_style/ref2.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1ce04559726509672ce859d617a08d8dff8b2fe28f503fecbca7a5f66082882 +size 290260 diff --git a/assets/gradio_examples/z2_mix_style/config.json b/assets/gradio_examples/z2_mix_style/config.json new file mode 100644 index 
0000000000000000000000000000000000000000..db3579ae23a361a3e50cd4d767ef5f9426952bb6 --- /dev/null +++ b/assets/gradio_examples/z2_mix_style/config.json @@ -0,0 +1,7 @@ +{ + "prompt": "Boat on water.", + "seed": 321, + "usage": "Multi-style-driven", + "image_ref2": "./ref1.png", + "image_ref3": "./ref2.png" +} \ No newline at end of file diff --git a/assets/gradio_examples/z2_mix_style/ref1.png b/assets/gradio_examples/z2_mix_style/ref1.png new file mode 100644 index 0000000000000000000000000000000000000000..ee8fa808763bd0c4243d30ff4b21aac3ec7078bb Binary files /dev/null and b/assets/gradio_examples/z2_mix_style/ref1.png differ diff --git a/assets/gradio_examples/z2_mix_style/ref2.png b/assets/gradio_examples/z2_mix_style/ref2.png new file mode 100644 index 0000000000000000000000000000000000000000..61cddc31abd3060be7cbc407a990bb34840af04f Binary files /dev/null and b/assets/gradio_examples/z2_mix_style/ref2.png differ diff --git a/assets/gradio_examples/z3_mix_style/config.json b/assets/gradio_examples/z3_mix_style/config.json new file mode 100644 index 0000000000000000000000000000000000000000..0dc589bd42af3c1569fb22ed709f2a4a5352e72e --- /dev/null +++ b/assets/gradio_examples/z3_mix_style/config.json @@ -0,0 +1,8 @@ +{ + "prompt": "", + "seed": 321, + "usage": "Multi-style-driven", + "image_ref1": "./ref1.jpg", + "image_ref2": "./ref2.png", + "image_ref3": "./ref3.png" +} \ No newline at end of file diff --git a/assets/gradio_examples/z3_mix_style/ref1.jpg b/assets/gradio_examples/z3_mix_style/ref1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8212cc15f419d27e4e7028873465cb6783f450c3 --- /dev/null +++ b/assets/gradio_examples/z3_mix_style/ref1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b8d8b0e22c91297ed23fabf03b0a3574a717efe8e578d6a9d51f5367b7fb0ee +size 166811 diff --git a/assets/gradio_examples/z3_mix_style/ref2.png b/assets/gradio_examples/z3_mix_style/ref2.png new file mode 100644 index 0000000000000000000000000000000000000000..ee8fa808763bd0c4243d30ff4b21aac3ec7078bb Binary files /dev/null and b/assets/gradio_examples/z3_mix_style/ref2.png differ diff --git a/assets/gradio_examples/z3_mix_style/ref3.png b/assets/gradio_examples/z3_mix_style/ref3.png new file mode 100644 index 0000000000000000000000000000000000000000..61cddc31abd3060be7cbc407a990bb34840af04f Binary files /dev/null and b/assets/gradio_examples/z3_mix_style/ref3.png differ diff --git a/assets/gradio_examples/z4_t2i/config.json b/assets/gradio_examples/z4_t2i/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2f33f4bb9c953daacd31cbcb025abb237af9e7c1 --- /dev/null +++ b/assets/gradio_examples/z4_t2i/config.json @@ -0,0 +1,5 @@ +{ + "prompt": "A beautiful woman.", + "seed": 0, + "usage": "Text-to-image" +} \ No newline at end of file diff --git a/assets/show_case1.webp b/assets/show_case1.webp new file mode 100644 index 0000000000000000000000000000000000000000..f723b4f94435d498a291bd10a19aba00b2be435d --- /dev/null +++ b/assets/show_case1.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61676ad26a250e7bd17d7b267d8f91c1b166fabb9893da6ec0e3fac85482aa78 +size 197138 diff --git a/assets/show_case2.webp b/assets/show_case2.webp new file mode 100644 index 0000000000000000000000000000000000000000..56c9a1aaf956e700f2bafdf6d6506c49da9c9cd9 --- /dev/null +++ b/assets/show_case2.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4954c6dd760437322691f56aef532d1a1a1a9f2ebfafb1050372023d5195c849 
+size 203620 diff --git a/assets/show_case3.webp b/assets/show_case3.webp new file mode 100644 index 0000000000000000000000000000000000000000..5cfcced0bd97e9b100e447fc71b941f5da47a7ba --- /dev/null +++ b/assets/show_case3.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:246e67a7a2e57adf4df2c2e67764fe4c39ab412866f70d1d00ba0560850d48f8 +size 205538 diff --git a/assets/show_case4.webp b/assets/show_case4.webp new file mode 100644 index 0000000000000000000000000000000000000000..830ee57782b0cb555b464ef43feadca4196516a9 --- /dev/null +++ b/assets/show_case4.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4691d923d976e571d08ec395afda2b5eebfb3b656c20f30080c702f2c60dfa94 +size 215218 diff --git a/assets/show_case5.webp b/assets/show_case5.webp new file mode 100644 index 0000000000000000000000000000000000000000..b41187c30e040dc1120d9a39159eec3e8fba473b --- /dev/null +++ b/assets/show_case5.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8547e2406bb252d7d939cba95b596026926259c8cbf6d4dd11240977e892cb88 +size 480052 diff --git a/assets/show_case6.webp b/assets/show_case6.webp new file mode 100644 index 0000000000000000000000000000000000000000..8b1f2b9049c641ed1922edeb1ab095d49915d930 --- /dev/null +++ b/assets/show_case6.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d28ca01ecb7d065a628ad029bf29eace73bd8345adc33931dd374f0955ef7ee3 +size 828964 diff --git a/assets/show_case7.webp b/assets/show_case7.webp new file mode 100644 index 0000000000000000000000000000000000000000..e2240ad8d922896619f3dc28417973b2afd9a8a9 --- /dev/null +++ b/assets/show_case7.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c58ff3e1d9e352ddedb2f4f6577ff4aafdd54c83605baddf5a2d8c1531b8a20b +size 620274 diff --git a/assets/show_case8.webp b/assets/show_case8.webp new file mode 100644 index 0000000000000000000000000000000000000000..1577d6ab29affa859eacf90918ceaa741094de77 --- /dev/null +++ b/assets/show_case8.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13c8f71b8fea807f4717d5068452bac3bce93b758c32fcf00997a9e5a19500fb +size 212244 diff --git a/assets/teaser.webp b/assets/teaser.webp new file mode 100644 index 0000000000000000000000000000000000000000..d6916918b54307276b3fc8a5bc269c62dd7f2989 --- /dev/null +++ b/assets/teaser.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:543c724f6b929303046ae481672567fe4a9620f0af5ca1dfff215dc7a2cbff5f +size 1674736 diff --git a/assets/uso.webp b/assets/uso.webp new file mode 100644 index 0000000000000000000000000000000000000000..dd1c587f011868aa5e534ea1ac4ffc3b7581e296 Binary files /dev/null and b/assets/uso.webp differ diff --git a/assets/uso_logo.svg b/assets/uso_logo.svg new file mode 100644 index 0000000000000000000000000000000000000000..dc91b85132492bc6c054b5184f24c236656a7569 --- /dev/null +++ b/assets/uso_logo.svg @@ -0,0 +1,880 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/assets/uso_text.svg b/assets/uso_text.svg new file mode 100644 index 0000000000000000000000000000000000000000..86153554bc20737c394cac6d50ee58d9f277572e --- /dev/null +++ b/assets/uso_text.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/usoxcomfyui.webp b/assets/usoxcomfyui.webp new file mode 100644 index 0000000000000000000000000000000000000000..4f54add976b3fb36f82993099e3d9b59f0fa2761 --- /dev/null +++ b/assets/usoxcomfyui.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fdf50b30bc8bfbbe1ef5c70ddcdeef5c289d776095155a13a2d65a786a64c8c +size 569622 diff --git a/assets/usoxcomfyui_official.jpeg b/assets/usoxcomfyui_official.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..f67ac50b34454b353b2cdd54f8b2f2f575ca8610 --- /dev/null +++ b/assets/usoxcomfyui_official.jpeg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52707bf371a9cab7df3a50485654299d82dad01d02f7168080aa94362cee5d44 +size 1005335 diff --git a/example.env b/example.env new file mode 100644 index 0000000000000000000000000000000000000000..473c40c6d06fa1e0784d11fcbe15f40426e7fdc9 --- /dev/null +++ b/example.env @@ -0,0 +1,23 @@ +# Hugging face token goes here: +HF_TOKEN=your_huggingface_token_here + +# Core Flux weights +FLUX_DEV=./weights/FLUX.1-dev/flux1-dev.safetensors +FLUX_DEV_FP8=./weights/FLUX.1-dev/flux1-dev.safetensors +AE=./weights/FLUX.1-dev/ae.safetensors + +# Text + vision encoders +T5=./weights/t5-xxl +CLIP=./weights/clip-vit-l14 +LORA=./weights/USO/uso_flux_v1.0/dit_lora.safetensors + +# USO LoRA + projector +PROJECTION_MODEL=./weights/USO/uso_flux_v1.0/projector.safetensors +SIGLIP_PATH=./weights/siglip + + +# ------------------------------- +# Optional: Flux-Krea variant +# ------------------------------- +# FLUX_DEV=./weights/FLUX.1-Krea-dev/flux1-krea-dev.safetensors +# FLUX_DEV_FP8=./weights/FLUX.1-Krea-dev/flux1-krea-dev.safetensors \ No newline at end of file diff --git a/inference.py b/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..baff4f7701ba8aaf21a66b9775e7a607d4b8c34f --- /dev/null +++ b/inference.py @@ -0,0 +1,194 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import dataclasses +from typing import Literal + +from dotenv import load_dotenv +load_dotenv() + + +from accelerate import Accelerator +from transformers import HfArgumentParser +from PIL import Image +import json +import itertools +import torch + +from uso.flux.pipeline import USOPipeline, preprocess_ref +from transformers import SiglipVisionModel, SiglipImageProcessor +from tqdm import tqdm + + +def horizontal_concat(images): + widths, heights = zip(*(img.size for img in images)) + + total_width = sum(widths) + max_height = max(heights) + + new_im = Image.new("RGB", (total_width, max_height)) + + x_offset = 0 + for img in images: + new_im.paste(img, (x_offset, 0)) + x_offset += img.size[0] + + return new_im + + +@dataclasses.dataclass +class InferenceArgs: + prompt: str | None = None + image_paths: list[str] | None = None + eval_json_path: str | None = None + offload: bool = False + num_images_per_prompt: int = 1 + model_type: Literal["flux-dev", "flux-dev-fp8", "flux-schnell"] = "flux-dev" + width: int = 1024 + height: int = 1024 + num_steps: int = 25 + guidance: float = 4 + seed: int = 3407 + save_path: str = "output/inference" + only_lora: bool = True + concat_refs: bool = False + lora_rank: int = 128 + pe: Literal["d", "h", "w", "o"] = "d" + content_ref: int = 512 + ckpt_path: str | None = None + use_siglip: bool = True + instruct_edit: bool = False + hf_download: bool = False # set to false, we must not auto download the weights (゜-゜) + + +def main(args: InferenceArgs): + accelerator = Accelerator() + + # init SigLIP model + siglip_processor = None + siglip_model = None + if args.use_siglip: + + # ⚠️ Weights now load from local paths via .env instead of downloading + siglip_path = os.getenv("SIGLIP_PATH", "google/siglip-so400m-patch14-384") + siglip_processor = SiglipImageProcessor.from_pretrained(siglip_path) + siglip_model = SiglipVisionModel.from_pretrained(siglip_path) + + siglip_model.eval() + siglip_model.to(accelerator.device) + print("SigLIP model loaded successfully") + + pipeline = USOPipeline( + args.model_type, + accelerator.device, + args.offload, + only_lora=args.only_lora, + lora_rank=args.lora_rank, + hf_download=args.hf_download, + ) + if args.use_siglip and siglip_model is not None: + pipeline.model.vision_encoder = siglip_model + + assert ( + args.prompt is not None or args.eval_json_path is not None + ), "Please provide either prompt or eval_json_path" + + if args.eval_json_path is not None: + with open(args.eval_json_path, "rt") as f: + data_dicts = json.load(f) + data_root = os.path.dirname(args.eval_json_path) + else: + data_root = "" + data_dicts = [{"prompt": args.prompt, "image_paths": args.image_paths}] + + print( + f"process: {accelerator.num_processes}/{accelerator.process_index}, \ + process images: {len(data_dicts)}/{len(data_dicts[accelerator.process_index::accelerator.num_processes])}" + ) + + data_dicts = data_dicts[accelerator.process_index :: accelerator.num_processes] + + 
accelerator.wait_for_everyone() + local_task_count = len(data_dicts) * args.num_images_per_prompt + if accelerator.is_main_process: + progress_bar = tqdm(total=local_task_count, desc="Generating Images") + + for (i, data_dict), j in itertools.product( + enumerate(data_dicts), range(args.num_images_per_prompt) + ): + ref_imgs = [] + for _, img_path in enumerate(data_dict["image_paths"]): + if img_path != "": + img = Image.open(os.path.join(data_root, img_path)).convert("RGB") + ref_imgs.append(img) + else: + ref_imgs.append(None) + siglip_inputs = None + if args.use_siglip and siglip_processor is not None: + with torch.no_grad(): + siglip_inputs = [ + siglip_processor(img, return_tensors="pt").to(pipeline.device) + for img in ref_imgs[1:] if isinstance(img, Image.Image) + ] + + ref_imgs_pil = [ + preprocess_ref(img, args.content_ref) for img in ref_imgs[:1] if isinstance(img, Image.Image) + ] + + if args.instruct_edit: + args.width, args.height = ref_imgs_pil[0].size + args.width, args.height = args.width * (1024 / args.content_ref), args.height * (1024 / args.content_ref) + image_gen = pipeline( + prompt=data_dict["prompt"], + width=args.width, + height=args.height, + guidance=args.guidance, + num_steps=args.num_steps, + seed=args.seed + j, + ref_imgs=ref_imgs_pil, + pe=args.pe, + siglip_inputs=siglip_inputs, + ) + if args.concat_refs: + image_gen = horizontal_concat([image_gen, *ref_imgs]) + + if "save_dir" in data_dict: + config_save_path = os.path.join(args.save_path, data_dict["save_dir"] + f"_{j}.json") + image_save_path = os.path.join(args.save_path, data_dict["save_dir"] + f"_{j}.png") + else: + os.makedirs(args.save_path, exist_ok=True) + config_save_path = os.path.join(args.save_path, f"{i}_{j}.json") + image_save_path = os.path.join(args.save_path, f"{i}_{j}.png") + + # save config and image + os.makedirs(os.path.dirname(image_save_path), exist_ok=True) + image_gen.save(image_save_path) + # ensure the prompt and image_paths are saved in the config file + args.prompt = data_dict["prompt"] + args.image_paths = data_dict["image_paths"] + args_dict = vars(args) + with open(config_save_path, "w") as f: + json.dump(args_dict, f, indent=4) + + if accelerator.is_main_process: + progress_bar.update(1) + if accelerator.is_main_process: + progress_bar.close() + + +if __name__ == "__main__": + parser = HfArgumentParser([InferenceArgs]) + args = parser.parse_args_into_dataclasses()[0] + main(args) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..aade7edaf0c91567005c7b90ea5acdffd5f5f2e4 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,20 @@ +accelerate==1.1.1 +deepspeed==0.14.4 +einops==0.8.0 +transformers==4.43.3 +huggingface-hub +diffusers==0.30.1 +sentencepiece==0.2.0 +gradio==5.22.0 +opencv-python +matplotlib +safetensors==0.4.5 +scipy==1.10.1 +numpy==1.24.4 +onnxruntime +# httpx==0.23.3 +git+https://github.com/openai/CLIP.git +# --extra-index-url https://download.pytorch.org/whl/cu124 +# torch==2.4.0 +# torchvision==0.19.0 +python-dotenv \ No newline at end of file diff --git a/uso/flux/math.py b/uso/flux/math.py new file mode 100644 index 0000000000000000000000000000000000000000..2461437371d22a60eab7df4b5f5cb371dd692fe9 --- /dev/null +++ b/uso/flux/math.py @@ -0,0 +1,45 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. +# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from einops import rearrange +from torch import Tensor + + +def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor: + q, k = apply_rope(q, k, pe) + + x = torch.nn.functional.scaled_dot_product_attention(q, k, v) + x = rearrange(x, "B H L D -> B L (H D)") + + return x + + +def rope(pos: Tensor, dim: int, theta: int) -> Tensor: + assert dim % 2 == 0 + scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim + omega = 1.0 / (theta**scale) + out = torch.einsum("...n,d->...nd", pos, omega) + out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1) + out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2) + return out.float() + + +def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]: + xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2) + xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2) + xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1] + xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1] + return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk) diff --git a/uso/flux/model.py b/uso/flux/model.py new file mode 100644 index 0000000000000000000000000000000000000000..43d4c80227b80a50585df4afcd21d1449ca3f61d --- /dev/null +++ b/uso/flux/model.py @@ -0,0 +1,258 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. +# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +import torch +from torch import Tensor, nn + +from .modules.layers import ( + DoubleStreamBlock, + EmbedND, + LastLayer, + MLPEmbedder, + SingleStreamBlock, + timestep_embedding, + SigLIPMultiFeatProjModel, +) +import os + + +@dataclass +class FluxParams: + in_channels: int + vec_in_dim: int + context_in_dim: int + hidden_size: int + mlp_ratio: float + num_heads: int + depth: int + depth_single_blocks: int + axes_dim: list[int] + theta: int + qkv_bias: bool + guidance_embed: bool + + +class Flux(nn.Module): + """ + Transformer model for flow matching on sequences. 
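+
+    In this model the text stream may be prepended with SigLIP style tokens
+    (produced by the feature_embedder) and the image stream may be extended
+    with packed reference-image tokens; all of them are attended jointly by
+    the double- and single-stream blocks.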
+ """ + + _supports_gradient_checkpointing = True + + def __init__(self, params: FluxParams): + super().__init__() + + self.params = params + self.in_channels = params.in_channels + self.out_channels = self.in_channels + if params.hidden_size % params.num_heads != 0: + raise ValueError( + f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}" + ) + pe_dim = params.hidden_size // params.num_heads + if sum(params.axes_dim) != pe_dim: + raise ValueError( + f"Got {params.axes_dim} but expected positional dim {pe_dim}" + ) + self.hidden_size = params.hidden_size + self.num_heads = params.num_heads + self.pe_embedder = EmbedND( + dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim + ) + self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True) + self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) + self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size) + self.guidance_in = ( + MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) + if params.guidance_embed + else nn.Identity() + ) + self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size) + + self.double_blocks = nn.ModuleList( + [ + DoubleStreamBlock( + self.hidden_size, + self.num_heads, + mlp_ratio=params.mlp_ratio, + qkv_bias=params.qkv_bias, + ) + for _ in range(params.depth) + ] + ) + + self.single_blocks = nn.ModuleList( + [ + SingleStreamBlock( + self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio + ) + for _ in range(params.depth_single_blocks) + ] + ) + + self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels) + self.gradient_checkpointing = False + + # feature embedder for siglip multi-feat inputs + self.feature_embedder = SigLIPMultiFeatProjModel( + siglip_token_nums=729, + style_token_nums=64, + siglip_token_dims=1152, + hidden_size=self.hidden_size, + context_layer_norm=True, + ) + print("use semantic encoder siglip multi-feat to encode style image") + + self.vision_encoder = None + + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + @property + def attn_processors(self): + # set recursively + processors = {} # type: dict[str, nn.Module] + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors): + if hasattr(module, "set_processor"): + processors[f"{name}.processor"] = module.processor + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + def set_attn_processor(self, processor): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
+ ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + def forward( + self, + img: Tensor, + img_ids: Tensor, + txt: Tensor, + txt_ids: Tensor, + timesteps: Tensor, + y: Tensor, + guidance: Tensor | None = None, + ref_img: Tensor | None = None, + ref_img_ids: Tensor | None = None, + siglip_inputs: list[Tensor] | None = None, + ) -> Tensor: + if img.ndim != 3 or txt.ndim != 3: + raise ValueError("Input img and txt tensors must have 3 dimensions.") + + # running on sequences img + img = self.img_in(img) + vec = self.time_in(timestep_embedding(timesteps, 256)) + if self.params.guidance_embed: + if guidance is None: + raise ValueError( + "Didn't get guidance strength for guidance distilled model." + ) + vec = vec + self.guidance_in(timestep_embedding(guidance, 256)) + vec = vec + self.vector_in(y) + txt = self.txt_in(txt) + if self.feature_embedder is not None and siglip_inputs is not None and len(siglip_inputs) > 0 and self.vision_encoder is not None: + # processing style feat into textural hidden space + siglip_embedding = [self.vision_encoder(**emb, output_hidden_states=True) for emb in siglip_inputs] + # siglip_embedding = [self.vision_encoder(**(emb.to(torch.bfloat16)), output_hidden_states=True) for emb in siglip_inputs] + siglip_embedding = torch.cat([self.feature_embedder(emb) for emb in siglip_embedding], dim=1) + txt = torch.cat((siglip_embedding, txt), dim=1) + siglip_embedding_ids = torch.zeros( + siglip_embedding.shape[0], siglip_embedding.shape[1], 3 + ).to(txt_ids.device) + txt_ids = torch.cat((siglip_embedding_ids, txt_ids), dim=1) + + ids = torch.cat((txt_ids, img_ids), dim=1) + + # concat ref_img/img + img_end = img.shape[1] + if ref_img is not None: + if isinstance(ref_img, tuple) or isinstance(ref_img, list): + img_in = [img] + [self.img_in(ref) for ref in ref_img] + img_ids = [ids] + [ref_ids for ref_ids in ref_img_ids] + img = torch.cat(img_in, dim=1) + ids = torch.cat(img_ids, dim=1) + else: + img = torch.cat((img, self.img_in(ref_img)), dim=1) + ids = torch.cat((ids, ref_img_ids), dim=1) + pe = self.pe_embedder(ids) + + for index_block, block in enumerate(self.double_blocks): + if self.training and self.gradient_checkpointing: + img, txt = torch.utils.checkpoint.checkpoint( + block, + img=img, + txt=txt, + vec=vec, + pe=pe, + use_reentrant=False, + ) + else: + img, txt = block(img=img, txt=txt, vec=vec, pe=pe) + + img = torch.cat((txt, img), 1) + for block in self.single_blocks: + if self.training and self.gradient_checkpointing: + img = torch.utils.checkpoint.checkpoint( + block, img, vec=vec, pe=pe, use_reentrant=False + ) + else: + img = block(img, vec=vec, pe=pe) + img = img[:, txt.shape[1] :, ...] + # index img + img = img[:, :img_end, ...] + + img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels) + return img diff --git a/uso/flux/modules/autoencoder.py b/uso/flux/modules/autoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..2543bdf4240e1db5b5dc958e148ac0cb12d9e9e3 --- /dev/null +++ b/uso/flux/modules/autoencoder.py @@ -0,0 +1,327 @@ +# Copyright (c) 2025 Bytedance Ltd. 
and/or its affiliates. All rights reserved. +# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +import torch +from einops import rearrange +from torch import Tensor, nn + + +@dataclass +class AutoEncoderParams: + resolution: int + in_channels: int + ch: int + out_ch: int + ch_mult: list[int] + num_res_blocks: int + z_channels: int + scale_factor: float + shift_factor: float + + +def swish(x: Tensor) -> Tensor: + return x * torch.sigmoid(x) + + +class AttnBlock(nn.Module): + def __init__(self, in_channels: int): + super().__init__() + self.in_channels = in_channels + + self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + + self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1) + self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1) + self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1) + self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1) + + def attention(self, h_: Tensor) -> Tensor: + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + b, c, h, w = q.shape + q = rearrange(q, "b c h w -> b 1 (h w) c").contiguous() + k = rearrange(k, "b c h w -> b 1 (h w) c").contiguous() + v = rearrange(v, "b c h w -> b 1 (h w) c").contiguous() + h_ = nn.functional.scaled_dot_product_attention(q, k, v) + + return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b) + + def forward(self, x: Tensor) -> Tensor: + return x + self.proj_out(self.attention(x)) + + +class ResnetBlock(nn.Module): + def __init__(self, in_channels: int, out_channels: int): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + + self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + self.norm2 = nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-6, affine=True) + self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) + if self.in_channels != self.out_channels: + self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, x): + h = x + h = self.norm1(h) + h = swish(h) + h = self.conv1(h) + + h = self.norm2(h) + h = swish(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + x = self.nin_shortcut(x) + + return x + h + + +class Downsample(nn.Module): + def __init__(self, in_channels: int): + super().__init__() + # no asymmetric padding in torch conv, must do it ourselves + self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) + + def forward(self, x: Tensor): + pad = (0, 1, 0, 1) + x = nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + return x + + +class Upsample(nn.Module): + def __init__(self, in_channels: int): + super().__init__() 
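+        # forward() upsamples by 2x with nearest-neighbour interpolation, then this 3x3 conv refines the result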
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + + def forward(self, x: Tensor): + x = nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + x = self.conv(x) + return x + + +class Encoder(nn.Module): + def __init__( + self, + resolution: int, + in_channels: int, + ch: int, + ch_mult: list[int], + num_res_blocks: int, + z_channels: int, + ): + super().__init__() + self.ch = ch + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + # downsampling + self.conv_in = nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) + + curr_res = resolution + in_ch_mult = (1,) + tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + block_in = self.ch + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch * in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] + for _ in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, out_channels=block_out)) + block_in = block_out + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = Downsample(block_in) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in) + self.mid.attn_1 = AttnBlock(block_in) + self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in) + + # end + self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True) + self.conv_out = nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1) + + def forward(self, x: Tensor) -> Tensor: + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1]) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions - 1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + # end + h = self.norm_out(h) + h = swish(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__( + self, + ch: int, + out_ch: int, + ch_mult: list[int], + num_res_blocks: int, + in_channels: int, + resolution: int, + z_channels: int, + ): + super().__init__() + self.ch = ch + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.ffactor = 2 ** (self.num_resolutions - 1) + + # compute in_ch_mult, block_in and curr_res at lowest res + block_in = ch * ch_mult[self.num_resolutions - 1] + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + + # z to block_in + self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in) + self.mid.attn_1 = AttnBlock(block_in) + self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch * ch_mult[i_level] + for _ in range(self.num_res_blocks + 1): 
+ block.append(ResnetBlock(in_channels=block_in, out_channels=block_out)) + block_in = block_out + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True) + self.conv_out = nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) + + def forward(self, z: Tensor) -> Tensor: + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + h = self.norm_out(h) + h = swish(h) + h = self.conv_out(h) + return h + + +class DiagonalGaussian(nn.Module): + def __init__(self, sample: bool = True, chunk_dim: int = 1): + super().__init__() + self.sample = sample + self.chunk_dim = chunk_dim + + def forward(self, z: Tensor) -> Tensor: + mean, logvar = torch.chunk(z, 2, dim=self.chunk_dim) + if self.sample: + std = torch.exp(0.5 * logvar) + return mean + std * torch.randn_like(mean) + else: + return mean + + +class AutoEncoder(nn.Module): + def __init__(self, params: AutoEncoderParams): + super().__init__() + self.encoder = Encoder( + resolution=params.resolution, + in_channels=params.in_channels, + ch=params.ch, + ch_mult=params.ch_mult, + num_res_blocks=params.num_res_blocks, + z_channels=params.z_channels, + ) + self.decoder = Decoder( + resolution=params.resolution, + in_channels=params.in_channels, + ch=params.ch, + out_ch=params.out_ch, + ch_mult=params.ch_mult, + num_res_blocks=params.num_res_blocks, + z_channels=params.z_channels, + ) + self.reg = DiagonalGaussian() + + self.scale_factor = params.scale_factor + self.shift_factor = params.shift_factor + + def encode(self, x: Tensor) -> Tensor: + z = self.reg(self.encoder(x)) + z = self.scale_factor * (z - self.shift_factor) + return z + + def decode(self, z: Tensor) -> Tensor: + z = z / self.scale_factor + self.shift_factor + return self.decoder(z) + + def forward(self, x: Tensor) -> Tensor: + return self.decode(self.encode(x)) diff --git a/uso/flux/modules/conditioner.py b/uso/flux/modules/conditioner.py new file mode 100644 index 0000000000000000000000000000000000000000..047950827b18f3577c7c43247392c6e0d9295f1f --- /dev/null +++ b/uso/flux/modules/conditioner.py @@ -0,0 +1,53 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. +# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
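+
+# HFEmbedder wraps either a CLIP text encoder (pooled output) or a T5 encoder
+# (last hidden state); the branch is chosen by whether "clip" appears in the
+# version string passed by the caller.
+# Illustrative use only (placeholder path): HFEmbedder("./weights/clip-vit-l14", max_length=77)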
+ +from torch import Tensor, nn +from transformers import (CLIPTextModel, CLIPTokenizer, T5EncoderModel, + T5Tokenizer) + + +class HFEmbedder(nn.Module): + def __init__(self, version: str, max_length: int, **hf_kwargs): + super().__init__() + self.is_clip = "clip" in version.lower() + self.max_length = max_length + self.output_key = "pooler_output" if self.is_clip else "last_hidden_state" + + if self.is_clip: + self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length) + self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs) + else: + self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length) + self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs) + + self.hf_module = self.hf_module.eval().requires_grad_(False) + + def forward(self, text: list[str]) -> Tensor: + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=False, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + + outputs = self.hf_module( + input_ids=batch_encoding["input_ids"].to(self.hf_module.device), + attention_mask=None, + output_hidden_states=False, + ) + return outputs[self.output_key] diff --git a/uso/flux/modules/layers.py b/uso/flux/modules/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..3bb8139ab78a88863ae822bac111815caf70c7e8 --- /dev/null +++ b/uso/flux/modules/layers.py @@ -0,0 +1,631 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. +# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass + +import torch +from einops import rearrange, repeat +from torch import Tensor, nn + +from ..math import attention, rope + + +class EmbedND(nn.Module): + def __init__(self, dim: int, theta: int, axes_dim: list[int]): + super().__init__() + self.dim = dim + self.theta = theta + self.axes_dim = axes_dim + + def forward(self, ids: Tensor) -> Tensor: + n_axes = ids.shape[-1] + emb = torch.cat( + [rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)], + dim=-3, + ) + + return emb.unsqueeze(1) + + +def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0): + """ + Create sinusoidal timestep embeddings. + :param t: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an (N, D) Tensor of positional embeddings. 
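+    Implementation note: with half = dim // 2, freqs[i] = exp(-log(max_period) * i / half),
+    and the output is cat(cos(t * time_factor * freqs), sin(t * time_factor * freqs), dim=-1).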
+ """ + t = time_factor * t + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) + * torch.arange(start=0, end=half, dtype=torch.float32) + / half + ).to(t.device) + + args = t[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + if torch.is_floating_point(t): + embedding = embedding.to(t) + return embedding + + +class MLPEmbedder(nn.Module): + def __init__(self, in_dim: int, hidden_dim: int): + super().__init__() + self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True) + self.silu = nn.SiLU() + self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True) + + def forward(self, x: Tensor) -> Tensor: + return self.out_layer(self.silu(self.in_layer(x))) + + +class RMSNorm(torch.nn.Module): + def __init__(self, dim: int): + super().__init__() + self.scale = nn.Parameter(torch.ones(dim)) + + def forward(self, x: Tensor): + x_dtype = x.dtype + x = x.float() + rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6) + return ((x * rrms) * self.scale.float()).to(dtype=x_dtype) + + +class QKNorm(torch.nn.Module): + def __init__(self, dim: int): + super().__init__() + self.query_norm = RMSNorm(dim) + self.key_norm = RMSNorm(dim) + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> tuple[Tensor, Tensor]: + q = self.query_norm(q) + k = self.key_norm(k) + return q.to(v), k.to(v) + + +class LoRALinearLayer(nn.Module): + def __init__( + self, + in_features, + out_features, + rank=4, + network_alpha=None, + device=None, + dtype=None, + ): + super().__init__() + + self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype) + self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype) + # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. 
+ # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning + self.network_alpha = network_alpha + self.rank = rank + + nn.init.normal_(self.down.weight, std=1 / rank) + nn.init.zeros_(self.up.weight) + + def forward(self, hidden_states): + orig_dtype = hidden_states.dtype + dtype = self.down.weight.dtype + + down_hidden_states = self.down(hidden_states.to(dtype)) + up_hidden_states = self.up(down_hidden_states) + + if self.network_alpha is not None: + up_hidden_states *= self.network_alpha / self.rank + + return up_hidden_states.to(orig_dtype) + + +class FLuxSelfAttnProcessor: + def __call__(self, attn, x, pe, **attention_kwargs): + qkv = attn.qkv(x) + q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads) + q, k = attn.norm(q, k, v) + x = attention(q, k, v, pe=pe) + x = attn.proj(x) + return x + + +class LoraFluxAttnProcessor(nn.Module): + + def __init__(self, dim: int, rank=4, network_alpha=None, lora_weight=1): + super().__init__() + self.qkv_lora = LoRALinearLayer(dim, dim * 3, rank, network_alpha) + self.proj_lora = LoRALinearLayer(dim, dim, rank, network_alpha) + self.lora_weight = lora_weight + + def __call__(self, attn, x, pe, **attention_kwargs): + qkv = attn.qkv(x) + self.qkv_lora(x) * self.lora_weight + q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads) + q, k = attn.norm(q, k, v) + x = attention(q, k, v, pe=pe) + x = attn.proj(x) + self.proj_lora(x) * self.lora_weight + return x + + +class SelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.norm = QKNorm(head_dim) + self.proj = nn.Linear(dim, dim) + + def forward(): + pass + + +@dataclass +class ModulationOut: + shift: Tensor + scale: Tensor + gate: Tensor + + +class Modulation(nn.Module): + def __init__(self, dim: int, double: bool): + super().__init__() + self.is_double = double + self.multiplier = 6 if double else 3 + self.lin = nn.Linear(dim, self.multiplier * dim, bias=True) + + def forward(self, vec: Tensor) -> tuple[ModulationOut, ModulationOut | None]: + out = self.lin(nn.functional.silu(vec))[:, None, :].chunk( + self.multiplier, dim=-1 + ) + + return ( + ModulationOut(*out[:3]), + ModulationOut(*out[3:]) if self.is_double else None, + ) + + +class DoubleStreamBlockLoraProcessor(nn.Module): + def __init__(self, dim: int, rank=4, network_alpha=None, lora_weight=1): + super().__init__() + self.qkv_lora1 = LoRALinearLayer(dim, dim * 3, rank, network_alpha) + self.proj_lora1 = LoRALinearLayer(dim, dim, rank, network_alpha) + self.qkv_lora2 = LoRALinearLayer(dim, dim * 3, rank, network_alpha) + self.proj_lora2 = LoRALinearLayer(dim, dim, rank, network_alpha) + self.lora_weight = lora_weight + + def forward(self, attn, img, txt, vec, pe, **attention_kwargs): + img_mod1, img_mod2 = attn.img_mod(vec) + txt_mod1, txt_mod2 = attn.txt_mod(vec) + + # prepare image for attention + img_modulated = attn.img_norm1(img) + img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift + img_qkv = ( + attn.img_attn.qkv(img_modulated) + + self.qkv_lora1(img_modulated) * self.lora_weight + ) + img_q, img_k, img_v = rearrange( + img_qkv, "B L (K H D) -> K B H L D", K=3, H=attn.num_heads + ) + img_q, img_k = attn.img_attn.norm(img_q, img_k, img_v) + + # prepare txt for attention + txt_modulated = attn.txt_norm1(txt) + txt_modulated = (1 + 
txt_mod1.scale) * txt_modulated + txt_mod1.shift + txt_qkv = ( + attn.txt_attn.qkv(txt_modulated) + + self.qkv_lora2(txt_modulated) * self.lora_weight + ) + txt_q, txt_k, txt_v = rearrange( + txt_qkv, "B L (K H D) -> K B H L D", K=3, H=attn.num_heads + ) + txt_q, txt_k = attn.txt_attn.norm(txt_q, txt_k, txt_v) + + # run actual attention + q = torch.cat((txt_q, img_q), dim=2) + k = torch.cat((txt_k, img_k), dim=2) + v = torch.cat((txt_v, img_v), dim=2) + + attn1 = attention(q, k, v, pe=pe) + txt_attn, img_attn = attn1[:, : txt.shape[1]], attn1[:, txt.shape[1] :] + + # calculate the img bloks + img = img + img_mod1.gate * ( + attn.img_attn.proj(img_attn) + self.proj_lora1(img_attn) * self.lora_weight + ) + img = img + img_mod2.gate * attn.img_mlp( + (1 + img_mod2.scale) * attn.img_norm2(img) + img_mod2.shift + ) + + # calculate the txt bloks + txt = txt + txt_mod1.gate * ( + attn.txt_attn.proj(txt_attn) + self.proj_lora2(txt_attn) * self.lora_weight + ) + txt = txt + txt_mod2.gate * attn.txt_mlp( + (1 + txt_mod2.scale) * attn.txt_norm2(txt) + txt_mod2.shift + ) + return img, txt + + +class DoubleStreamBlockProcessor: + def __call__(self, attn, img, txt, vec, pe, **attention_kwargs): + img_mod1, img_mod2 = attn.img_mod(vec) + txt_mod1, txt_mod2 = attn.txt_mod(vec) + + # prepare image for attention + img_modulated = attn.img_norm1(img) + img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift + img_qkv = attn.img_attn.qkv(img_modulated) + img_q, img_k, img_v = rearrange( + img_qkv, "B L (K H D) -> K B H L D", K=3, H=attn.num_heads, D=attn.head_dim + ) + img_q, img_k = attn.img_attn.norm(img_q, img_k, img_v) + + # prepare txt for attention + txt_modulated = attn.txt_norm1(txt) + txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift + txt_qkv = attn.txt_attn.qkv(txt_modulated) + txt_q, txt_k, txt_v = rearrange( + txt_qkv, "B L (K H D) -> K B H L D", K=3, H=attn.num_heads, D=attn.head_dim + ) + txt_q, txt_k = attn.txt_attn.norm(txt_q, txt_k, txt_v) + + # run actual attention + q = torch.cat((txt_q, img_q), dim=2) + k = torch.cat((txt_k, img_k), dim=2) + v = torch.cat((txt_v, img_v), dim=2) + + attn1 = attention(q, k, v, pe=pe) + txt_attn, img_attn = attn1[:, : txt.shape[1]], attn1[:, txt.shape[1] :] + + # calculate the img bloks + img = img + img_mod1.gate * attn.img_attn.proj(img_attn) + img = img + img_mod2.gate * attn.img_mlp( + (1 + img_mod2.scale) * attn.img_norm2(img) + img_mod2.shift + ) + + # calculate the txt bloks + txt = txt + txt_mod1.gate * attn.txt_attn.proj(txt_attn) + txt = txt + txt_mod2.gate * attn.txt_mlp( + (1 + txt_mod2.scale) * attn.txt_norm2(txt) + txt_mod2.shift + ) + return img, txt + + +class DoubleStreamBlock(nn.Module): + def __init__( + self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False + ): + super().__init__() + mlp_hidden_dim = int(hidden_size * mlp_ratio) + self.num_heads = num_heads + self.hidden_size = hidden_size + self.head_dim = hidden_size // num_heads + + self.img_mod = Modulation(hidden_size, double=True) + self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.img_attn = SelfAttention( + dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias + ) + + self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.img_mlp = nn.Sequential( + nn.Linear(hidden_size, mlp_hidden_dim, bias=True), + nn.GELU(approximate="tanh"), + nn.Linear(mlp_hidden_dim, hidden_size, bias=True), + ) + + self.txt_mod = Modulation(hidden_size, double=True) + 
self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.txt_attn = SelfAttention( + dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias + ) + + self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.txt_mlp = nn.Sequential( + nn.Linear(hidden_size, mlp_hidden_dim, bias=True), + nn.GELU(approximate="tanh"), + nn.Linear(mlp_hidden_dim, hidden_size, bias=True), + ) + processor = DoubleStreamBlockProcessor() + self.set_processor(processor) + + def set_processor(self, processor) -> None: + self.processor = processor + + def get_processor(self): + return self.processor + + def forward( + self, + img: Tensor, + txt: Tensor, + vec: Tensor, + pe: Tensor, + image_proj: Tensor = None, + ip_scale: float = 1.0, + ) -> tuple[Tensor, Tensor]: + if image_proj is None: + return self.processor(self, img, txt, vec, pe) + else: + return self.processor(self, img, txt, vec, pe, image_proj, ip_scale) + + +class SingleStreamBlockLoraProcessor(nn.Module): + def __init__( + self, dim: int, rank: int = 4, network_alpha=None, lora_weight: float = 1 + ): + super().__init__() + self.qkv_lora = LoRALinearLayer(dim, dim * 3, rank, network_alpha) + self.proj_lora = LoRALinearLayer(15360, dim, rank, network_alpha) + self.lora_weight = lora_weight + + def forward(self, attn: nn.Module, x: Tensor, vec: Tensor, pe: Tensor) -> Tensor: + + mod, _ = attn.modulation(vec) + x_mod = (1 + mod.scale) * attn.pre_norm(x) + mod.shift + qkv, mlp = torch.split( + attn.linear1(x_mod), [3 * attn.hidden_size, attn.mlp_hidden_dim], dim=-1 + ) + qkv = qkv + self.qkv_lora(x_mod) * self.lora_weight + + q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=attn.num_heads) + q, k = attn.norm(q, k, v) + + # compute attention + attn_1 = attention(q, k, v, pe=pe) + + # compute activation in mlp stream, cat again and run second linear layer + output = attn.linear2(torch.cat((attn_1, attn.mlp_act(mlp)), 2)) + output = ( + output + + self.proj_lora(torch.cat((attn_1, attn.mlp_act(mlp)), 2)) + * self.lora_weight + ) + output = x + mod.gate * output + return output + + +class SingleStreamBlockProcessor: + def __call__( + self, attn: nn.Module, x: Tensor, vec: Tensor, pe: Tensor, **attention_kwargs + ) -> Tensor: + + mod, _ = attn.modulation(vec) + x_mod = (1 + mod.scale) * attn.pre_norm(x) + mod.shift + qkv, mlp = torch.split( + attn.linear1(x_mod), [3 * attn.hidden_size, attn.mlp_hidden_dim], dim=-1 + ) + + q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=attn.num_heads) + q, k = attn.norm(q, k, v) + + # compute attention + attn_1 = attention(q, k, v, pe=pe) + + # compute activation in mlp stream, cat again and run second linear layer + output = attn.linear2(torch.cat((attn_1, attn.mlp_act(mlp)), 2)) + output = x + mod.gate * output + return output + + +class SingleStreamBlock(nn.Module): + """ + A DiT block with parallel linear layers as described in + https://arxiv.org/abs/2302.05442 and adapted modulation interface. 
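+
+    linear1 fuses the qkv projection with the MLP input projection and linear2
+    fuses the attention output with the MLP output, so the attention and MLP
+    branches run in parallel from a single pre-normed, modulated input.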
+ """ + + def __init__( + self, + hidden_size: int, + num_heads: int, + mlp_ratio: float = 4.0, + qk_scale: float | None = None, + ): + super().__init__() + self.hidden_dim = hidden_size + self.num_heads = num_heads + self.head_dim = hidden_size // num_heads + self.scale = qk_scale or self.head_dim**-0.5 + + self.mlp_hidden_dim = int(hidden_size * mlp_ratio) + # qkv and mlp_in + self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim) + # proj and mlp_out + self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size) + + self.norm = QKNorm(self.head_dim) + + self.hidden_size = hidden_size + self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + + self.mlp_act = nn.GELU(approximate="tanh") + self.modulation = Modulation(hidden_size, double=False) + + processor = SingleStreamBlockProcessor() + self.set_processor(processor) + + def set_processor(self, processor) -> None: + self.processor = processor + + def get_processor(self): + return self.processor + + def forward( + self, + x: Tensor, + vec: Tensor, + pe: Tensor, + image_proj: Tensor | None = None, + ip_scale: float = 1.0, + ) -> Tensor: + if image_proj is None: + return self.processor(self, x, vec, pe) + else: + return self.processor(self, x, vec, pe, image_proj, ip_scale) + + +class LastLayer(nn.Module): + def __init__(self, hidden_size: int, patch_size: int, out_channels: int): + super().__init__() + self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.linear = nn.Linear( + hidden_size, patch_size * patch_size * out_channels, bias=True + ) + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True) + ) + + def forward(self, x: Tensor, vec: Tensor) -> Tensor: + shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1) + x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :] + x = self.linear(x) + return x + + +class SigLIPMultiFeatProjModel(torch.nn.Module): + """ + SigLIP Multi-Feature Projection Model for processing style features from different layers + and projecting them into a unified hidden space. 
+ + Args: + siglip_token_nums (int): Number of SigLIP tokens, default 257 + style_token_nums (int): Number of style tokens, default 256 + siglip_token_dims (int): Dimension of SigLIP tokens, default 1536 + hidden_size (int): Hidden layer size, default 3072 + context_layer_norm (bool): Whether to use context layer normalization, default False + """ + + def __init__( + self, + siglip_token_nums: int = 257, + style_token_nums: int = 256, + siglip_token_dims: int = 1536, + hidden_size: int = 3072, + context_layer_norm: bool = False, + ): + super().__init__() + + # High-level feature processing (layer -2) + self.high_embedding_linear = nn.Sequential( + nn.Linear(siglip_token_nums, style_token_nums), + nn.SiLU() + ) + self.high_layer_norm = ( + nn.LayerNorm(siglip_token_dims) if context_layer_norm else nn.Identity() + ) + self.high_projection = nn.Linear(siglip_token_dims, hidden_size, bias=True) + + # Mid-level feature processing (layer -11) + self.mid_embedding_linear = nn.Sequential( + nn.Linear(siglip_token_nums, style_token_nums), + nn.SiLU() + ) + self.mid_layer_norm = ( + nn.LayerNorm(siglip_token_dims) if context_layer_norm else nn.Identity() + ) + self.mid_projection = nn.Linear(siglip_token_dims, hidden_size, bias=True) + + # Low-level feature processing (layer -20) + self.low_embedding_linear = nn.Sequential( + nn.Linear(siglip_token_nums, style_token_nums), + nn.SiLU() + ) + self.low_layer_norm = ( + nn.LayerNorm(siglip_token_dims) if context_layer_norm else nn.Identity() + ) + self.low_projection = nn.Linear(siglip_token_dims, hidden_size, bias=True) + + def forward(self, siglip_outputs): + """ + Forward pass function + + Args: + siglip_outputs: Output from SigLIP model, containing hidden_states + + Returns: + torch.Tensor: Concatenated multi-layer features with shape [bs, 3*style_token_nums, hidden_size] + """ + dtype = next(self.high_embedding_linear.parameters()).dtype + + # Process high-level features (layer -2) + high_embedding = self._process_layer_features( + siglip_outputs.hidden_states[-2], + self.high_embedding_linear, + self.high_layer_norm, + self.high_projection, + dtype + ) + + # Process mid-level features (layer -11) + mid_embedding = self._process_layer_features( + siglip_outputs.hidden_states[-11], + self.mid_embedding_linear, + self.mid_layer_norm, + self.mid_projection, + dtype + ) + + # Process low-level features (layer -20) + low_embedding = self._process_layer_features( + siglip_outputs.hidden_states[-20], + self.low_embedding_linear, + self.low_layer_norm, + self.low_projection, + dtype + ) + + # Concatenate features from all layers + return torch.cat((high_embedding, mid_embedding, low_embedding), dim=1) + + def _process_layer_features( + self, + hidden_states: torch.Tensor, + embedding_linear: nn.Module, + layer_norm: nn.Module, + projection: nn.Module, + dtype: torch.dtype + ) -> torch.Tensor: + """ + Helper function to process features from a single layer + + Args: + hidden_states: Input hidden states [bs, seq_len, dim] + embedding_linear: Embedding linear layer + layer_norm: Layer normalization + projection: Projection layer + dtype: Target data type + + Returns: + torch.Tensor: Processed features [bs, style_token_nums, hidden_size] + """ + # Transform dimensions: [bs, seq_len, dim] -> [bs, dim, seq_len] -> [bs, dim, style_token_nums] -> [bs, style_token_nums, dim] + embedding = embedding_linear( + hidden_states.to(dtype).transpose(1, 2) + ).transpose(1, 2) + + # Apply layer normalization + embedding = layer_norm(embedding) + + # Project to target hidden 
space
+        embedding = projection(embedding)
+
+        return embedding
diff --git a/uso/flux/pipeline.py b/uso/flux/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0f95d4dfe6c231508e1a359b30c2d1f05e998a0
--- /dev/null
+++ b/uso/flux/pipeline.py
@@ -0,0 +1,392 @@
+# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.
+# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import math
+from typing import Literal, Optional
+from torch import Tensor
+
+import torch
+from einops import rearrange
+from PIL import ExifTags, Image
+import torchvision.transforms.functional as TVF
+
+from uso.flux.modules.layers import (
+    DoubleStreamBlockLoraProcessor,
+    DoubleStreamBlockProcessor,
+    SingleStreamBlockLoraProcessor,
+    SingleStreamBlockProcessor,
+)
+from uso.flux.sampling import denoise, get_noise, get_schedule, prepare_multi_ip, unpack
+from uso.flux.util import (
+    get_lora_rank,
+    load_ae,
+    load_checkpoint,
+    load_clip,
+    load_flow_model,
+    load_flow_model_only_lora,
+    load_t5,
+)
+
+
+def find_nearest_scale(image_h, image_w, predefined_scales):
+    """
+    Find the nearest predefined scale for the given image height and width.
+
+    :param image_h: image height
+    :param image_w: image width
+    :param predefined_scales: list of predefined scales [(h1, w1), (h2, w2), ...]
+    :return: the nearest predefined scale (h, w)
+    """
+    # Compute the aspect ratio of the input image
+    image_ratio = image_h / image_w
+
+    # Track the smallest ratio difference and the corresponding scale
+    min_diff = float("inf")
+    nearest_scale = None
+
+    # Iterate over all predefined scales and pick the one whose aspect ratio is closest
+    for scale_h, scale_w in predefined_scales:
+        predefined_ratio = scale_h / scale_w
+        diff = abs(predefined_ratio - image_ratio)
+
+        if diff < min_diff:
+            min_diff = diff
+            nearest_scale = (scale_h, scale_w)
+
+    return nearest_scale
+
+
+def preprocess_ref(raw_image: Image.Image, long_size: int = 512, scale_ratio: int = 1):
+    # Get the width and height of the original image
+    image_w, image_h = raw_image.size
+    if image_w == image_h and image_w == 16:
+        return raw_image
+
+    # Compute the new long and short sides, keeping the aspect ratio
+    if image_w >= image_h:
+        new_w = long_size
+        new_h = int((long_size / image_w) * image_h)
+    else:
+        new_h = long_size
+        new_w = int((long_size / image_h) * image_w)
+
+    # Resize proportionally to the new width and height
+    raw_image = raw_image.resize((new_w, new_h), resample=Image.LANCZOS)
+
+    # Round down to a multiple of 16 * scale_ratio so a canny image can be scaled consistently
+    scale_ratio = int(scale_ratio)
+    target_w = new_w // (16 * scale_ratio) * (16 * scale_ratio)
+    target_h = new_h // (16 * scale_ratio) * (16 * scale_ratio)
+
+    # Compute the crop origin for a center crop
+    left = (new_w - target_w) // 2
+    top = (new_h - target_h) // 2
+    right = left + target_w
+    bottom = top + target_h
+
+    # Center crop
+    raw_image = raw_image.crop((left, top, right, bottom))
+
+    # Convert to RGB
+    raw_image = raw_image.convert("RGB")
+    return raw_image
+
+
+def resize_and_centercrop_image(image, target_height_ref1, target_width_ref1):
+    target_height_ref1 = int(target_height_ref1 // 64 * 64)
+    target_width_ref1 = int(target_width_ref1 // 64 * 64)
+    h, w = image.shape[-2:]
+    if h < target_height_ref1 or w < target_width_ref1:
+        # Compute the aspect ratio
+        aspect_ratio = w / h
+        if h < target_height_ref1:
+            new_h = target_height_ref1
+            new_w = new_h * aspect_ratio
+            if new_w < target_width_ref1:
+                new_w = target_width_ref1
+                new_h = new_w / aspect_ratio
+        else:
+            new_w = target_width_ref1
+            new_h = new_w / aspect_ratio
+            if new_h < target_height_ref1:
+                new_h = target_height_ref1
+                new_w = new_h * aspect_ratio
+    else:
+        aspect_ratio = w / h
+        tgt_aspect_ratio = target_width_ref1 / target_height_ref1
+        if aspect_ratio > tgt_aspect_ratio:
+            new_h = target_height_ref1
+            new_w = new_h * aspect_ratio
+        else:
+            new_w = target_width_ref1
+            new_h = new_w / aspect_ratio
+    # Resize with TVF.resize
+    image = TVF.resize(image, (math.ceil(new_h), math.ceil(new_w)))
+    # Compute the center-crop parameters
+    top = (image.shape[-2] - target_height_ref1) // 2
+    left = (image.shape[-1] - target_width_ref1) // 2
+    # Center crop with TVF.crop
+    image = TVF.crop(image, top, left, target_height_ref1, target_width_ref1)
+    return image
+
+
+class USOPipeline:
+    def __init__(
+        self,
+        model_type: str,
+        device: torch.device,
+        offload: bool = False,
+        only_lora: bool = False,
+        lora_rank: int = 16,
+        hf_download: bool = True,
+    ):
+        self.device = device
+        self.offload = offload
+        self.model_type = model_type
+
+        self.clip = load_clip(self.device)
+        self.t5 = load_t5(self.device, max_length=512)
+        self.ae = load_ae(model_type, device="cpu" if offload else self.device)
+        self.use_fp8 = "fp8" in model_type
+        if only_lora:
+            self.model = load_flow_model_only_lora(
+                model_type,
+                device="cpu" if offload else self.device,
+                lora_rank=lora_rank,
+                use_fp8=self.use_fp8,
+                hf_download=hf_download,
+            )
+        else:
+            self.model = load_flow_model(
+                model_type, device="cpu" if offload else self.device
+            )
+
+    def load_ckpt(self, ckpt_path):
+        if ckpt_path is not None:
+            from safetensors.torch import load_file as load_sft
+
+            print("Loading checkpoint to replace old keys")
+            # load_sft
doesn't support torch.device + if ckpt_path.endswith("safetensors"): + sd = load_sft(ckpt_path, device="cpu") + missing, unexpected = self.model.load_state_dict( + sd, strict=False, assign=True + ) + else: + dit_state = torch.load(ckpt_path, map_location="cpu") + sd = {} + for k in dit_state.keys(): + sd[k.replace("module.", "")] = dit_state[k] + missing, unexpected = self.model.load_state_dict( + sd, strict=False, assign=True + ) + self.model.to(str(self.device)) + print(f"missing keys: {missing}\n\n\n\n\nunexpected keys: {unexpected}") + + def set_lora( + self, + local_path: str = None, + repo_id: str = None, + name: str = None, + lora_weight: int = 0.7, + ): + checkpoint = load_checkpoint(local_path, repo_id, name) + self.update_model_with_lora(checkpoint, lora_weight) + + def set_lora_from_collection( + self, lora_type: str = "realism", lora_weight: int = 0.7 + ): + checkpoint = load_checkpoint( + None, self.hf_lora_collection, self.lora_types_to_names[lora_type] + ) + self.update_model_with_lora(checkpoint, lora_weight) + + def update_model_with_lora(self, checkpoint, lora_weight): + rank = get_lora_rank(checkpoint) + lora_attn_procs = {} + + for name, _ in self.model.attn_processors.items(): + lora_state_dict = {} + for k in checkpoint.keys(): + if name in k: + lora_state_dict[k[len(name) + 1 :]] = checkpoint[k] * lora_weight + + if len(lora_state_dict): + if name.startswith("single_blocks"): + lora_attn_procs[name] = SingleStreamBlockLoraProcessor( + dim=3072, rank=rank + ) + else: + lora_attn_procs[name] = DoubleStreamBlockLoraProcessor( + dim=3072, rank=rank + ) + lora_attn_procs[name].load_state_dict(lora_state_dict) + lora_attn_procs[name].to(self.device) + else: + if name.startswith("single_blocks"): + lora_attn_procs[name] = SingleStreamBlockProcessor() + else: + lora_attn_procs[name] = DoubleStreamBlockProcessor() + + self.model.set_attn_processor(lora_attn_procs) + + def __call__( + self, + prompt: str, + width: int = 512, + height: int = 512, + guidance: float = 4, + num_steps: int = 50, + seed: int = 123456789, + **kwargs, + ): + width = 16 * (width // 16) + height = 16 * (height // 16) + + device_type = self.device if isinstance(self.device, str) else self.device.type + dtype = torch.bfloat16 if device_type != "mps" else torch.float16 + with torch.autocast( + enabled=self.use_fp8, device_type=device_type, dtype=dtype + ): + return self.forward( + prompt, width, height, guidance, num_steps, seed, **kwargs + ) + + @torch.inference_mode() + def gradio_generate( + self, + prompt: str, + image_prompt1: Image.Image, + image_prompt2: Image.Image, + image_prompt3: Image.Image, + seed: int, + width: int = 1024, + height: int = 1024, + guidance: float = 4, + num_steps: int = 25, + keep_size: bool = False, + content_long_size: int = 512, + ): + ref_content_imgs = [image_prompt1] + ref_content_imgs = [img for img in ref_content_imgs if isinstance(img, Image.Image)] + ref_content_imgs = [preprocess_ref(img, content_long_size) for img in ref_content_imgs] + + ref_style_imgs = [image_prompt2, image_prompt3] + ref_style_imgs = [img for img in ref_style_imgs if isinstance(img, Image.Image)] + ref_style_imgs = [self.model.vision_encoder_processor(img, return_tensors="pt").to(self.device) for img in ref_style_imgs] + + seed = seed if seed != -1 else torch.randint(0, 10**8, (1,)).item() + + # whether keep input image size + if keep_size and len(ref_content_imgs)>0: + width, height = ref_content_imgs[0].size + width, height = int(width * (1024 / content_long_size)), int(height * (1024 / 
content_long_size)) + img = self( + prompt=prompt, + width=width, + height=height, + guidance=guidance, + num_steps=num_steps, + seed=seed, + ref_imgs=ref_content_imgs, + siglip_inputs=ref_style_imgs, + ) + + filename = f"output/gradio/{seed}_{prompt[:20]}.png" + os.makedirs(os.path.dirname(filename), exist_ok=True) + exif_data = Image.Exif() + exif_data[ExifTags.Base.Make] = "USO" + exif_data[ExifTags.Base.Model] = self.model_type + info = f"{prompt=}, {seed=}, {width=}, {height=}, {guidance=}, {num_steps=}" + exif_data[ExifTags.Base.ImageDescription] = info + img.save(filename, format="png", exif=exif_data) + return img, filename + + @torch.inference_mode + def forward( + self, + prompt: str, + width: int, + height: int, + guidance: float, + num_steps: int, + seed: int, + ref_imgs: list[Image.Image] | None = None, + pe: Literal["d", "h", "w", "o"] = "d", + siglip_inputs: list[Tensor] | None = None, + ): + x = get_noise( + 1, height, width, device=self.device, dtype=torch.bfloat16, seed=seed + ) + timesteps = get_schedule( + num_steps, + (width // 8) * (height // 8) // (16 * 16), + shift=True, + ) + if self.offload: + self.ae.encoder = self.ae.encoder.to(self.device) + x_1_refs = [ + self.ae.encode( + (TVF.to_tensor(ref_img) * 2.0 - 1.0) + .unsqueeze(0) + .to(self.device, torch.float32) + ).to(torch.bfloat16) + for ref_img in ref_imgs + ] + + if self.offload: + self.offload_model_to_cpu(self.ae.encoder) + self.t5, self.clip = self.t5.to(self.device), self.clip.to(self.device) + inp_cond = prepare_multi_ip( + t5=self.t5, + clip=self.clip, + img=x, + prompt=prompt, + ref_imgs=x_1_refs, + pe=pe, + ) + + if self.offload: + self.offload_model_to_cpu(self.t5, self.clip) + self.model = self.model.to(self.device) + + x = denoise( + self.model, + **inp_cond, + timesteps=timesteps, + guidance=guidance, + siglip_inputs=siglip_inputs, + ) + + if self.offload: + self.offload_model_to_cpu(self.model) + self.ae.decoder.to(x.device) + x = unpack(x.float(), height, width) + x = self.ae.decode(x) + self.offload_model_to_cpu(self.ae.decoder) + + x1 = x.clamp(-1, 1) + x1 = rearrange(x1[-1], "c h w -> h w c") + output_img = Image.fromarray((127.5 * (x1 + 1.0)).cpu().byte().numpy()) + return output_img + + def offload_model_to_cpu(self, *models): + if not self.offload: + return + for model in models: + model.cpu() + if torch.cuda.is_available(): + torch.cuda.empty_cache() diff --git a/uso/flux/sampling.py b/uso/flux/sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..a4611fa8bef1c446d760a05a61f6d086dc5a8ad0 --- /dev/null +++ b/uso/flux/sampling.py @@ -0,0 +1,274 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. +# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
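+# Editorial note (illustrative sketch, not part of the original sources): the helpers
+# in this module are wired together by USOPipeline.forward in uso/flux/pipeline.py
+# above, roughly as follows (a name such as `latent_refs` is a placeholder for the
+# VAE-encoded reference images):
+#
+#   x = get_noise(1, height, width, device=device, dtype=torch.bfloat16, seed=seed)
+#   timesteps = get_schedule(num_steps, (width // 8) * (height // 8) // (16 * 16), shift=True)
+#   inputs = prepare_multi_ip(t5=t5, clip=clip, img=x, prompt=prompt, ref_imgs=latent_refs, pe="d")
+#   x = denoise(model, **inputs, timesteps=timesteps, guidance=guidance, siglip_inputs=siglip_inputs)
+#   image = ae.decode(unpack(x.float(), height, width))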
+
+import math
+from typing import Literal
+
+import torch
+from einops import rearrange, repeat
+from torch import Tensor
+from tqdm import tqdm
+
+from .model import Flux
+from .modules.conditioner import HFEmbedder
+
+
+def get_noise(
+    num_samples: int,
+    height: int,
+    width: int,
+    device: torch.device,
+    dtype: torch.dtype,
+    seed: int,
+):
+    return torch.randn(
+        num_samples,
+        16,
+        # allow for packing
+        2 * math.ceil(height / 16),
+        2 * math.ceil(width / 16),
+        device=device,
+        dtype=dtype,
+        generator=torch.Generator(device=device).manual_seed(seed),
+    )
+
+
+def prepare(
+    t5: HFEmbedder,
+    clip: HFEmbedder,
+    img: Tensor,
+    prompt: str | list[str],
+    ref_img: None | Tensor = None,
+    pe: Literal["d", "h", "w", "o"] = "d",
+) -> dict[str, Tensor]:
+    assert pe in ["d", "h", "w", "o"]
+    bs, c, h, w = img.shape
+    if bs == 1 and not isinstance(prompt, str):
+        bs = len(prompt)
+
+    img = rearrange(img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
+    if img.shape[0] == 1 and bs > 1:
+        img = repeat(img, "1 ... -> bs ...", bs=bs)
+
+    img_ids = torch.zeros(h // 2, w // 2, 3)
+    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[:, None]
+    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, :]
+    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
+
+    if ref_img is not None:
+        _, _, ref_h, ref_w = ref_img.shape
+        ref_img = rearrange(
+            ref_img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2
+        )
+        if ref_img.shape[0] == 1 and bs > 1:
+            ref_img = repeat(ref_img, "1 ... -> bs ...", bs=bs)
+        ref_img_ids = torch.zeros(ref_h // 2, ref_w // 2, 3)
+        # offset the ref img ids along height/width by the target image's maxima
+        h_offset = h // 2 if pe in {"d", "h"} else 0
+        w_offset = w // 2 if pe in {"d", "w"} else 0
+        ref_img_ids[..., 1] = (
+            ref_img_ids[..., 1] + torch.arange(ref_h // 2)[:, None] + h_offset
+        )
+        ref_img_ids[..., 2] = (
+            ref_img_ids[..., 2] + torch.arange(ref_w // 2)[None, :] + w_offset
+        )
+        ref_img_ids = repeat(ref_img_ids, "h w c -> b (h w) c", b=bs)
+
+    if isinstance(prompt, str):
+        prompt = [prompt]
+    txt = t5(prompt)
+    if txt.shape[0] == 1 and bs > 1:
+        txt = repeat(txt, "1 ... -> bs ...", bs=bs)
+    txt_ids = torch.zeros(bs, txt.shape[1], 3)
+
+    vec = clip(prompt)
+    if vec.shape[0] == 1 and bs > 1:
+        vec = repeat(vec, "1 ... -> bs ...", bs=bs)
+
+    if ref_img is not None:
+        return {
+            "img": img,
+            "img_ids": img_ids.to(img.device),
+            "ref_img": ref_img,
+            "ref_img_ids": ref_img_ids.to(img.device),
+            "txt": txt.to(img.device),
+            "txt_ids": txt_ids.to(img.device),
+            "vec": vec.to(img.device),
+        }
+    else:
+        return {
+            "img": img,
+            "img_ids": img_ids.to(img.device),
+            "txt": txt.to(img.device),
+            "txt_ids": txt_ids.to(img.device),
+            "vec": vec.to(img.device),
+        }
+
+
+def prepare_multi_ip(
+    t5: HFEmbedder,
+    clip: HFEmbedder,
+    img: Tensor,
+    prompt: str | list[str],
+    ref_imgs: list[Tensor] | None = None,
+    pe: Literal["d", "h", "w", "o"] = "d",
+) -> dict[str, Tensor]:
+    assert pe in ["d", "h", "w", "o"]
+    bs, c, h, w = img.shape
+    if bs == 1 and not isinstance(prompt, str):
+        bs = len(prompt)
+
+    # tgt img
+    img = rearrange(img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
+    if img.shape[0] == 1 and bs > 1:
+        img = repeat(img, "1 ... -> bs ...", bs=bs)
+
+    img_ids = torch.zeros(h // 2, w // 2, 3)
+    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[:, None]
+    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, :]
+    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
+
+    ref_img_ids = []
+    ref_imgs_list = []
+
+    pe_shift_w, pe_shift_h = w // 2, h // 2
+    for ref_img in ref_imgs:
+        _, _, ref_h1, ref_w1 = ref_img.shape
+        ref_img = rearrange(
+            ref_img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2
+        )
+        if ref_img.shape[0] == 1 and bs > 1:
+            ref_img = repeat(ref_img, "1 ... -> bs ...", bs=bs)
+        ref_img_ids1 = torch.zeros(ref_h1 // 2, ref_w1 // 2, 3)
+        # offset each ref's img ids along height/width by the running maxima
+        h_offset = pe_shift_h if pe in {"d", "h"} else 0
+        w_offset = pe_shift_w if pe in {"d", "w"} else 0
+        ref_img_ids1[..., 1] = (
+            ref_img_ids1[..., 1] + torch.arange(ref_h1 // 2)[:, None] + h_offset
+        )
+        ref_img_ids1[..., 2] = (
+            ref_img_ids1[..., 2] + torch.arange(ref_w1 // 2)[None, :] + w_offset
+        )
+        ref_img_ids1 = repeat(ref_img_ids1, "h w c -> b (h w) c", b=bs)
+        ref_img_ids.append(ref_img_ids1)
+        ref_imgs_list.append(ref_img)
+
+        # update the pe shift
+        pe_shift_h += ref_h1 // 2
+        pe_shift_w += ref_w1 // 2
+
+    if isinstance(prompt, str):
+        prompt = [prompt]
+    txt = t5(prompt)
+    if txt.shape[0] == 1 and bs > 1:
+        txt = repeat(txt, "1 ... -> bs ...", bs=bs)
+    txt_ids = torch.zeros(bs, txt.shape[1], 3)
+
+    vec = clip(prompt)
+    if vec.shape[0] == 1 and bs > 1:
+        vec = repeat(vec, "1 ... -> bs ...", bs=bs)
+
+    return {
+        "img": img,
+        "img_ids": img_ids.to(img.device),
+        "ref_img": tuple(ref_imgs_list),
+        "ref_img_ids": [ref_img_id.to(img.device) for ref_img_id in ref_img_ids],
+        "txt": txt.to(img.device),
+        "txt_ids": txt_ids.to(img.device),
+        "vec": vec.to(img.device),
+    }
+
+
+def time_shift(mu: float, sigma: float, t: Tensor):
+    return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
+
+
+def get_lin_function(
+    x1: float = 256, y1: float = 0.5, x2: float = 4096, y2: float = 1.15
+):
+    m = (y2 - y1) / (x2 - x1)
+    b = y1 - m * x1
+    return lambda x: m * x + b
+
+
+def get_schedule(
+    num_steps: int,
+    image_seq_len: int,
+    base_shift: float = 0.5,
+    max_shift: float = 1.15,
+    shift: bool = True,
+) -> list[float]:
+    # extra step for zero
+    timesteps = torch.linspace(1, 0, num_steps + 1)
+
+    # shifting the schedule to favor high timesteps for higher signal images
+    if shift:
+        # estimate mu via linear interpolation between two reference points
+        mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
+        timesteps = time_shift(mu, 1.0, timesteps)
+
+    return timesteps.tolist()
+
+
+def denoise(
+    model: Flux,
+    # model input
+    img: Tensor,
+    img_ids: Tensor,
+    txt: Tensor,
+    txt_ids: Tensor,
+    vec: Tensor,
+    # sampling parameters
+    timesteps: list[float],
+    guidance: float = 4.0,
+    ref_img: Tensor = None,
+    ref_img_ids: Tensor = None,
+    siglip_inputs: list[Tensor] | None = None,
+):
+    i = 0
+    guidance_vec = torch.full(
+        (img.shape[0],), guidance, device=img.device, dtype=img.dtype
+    )
+    for t_curr, t_prev in tqdm(
+        zip(timesteps[:-1], timesteps[1:]), total=len(timesteps) - 1
+    ):
+        # for t_curr, t_prev in zip(timesteps[:-1], timesteps[1:]):
+        t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
+        pred = model(
+            img=img,
+            img_ids=img_ids,
+            ref_img=ref_img,
+            ref_img_ids=ref_img_ids,
+            txt=txt,
+            txt_ids=txt_ids,
+            y=vec,
+            timesteps=t_vec,
+            guidance=guidance_vec,
+            siglip_inputs=siglip_inputs,
+        )
+        img = img + (t_prev - t_curr) * pred
+        i += 1
+    return img
+
+
+def unpack(x: Tensor, height: int,
width: int) -> Tensor: + return rearrange( + x, + "b (h w) (c ph pw) -> b c (h ph) (w pw)", + h=math.ceil(height / 16), + w=math.ceil(width / 16), + ph=2, + pw=2, + ) diff --git a/uso/flux/util.py b/uso/flux/util.py new file mode 100644 index 0000000000000000000000000000000000000000..38e40b64885b8d3ab2c466c5cd19368463caf09a --- /dev/null +++ b/uso/flux/util.py @@ -0,0 +1,511 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. +# Copyright (c) 2024 Black Forest Labs and The XLabs-AI Team. All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from dataclasses import dataclass + +import torch +import json +import numpy as np +from huggingface_hub import hf_hub_download +from safetensors import safe_open +from safetensors.torch import load_file as load_sft + +from .model import Flux, FluxParams +from .modules.autoencoder import AutoEncoder, AutoEncoderParams +from .modules.conditioner import HFEmbedder + +import re +from uso.flux.modules.layers import ( + DoubleStreamBlockLoraProcessor, + SingleStreamBlockLoraProcessor, +) + + +def load_model(ckpt, device="cpu"): + if ckpt.endswith("safetensors"): + from safetensors import safe_open + + pl_sd = {} + with safe_open(ckpt, framework="pt", device=device) as f: + for k in f.keys(): + pl_sd[k] = f.get_tensor(k) + else: + pl_sd = torch.load(ckpt, map_location=device) + return pl_sd + + +def load_safetensors(path): + tensors = {} + with safe_open(path, framework="pt", device="cpu") as f: + for key in f.keys(): + tensors[key] = f.get_tensor(key) + return tensors + + +def get_lora_rank(checkpoint): + for k in checkpoint.keys(): + if k.endswith(".down.weight"): + return checkpoint[k].shape[0] + + +def load_checkpoint(local_path, repo_id, name): + if local_path is not None: + if ".safetensors" in local_path: + print(f"Loading .safetensors checkpoint from {local_path}") + checkpoint = load_safetensors(local_path) + else: + print(f"Loading checkpoint from {local_path}") + checkpoint = torch.load(local_path, map_location="cpu") + elif repo_id is not None and name is not None: + print(f"Loading checkpoint {name} from repo id {repo_id}") + checkpoint = load_from_repo_id(repo_id, name) + else: + raise ValueError( + "LOADING ERROR: you must specify local_path or repo_id with name in HF to download" + ) + return checkpoint + + +def c_crop(image): + width, height = image.size + new_size = min(width, height) + left = (width - new_size) / 2 + top = (height - new_size) / 2 + right = (width + new_size) / 2 + bottom = (height + new_size) / 2 + return image.crop((left, top, right, bottom)) + + +def pad64(x): + return int(np.ceil(float(x) / 64.0) * 64 - x) + + +def HWC3(x): + assert x.dtype == np.uint8 + if x.ndim == 2: + x = x[:, :, None] + assert x.ndim == 3 + H, W, C = x.shape + assert C == 1 or C == 3 or C == 4 + if C == 3: + return x + if C == 1: + return np.concatenate([x, x, x], axis=2) + if C == 4: + color = x[:, :, 0:3].astype(np.float32) + alpha = x[:, :, 3:4].astype(np.float32) / 255.0 + y = color * alpha + 
255.0 * (1.0 - alpha) + y = y.clip(0, 255).astype(np.uint8) + return y + + +@dataclass +class ModelSpec: + params: FluxParams + ae_params: AutoEncoderParams + ckpt_path: str | None + ae_path: str | None + repo_id: str | None + repo_flow: str | None + repo_ae: str | None + repo_id_ae: str | None + + +configs = { + "flux-dev": ModelSpec( + repo_id="black-forest-labs/FLUX.1-dev", + repo_id_ae="black-forest-labs/FLUX.1-dev", + repo_flow="flux1-dev.safetensors", + repo_ae="ae.safetensors", + ckpt_path=os.getenv("FLUX_DEV"), + params=FluxParams( + in_channels=64, + vec_in_dim=768, + context_in_dim=4096, + hidden_size=3072, + mlp_ratio=4.0, + num_heads=24, + depth=19, + depth_single_blocks=38, + axes_dim=[16, 56, 56], + theta=10_000, + qkv_bias=True, + guidance_embed=True, + ), + ae_path=os.getenv("AE"), + ae_params=AutoEncoderParams( + resolution=256, + in_channels=3, + ch=128, + out_ch=3, + ch_mult=[1, 2, 4, 4], + num_res_blocks=2, + z_channels=16, + scale_factor=0.3611, + shift_factor=0.1159, + ), + ), + "flux-dev-fp8": ModelSpec( + repo_id="black-forest-labs/FLUX.1-dev", + repo_id_ae="black-forest-labs/FLUX.1-dev", + repo_flow="flux1-dev.safetensors", + repo_ae="ae.safetensors", + ckpt_path=os.getenv("FLUX_DEV_FP8"), + params=FluxParams( + in_channels=64, + vec_in_dim=768, + context_in_dim=4096, + hidden_size=3072, + mlp_ratio=4.0, + num_heads=24, + depth=19, + depth_single_blocks=38, + axes_dim=[16, 56, 56], + theta=10_000, + qkv_bias=True, + guidance_embed=True, + ), + ae_path=os.getenv("AE"), + ae_params=AutoEncoderParams( + resolution=256, + in_channels=3, + ch=128, + out_ch=3, + ch_mult=[1, 2, 4, 4], + num_res_blocks=2, + z_channels=16, + scale_factor=0.3611, + shift_factor=0.1159, + ), + ), + "flux-krea-dev": ModelSpec( + repo_id="black-forest-labs/FLUX.1-Krea-dev", + repo_id_ae="black-forest-labs/FLUX.1-Krea-dev", + repo_flow="flux1-krea-dev.safetensors", + repo_ae="ae.safetensors", + ckpt_path=os.getenv("FLUX_KREA_DEV"), + params=FluxParams( + in_channels=64, + vec_in_dim=768, + context_in_dim=4096, + hidden_size=3072, + mlp_ratio=4.0, + num_heads=24, + depth=19, + depth_single_blocks=38, + axes_dim=[16, 56, 56], + theta=10_000, + qkv_bias=True, + guidance_embed=True, + ), + ae_path=os.getenv("AE"), + ae_params=AutoEncoderParams( + resolution=256, + in_channels=3, + ch=128, + out_ch=3, + ch_mult=[1, 2, 4, 4], + num_res_blocks=2, + z_channels=16, + scale_factor=0.3611, + shift_factor=0.1159, + ), + ), + "flux-schnell": ModelSpec( + repo_id="black-forest-labs/FLUX.1-schnell", + repo_id_ae="black-forest-labs/FLUX.1-dev", + repo_flow="flux1-schnell.safetensors", + repo_ae="ae.safetensors", + ckpt_path=os.getenv("FLUX_SCHNELL"), + params=FluxParams( + in_channels=64, + vec_in_dim=768, + context_in_dim=4096, + hidden_size=3072, + mlp_ratio=4.0, + num_heads=24, + depth=19, + depth_single_blocks=38, + axes_dim=[16, 56, 56], + theta=10_000, + qkv_bias=True, + guidance_embed=False, + ), + ae_path=os.getenv("AE"), + ae_params=AutoEncoderParams( + resolution=256, + in_channels=3, + ch=128, + out_ch=3, + ch_mult=[1, 2, 4, 4], + num_res_blocks=2, + z_channels=16, + scale_factor=0.3611, + shift_factor=0.1159, + ), + ), +} + + +def print_load_warning(missing: list[str], unexpected: list[str]) -> None: + if len(missing) > 0 and len(unexpected) > 0: + print(f"Got {len(missing)} missing keys:\n\t" + "\n\t".join(missing)) + print("\n" + "-" * 79 + "\n") + print(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected)) + elif len(missing) > 0: + print(f"Got {len(missing)} missing keys:\n\t" 
+ "\n\t".join(missing)) + elif len(unexpected) > 0: + print(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected)) + + +def load_from_repo_id(repo_id, checkpoint_name): + ckpt_path = hf_hub_download(repo_id, checkpoint_name) + sd = load_sft(ckpt_path, device="cpu") + return sd + + +def load_flow_model( + name: str, device: str | torch.device = "cuda", hf_download: bool = True +): + # Loading Flux + print("Init model") + ckpt_path = configs[name].ckpt_path + if ( + ckpt_path is None + and configs[name].repo_id is not None + and configs[name].repo_flow is not None + ): + ckpt_path = hf_hub_download(configs[name].repo_id, configs[name].repo_flow) + + # with torch.device("meta" if ckpt_path is not None else device): + with torch.device(device): + model = Flux(configs[name].params).to(torch.bfloat16) + + if ckpt_path is not None: + print("Loading main checkpoint") + # load_sft doesn't support torch.device + sd = load_model(ckpt_path, device="cpu") + missing, unexpected = model.load_state_dict(sd, strict=False, assign=True) + print_load_warning(missing, unexpected) + return model.to(str(device)) + + +def load_flow_model_only_lora( + name: str, + device: str | torch.device = "cuda", + hf_download: bool = True, + lora_rank: int = 16, + use_fp8: bool = False, +): + # Loading Flux + ckpt_path = configs[name].ckpt_path + if ( + ckpt_path is None + and configs[name].repo_id is not None + and configs[name].repo_flow is not None + ): + ckpt_path = hf_hub_download( + configs[name].repo_id, configs[name].repo_flow.replace("sft", "safetensors") + ) + + if hf_download: + try: + lora_ckpt_path = hf_hub_download( + "bytedance-research/USO", "uso_flux_v1.0/dit_lora.safetensors" + ) + except Exception as e: + print(f"Failed to download lora checkpoint: {e}") + print("Trying to load lora from local") + lora_ckpt_path = os.environ.get("LORA", None) + try: + proj_ckpt_path = hf_hub_download( + "bytedance-research/USO", "uso_flux_v1.0/projector.safetensors" + ) + except Exception as e: + print(f"Failed to download projection_model checkpoint: {e}") + print("Trying to load projection_model from local") + proj_ckpt_path = os.environ.get("PROJECTION_MODEL", None) + else: + lora_ckpt_path = os.environ.get("LORA", None) + proj_ckpt_path = os.environ.get("PROJECTION_MODEL", None) + with torch.device("meta" if ckpt_path is not None else device): + model = Flux(configs[name].params) + + model = set_lora( + model, lora_rank, device="meta" if lora_ckpt_path is not None else device + ) + + if ckpt_path is not None: + print(f"Loading lora from {lora_ckpt_path}") + lora_sd = ( + load_sft(lora_ckpt_path, device=str(device)) + if lora_ckpt_path.endswith("safetensors") + else torch.load(lora_ckpt_path, map_location="cpu") + ) + proj_sd = ( + load_sft(proj_ckpt_path, device=str(device)) + if proj_ckpt_path.endswith("safetensors") + else torch.load(proj_ckpt_path, map_location="cpu") + ) + lora_sd.update(proj_sd) + + print("Loading main checkpoint") + # load_sft doesn't support torch.device + + if ckpt_path.endswith("safetensors"): + if use_fp8: + print( + "####\n" + "We are in fp8 mode right now, since the fp8 checkpoint of XLabs-AI/flux-dev-fp8 seems broken\n" + "we convert the fp8 checkpoint on flight from bf16 checkpoint\n" + "If your storage is constrained" + "you can save the fp8 checkpoint and replace the bf16 checkpoint by yourself\n" + ) + sd = load_sft(ckpt_path, device="cpu") + sd = { + k: v.to(dtype=torch.float8_e4m3fn, device=device) + for k, v in sd.items() + } + else: + sd = load_sft(ckpt_path, 
device=str(device)) + + sd.update(lora_sd) + missing, unexpected = model.load_state_dict(sd, strict=False, assign=True) + else: + dit_state = torch.load(ckpt_path, map_location="cpu") + sd = {} + for k in dit_state.keys(): + sd[k.replace("module.", "")] = dit_state[k] + sd.update(lora_sd) + missing, unexpected = model.load_state_dict(sd, strict=False, assign=True) + model.to(str(device)) + print_load_warning(missing, unexpected) + return model + + +def set_lora( + model: Flux, + lora_rank: int, + double_blocks_indices: list[int] | None = None, + single_blocks_indices: list[int] | None = None, + device: str | torch.device = "cpu", +) -> Flux: + double_blocks_indices = ( + list(range(model.params.depth)) + if double_blocks_indices is None + else double_blocks_indices + ) + single_blocks_indices = ( + list(range(model.params.depth_single_blocks)) + if single_blocks_indices is None + else single_blocks_indices + ) + + lora_attn_procs = {} + with torch.device(device): + for name, attn_processor in model.attn_processors.items(): + match = re.search(r"\.(\d+)\.", name) + if match: + layer_index = int(match.group(1)) + + if ( + name.startswith("double_blocks") + and layer_index in double_blocks_indices + ): + lora_attn_procs[name] = DoubleStreamBlockLoraProcessor( + dim=model.params.hidden_size, rank=lora_rank + ) + elif ( + name.startswith("single_blocks") + and layer_index in single_blocks_indices + ): + lora_attn_procs[name] = SingleStreamBlockLoraProcessor( + dim=model.params.hidden_size, rank=lora_rank + ) + else: + lora_attn_procs[name] = attn_processor + model.set_attn_processor(lora_attn_procs) + return model + + +def load_flow_model_quintized( + name: str, device: str | torch.device = "cuda", hf_download: bool = True +): + # Loading Flux + from optimum.quanto import requantize + + print("Init model") + ckpt_path = configs[name].ckpt_path + if ( + ckpt_path is None + and configs[name].repo_id is not None + and configs[name].repo_flow is not None + and hf_download + ): + ckpt_path = hf_hub_download(configs[name].repo_id, configs[name].repo_flow) + json_path = hf_hub_download(configs[name].repo_id, "flux_dev_quantization_map.json") + + model = Flux(configs[name].params).to(torch.bfloat16) + + print("Loading checkpoint") + # load_sft doesn't support torch.device + sd = load_sft(ckpt_path, device="cpu") + sd = {k: v.to(dtype=torch.float8_e4m3fn, device=device) for k, v in sd.items()} + model.load_state_dict(sd, assign=True) + return model + with open(json_path, "r") as f: + quantization_map = json.load(f) + print("Start a quantization process...") + requantize(model, sd, quantization_map, device=device) + print("Model is quantized!") + return model + + +def load_t5(device: str | torch.device = "cuda", max_length: int = 512) -> HFEmbedder: + # max length 64, 128, 256 and 512 should work (if your sequence is short enough) + version = os.environ.get("T5", "xlabs-ai/xflux_text_encoders") + return HFEmbedder(version, max_length=max_length, torch_dtype=torch.bfloat16).to( + device + ) + + +def load_clip(device: str | torch.device = "cuda") -> HFEmbedder: + version = os.environ.get("CLIP", "openai/clip-vit-large-patch14") + return HFEmbedder(version, max_length=77, torch_dtype=torch.bfloat16).to(device) + + +def load_ae( + name: str, device: str | torch.device = "cuda", hf_download: bool = True +) -> AutoEncoder: + ckpt_path = configs[name].ae_path + if ( + ckpt_path is None + and configs[name].repo_id is not None + and configs[name].repo_ae is not None + and hf_download + ): + ckpt_path = 
hf_hub_download(configs[name].repo_id_ae, configs[name].repo_ae) + + # Loading the autoencoder + print("Init AE") + with torch.device("meta" if ckpt_path is not None else device): + ae = AutoEncoder(configs[name].ae_params) + + if ckpt_path is not None: + sd = load_sft(ckpt_path, device=str(device)) + missing, unexpected = ae.load_state_dict(sd, strict=False, assign=True) + print_load_warning(missing, unexpected) + return ae diff --git a/weights/downloader.py b/weights/downloader.py new file mode 100644 index 0000000000000000000000000000000000000000..a9dbc2d07f815f11d41d563162ec61b510197f39 --- /dev/null +++ b/weights/downloader.py @@ -0,0 +1,54 @@ +from huggingface_hub import snapshot_download, hf_hub_download + +from dotenv import load_dotenv +import os +load_dotenv() + +token = os.getenv("HF_TOKEN", None) + + +def download_flux(): + snapshot_download("black-forest-labs/FLUX.1-dev", + allow_patterns=["flux1-dev.safetensors", "ae.safetensors"], + local_dir="./weights/FLUX.1-dev", + local_dir_use_symlinks=False, + token=token) +# optional +def download_flux_krea(): + snapshot_download("black-forest-labs/FLUX.1-Krea-dev", + allow_patterns=["flux1-krea-dev.safetensors"], + local_dir="./weights/FLUX.1-Krea-dev", + local_dir_use_symlinks=False, + token=token) + +def download_uso(): + snapshot_download("bytedance-research/USO", + local_dir="./weights/USO", + local_dir_use_symlinks=False) + +def download_t5(): + for f in ["config.json", "tokenizer_config.json", "special_tokens_map.json", + "spiece.model", "pytorch_model.bin"]: + hf_hub_download("google/t5-v1_1-xxl", f, local_dir="./weights/t5-xxl", + local_dir_use_symlinks=False) + +def download_clip(): + for f in ["config.json", "merges.txt", "vocab.json", + "tokenizer_config.json", "special_tokens_map.json", + "pytorch_model.bin"]: + hf_hub_download("openai/clip-vit-large-patch14", f, + local_dir="./weights/clip-vit-l14", + local_dir_use_symlinks=False) + +def download_siglip(): + snapshot_download("google/siglip-so400m-patch14-384", + local_dir="./weights/siglip", + local_dir_use_symlinks=False) + +if __name__ == "__main__": + download_uso() + download_flux() + # download_flux_krea() + download_t5() + download_clip() + download_siglip() diff --git a/workflow/17-17-29.webp.webp b/workflow/17-17-29.webp.webp new file mode 100644 index 0000000000000000000000000000000000000000..a99542a3b7c7cd2aa020666ca73dccd59403be6c --- /dev/null +++ b/workflow/17-17-29.webp.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e998bcf1d9bfc8489ba37ec120ee36dc4f84dec9391fdbcee9e4660e5f7c550 +size 179360 diff --git a/workflow/1746100678361067_style0.webp.webp b/workflow/1746100678361067_style0.webp.webp new file mode 100644 index 0000000000000000000000000000000000000000..2ba23c19ff9a26d2676f8a2006feb7c9d13fca1b Binary files /dev/null and b/workflow/1746100678361067_style0.webp.webp differ diff --git a/workflow/example1.json b/workflow/example1.json new file mode 100644 index 0000000000000000000000000000000000000000..cf9ee8793eb3ec4436f451549344b4a8013d1111 --- /dev/null +++ b/workflow/example1.json @@ -0,0 +1,3525 @@ +{ + "id": "4be48bc0-f21c-45cb-b657-bb75df5d398c", + "revision": 0, + "last_node_id": 109, + "last_link_id": 172, + "nodes": [ + { + "id": 30, + "type": "CheckpointLoaderSimple", + "pos": [ + -700, + 100 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 62 + ] + }, + { + "name": "CLIP", + 
"type": "CLIP", + "slot_index": 1, + "links": [ + 45 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 46, + 78 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 72, + "type": "CheckpointLoaderSimple", + "pos": [ + -690, + 1580 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 124 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 146 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 126 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 79, + "type": "ConditioningZeroOut", + "pos": [ + -330, + 1790 + ], + "size": [ + 197.712890625, + 26 + ], + "flags": {}, + "order": 27, + "mode": 4, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 131 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 129 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 89, + "type": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "pos": [ + -630, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 26, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 142 + }, + { + "name": "image", + "type": "IMAGE", + "link": 143 + }, + { + "name": "model", + "type": "MODEL", + "link": 144 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 145 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 137 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 91, + "type": "LoadImage", + "pos": [ + -290, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 2, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 136 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "ref.webp", + "image" + ] + }, + { + "id": 46, + "type": "LoadImage", + "pos": [ + 10, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 3, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 98 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00005_.png", + 
"image" + ] + }, + { + "id": 40, + "type": "CLIPVisionLoader", + "pos": [ + -700, + 494 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 91, + 97 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 57, + "type": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "pos": [ + -200, + 330 + ], + "size": [ + 280, + 78 + ], + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 102 + }, + { + "name": "latent", + "type": "LATENT", + "link": 103 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 104 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + 3.5 + ] + }, + { + "id": 48, + "type": "ConditioningZeroOut", + "pos": [ + -100, + 450 + ], + "size": [ + 280, + 26 + ], + "flags": { + "collapsed": true + }, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 72 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 73 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 51, + "type": "VAEEncode", + "pos": [ + -340, + 340 + ], + "size": [ + 280, + 46 + ], + "flags": { + "collapsed": true + }, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 171 + }, + { + "name": "vae", + "type": "VAE", + "link": 78 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 103 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEEncode" + }, + "widgets_values": [] + }, + { + "id": 77, + "type": "VAEDecode", + "pos": [ + 500, + 1560 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 37, + "mode": 4, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 125 + }, + { + "name": "vae", + "type": "VAE", + "link": 126 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 123 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 78, + "type": "KSampler", + "pos": [ + 140, + 1550 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 35, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 151 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 149 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 129 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 130 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 125 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 581566795193612, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 87, + "type": 
"EmptyLatentImage", + "pos": [ + -320, + 1930 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 5, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 130 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 73, + "type": "SaveImage", + "pos": [ + 500, + 1670 + ], + "size": [ + 985.2999877929688, + 1060.3800048828125 + ], + "flags": {}, + "order": 39, + "mode": 4, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 123 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 90, + "type": "CLIPTextEncode", + "pos": [ + -330, + 1580 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 22, + "mode": 4, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 146 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 131, + 149 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A man dressed fashionably stands on the forest." + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 83, + "type": "08624421-a41c-413b-9de7-d68b0b60b667", + "pos": [ + -290, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 30, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 135 + }, + { + "name": "image", + "type": "IMAGE", + "link": 136 + }, + { + "name": "model", + "type": "MODEL", + "link": 137 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 138 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 158 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 92, + "type": "EasyCache", + "pos": [ + 160, + 2120 + ], + "size": [ + 270, + 130 + ], + "flags": {}, + "order": 33, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 158 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 151 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 88, + "type": "LoadImage", + "pos": [ + -630, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 6, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 143 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "ComfyUI_01035_.png", + "image" + ] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 490, + 70 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 52 + }, + { + "name": "vae", + "type": "VAE", + "link": 46 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 9 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + 
}, + "widgets_values": [] + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 580, + 180 + ], + "size": [ + 950, + 1010 + ], + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 97, + "type": "MarkdownNote", + "pos": [ + -700, + 1240 + ], + "size": [ + 320, + 130 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About Scale Image to Total Pixels node", + "properties": {}, + "widgets_values": [ + "The \"Scale Image to Total Pixels\" node is used to prevent you from uploading an image that is too large. Sometimes, large-sized input can lead to poor results.\n\nIt will also upscale small size input image, so you can use **Ctrl-B** to bypass it if you don't need to." + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 56, + "type": "74d27e51-9780-451f-9dde-8bf303d00011", + "pos": [ + 10, + 670 + ], + "size": [ + 270, + 118 + ], + "flags": {}, + "order": 29, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 97 + }, + { + "name": "image", + "type": "IMAGE", + "link": 98 + }, + { + "name": "model", + "type": "MODEL", + "link": 96 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 100 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 159 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 43, + "type": "LoraLoaderModelOnly", + "pos": [ + -700, + 250 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 62 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 99 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 39, + "type": "ModelPatchLoader", + "pos": [ + -700, + 384 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 94, + 100 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 76, + "type": "LoraLoaderModelOnly", + "pos": [ + -690, + 1730 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 21, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 124 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 144 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": 
"https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 75, + "type": "ModelPatchLoader", + "pos": [ + -690, + 1870 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 9, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 138, + 145 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 74, + "type": "CLIPVisionLoader", + "pos": [ + -690, + 1980 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 10, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 135, + 142 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 95, + "type": "EasyCache", + "pos": [ + 150, + -110 + ], + "size": [ + 310, + 130 + ], + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 159 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 160 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 96, + "type": "MarkdownNote", + "pos": [ + 480, + -120 + ], + "size": [ + 390, + 140 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About EasyCache", + "properties": {}, + "widgets_values": [ + "The EasyCache node will maintain a `cumulative_change_rate`. When this value is lower than the `reuse_threshold`, it skips the current step and uses the cached result.\n\nThis node can reduce inference time, but it also sacrifices some quality and details. You can bypass it (Ctrl+B) if you don't need it.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 100, + "type": "MarkdownNote", + "pos": [ + 300, + 980 + ], + "size": [ + 230, + 170 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About Style reference", + "properties": {}, + "widgets_values": [ + "The output image will reference the style from the images you uploaded. 
\n\nYou can disable all reference image inputs and use this workflow as a subject-driven image generation workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 102, + "type": "MarkdownNote", + "pos": [ + -1260, + 60 + ], + "size": [ + 530, + 510 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Model links", + "properties": {}, + "widgets_values": [ + "[tutorial](http://docs.comfy.org/tutorials/flux/flux-1-uso) | [教程](http://docs.comfy.org/zh-CN/tutorials/flux/flux-1-uso)\n\n**checkpoints**\n\n- [flux1-dev-fp8.safetensors](https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors)\n\n\n\n**loras**\n\n- [uso-flux1-dit-lora-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors)\n\n**model_patches**\n\n- [uso-flux1-projector-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors)\n\n**clip_visions**\n- [sigclip_vision_patch14_384.safetensors](https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors)\n\nModel Location\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│ ├── 📂 checkpoints/\n│ │ └── flux1-dev-fp8.safetensors\n│ ├── 📂 loras/\n│ │ └── uso-flux1-dit-lora-v1.safetensors\n│ ├── 📂 model_patches/\n│ │ └── uso-flux1-projector-v1.safetensors\n│ ├── 📂 clip_visions/\n│ │ └── sigclip_vision_patch14_384.safetensors\n```\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 101, + "type": "MarkdownNote", + "pos": [ + -1000, + 1540 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "This workflow only uses style reference." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 107, + "type": "MarkdownNote", + "pos": [ + 160, + 2320 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "You can also bypass the whole Style Reference group and use this workflow as a text-to-image workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 55, + "type": "581ab310-5783-4e50-b220-4d94035eb469", + "pos": [ + -330, + 680 + ], + "size": [ + 270, + 120 + ], + "flags": {}, + "order": 24, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 91 + }, + { + "name": "image", + "type": "IMAGE", + "link": 92 + }, + { + "name": "model", + "type": "MODEL", + "link": 99 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 94 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 96 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 53, + "type": "LoadImage", + "pos": [ + -330, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 16, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 92 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00144_.png", + "image" + ] + }, + { + "id": 47, + "type": "LoadImage", + "pos": [ + -680, + 690 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 170 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "input.png", + "image" + ] + }, + { + "id": 108, + "type": "ImageScaleToMaxDimension", + "pos": [ + -686.0413818359375, + 1075.162109375 + ], + "size": [ + 281.2027282714844, + 82 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 170 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 171 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ImageScaleToMaxDimension" + }, + "widgets_values": [ + "area", + 512 + ] + }, + { + "id": 109, + "type": "EmptyLatentImage", + "pos": [ + -190.36032104492188, + 489.6009826660156 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 172 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -350, + 100 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 45 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 72, + 102 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", 
+ "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A European girl with a heartfelt smile and holds a sign writes \"USO x ComfyUI\". She is immersed in a vast, endless field of blooming flowers under a perfect summer sky." + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 31, + "type": "KSampler", + "pos": [ + 150, + 70 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 160 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 104 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 73 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 172 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 52 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 1061776533054168, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + } + ], + "links": [ + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 45, + 30, + 1, + 6, + 0, + "CLIP" + ], + [ + 46, + 30, + 2, + 8, + 1, + "VAE" + ], + [ + 52, + 31, + 0, + 8, + 0, + "LATENT" + ], + [ + 62, + 30, + 0, + 43, + 0, + "MODEL" + ], + [ + 72, + 6, + 0, + 48, + 0, + "CONDITIONING" + ], + [ + 73, + 48, + 0, + 31, + 2, + "CONDITIONING" + ], + [ + 78, + 30, + 2, + 51, + 1, + "VAE" + ], + [ + 91, + 40, + 0, + 55, + 0, + "CLIP_VISION" + ], + [ + 92, + 53, + 0, + 55, + 1, + "IMAGE" + ], + [ + 94, + 39, + 0, + 55, + 3, + "MODEL_PATCH" + ], + [ + 96, + 55, + 0, + 56, + 2, + "MODEL" + ], + [ + 97, + 40, + 0, + 56, + 0, + "CLIP_VISION" + ], + [ + 98, + 46, + 0, + 56, + 1, + "IMAGE" + ], + [ + 99, + 43, + 0, + 55, + 2, + "MODEL" + ], + [ + 100, + 39, + 0, + 56, + 3, + "MODEL_PATCH" + ], + [ + 102, + 6, + 0, + 57, + 0, + "CONDITIONING" + ], + [ + 103, + 51, + 0, + 57, + 1, + "LATENT" + ], + [ + 104, + 57, + 0, + 31, + 1, + "CONDITIONING" + ], + [ + 123, + 77, + 0, + 73, + 0, + "IMAGE" + ], + [ + 124, + 72, + 0, + 76, + 0, + "MODEL" + ], + [ + 125, + 78, + 0, + 77, + 0, + "LATENT" + ], + [ + 126, + 72, + 2, + 77, + 1, + "VAE" + ], + [ + 129, + 79, + 0, + 78, + 2, + "CONDITIONING" + ], + [ + 130, + 87, + 0, + 78, + 3, + "LATENT" + ], + [ + 131, + 90, + 0, + 79, + 0, + "CONDITIONING" + ], + [ + 135, + 74, + 0, + 83, + 0, + "CLIP_VISION" + ], + [ + 136, + 91, + 0, + 83, + 1, + "IMAGE" + ], + [ + 137, + 89, + 0, + 83, + 2, + "MODEL" + ], + [ + 138, + 75, + 0, + 83, + 3, + "MODEL_PATCH" + ], + [ + 142, + 74, + 0, + 89, + 0, + "CLIP_VISION" + ], + [ + 143, + 88, + 0, + 89, + 1, + "IMAGE" + ], + [ + 144, + 76, + 0, + 89, + 2, + "MODEL" + ], + [ + 145, + 75, + 0, + 89, + 3, + "MODEL_PATCH" + ], + [ + 146, + 72, + 1, + 90, + 0, + "CLIP" + ], + [ + 149, + 90, + 0, + 78, + 1, + "CONDITIONING" + ], + [ + 151, + 92, + 0, + 78, + 0, + "MODEL" + ], + [ + 158, + 83, + 0, + 92, + 0, + "MODEL" + ], + [ + 159, + 56, + 0, + 95, + 0, + "MODEL" + ], + [ + 160, + 95, + 0, + 31, + 0, + "MODEL" + ], + [ + 170, + 47, + 0, + 108, + 0, + "IMAGE" + ], + [ + 171, + 108, + 0, + 51, + 0, + "IMAGE" + ], + [ + 172, + 109, + 0, + 31, + 3, + "LATENT" + ] + ], + "groups": [ + { + "id": 1, + "title": "Step 3 - Style Reference", + "bounding": [ + -350, + 590, + 890, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 2, + "title": "Step 2 - Subject/Identity Image", + "bounding": [ + -710, + 590, + 340, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 3, 
+ "title": "Step 1 - Load Models", + "bounding": [ + -710, + 30, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 4, + "title": "Step3 - Style Reference", + "bounding": [ + -700, + 2060, + 790, + 650 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 6, + "title": "Step 1 - Load Models", + "bounding": [ + -700, + 1510, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 7, + "title": "Step 4 - Image Size", + "bounding": [ + -340, + 1850, + 300, + 200 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 8, + "title": "Step 3 - Prompt", + "bounding": [ + -340, + 1510, + 442.8500061035156, + 309.6000061035156 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 9, + "title": "Step 4 - Prompt", + "bounding": [ + -360, + 30, + 442.8500061035156, + 247.91000366210938 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "definitions": { + "subgraphs": [ + { + "id": "581ab310-5783-4e50-b220-4d94035eb469", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, 
+ "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "74d27e51-9780-451f-9dde-8bf303d00011", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + 
"widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 56, + "lastLinkId": 102, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USO Reference Conditioning", + "inputNode": { + "id": -10, + "bounding": [ + -20, + 283, + 120, + 100 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 560, + 293, + 128.6640625, + 60 + ] + }, + "inputs": [ + { + "id": "d78373ce-7cdc-4e0f-a743-4d024e766376", + "name": "conditioning", + "type": "CONDITIONING", + "linkIds": [ + 66 + ], + "localized_name": "conditioning", + "pos": { + "0": 80, + "1": 303 + } + }, + { + "id": "1eebe27a-c790-4a85-8aa2-3b9f4eeddd00", + "name": "latent", + "type": "LATENT", + "linkIds": [ + 79 + ], + "localized_name": "latent", + "shape": 7, + "pos": { + "0": 80, + "1": 323 + } + }, + { + "id": "09894330-3dcb-4fba-87a5-33c3fb9db843", + "name": "guidance", + "type": "FLOAT", + "linkIds": [ + 102 + ], + "pos": { + "0": 80, + "1": 343 + } + } + ], + "outputs": [ + { + "id": "194badf0-ae11-47cd-a825-d7edd7ca6cc4", + "name": "CONDITIONING", + "type": "CONDITIONING", + "linkIds": [ + 57 + ], + "localized_name": "CONDITIONING", + "pos": { + "0": 580, + "1": 313 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 41, + "type": "FluxKontextMultiReferenceLatentMethod", + "pos": [ + 170, + 320 + ], + "size": [ + 330, + 60 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 64 + } + 
], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 67 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxKontextMultiReferenceLatentMethod" + }, + "widgets_values": [ + "uso" + ] + }, + { + "id": 44, + "type": "ReferenceLatent", + "pos": [ + 190, + 430 + ], + "size": [ + 197.712890625, + 46 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 66 + }, + { + "localized_name": "latent", + "name": "latent", + "shape": 7, + "type": "LATENT", + "link": 79 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 64 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ReferenceLatent" + }, + "widgets_values": [] + }, + { + "id": 35, + "type": "FluxGuidance", + "pos": [ + 160, + 200 + ], + "size": [ + 211.60000610351562, + 58 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "localized_name": "条件", + "name": "conditioning", + "type": "CONDITIONING", + "link": 67 + }, + { + "localized_name": "引导", + "name": "guidance", + "type": "FLOAT", + "widget": { + "name": "guidance" + }, + "link": 102 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 57 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxGuidance" + }, + "widgets_values": [ + 3.5 + ] + } + ], + "groups": [], + "links": [ + { + "id": 67, + "origin_id": 41, + "origin_slot": 0, + "target_id": 35, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 64, + "origin_id": 44, + "origin_slot": 0, + "target_id": 41, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 66, + "origin_id": -10, + "origin_slot": 0, + "target_id": 44, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 79, + "origin_id": -10, + "origin_slot": 1, + "target_id": 44, + "target_slot": 1, + "type": "LATENT" + }, + { + "id": 57, + "origin_id": 35, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 102, + "origin_id": -10, + "origin_slot": 2, + "target_id": 35, + "target_slot": 1, + "type": "FLOAT" + } + ], + "extra": {} + }, + { + "id": "08624421-a41c-413b-9de7-d68b0b60b667", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 
89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": 
"4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + } + ] + }, + "config": {}, + "extra": { + "ds": { + "scale": 0.4624109172678218, + "offset": [ + 1412.2342806679262, + 151.63453357426636 + ] + }, + 
"frontendVersion": "1.25.11" + }, + "version": 0.4 +} \ No newline at end of file diff --git a/workflow/example1.png b/workflow/example1.png new file mode 100644 index 0000000000000000000000000000000000000000..d9eae62eb1d5db8347d42c12f38bb787d61fc8f8 --- /dev/null +++ b/workflow/example1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff852bd7d81f9e7f0a049d1c3748be9848c2879cbd3382ba2d215b6852e4afe4 +size 1422220 diff --git a/workflow/example2.json b/workflow/example2.json new file mode 100644 index 0000000000000000000000000000000000000000..b477cd5beb0cd20fa2b23ee757f61aed19ef0606 --- /dev/null +++ b/workflow/example2.json @@ -0,0 +1,3525 @@ +{ + "id": "4be48bc0-f21c-45cb-b657-bb75df5d398c", + "revision": 0, + "last_node_id": 109, + "last_link_id": 172, + "nodes": [ + { + "id": 30, + "type": "CheckpointLoaderSimple", + "pos": [ + -700, + 100 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 62 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 45 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 46, + 78 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 72, + "type": "CheckpointLoaderSimple", + "pos": [ + -690, + 1580 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 124 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 146 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 126 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 79, + "type": "ConditioningZeroOut", + "pos": [ + -330, + 1790 + ], + "size": [ + 197.712890625, + 26 + ], + "flags": {}, + "order": 27, + "mode": 4, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 131 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 129 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 89, + "type": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "pos": [ + -630, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 26, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 142 + }, + { + "name": "image", + "type": "IMAGE", + "link": 143 + }, + { + "name": "model", + "type": "MODEL", + "link": 144 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 145 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 137 + ] + } + ], + "properties": { + "cnr_id": 
"comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 91, + "type": "LoadImage", + "pos": [ + -290, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 2, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 136 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "ref.webp", + "image" + ] + }, + { + "id": 46, + "type": "LoadImage", + "pos": [ + 10, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 3, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 98 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00005_.png", + "image" + ] + }, + { + "id": 40, + "type": "CLIPVisionLoader", + "pos": [ + -700, + 494 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 91, + 97 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 57, + "type": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "pos": [ + -200, + 330 + ], + "size": [ + 280, + 78 + ], + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 102 + }, + { + "name": "latent", + "type": "LATENT", + "link": 103 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 104 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + 3.5 + ] + }, + { + "id": 48, + "type": "ConditioningZeroOut", + "pos": [ + -100, + 450 + ], + "size": [ + 280, + 26 + ], + "flags": { + "collapsed": true + }, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 72 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 73 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 51, + "type": "VAEEncode", + "pos": [ + -340, + 340 + ], + "size": [ + 280, + 46 + ], + "flags": { + "collapsed": true + }, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 171 + }, + { + "name": "vae", + "type": "VAE", + "link": 78 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 103 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEEncode" + }, + "widgets_values": [] + }, + { + "id": 77, + "type": "VAEDecode", + "pos": [ + 500, + 1560 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 37, + "mode": 4, + "inputs": [ + { + "name": "samples", + "type": 
"LATENT", + "link": 125 + }, + { + "name": "vae", + "type": "VAE", + "link": 126 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 123 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 78, + "type": "KSampler", + "pos": [ + 140, + 1550 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 35, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 151 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 149 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 129 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 130 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 125 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 432877942827613, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 87, + "type": "EmptyLatentImage", + "pos": [ + -320, + 1930 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 5, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 130 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 73, + "type": "SaveImage", + "pos": [ + 500, + 1670 + ], + "size": [ + 985.2999877929688, + 1060.3800048828125 + ], + "flags": {}, + "order": 39, + "mode": 4, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 123 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 90, + "type": "CLIPTextEncode", + "pos": [ + -330, + 1580 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 22, + "mode": 4, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 146 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 131, + 149 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A man dressed fashionably stands on the forest." 
+ ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 83, + "type": "08624421-a41c-413b-9de7-d68b0b60b667", + "pos": [ + -290, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 30, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 135 + }, + { + "name": "image", + "type": "IMAGE", + "link": 136 + }, + { + "name": "model", + "type": "MODEL", + "link": 137 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 138 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 158 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 92, + "type": "EasyCache", + "pos": [ + 160, + 2120 + ], + "size": [ + 270, + 130 + ], + "flags": {}, + "order": 33, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 158 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 151 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 88, + "type": "LoadImage", + "pos": [ + -630, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 6, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 143 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "ComfyUI_01035_.png", + "image" + ] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 490, + 70 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 52 + }, + { + "name": "vae", + "type": "VAE", + "link": 46 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 9 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 580, + 180 + ], + "size": [ + 950, + 1010 + ], + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 97, + "type": "MarkdownNote", + "pos": [ + -700, + 1240 + ], + "size": [ + 320, + 130 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About Scale Image to Total Pixels node", + "properties": {}, + "widgets_values": [ + "The \"Scale Image to Total Pixels\" node is used to prevent you from uploading an image that is too large. Sometimes, large-sized input can lead to poor results.\n\nIt will also upscale small size input image, so you can use **Ctrl-B** to bypass it if you don't need to." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 56, + "type": "74d27e51-9780-451f-9dde-8bf303d00011", + "pos": [ + 10, + 670 + ], + "size": [ + 270, + 118 + ], + "flags": {}, + "order": 29, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 97 + }, + { + "name": "image", + "type": "IMAGE", + "link": 98 + }, + { + "name": "model", + "type": "MODEL", + "link": 96 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 100 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 159 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 43, + "type": "LoraLoaderModelOnly", + "pos": [ + -700, + 250 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 62 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 99 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 39, + "type": "ModelPatchLoader", + "pos": [ + -700, + 384 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 94, + 100 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 76, + "type": "LoraLoaderModelOnly", + "pos": [ + -690, + 1730 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 21, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 124 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 144 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 75, + "type": "ModelPatchLoader", + "pos": [ + -690, + 1870 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 9, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 138, + 145 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { 
+ "id": 74, + "type": "CLIPVisionLoader", + "pos": [ + -690, + 1980 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 10, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 135, + 142 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 95, + "type": "EasyCache", + "pos": [ + 150, + -110 + ], + "size": [ + 310, + 130 + ], + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 159 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 160 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 96, + "type": "MarkdownNote", + "pos": [ + 480, + -120 + ], + "size": [ + 390, + 140 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About EasyCache", + "properties": {}, + "widgets_values": [ + "The EasyCache node will maintain a `cumulative_change_rate`. When this value is lower than the `reuse_threshold`, it skips the current step and uses the cached result.\n\nThis node can reduce inference time, but it also sacrifices some quality and details. You can bypass it (Ctrl+B) if you don't need it.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 100, + "type": "MarkdownNote", + "pos": [ + 300, + 980 + ], + "size": [ + 230, + 170 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About Style reference", + "properties": {}, + "widgets_values": [ + "The output image will reference the style from the images you uploaded. 
\n\nYou can disable all reference image inputs and use this workflow as a subject-driven image generation workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 102, + "type": "MarkdownNote", + "pos": [ + -1260, + 60 + ], + "size": [ + 530, + 510 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Model links", + "properties": {}, + "widgets_values": [ + "[tutorial](http://docs.comfy.org/tutorials/flux/flux-1-uso) | [教程](http://docs.comfy.org/zh-CN/tutorials/flux/flux-1-uso)\n\n**checkpoints**\n\n- [flux1-dev-fp8.safetensors](https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors)\n\n\n\n**loras**\n\n- [uso-flux1-dit-lora-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors)\n\n**model_patches**\n\n- [uso-flux1-projector-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors)\n\n**clip_visions**\n- [sigclip_vision_patch14_384.safetensors](https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors)\n\nModel Location\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│ ├── 📂 checkpoints/\n│ │ └── flux1-dev-fp8.safetensors\n│ ├── 📂 loras/\n│ │ └── uso-flux1-dit-lora-v1.safetensors\n│ ├── 📂 model_patches/\n│ │ └── uso-flux1-projector-v1.safetensors\n│ ├── 📂 clip_visions/\n│ │ └── sigclip_vision_patch14_384.safetensors\n```\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 101, + "type": "MarkdownNote", + "pos": [ + -1000, + 1540 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "This workflow only uses style reference." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 107, + "type": "MarkdownNote", + "pos": [ + 160, + 2320 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "You can also bypass the whole Style Reference group and use this workflow as a text-to-image workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 47, + "type": "LoadImage", + "pos": [ + -680, + 690 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 170 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "input.png", + "image" + ] + }, + { + "id": 108, + "type": "ImageScaleToMaxDimension", + "pos": [ + -686.0413818359375, + 1075.162109375 + ], + "size": [ + 281.2027282714844, + 82 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 170 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 171 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ImageScaleToMaxDimension" + }, + "widgets_values": [ + "area", + 512 + ] + }, + { + "id": 109, + "type": "EmptyLatentImage", + "pos": [ + -190.36032104492188, + 489.6009826660156 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 172 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -350, + 100 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 45 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 72, + 102 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A European girl with a heartfelt smile and holds a sign writes \"USO x ComfyUI\". She is immersed in a vast, endless field of blooming flowers under a perfect summer sky." 
+ ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 55, + "type": "581ab310-5783-4e50-b220-4d94035eb469", + "pos": [ + -330, + 680 + ], + "size": [ + 270, + 120 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 91 + }, + { + "name": "image", + "type": "IMAGE", + "link": 92 + }, + { + "name": "model", + "type": "MODEL", + "link": 99 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 94 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 96 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 31, + "type": "KSampler", + "pos": [ + 150, + 70 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 160 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 104 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 73 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 172 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 52 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 311404125744479, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 53, + "type": "LoadImage", + "pos": [ + -330, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 92 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "1746100678361067_style0.webp.webp", + "image" + ] + } + ], + "links": [ + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 45, + 30, + 1, + 6, + 0, + "CLIP" + ], + [ + 46, + 30, + 2, + 8, + 1, + "VAE" + ], + [ + 52, + 31, + 0, + 8, + 0, + "LATENT" + ], + [ + 62, + 30, + 0, + 43, + 0, + "MODEL" + ], + [ + 72, + 6, + 0, + 48, + 0, + "CONDITIONING" + ], + [ + 73, + 48, + 0, + 31, + 2, + "CONDITIONING" + ], + [ + 78, + 30, + 2, + 51, + 1, + "VAE" + ], + [ + 91, + 40, + 0, + 55, + 0, + "CLIP_VISION" + ], + [ + 92, + 53, + 0, + 55, + 1, + "IMAGE" + ], + [ + 94, + 39, + 0, + 55, + 3, + "MODEL_PATCH" + ], + [ + 96, + 55, + 0, + 56, + 2, + "MODEL" + ], + [ + 97, + 40, + 0, + 56, + 0, + "CLIP_VISION" + ], + [ + 98, + 46, + 0, + 56, + 1, + "IMAGE" + ], + [ + 99, + 43, + 0, + 55, + 2, + "MODEL" + ], + [ + 100, + 39, + 0, + 56, + 3, + "MODEL_PATCH" + ], + [ + 102, + 6, + 0, + 57, + 0, + "CONDITIONING" + ], + [ + 103, + 51, + 0, + 57, + 1, + "LATENT" + ], + [ + 104, + 57, + 0, + 31, + 1, + "CONDITIONING" + ], + [ + 123, + 77, + 0, + 73, + 0, + "IMAGE" + ], + [ + 124, + 72, + 0, + 76, + 0, + "MODEL" + ], + [ + 125, + 78, + 0, + 77, + 0, + "LATENT" + ], + [ + 126, + 72, + 2, + 77, + 1, + "VAE" + ], + [ + 129, + 79, + 0, + 78, + 2, + "CONDITIONING" + ], + [ + 130, + 87, + 0, + 78, + 3, + "LATENT" + ], + [ + 131, + 90, + 0, + 79, + 0, + "CONDITIONING" + ], + [ + 135, + 74, + 0, + 83, + 0, + "CLIP_VISION" + ], + [ + 136, + 91, + 0, + 83, + 1, + "IMAGE" + ], + [ + 137, + 89, + 0, + 83, + 2, + "MODEL" + ], + [ + 138, + 75, + 0, + 83, + 3, + "MODEL_PATCH" + ], + [ + 142, + 74, + 0, + 89, + 0, + "CLIP_VISION" + ], + [ + 143, + 88, + 0, + 89, + 1, + 
"IMAGE" + ], + [ + 144, + 76, + 0, + 89, + 2, + "MODEL" + ], + [ + 145, + 75, + 0, + 89, + 3, + "MODEL_PATCH" + ], + [ + 146, + 72, + 1, + 90, + 0, + "CLIP" + ], + [ + 149, + 90, + 0, + 78, + 1, + "CONDITIONING" + ], + [ + 151, + 92, + 0, + 78, + 0, + "MODEL" + ], + [ + 158, + 83, + 0, + 92, + 0, + "MODEL" + ], + [ + 159, + 56, + 0, + 95, + 0, + "MODEL" + ], + [ + 160, + 95, + 0, + 31, + 0, + "MODEL" + ], + [ + 170, + 47, + 0, + 108, + 0, + "IMAGE" + ], + [ + 171, + 108, + 0, + 51, + 0, + "IMAGE" + ], + [ + 172, + 109, + 0, + 31, + 3, + "LATENT" + ] + ], + "groups": [ + { + "id": 1, + "title": "Step 3 - Style Reference", + "bounding": [ + -350, + 590, + 890, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 2, + "title": "Step 2 - Subject/Identity Image", + "bounding": [ + -710, + 590, + 340, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 3, + "title": "Step 1 - Load Models", + "bounding": [ + -710, + 30, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 4, + "title": "Step3 - Style Reference", + "bounding": [ + -700, + 2060, + 790, + 650 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 6, + "title": "Step 1 - Load Models", + "bounding": [ + -700, + 1510, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 7, + "title": "Step 4 - Image Size", + "bounding": [ + -340, + 1850, + 300, + 200 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 8, + "title": "Step 3 - Prompt", + "bounding": [ + -340, + 1510, + 442.8500061035156, + 309.6000061035156 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 9, + "title": "Step 4 - Prompt", + "bounding": [ + -360, + 30, + 442.8500061035156, + 247.91000366210938 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "definitions": { + "subgraphs": [ + { + "id": "581ab310-5783-4e50-b220-4d94035eb469", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + 
"widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "74d27e51-9780-451f-9dde-8bf303d00011", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + 
"linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 56, + "lastLinkId": 102, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USO Reference Conditioning", + "inputNode": { + "id": -10, + "bounding": [ + -20, + 283, + 120, + 100 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 560, + 293, + 128.6640625, + 60 + ] + }, + "inputs": [ + { + "id": "d78373ce-7cdc-4e0f-a743-4d024e766376", + "name": "conditioning", + "type": "CONDITIONING", + "linkIds": [ + 66 + ], + "localized_name": "conditioning", + "pos": { + "0": 80, + "1": 303 + 
} + }, + { + "id": "1eebe27a-c790-4a85-8aa2-3b9f4eeddd00", + "name": "latent", + "type": "LATENT", + "linkIds": [ + 79 + ], + "localized_name": "latent", + "shape": 7, + "pos": { + "0": 80, + "1": 323 + } + }, + { + "id": "09894330-3dcb-4fba-87a5-33c3fb9db843", + "name": "guidance", + "type": "FLOAT", + "linkIds": [ + 102 + ], + "pos": { + "0": 80, + "1": 343 + } + } + ], + "outputs": [ + { + "id": "194badf0-ae11-47cd-a825-d7edd7ca6cc4", + "name": "CONDITIONING", + "type": "CONDITIONING", + "linkIds": [ + 57 + ], + "localized_name": "CONDITIONING", + "pos": { + "0": 580, + "1": 313 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 41, + "type": "FluxKontextMultiReferenceLatentMethod", + "pos": [ + 170, + 320 + ], + "size": [ + 330, + 60 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 64 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 67 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxKontextMultiReferenceLatentMethod" + }, + "widgets_values": [ + "uso" + ] + }, + { + "id": 44, + "type": "ReferenceLatent", + "pos": [ + 190, + 430 + ], + "size": [ + 197.712890625, + 46 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 66 + }, + { + "localized_name": "latent", + "name": "latent", + "shape": 7, + "type": "LATENT", + "link": 79 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 64 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ReferenceLatent" + }, + "widgets_values": [] + }, + { + "id": 35, + "type": "FluxGuidance", + "pos": [ + 160, + 200 + ], + "size": [ + 211.60000610351562, + 58 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "localized_name": "条件", + "name": "conditioning", + "type": "CONDITIONING", + "link": 67 + }, + { + "localized_name": "引导", + "name": "guidance", + "type": "FLOAT", + "widget": { + "name": "guidance" + }, + "link": 102 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 57 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxGuidance" + }, + "widgets_values": [ + 3.5 + ] + } + ], + "groups": [], + "links": [ + { + "id": 67, + "origin_id": 41, + "origin_slot": 0, + "target_id": 35, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 64, + "origin_id": 44, + "origin_slot": 0, + "target_id": 41, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 66, + "origin_id": -10, + "origin_slot": 0, + "target_id": 44, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 79, + "origin_id": -10, + "origin_slot": 1, + "target_id": 44, + "target_slot": 1, + "type": "LATENT" + }, + { + "id": 57, + "origin_id": 35, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 102, + "origin_id": -10, + "origin_slot": 2, + "target_id": 35, + "target_slot": 1, + "type": "FLOAT" + } + ], + "extra": {} + }, + { + "id": "08624421-a41c-413b-9de7-d68b0b60b667", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + 
"config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + 
"type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": 
"CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + } + ] + }, + "config": {}, + "extra": { + "ds": { + "scale": 0.685486150326799, + "offset": [ + 679.6562301803618, + -186.16630000311974 + ] + }, + "frontendVersion": "1.25.11" + }, + "version": 0.4 +} \ No newline at end of file diff --git a/workflow/example2.png b/workflow/example2.png new file mode 100644 index 0000000000000000000000000000000000000000..8c0ad923dc5a25af783814e12fcd37f257c21c02 --- /dev/null +++ b/workflow/example2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5260b6dafb652d9217683f7945f7bd7f62622b09b5292d582e49f5dcf9d9b1e0 +size 1238775 diff --git a/workflow/example3.json b/workflow/example3.json new file mode 100644 index 0000000000000000000000000000000000000000..ff0c98476638e174179598a809440ef3dad8fbcc --- /dev/null +++ b/workflow/example3.json @@ -0,0 +1,3525 @@ +{ + "id": "4be48bc0-f21c-45cb-b657-bb75df5d398c", + "revision": 0, + "last_node_id": 109, + "last_link_id": 172, + "nodes": [ + { + "id": 30, + "type": "CheckpointLoaderSimple", + "pos": [ + -700, + 100 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 62 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 45 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 46, + 78 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 72, + "type": "CheckpointLoaderSimple", + "pos": [ + -690, + 1580 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 124 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 146 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 126 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 79, + "type": "ConditioningZeroOut", + "pos": [ + -330, + 1790 + ], + "size": [ + 197.712890625, + 26 + ], + "flags": {}, + "order": 27, + "mode": 4, + "inputs": 
[ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 131 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 129 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 89, + "type": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "pos": [ + -630, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 26, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 142 + }, + { + "name": "image", + "type": "IMAGE", + "link": 143 + }, + { + "name": "model", + "type": "MODEL", + "link": 144 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 145 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 137 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 91, + "type": "LoadImage", + "pos": [ + -290, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 2, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 136 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "ref.webp", + "image" + ] + }, + { + "id": 46, + "type": "LoadImage", + "pos": [ + 10, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 3, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 98 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00005_.png", + "image" + ] + }, + { + "id": 40, + "type": "CLIPVisionLoader", + "pos": [ + -700, + 494 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 91, + 97 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 57, + "type": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "pos": [ + -200, + 330 + ], + "size": [ + 280, + 78 + ], + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 102 + }, + { + "name": "latent", + "type": "LATENT", + "link": 103 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 104 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + 3.5 + ] + }, + { + "id": 48, + "type": "ConditioningZeroOut", + "pos": [ + -100, + 450 + ], + "size": [ + 280, + 26 + ], + "flags": { + "collapsed": true + }, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 72 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + 
"type": "CONDITIONING", + "links": [ + 73 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 51, + "type": "VAEEncode", + "pos": [ + -340, + 340 + ], + "size": [ + 280, + 46 + ], + "flags": { + "collapsed": true + }, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 171 + }, + { + "name": "vae", + "type": "VAE", + "link": 78 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 103 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEEncode" + }, + "widgets_values": [] + }, + { + "id": 77, + "type": "VAEDecode", + "pos": [ + 500, + 1560 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 37, + "mode": 4, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 125 + }, + { + "name": "vae", + "type": "VAE", + "link": 126 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 123 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 78, + "type": "KSampler", + "pos": [ + 140, + 1550 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 35, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 151 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 149 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 129 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 130 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 125 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 1043289526508600, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 87, + "type": "EmptyLatentImage", + "pos": [ + -320, + 1930 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 5, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 130 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 73, + "type": "SaveImage", + "pos": [ + 500, + 1670 + ], + "size": [ + 985.2999877929688, + 1060.3800048828125 + ], + "flags": {}, + "order": 39, + "mode": 4, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 123 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 90, + "type": "CLIPTextEncode", + "pos": [ + -330, + 1580 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 22, + "mode": 4, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 146 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 131, + 149 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A man dressed fashionably stands on the forest." 
+ ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 83, + "type": "08624421-a41c-413b-9de7-d68b0b60b667", + "pos": [ + -290, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 30, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 135 + }, + { + "name": "image", + "type": "IMAGE", + "link": 136 + }, + { + "name": "model", + "type": "MODEL", + "link": 137 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 138 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 158 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 92, + "type": "EasyCache", + "pos": [ + 160, + 2120 + ], + "size": [ + 270, + 130 + ], + "flags": {}, + "order": 33, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 158 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 151 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 88, + "type": "LoadImage", + "pos": [ + -630, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 6, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 143 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "ComfyUI_01035_.png", + "image" + ] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 490, + 70 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 52 + }, + { + "name": "vae", + "type": "VAE", + "link": 46 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 9 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 97, + "type": "MarkdownNote", + "pos": [ + -700, + 1240 + ], + "size": [ + 320, + 130 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About Scale Image to Total Pixels node", + "properties": {}, + "widgets_values": [ + "The \"Scale Image to Total Pixels\" node is used to prevent you from uploading an image that is too large. Sometimes, large-sized input can lead to poor results.\n\nIt will also upscale small size input image, so you can use **Ctrl-B** to bypass it if you don't need to." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 56, + "type": "74d27e51-9780-451f-9dde-8bf303d00011", + "pos": [ + 10, + 670 + ], + "size": [ + 270, + 118 + ], + "flags": {}, + "order": 29, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 97 + }, + { + "name": "image", + "type": "IMAGE", + "link": 98 + }, + { + "name": "model", + "type": "MODEL", + "link": 96 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 100 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 159 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 43, + "type": "LoraLoaderModelOnly", + "pos": [ + -700, + 250 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 62 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 99 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 39, + "type": "ModelPatchLoader", + "pos": [ + -700, + 384 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 94, + 100 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 76, + "type": "LoraLoaderModelOnly", + "pos": [ + -690, + 1730 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 21, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 124 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 144 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 75, + "type": "ModelPatchLoader", + "pos": [ + -690, + 1870 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 9, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 138, + 145 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { 
+ "id": 74, + "type": "CLIPVisionLoader", + "pos": [ + -690, + 1980 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 10, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 135, + 142 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 95, + "type": "EasyCache", + "pos": [ + 150, + -110 + ], + "size": [ + 310, + 130 + ], + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 159 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 160 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 96, + "type": "MarkdownNote", + "pos": [ + 480, + -120 + ], + "size": [ + 390, + 140 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About EasyCache", + "properties": {}, + "widgets_values": [ + "The EasyCache node will maintain a `cumulative_change_rate`. When this value is lower than the `reuse_threshold`, it skips the current step and uses the cached result.\n\nThis node can reduce inference time, but it also sacrifices some quality and details. You can bypass it (Ctrl+B) if you don't need it.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 100, + "type": "MarkdownNote", + "pos": [ + 300, + 980 + ], + "size": [ + 230, + 170 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About Style reference", + "properties": {}, + "widgets_values": [ + "The output image will reference the style from the images you uploaded. 
\n\nYou can disable all reference image inputs and use this workflow as a subject-driven image generation workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 102, + "type": "MarkdownNote", + "pos": [ + -1260, + 60 + ], + "size": [ + 530, + 510 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Model links", + "properties": {}, + "widgets_values": [ + "[tutorial](http://docs.comfy.org/tutorials/flux/flux-1-uso) | [教程](http://docs.comfy.org/zh-CN/tutorials/flux/flux-1-uso)\n\n**checkpoints**\n\n- [flux1-dev-fp8.safetensors](https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors)\n\n\n\n**loras**\n\n- [uso-flux1-dit-lora-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors)\n\n**model_patches**\n\n- [uso-flux1-projector-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors)\n\n**clip_visions**\n- [sigclip_vision_patch14_384.safetensors](https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors)\n\nModel Location\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│ ├── 📂 checkpoints/\n│ │ └── flux1-dev-fp8.safetensors\n│ ├── 📂 loras/\n│ │ └── uso-flux1-dit-lora-v1.safetensors\n│ ├── 📂 model_patches/\n│ │ └── uso-flux1-projector-v1.safetensors\n│ ├── 📂 clip_visions/\n│ │ └── sigclip_vision_patch14_384.safetensors\n```\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 101, + "type": "MarkdownNote", + "pos": [ + -1000, + 1540 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "This workflow only uses style reference." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 107, + "type": "MarkdownNote", + "pos": [ + 160, + 2320 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "You can also bypass the whole Style Reference group and use this workflow as a text-to-image workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 55, + "type": "581ab310-5783-4e50-b220-4d94035eb469", + "pos": [ + -330, + 680 + ], + "size": [ + 270, + 120 + ], + "flags": {}, + "order": 24, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 91 + }, + { + "name": "image", + "type": "IMAGE", + "link": 92 + }, + { + "name": "model", + "type": "MODEL", + "link": 99 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 94 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 96 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 53, + "type": "LoadImage", + "pos": [ + -330, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 16, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 92 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00144_.png", + "image" + ] + }, + { + "id": 47, + "type": "LoadImage", + "pos": [ + -680, + 690 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 170 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "input.png", + "image" + ] + }, + { + "id": 108, + "type": "ImageScaleToMaxDimension", + "pos": [ + -686.0413818359375, + 1075.162109375 + ], + "size": [ + 281.2027282714844, + 82 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 170 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 171 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ImageScaleToMaxDimension" + }, + "widgets_values": [ + "area", + 512 + ] + }, + { + "id": 109, + "type": "EmptyLatentImage", + "pos": [ + -190.36032104492188, + 489.6009826660156 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 172 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 580, + 180 + ], + "size": [ + 950, + 1010 + ], + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 31, + "type": "KSampler", + "pos": [ + 150, + 70 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 34, + "mode": 0, + 
"inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 160 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 104 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 73 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 172 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 52 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 180477256854341, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -350, + 100 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 45 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 72, + 102 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "Retro comic style, A woman rides a motorcycle and holds a sign that says \"USO x ComfyUI\" in a cyberpunk city. The word \"UXO\" is engraved on the front of the motorcycle." + ], + "color": "#232", + "bgcolor": "#353" + } + ], + "links": [ + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 45, + 30, + 1, + 6, + 0, + "CLIP" + ], + [ + 46, + 30, + 2, + 8, + 1, + "VAE" + ], + [ + 52, + 31, + 0, + 8, + 0, + "LATENT" + ], + [ + 62, + 30, + 0, + 43, + 0, + "MODEL" + ], + [ + 72, + 6, + 0, + 48, + 0, + "CONDITIONING" + ], + [ + 73, + 48, + 0, + 31, + 2, + "CONDITIONING" + ], + [ + 78, + 30, + 2, + 51, + 1, + "VAE" + ], + [ + 91, + 40, + 0, + 55, + 0, + "CLIP_VISION" + ], + [ + 92, + 53, + 0, + 55, + 1, + "IMAGE" + ], + [ + 94, + 39, + 0, + 55, + 3, + "MODEL_PATCH" + ], + [ + 96, + 55, + 0, + 56, + 2, + "MODEL" + ], + [ + 97, + 40, + 0, + 56, + 0, + "CLIP_VISION" + ], + [ + 98, + 46, + 0, + 56, + 1, + "IMAGE" + ], + [ + 99, + 43, + 0, + 55, + 2, + "MODEL" + ], + [ + 100, + 39, + 0, + 56, + 3, + "MODEL_PATCH" + ], + [ + 102, + 6, + 0, + 57, + 0, + "CONDITIONING" + ], + [ + 103, + 51, + 0, + 57, + 1, + "LATENT" + ], + [ + 104, + 57, + 0, + 31, + 1, + "CONDITIONING" + ], + [ + 123, + 77, + 0, + 73, + 0, + "IMAGE" + ], + [ + 124, + 72, + 0, + 76, + 0, + "MODEL" + ], + [ + 125, + 78, + 0, + 77, + 0, + "LATENT" + ], + [ + 126, + 72, + 2, + 77, + 1, + "VAE" + ], + [ + 129, + 79, + 0, + 78, + 2, + "CONDITIONING" + ], + [ + 130, + 87, + 0, + 78, + 3, + "LATENT" + ], + [ + 131, + 90, + 0, + 79, + 0, + "CONDITIONING" + ], + [ + 135, + 74, + 0, + 83, + 0, + "CLIP_VISION" + ], + [ + 136, + 91, + 0, + 83, + 1, + "IMAGE" + ], + [ + 137, + 89, + 0, + 83, + 2, + "MODEL" + ], + [ + 138, + 75, + 0, + 83, + 3, + "MODEL_PATCH" + ], + [ + 142, + 74, + 0, + 89, + 0, + "CLIP_VISION" + ], + [ + 143, + 88, + 0, + 89, + 1, + "IMAGE" + ], + [ + 144, + 76, + 0, + 89, + 2, + "MODEL" + ], + [ + 145, + 75, + 0, + 89, + 3, + "MODEL_PATCH" + ], + [ + 146, + 72, + 1, + 90, + 0, + "CLIP" + ], + [ + 149, + 90, + 0, + 78, + 1, + "CONDITIONING" + ], + [ + 151, + 92, + 0, + 78, + 0, + "MODEL" + ], + [ + 158, + 83, + 0, + 92, + 0, + "MODEL" + ], + [ + 159, + 56, + 0, + 95, + 0, + "MODEL" + ], + [ + 160, + 95, + 0, + 31, + 0, + "MODEL" + ], + [ + 170, + 47, + 0, + 108, + 0, + "IMAGE" + ], + [ + 171, + 108, + 0, + 51, + 0, + "IMAGE" + ], + [ + 172, + 109, + 0, + 31, + 3, + "LATENT" + ] + ], + 
"groups": [ + { + "id": 1, + "title": "Step 3 - Style Reference", + "bounding": [ + -350, + 590, + 890, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 2, + "title": "Step 2 - Subject/Identity Image", + "bounding": [ + -710, + 590, + 340, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 3, + "title": "Step 1 - Load Models", + "bounding": [ + -710, + 30, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 4, + "title": "Step3 - Style Reference", + "bounding": [ + -700, + 2060, + 790, + 650 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 6, + "title": "Step 1 - Load Models", + "bounding": [ + -700, + 1510, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 7, + "title": "Step 4 - Image Size", + "bounding": [ + -340, + 1850, + 300, + 200 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 8, + "title": "Step 3 - Prompt", + "bounding": [ + -340, + 1510, + 442.8500061035156, + 309.6000061035156 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 9, + "title": "Step 4 - Prompt", + "bounding": [ + -360, + 30, + 442.8500061035156, + 247.91000366210938 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "definitions": { + "subgraphs": [ + { + "id": "581ab310-5783-4e50-b220-4d94035eb469", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + 
"name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "74d27e51-9780-451f-9dde-8bf303d00011", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, 
+ 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 56, + "lastLinkId": 102, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USO Reference Conditioning", + "inputNode": { + "id": -10, + "bounding": [ + -20, + 283, + 120, + 100 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 560, + 293, + 128.6640625, + 60 + ] + }, + "inputs": [ + { + "id": "d78373ce-7cdc-4e0f-a743-4d024e766376", + "name": "conditioning", + "type": "CONDITIONING", + "linkIds": [ + 66 + ], + "localized_name": "conditioning", + "pos": { + "0": 80, + "1": 303 + } + }, + { + "id": "1eebe27a-c790-4a85-8aa2-3b9f4eeddd00", + "name": "latent", + "type": "LATENT", + "linkIds": [ + 79 + ], + "localized_name": "latent", + "shape": 7, + "pos": { + "0": 80, + "1": 323 + } + }, + { + "id": "09894330-3dcb-4fba-87a5-33c3fb9db843", + "name": "guidance", + "type": "FLOAT", + "linkIds": [ + 102 + ], + "pos": { + "0": 80, + "1": 343 + } + } + ], + "outputs": [ + { + "id": "194badf0-ae11-47cd-a825-d7edd7ca6cc4", + "name": "CONDITIONING", + "type": "CONDITIONING", + "linkIds": [ + 57 + ], + "localized_name": "CONDITIONING", + "pos": 
{ + "0": 580, + "1": 313 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 41, + "type": "FluxKontextMultiReferenceLatentMethod", + "pos": [ + 170, + 320 + ], + "size": [ + 330, + 60 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 64 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 67 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxKontextMultiReferenceLatentMethod" + }, + "widgets_values": [ + "uso" + ] + }, + { + "id": 44, + "type": "ReferenceLatent", + "pos": [ + 190, + 430 + ], + "size": [ + 197.712890625, + 46 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 66 + }, + { + "localized_name": "latent", + "name": "latent", + "shape": 7, + "type": "LATENT", + "link": 79 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 64 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ReferenceLatent" + }, + "widgets_values": [] + }, + { + "id": 35, + "type": "FluxGuidance", + "pos": [ + 160, + 200 + ], + "size": [ + 211.60000610351562, + 58 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "localized_name": "条件", + "name": "conditioning", + "type": "CONDITIONING", + "link": 67 + }, + { + "localized_name": "引导", + "name": "guidance", + "type": "FLOAT", + "widget": { + "name": "guidance" + }, + "link": 102 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 57 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxGuidance" + }, + "widgets_values": [ + 3.5 + ] + } + ], + "groups": [], + "links": [ + { + "id": 67, + "origin_id": 41, + "origin_slot": 0, + "target_id": 35, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 64, + "origin_id": 44, + "origin_slot": 0, + "target_id": 41, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 66, + "origin_id": -10, + "origin_slot": 0, + "target_id": 44, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 79, + "origin_id": -10, + "origin_slot": 1, + "target_id": 44, + "target_slot": 1, + "type": "LATENT" + }, + { + "id": 57, + "origin_id": 35, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 102, + "origin_id": -10, + "origin_slot": 2, + "target_id": 35, + "target_slot": 1, + "type": "FLOAT" + } + ], + "extra": {} + }, + { + "id": "08624421-a41c-413b-9de7-d68b0b60b667", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": 
"image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + 
-790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + 
"target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + } + ] + }, + "config": {}, + "extra": { + "ds": { + "scale": 0.7058644213510158, + "offset": [ + 337.5296665407636, + -163.90090939242123 + ] + }, + "frontendVersion": "1.25.11" + }, + "version": 0.4 +} \ No newline at end of file diff --git a/workflow/example3.png b/workflow/example3.png new file mode 100644 index 0000000000000000000000000000000000000000..c7e630bf2cf98d7c1dc4d920e2c0a6fb7945cd74 --- /dev/null +++ b/workflow/example3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c665d4932a0400ca18daff04398de07a26cba4ff77764464ae07e8590a81490 +size 1748212 diff --git a/workflow/example4.json b/workflow/example4.json new file mode 100644 index 0000000000000000000000000000000000000000..27cb8adeaee964f4ba5e54c90a4b57f14be5e5fe --- /dev/null +++ b/workflow/example4.json @@ -0,0 +1,3525 @@ +{ + "id": "4be48bc0-f21c-45cb-b657-bb75df5d398c", + "revision": 0, + "last_node_id": 109, + "last_link_id": 172, + "nodes": [ + { + "id": 46, + "type": "LoadImage", + "pos": [ + 10, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 0, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 98 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00005_.png", + "image" + ] + }, + { + "id": 56, + "type": "74d27e51-9780-451f-9dde-8bf303d00011", + "pos": [ + 10, + 670 + ], + "size": [ + 270, + 118 + ], + "flags": {}, + "order": 30, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 97 + }, + { + "name": "image", + "type": "IMAGE", + "link": 98 + }, + { + "name": "model", + "type": "MODEL", + "link": 96 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 100 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 159 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 100, + "type": "MarkdownNote", + "pos": [ + 300, + 980 + ], + "size": [ + 230, + 170 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About Style reference", + "properties": {}, + "widgets_values": [ + "The output image will reference the style from the images you uploaded. 
\n\nYou can disable all reference image inputs and use this workflow as a subject-driven image generation workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 102, + "type": "MarkdownNote", + "pos": [ + -1260, + 60 + ], + "size": [ + 530, + 510 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Model links", + "properties": {}, + "widgets_values": [ + "[tutorial](http://docs.comfy.org/tutorials/flux/flux-1-uso) | [教程](http://docs.comfy.org/zh-CN/tutorials/flux/flux-1-uso)\n\n**checkpoints**\n\n- [flux1-dev-fp8.safetensors](https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors)\n\n\n\n**loras**\n\n- [uso-flux1-dit-lora-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors)\n\n**model_patches**\n\n- [uso-flux1-projector-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors)\n\n**clip_visions**\n- [sigclip_vision_patch14_384.safetensors](https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors)\n\nModel Location\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│ ├── 📂 checkpoints/\n│ │ └── flux1-dev-fp8.safetensors\n│ ├── 📂 loras/\n│ │ └── uso-flux1-dit-lora-v1.safetensors\n│ ├── 📂 model_patches/\n│ │ └── uso-flux1-projector-v1.safetensors\n│ ├── 📂 clip_visions/\n│ │ └── sigclip_vision_patch14_384.safetensors\n```\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 101, + "type": "MarkdownNote", + "pos": [ + -1000, + 1540 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "This workflow only uses style reference." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 107, + "type": "MarkdownNote", + "pos": [ + 160, + 2320 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "You can also bypass the whole Style Reference group and use this workflow as a text-to-image workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 55, + "type": "581ab310-5783-4e50-b220-4d94035eb469", + "pos": [ + -330, + 680 + ], + "size": [ + 270, + 120 + ], + "flags": {}, + "order": 25, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 91 + }, + { + "name": "image", + "type": "IMAGE", + "link": 92 + }, + { + "name": "model", + "type": "MODEL", + "link": 99 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 94 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 96 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 53, + "type": "LoadImage", + "pos": [ + -330, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 5, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 92 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00144_.png", + "image" + ] + }, + { + "id": 47, + "type": "LoadImage", + "pos": [ + -680, + 690 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 6, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 170 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "input.png", + "image" + ] + }, + { + "id": 108, + "type": "ImageScaleToMaxDimension", + "pos": [ + -686.0413818359375, + 1075.162109375 + ], + "size": [ + 281.2027282714844, + 82 + ], + "flags": {}, + "order": 19, + "mode": 4, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 170 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 171 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ImageScaleToMaxDimension" + }, + "widgets_values": [ + "area", + 512 + ] + }, + { + "id": 97, + "type": "MarkdownNote", + "pos": [ + -700, + 1240 + ], + "size": [ + 320, + 130 + ], + "flags": {}, + "order": 7, + "mode": 4, + "inputs": [], + "outputs": [], + "title": "About Scale Image to Total Pixels node", + "properties": {}, + "widgets_values": [ + "The \"Scale Image to Total Pixels\" node is used to prevent you from uploading an image that is too large. Sometimes, large-sized input can lead to poor results.\n\nIt will also upscale small size input image, so you can use **Ctrl-B** to bypass it if you don't need to." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 40, + "type": "CLIPVisionLoader", + "pos": [ + -700, + 494 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 8, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 91, + 97 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 43, + "type": "LoraLoaderModelOnly", + "pos": [ + -700, + 250 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 20, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 62 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 99 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 39, + "type": "ModelPatchLoader", + "pos": [ + -700, + 384 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 9, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 94, + 100 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 30, + "type": "CheckpointLoaderSimple", + "pos": [ + -700, + 100 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 10, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 62 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 45 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 46, + 78 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -350, + 100 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 21, + "mode": 4, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 45 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 72, + 102 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A European 
girl with a heartfelt smile and holds a sign writes \"USO x ComfyUI\". She is immersed in a vast, endless field of blooming flowers under a perfect summer sky." + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 57, + "type": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "pos": [ + -200, + 330 + ], + "size": [ + 280, + 78 + ], + "flags": {}, + "order": 29, + "mode": 4, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 102 + }, + { + "name": "latent", + "type": "LATENT", + "link": 103 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 104 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + 3.5 + ] + }, + { + "id": 109, + "type": "EmptyLatentImage", + "pos": [ + -190.36032104492188, + 489.6009826660156 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 11, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 172 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 31, + "type": "KSampler", + "pos": [ + 150, + 70 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 34, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 160 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 104 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 73 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 172 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 52 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 263797271289443, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 95, + "type": "EasyCache", + "pos": [ + 150, + -110 + ], + "size": [ + 310, + 130 + ], + "flags": {}, + "order": 32, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 159 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 160 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 96, + "type": "MarkdownNote", + "pos": [ + 480, + -120 + ], + "size": [ + 390, + 140 + ], + "flags": {}, + "order": 12, + "mode": 4, + "inputs": [], + "outputs": [], + "title": "About EasyCache", + "properties": {}, + "widgets_values": [ + "The EasyCache node will maintain a `cumulative_change_rate`. When this value is lower than the `reuse_threshold`, it skips the current step and uses the cached result.\n\nThis node can reduce inference time, but it also sacrifices some quality and details. 
You can bypass it (Ctrl+B) if you don't need it.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 580, + 180 + ], + "size": [ + 950, + 1010 + ], + "flags": {}, + "order": 38, + "mode": 4, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 48, + "type": "ConditioningZeroOut", + "pos": [ + -100, + 450 + ], + "size": [ + 280, + 26 + ], + "flags": { + "collapsed": true + }, + "order": 26, + "mode": 4, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 72 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 73 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 51, + "type": "VAEEncode", + "pos": [ + -340, + 340 + ], + "size": [ + 280, + 46 + ], + "flags": { + "collapsed": true + }, + "order": 24, + "mode": 4, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 171 + }, + { + "name": "vae", + "type": "VAE", + "link": 78 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 103 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEEncode" + }, + "widgets_values": [] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 490, + 70 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 36, + "mode": 4, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 52 + }, + { + "name": "vae", + "type": "VAE", + "link": 46 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 9 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 89, + "type": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "pos": [ + -630, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 142 + }, + { + "name": "image", + "type": "IMAGE", + "link": 143 + }, + { + "name": "model", + "type": "MODEL", + "link": 144 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 145 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 137 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 87, + "type": "EmptyLatentImage", + "pos": [ + -320, + 1930 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 130 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 79, + "type": "ConditioningZeroOut", + "pos": [ + -330, + 1790 + ], + "size": [ + 197.712890625, + 26 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 131 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 129 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + 
"widgets_values": [] + }, + { + "id": 78, + "type": "KSampler", + "pos": [ + 140, + 1550 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 151 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 149 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 129 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 130 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 125 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 277357199108482, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 92, + "type": "EasyCache", + "pos": [ + 160, + 2120 + ], + "size": [ + 270, + 130 + ], + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 158 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 151 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 77, + "type": "VAEDecode", + "pos": [ + 500, + 1560 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 125 + }, + { + "name": "vae", + "type": "VAE", + "link": 126 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 123 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 73, + "type": "SaveImage", + "pos": [ + 500, + 1670 + ], + "size": [ + 985.2999877929688, + 1060.3800048828125 + ], + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 123 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 72, + "type": "CheckpointLoaderSimple", + "pos": [ + -690, + 1580 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 124 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 146 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 126 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 76, + "type": "LoraLoaderModelOnly", + "pos": [ + -690, + 1730 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 124 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 144 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": 
"https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 75, + "type": "ModelPatchLoader", + "pos": [ + -690, + 1870 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 138, + 145 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 74, + "type": "CLIPVisionLoader", + "pos": [ + -690, + 1980 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 135, + 142 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 88, + "type": "LoadImage", + "pos": [ + -630, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 143 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "style5_0.webp.webp", + "image" + ] + }, + { + "id": 91, + "type": "LoadImage", + "pos": [ + -290, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 18, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 136 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "ref.webp", + "image" + ] + }, + { + "id": 83, + "type": "08624421-a41c-413b-9de7-d68b0b60b667", + "pos": [ + -290, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 31, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 135 + }, + { + "name": "image", + "type": "IMAGE", + "link": 136 + }, + { + "name": "model", + "type": "MODEL", + "link": 137 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 138 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 158 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 90, + "type": "CLIPTextEncode", + "pos": [ + -330, + 1580 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 146 + } + ], + 
"outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 131, + 149 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A line of large words reads \"USO\"" + ], + "color": "#232", + "bgcolor": "#353" + } + ], + "links": [ + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 45, + 30, + 1, + 6, + 0, + "CLIP" + ], + [ + 46, + 30, + 2, + 8, + 1, + "VAE" + ], + [ + 52, + 31, + 0, + 8, + 0, + "LATENT" + ], + [ + 62, + 30, + 0, + 43, + 0, + "MODEL" + ], + [ + 72, + 6, + 0, + 48, + 0, + "CONDITIONING" + ], + [ + 73, + 48, + 0, + 31, + 2, + "CONDITIONING" + ], + [ + 78, + 30, + 2, + 51, + 1, + "VAE" + ], + [ + 91, + 40, + 0, + 55, + 0, + "CLIP_VISION" + ], + [ + 92, + 53, + 0, + 55, + 1, + "IMAGE" + ], + [ + 94, + 39, + 0, + 55, + 3, + "MODEL_PATCH" + ], + [ + 96, + 55, + 0, + 56, + 2, + "MODEL" + ], + [ + 97, + 40, + 0, + 56, + 0, + "CLIP_VISION" + ], + [ + 98, + 46, + 0, + 56, + 1, + "IMAGE" + ], + [ + 99, + 43, + 0, + 55, + 2, + "MODEL" + ], + [ + 100, + 39, + 0, + 56, + 3, + "MODEL_PATCH" + ], + [ + 102, + 6, + 0, + 57, + 0, + "CONDITIONING" + ], + [ + 103, + 51, + 0, + 57, + 1, + "LATENT" + ], + [ + 104, + 57, + 0, + 31, + 1, + "CONDITIONING" + ], + [ + 123, + 77, + 0, + 73, + 0, + "IMAGE" + ], + [ + 124, + 72, + 0, + 76, + 0, + "MODEL" + ], + [ + 125, + 78, + 0, + 77, + 0, + "LATENT" + ], + [ + 126, + 72, + 2, + 77, + 1, + "VAE" + ], + [ + 129, + 79, + 0, + 78, + 2, + "CONDITIONING" + ], + [ + 130, + 87, + 0, + 78, + 3, + "LATENT" + ], + [ + 131, + 90, + 0, + 79, + 0, + "CONDITIONING" + ], + [ + 135, + 74, + 0, + 83, + 0, + "CLIP_VISION" + ], + [ + 136, + 91, + 0, + 83, + 1, + "IMAGE" + ], + [ + 137, + 89, + 0, + 83, + 2, + "MODEL" + ], + [ + 138, + 75, + 0, + 83, + 3, + "MODEL_PATCH" + ], + [ + 142, + 74, + 0, + 89, + 0, + "CLIP_VISION" + ], + [ + 143, + 88, + 0, + 89, + 1, + "IMAGE" + ], + [ + 144, + 76, + 0, + 89, + 2, + "MODEL" + ], + [ + 145, + 75, + 0, + 89, + 3, + "MODEL_PATCH" + ], + [ + 146, + 72, + 1, + 90, + 0, + "CLIP" + ], + [ + 149, + 90, + 0, + 78, + 1, + "CONDITIONING" + ], + [ + 151, + 92, + 0, + 78, + 0, + "MODEL" + ], + [ + 158, + 83, + 0, + 92, + 0, + "MODEL" + ], + [ + 159, + 56, + 0, + 95, + 0, + "MODEL" + ], + [ + 160, + 95, + 0, + 31, + 0, + "MODEL" + ], + [ + 170, + 47, + 0, + 108, + 0, + "IMAGE" + ], + [ + 171, + 108, + 0, + 51, + 0, + "IMAGE" + ], + [ + 172, + 109, + 0, + 31, + 3, + "LATENT" + ] + ], + "groups": [ + { + "id": 1, + "title": "Step 3 - Style Reference", + "bounding": [ + -350, + 590, + 890, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 2, + "title": "Step 2 - Subject/Identity Image", + "bounding": [ + -710, + 590, + 340, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 3, + "title": "Step 1 - Load Models", + "bounding": [ + -710, + 30, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 4, + "title": "Step3 - Style Reference", + "bounding": [ + -700, + 2060, + 790, + 650 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 6, + "title": "Step 1 - Load Models", + "bounding": [ + -700, + 1510, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 7, + "title": "Step 4 - Image Size", + "bounding": [ + -340, + 1850, + 300, + 200 + ], + "color": "#3f789e", + "font_size": 24, + 
"flags": {} + }, + { + "id": 8, + "title": "Step 3 - Prompt", + "bounding": [ + -340, + 1510, + 442.8500061035156, + 309.6000061035156 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 9, + "title": "Step 4 - Prompt", + "bounding": [ + -360, + 30, + 442.8500061035156, + 247.91000366210938 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "definitions": { + "subgraphs": [ + { + "id": "581ab310-5783-4e50-b220-4d94035eb469", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + 
"groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "74d27e51-9780-451f-9dde-8bf303d00011", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": 
"model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 56, + "lastLinkId": 102, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USO Reference Conditioning", + "inputNode": { + "id": -10, + "bounding": [ + -20, + 283, + 120, + 100 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 560, + 293, + 128.6640625, + 60 + ] + }, + "inputs": [ + { + "id": "d78373ce-7cdc-4e0f-a743-4d024e766376", + "name": "conditioning", + "type": "CONDITIONING", + "linkIds": [ + 66 + ], + "localized_name": "conditioning", + "pos": { + "0": 80, + "1": 303 + } + }, + { + "id": "1eebe27a-c790-4a85-8aa2-3b9f4eeddd00", + "name": "latent", + "type": "LATENT", + "linkIds": [ + 79 + ], + "localized_name": "latent", + "shape": 7, + "pos": { + "0": 80, + "1": 323 + } + }, + { + "id": "09894330-3dcb-4fba-87a5-33c3fb9db843", + "name": "guidance", + "type": "FLOAT", + "linkIds": [ + 102 + ], + "pos": { + "0": 80, + "1": 343 + } + } + ], + "outputs": [ + { + "id": "194badf0-ae11-47cd-a825-d7edd7ca6cc4", + "name": "CONDITIONING", + "type": "CONDITIONING", + "linkIds": [ + 57 + ], + "localized_name": "CONDITIONING", + "pos": { + "0": 580, + "1": 313 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 41, + "type": "FluxKontextMultiReferenceLatentMethod", + "pos": [ + 170, + 320 + ], + "size": [ + 330, + 60 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 64 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 67 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxKontextMultiReferenceLatentMethod" + }, + "widgets_values": [ + "uso" + ] + }, + { + "id": 44, + "type": "ReferenceLatent", + "pos": [ + 190, + 430 + ], + "size": [ + 197.712890625, + 46 + ], + "flags": {}, + "order": 2, + "mode": 4, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 66 + }, + { + "localized_name": "latent", + "name": 
"latent", + "shape": 7, + "type": "LATENT", + "link": 79 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 64 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ReferenceLatent" + }, + "widgets_values": [] + }, + { + "id": 35, + "type": "FluxGuidance", + "pos": [ + 160, + 200 + ], + "size": [ + 211.60000610351562, + 58 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "条件", + "name": "conditioning", + "type": "CONDITIONING", + "link": 67 + }, + { + "localized_name": "引导", + "name": "guidance", + "type": "FLOAT", + "widget": { + "name": "guidance" + }, + "link": 102 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 57 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxGuidance" + }, + "widgets_values": [ + 3.5 + ] + } + ], + "groups": [], + "links": [ + { + "id": 67, + "origin_id": 41, + "origin_slot": 0, + "target_id": 35, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 64, + "origin_id": 44, + "origin_slot": 0, + "target_id": 41, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 66, + "origin_id": -10, + "origin_slot": 0, + "target_id": 44, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 79, + "origin_id": -10, + "origin_slot": 1, + "target_id": 44, + "target_slot": 1, + "type": "LATENT" + }, + { + "id": 57, + "origin_id": 35, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 102, + "origin_id": -10, + "origin_slot": 2, + "target_id": 35, + "target_slot": 1, + "type": "FLOAT" + } + ], + "extra": {} + }, + { + "id": "08624421-a41c-413b-9de7-d68b0b60b667", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, 
+ "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", 
+ "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + } + ] + }, + "config": {}, + "extra": { + "ds": { + "scale": 0.5473306654230768, + "offset": [ + 752.5951681259846, + -1382.1123142454012 + ] + }, + "frontendVersion": "1.25.11" + }, + "version": 0.4 +} \ No newline at end of file diff --git a/workflow/example4.png b/workflow/example4.png new file mode 100644 index 0000000000000000000000000000000000000000..a188a47217288aa5450fd4ad44b96f5226057169 --- /dev/null +++ b/workflow/example4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fced5721dbf800ae927585365a725842f17caacb627fd50d89fe749066cc0773 +size 951607 diff --git a/workflow/example5.json b/workflow/example5.json new file mode 100644 index 0000000000000000000000000000000000000000..3c52a36c04fa4e05488e05027fc019235af228fc --- /dev/null +++ 
b/workflow/example5.json @@ -0,0 +1,3525 @@ +{ + "id": "4be48bc0-f21c-45cb-b657-bb75df5d398c", + "revision": 0, + "last_node_id": 109, + "last_link_id": 172, + "nodes": [ + { + "id": 46, + "type": "LoadImage", + "pos": [ + 10, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 0, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 98 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00005_.png", + "image" + ] + }, + { + "id": 56, + "type": "74d27e51-9780-451f-9dde-8bf303d00011", + "pos": [ + 10, + 670 + ], + "size": [ + 270, + 118 + ], + "flags": {}, + "order": 30, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 97 + }, + { + "name": "image", + "type": "IMAGE", + "link": 98 + }, + { + "name": "model", + "type": "MODEL", + "link": 96 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 100 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 159 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 100, + "type": "MarkdownNote", + "pos": [ + 300, + 980 + ], + "size": [ + 230, + 170 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About Style reference", + "properties": {}, + "widgets_values": [ + "The output image will reference the style from the images you uploaded. \n\nYou can disable all reference image inputs and use this workflow as a subject-driven image generation workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 102, + "type": "MarkdownNote", + "pos": [ + -1260, + 60 + ], + "size": [ + 530, + 510 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Model links", + "properties": {}, + "widgets_values": [ + "[tutorial](http://docs.comfy.org/tutorials/flux/flux-1-uso) | [教程](http://docs.comfy.org/zh-CN/tutorials/flux/flux-1-uso)\n\n**checkpoints**\n\n- [flux1-dev-fp8.safetensors](https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors)\n\n\n\n**loras**\n\n- [uso-flux1-dit-lora-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors)\n\n**model_patches**\n\n- [uso-flux1-projector-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors)\n\n**clip_visions**\n- [sigclip_vision_patch14_384.safetensors](https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors)\n\nModel Location\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│ ├── 📂 checkpoints/\n│ │ └── flux1-dev-fp8.safetensors\n│ ├── 📂 loras/\n│ │ └── uso-flux1-dit-lora-v1.safetensors\n│ ├── 📂 model_patches/\n│ │ └── uso-flux1-projector-v1.safetensors\n│ ├── 📂 clip_visions/\n│ │ └── sigclip_vision_patch14_384.safetensors\n```\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 101, + "type": "MarkdownNote", + "pos": [ + -1000, + 1540 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "This workflow only uses style reference." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 107, + "type": "MarkdownNote", + "pos": [ + 160, + 2320 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "You can also bypass the whole Style Reference group and use this workflow as a text-to-image workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 55, + "type": "581ab310-5783-4e50-b220-4d94035eb469", + "pos": [ + -330, + 680 + ], + "size": [ + 270, + 120 + ], + "flags": {}, + "order": 25, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 91 + }, + { + "name": "image", + "type": "IMAGE", + "link": 92 + }, + { + "name": "model", + "type": "MODEL", + "link": 99 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 94 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 96 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 53, + "type": "LoadImage", + "pos": [ + -330, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 5, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 92 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00144_.png", + "image" + ] + }, + { + "id": 47, + "type": "LoadImage", + "pos": [ + -680, + 690 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 6, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 170 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "input.png", + "image" + ] + }, + { + "id": 108, + "type": "ImageScaleToMaxDimension", + "pos": [ + -686.0413818359375, + 1075.162109375 + ], + "size": [ + 281.2027282714844, + 82 + ], + "flags": {}, + "order": 19, + "mode": 4, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 170 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 171 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ImageScaleToMaxDimension" + }, + "widgets_values": [ + "area", + 512 + ] + }, + { + "id": 97, + "type": "MarkdownNote", + "pos": [ + -700, + 1240 + ], + "size": [ + 320, + 130 + ], + "flags": {}, + "order": 7, + "mode": 4, + "inputs": [], + "outputs": [], + "title": "About Scale Image to Total Pixels node", + "properties": {}, + "widgets_values": [ + "The \"Scale Image to Total Pixels\" node is used to prevent you from uploading an image that is too large. Sometimes, large-sized input can lead to poor results.\n\nIt will also upscale small size input image, so you can use **Ctrl-B** to bypass it if you don't need to." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 40, + "type": "CLIPVisionLoader", + "pos": [ + -700, + 494 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 8, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 91, + 97 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 43, + "type": "LoraLoaderModelOnly", + "pos": [ + -700, + 250 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 20, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 62 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 99 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 39, + "type": "ModelPatchLoader", + "pos": [ + -700, + 384 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 9, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 94, + 100 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 30, + "type": "CheckpointLoaderSimple", + "pos": [ + -700, + 100 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 10, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 62 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 45 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 46, + 78 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -350, + 100 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 21, + "mode": 4, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 45 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 72, + 102 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A European 
girl with a heartfelt smile and holds a sign writes \"USO x ComfyUI\". She is immersed in a vast, endless field of blooming flowers under a perfect summer sky." + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 57, + "type": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "pos": [ + -200, + 330 + ], + "size": [ + 280, + 78 + ], + "flags": {}, + "order": 29, + "mode": 4, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 102 + }, + { + "name": "latent", + "type": "LATENT", + "link": 103 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 104 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + 3.5 + ] + }, + { + "id": 109, + "type": "EmptyLatentImage", + "pos": [ + -190.36032104492188, + 489.6009826660156 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 11, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 172 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 31, + "type": "KSampler", + "pos": [ + 150, + 70 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 34, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 160 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 104 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 73 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 172 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 52 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 729971634207905, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 95, + "type": "EasyCache", + "pos": [ + 150, + -110 + ], + "size": [ + 310, + 130 + ], + "flags": {}, + "order": 32, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 159 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 160 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 96, + "type": "MarkdownNote", + "pos": [ + 480, + -120 + ], + "size": [ + 390, + 140 + ], + "flags": {}, + "order": 12, + "mode": 4, + "inputs": [], + "outputs": [], + "title": "About EasyCache", + "properties": {}, + "widgets_values": [ + "The EasyCache node will maintain a `cumulative_change_rate`. When this value is lower than the `reuse_threshold`, it skips the current step and uses the cached result.\n\nThis node can reduce inference time, but it also sacrifices some quality and details. 
You can bypass it (Ctrl+B) if you don't need it.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 580, + 180 + ], + "size": [ + 950, + 1010 + ], + "flags": {}, + "order": 38, + "mode": 4, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 48, + "type": "ConditioningZeroOut", + "pos": [ + -100, + 450 + ], + "size": [ + 280, + 26 + ], + "flags": { + "collapsed": true + }, + "order": 26, + "mode": 4, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 72 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 73 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 51, + "type": "VAEEncode", + "pos": [ + -340, + 340 + ], + "size": [ + 280, + 46 + ], + "flags": { + "collapsed": true + }, + "order": 24, + "mode": 4, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 171 + }, + { + "name": "vae", + "type": "VAE", + "link": 78 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 103 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEEncode" + }, + "widgets_values": [] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 490, + 70 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 36, + "mode": 4, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 52 + }, + { + "name": "vae", + "type": "VAE", + "link": 46 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 9 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 89, + "type": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "pos": [ + -630, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 142 + }, + { + "name": "image", + "type": "IMAGE", + "link": 143 + }, + { + "name": "model", + "type": "MODEL", + "link": 144 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 145 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 137 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 87, + "type": "EmptyLatentImage", + "pos": [ + -320, + 1930 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 130 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 79, + "type": "ConditioningZeroOut", + "pos": [ + -330, + 1790 + ], + "size": [ + 197.712890625, + 26 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 131 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 129 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + 
"widgets_values": [] + }, + { + "id": 78, + "type": "KSampler", + "pos": [ + 140, + 1550 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 151 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 149 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 129 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 130 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 125 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 665095485314866, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 92, + "type": "EasyCache", + "pos": [ + 160, + 2120 + ], + "size": [ + 270, + 130 + ], + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 158 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 151 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 77, + "type": "VAEDecode", + "pos": [ + 500, + 1560 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 125 + }, + { + "name": "vae", + "type": "VAE", + "link": 126 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 123 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 73, + "type": "SaveImage", + "pos": [ + 500, + 1670 + ], + "size": [ + 985.2999877929688, + 1060.3800048828125 + ], + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 123 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 72, + "type": "CheckpointLoaderSimple", + "pos": [ + -690, + 1580 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 124 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 146 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 126 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 76, + "type": "LoraLoaderModelOnly", + "pos": [ + -690, + 1730 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 124 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 144 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": 
"https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 74, + "type": "CLIPVisionLoader", + "pos": [ + -690, + 1980 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 135, + 142 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 88, + "type": "LoadImage", + "pos": [ + -630, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 143 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "style5_0.webp.webp", + "image" + ] + }, + { + "id": 83, + "type": "08624421-a41c-413b-9de7-d68b0b60b667", + "pos": [ + -290, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 135 + }, + { + "name": "image", + "type": "IMAGE", + "link": 136 + }, + { + "name": "model", + "type": "MODEL", + "link": 137 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 138 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 158 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 90, + "type": "CLIPTextEncode", + "pos": [ + -330, + 1580 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 146 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 131, + 149 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A line of large words reads \"X\", with a smaller line of text below it that says \"inspires creativity\".\n" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 75, + "type": "ModelPatchLoader", + "pos": [ + -687.868896484375, + 1876.2841796875 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 138, + 145 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 91, + "type": 
"LoadImage", + "pos": [ + -290, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 18, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 136 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "17-17-29.webp.webp", + "image" + ] + } + ], + "links": [ + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 45, + 30, + 1, + 6, + 0, + "CLIP" + ], + [ + 46, + 30, + 2, + 8, + 1, + "VAE" + ], + [ + 52, + 31, + 0, + 8, + 0, + "LATENT" + ], + [ + 62, + 30, + 0, + 43, + 0, + "MODEL" + ], + [ + 72, + 6, + 0, + 48, + 0, + "CONDITIONING" + ], + [ + 73, + 48, + 0, + 31, + 2, + "CONDITIONING" + ], + [ + 78, + 30, + 2, + 51, + 1, + "VAE" + ], + [ + 91, + 40, + 0, + 55, + 0, + "CLIP_VISION" + ], + [ + 92, + 53, + 0, + 55, + 1, + "IMAGE" + ], + [ + 94, + 39, + 0, + 55, + 3, + "MODEL_PATCH" + ], + [ + 96, + 55, + 0, + 56, + 2, + "MODEL" + ], + [ + 97, + 40, + 0, + 56, + 0, + "CLIP_VISION" + ], + [ + 98, + 46, + 0, + 56, + 1, + "IMAGE" + ], + [ + 99, + 43, + 0, + 55, + 2, + "MODEL" + ], + [ + 100, + 39, + 0, + 56, + 3, + "MODEL_PATCH" + ], + [ + 102, + 6, + 0, + 57, + 0, + "CONDITIONING" + ], + [ + 103, + 51, + 0, + 57, + 1, + "LATENT" + ], + [ + 104, + 57, + 0, + 31, + 1, + "CONDITIONING" + ], + [ + 123, + 77, + 0, + 73, + 0, + "IMAGE" + ], + [ + 124, + 72, + 0, + 76, + 0, + "MODEL" + ], + [ + 125, + 78, + 0, + 77, + 0, + "LATENT" + ], + [ + 126, + 72, + 2, + 77, + 1, + "VAE" + ], + [ + 129, + 79, + 0, + 78, + 2, + "CONDITIONING" + ], + [ + 130, + 87, + 0, + 78, + 3, + "LATENT" + ], + [ + 131, + 90, + 0, + 79, + 0, + "CONDITIONING" + ], + [ + 135, + 74, + 0, + 83, + 0, + "CLIP_VISION" + ], + [ + 136, + 91, + 0, + 83, + 1, + "IMAGE" + ], + [ + 137, + 89, + 0, + 83, + 2, + "MODEL" + ], + [ + 138, + 75, + 0, + 83, + 3, + "MODEL_PATCH" + ], + [ + 142, + 74, + 0, + 89, + 0, + "CLIP_VISION" + ], + [ + 143, + 88, + 0, + 89, + 1, + "IMAGE" + ], + [ + 144, + 76, + 0, + 89, + 2, + "MODEL" + ], + [ + 145, + 75, + 0, + 89, + 3, + "MODEL_PATCH" + ], + [ + 146, + 72, + 1, + 90, + 0, + "CLIP" + ], + [ + 149, + 90, + 0, + 78, + 1, + "CONDITIONING" + ], + [ + 151, + 92, + 0, + 78, + 0, + "MODEL" + ], + [ + 158, + 83, + 0, + 92, + 0, + "MODEL" + ], + [ + 159, + 56, + 0, + 95, + 0, + "MODEL" + ], + [ + 160, + 95, + 0, + 31, + 0, + "MODEL" + ], + [ + 170, + 47, + 0, + 108, + 0, + "IMAGE" + ], + [ + 171, + 108, + 0, + 51, + 0, + "IMAGE" + ], + [ + 172, + 109, + 0, + 31, + 3, + "LATENT" + ] + ], + "groups": [ + { + "id": 1, + "title": "Step 3 - Style Reference", + "bounding": [ + -350, + 590, + 890, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 2, + "title": "Step 2 - Subject/Identity Image", + "bounding": [ + -710, + 590, + 340, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 3, + "title": "Step 1 - Load Models", + "bounding": [ + -710, + 30, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 4, + "title": "Step3 - Style Reference", + "bounding": [ + -700, + 2060, + 790, + 650 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 6, + "title": "Step 1 - Load Models", + "bounding": [ + -700, + 1510, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 7, + "title": "Step 4 - 
Image Size", + "bounding": [ + -340, + 1850, + 300, + 200 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 8, + "title": "Step 3 - Prompt", + "bounding": [ + -340, + 1510, + 442.8500061035156, + 309.6000061035156 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 9, + "title": "Step 4 - Prompt", + "bounding": [ + -360, + 30, + 442.8500061035156, + 247.91000366210938 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "definitions": { + "subgraphs": [ + { + "id": "581ab310-5783-4e50-b220-4d94035eb469", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + 
"ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "74d27e51-9780-451f-9dde-8bf303d00011", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + 
"localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 56, + "lastLinkId": 102, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USO Reference Conditioning", + "inputNode": { + "id": -10, + "bounding": [ + -20, + 283, + 120, + 100 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 560, + 293, + 128.6640625, + 60 + ] + }, + "inputs": [ + { + "id": "d78373ce-7cdc-4e0f-a743-4d024e766376", + "name": "conditioning", + "type": "CONDITIONING", + "linkIds": [ + 66 + ], + "localized_name": "conditioning", + "pos": { + "0": 80, + "1": 303 + } + }, + { + "id": "1eebe27a-c790-4a85-8aa2-3b9f4eeddd00", + "name": "latent", + "type": "LATENT", + "linkIds": [ + 79 + ], + "localized_name": "latent", + "shape": 7, + "pos": { + "0": 80, + "1": 323 + } + }, + { + "id": "09894330-3dcb-4fba-87a5-33c3fb9db843", + "name": "guidance", + "type": "FLOAT", + "linkIds": [ + 102 + ], + "pos": { + "0": 80, + "1": 343 + } + } + ], + "outputs": [ + { + "id": "194badf0-ae11-47cd-a825-d7edd7ca6cc4", + "name": "CONDITIONING", + "type": "CONDITIONING", + "linkIds": [ + 57 + ], + "localized_name": "CONDITIONING", + "pos": { + "0": 580, + "1": 313 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 41, + "type": "FluxKontextMultiReferenceLatentMethod", + "pos": [ + 170, + 320 + ], + "size": [ + 330, + 60 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 64 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 67 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxKontextMultiReferenceLatentMethod" + }, + "widgets_values": [ + "uso" + ] + }, + { + "id": 44, + "type": "ReferenceLatent", + "pos": [ + 190, + 430 + ], + "size": [ + 197.712890625, + 46 + ], + "flags": {}, + "order": 2, + "mode": 4, + "inputs": [ + { + "localized_name": "conditioning", + "name": 
"conditioning", + "type": "CONDITIONING", + "link": 66 + }, + { + "localized_name": "latent", + "name": "latent", + "shape": 7, + "type": "LATENT", + "link": 79 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 64 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ReferenceLatent" + }, + "widgets_values": [] + }, + { + "id": 35, + "type": "FluxGuidance", + "pos": [ + 160, + 200 + ], + "size": [ + 211.60000610351562, + 58 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "条件", + "name": "conditioning", + "type": "CONDITIONING", + "link": 67 + }, + { + "localized_name": "引导", + "name": "guidance", + "type": "FLOAT", + "widget": { + "name": "guidance" + }, + "link": 102 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 57 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxGuidance" + }, + "widgets_values": [ + 3.5 + ] + } + ], + "groups": [], + "links": [ + { + "id": 67, + "origin_id": 41, + "origin_slot": 0, + "target_id": 35, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 64, + "origin_id": 44, + "origin_slot": 0, + "target_id": 41, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 66, + "origin_id": -10, + "origin_slot": 0, + "target_id": 44, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 79, + "origin_id": -10, + "origin_slot": 1, + "target_id": 44, + "target_slot": 1, + "type": "LATENT" + }, + { + "id": 57, + "origin_id": 35, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 102, + "origin_id": -10, + "origin_slot": 2, + "target_id": 35, + "target_slot": 1, + "type": "FLOAT" + } + ], + "extra": {} + }, + { + "id": "08624421-a41c-413b-9de7-d68b0b60b667", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": 
"CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": 
"model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + } + ] + }, + "config": {}, + "extra": { + "ds": { + "scale": 0.8256879517985736, + "offset": [ + 373.5768808493334, + -1946.0652236413714 + ] + }, + "frontendVersion": "1.25.11" + }, + "version": 0.4 +} \ No newline at end of file diff --git a/workflow/example5.png b/workflow/example5.png new file mode 100644 index 0000000000000000000000000000000000000000..3c9bcfafb8ea16f85e54e40efe3eafafccc3b7c6 --- /dev/null +++ b/workflow/example5.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0deb24b5cc0dc039ddde740cf9143af2f6550b649d595880fb1d1803bfe3ec04 +size 1382891 diff --git a/workflow/example6.json b/workflow/example6.json new file mode 
100644 index 0000000000000000000000000000000000000000..4faa31ac4668a5b51344b5b90b803c7690d3ee3c --- /dev/null +++ b/workflow/example6.json @@ -0,0 +1,3525 @@ +{ + "id": "4be48bc0-f21c-45cb-b657-bb75df5d398c", + "revision": 0, + "last_node_id": 109, + "last_link_id": 172, + "nodes": [ + { + "id": 46, + "type": "LoadImage", + "pos": [ + 10, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 0, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 98 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00005_.png", + "image" + ] + }, + { + "id": 56, + "type": "74d27e51-9780-451f-9dde-8bf303d00011", + "pos": [ + 10, + 670 + ], + "size": [ + 270, + 118 + ], + "flags": {}, + "order": 30, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 97 + }, + { + "name": "image", + "type": "IMAGE", + "link": 98 + }, + { + "name": "model", + "type": "MODEL", + "link": 96 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 100 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 159 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 100, + "type": "MarkdownNote", + "pos": [ + 300, + 980 + ], + "size": [ + 230, + 170 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "About Style reference", + "properties": {}, + "widgets_values": [ + "The output image will reference the style from the images you uploaded. 
\n\nYou can disable all reference image inputs and use this workflow as a subject-driven image generation workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 102, + "type": "MarkdownNote", + "pos": [ + -1260, + 60 + ], + "size": [ + 530, + 510 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Model links", + "properties": {}, + "widgets_values": [ + "[tutorial](http://docs.comfy.org/tutorials/flux/flux-1-uso) | [教程](http://docs.comfy.org/zh-CN/tutorials/flux/flux-1-uso)\n\n**checkpoints**\n\n- [flux1-dev-fp8.safetensors](https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors)\n\n\n\n**loras**\n\n- [uso-flux1-dit-lora-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors)\n\n**model_patches**\n\n- [uso-flux1-projector-v1.safetensors](https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors)\n\n**clip_visions**\n- [sigclip_vision_patch14_384.safetensors](https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors)\n\nModel Location\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│ ├── 📂 checkpoints/\n│ │ └── flux1-dev-fp8.safetensors\n│ ├── 📂 loras/\n│ │ └── uso-flux1-dit-lora-v1.safetensors\n│ ├── 📂 model_patches/\n│ │ └── uso-flux1-projector-v1.safetensors\n│ ├── 📂 clip_visions/\n│ │ └── sigclip_vision_patch14_384.safetensors\n```\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 101, + "type": "MarkdownNote", + "pos": [ + -1000, + 1540 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "This workflow only uses style reference." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 107, + "type": "MarkdownNote", + "pos": [ + 160, + 2320 + ], + "size": [ + 280, + 88 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [], + "title": "Note", + "properties": {}, + "widgets_values": [ + "You can also bypass the whole Style Reference group and use this workflow as a text-to-image workflow.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 55, + "type": "581ab310-5783-4e50-b220-4d94035eb469", + "pos": [ + -330, + 680 + ], + "size": [ + 270, + 120 + ], + "flags": {}, + "order": 25, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 91 + }, + { + "name": "image", + "type": "IMAGE", + "link": 92 + }, + { + "name": "model", + "type": "MODEL", + "link": 99 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 94 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 96 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 53, + "type": "LoadImage", + "pos": [ + -330, + 840 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 5, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 92 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "flux_krea_00144_.png", + "image" + ] + }, + { + "id": 47, + "type": "LoadImage", + "pos": [ + -680, + 690 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 6, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 170 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "input.png", + "image" + ] + }, + { + "id": 108, + "type": "ImageScaleToMaxDimension", + "pos": [ + -686.0413818359375, + 1075.162109375 + ], + "size": [ + 281.2027282714844, + 82 + ], + "flags": {}, + "order": 19, + "mode": 4, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 170 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 171 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ImageScaleToMaxDimension" + }, + "widgets_values": [ + "area", + 512 + ] + }, + { + "id": 97, + "type": "MarkdownNote", + "pos": [ + -700, + 1240 + ], + "size": [ + 320, + 130 + ], + "flags": {}, + "order": 7, + "mode": 4, + "inputs": [], + "outputs": [], + "title": "About Scale Image to Total Pixels node", + "properties": {}, + "widgets_values": [ + "The \"Scale Image to Total Pixels\" node is used to prevent you from uploading an image that is too large. Sometimes, large-sized input can lead to poor results.\n\nIt will also upscale small size input image, so you can use **Ctrl-B** to bypass it if you don't need to." 
+ ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 40, + "type": "CLIPVisionLoader", + "pos": [ + -700, + 494 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 8, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 91, + 97 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 43, + "type": "LoraLoaderModelOnly", + "pos": [ + -700, + 250 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 20, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 62 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 99 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 39, + "type": "ModelPatchLoader", + "pos": [ + -700, + 384 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 9, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 94, + 100 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 30, + "type": "CheckpointLoaderSimple", + "pos": [ + -700, + 100 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 10, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 62 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 45 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 46, + 78 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -350, + 100 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 21, + "mode": 4, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 45 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 72, + 102 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A European 
girl with a heartfelt smile and holds a sign writes \"USO x ComfyUI\". She is immersed in a vast, endless field of blooming flowers under a perfect summer sky." + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 57, + "type": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "pos": [ + -200, + 330 + ], + "size": [ + 280, + 78 + ], + "flags": {}, + "order": 29, + "mode": 4, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 102 + }, + { + "name": "latent", + "type": "LATENT", + "link": 103 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 104 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + 3.5 + ] + }, + { + "id": 109, + "type": "EmptyLatentImage", + "pos": [ + -190.36032104492188, + 489.6009826660156 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 11, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 172 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 31, + "type": "KSampler", + "pos": [ + 150, + 70 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 34, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 160 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 104 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 73 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 172 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 52 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 768241081134813, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 95, + "type": "EasyCache", + "pos": [ + 150, + -110 + ], + "size": [ + 310, + 130 + ], + "flags": {}, + "order": 32, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 159 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 160 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 96, + "type": "MarkdownNote", + "pos": [ + 480, + -120 + ], + "size": [ + 390, + 140 + ], + "flags": {}, + "order": 12, + "mode": 4, + "inputs": [], + "outputs": [], + "title": "About EasyCache", + "properties": {}, + "widgets_values": [ + "The EasyCache node will maintain a `cumulative_change_rate`. When this value is lower than the `reuse_threshold`, it skips the current step and uses the cached result.\n\nThis node can reduce inference time, but it also sacrifices some quality and details. 
You can bypass it (Ctrl+B) if you don't need it.\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 580, + 180 + ], + "size": [ + 950, + 1010 + ], + "flags": {}, + "order": 38, + "mode": 4, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 48, + "type": "ConditioningZeroOut", + "pos": [ + -100, + 450 + ], + "size": [ + 280, + 26 + ], + "flags": { + "collapsed": true + }, + "order": 26, + "mode": 4, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 72 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 73 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 51, + "type": "VAEEncode", + "pos": [ + -340, + 340 + ], + "size": [ + 280, + 46 + ], + "flags": { + "collapsed": true + }, + "order": 24, + "mode": 4, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 171 + }, + { + "name": "vae", + "type": "VAE", + "link": 78 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 103 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEEncode" + }, + "widgets_values": [] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 490, + 70 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 36, + "mode": 4, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 52 + }, + { + "name": "vae", + "type": "VAE", + "link": 46 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 9 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 87, + "type": "EmptyLatentImage", + "pos": [ + -320, + 1930 + ], + "size": [ + 270, + 106 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 130 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 79, + "type": "ConditioningZeroOut", + "pos": [ + -330, + 1790 + ], + "size": [ + 197.712890625, + 26 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 131 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 129 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 78, + "type": "KSampler", + "pos": [ + 140, + 1550 + ], + "size": [ + 315, + 474 + ], + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 151 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 149 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 129 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 130 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "slot_index": 0, + "links": [ + 125 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "KSampler" + }, + 
"widgets_values": [ + 76591154377355, + "randomize", + 20, + 1, + "euler", + "simple", + 1 + ] + }, + { + "id": 92, + "type": "EasyCache", + "pos": [ + 160, + 2120 + ], + "size": [ + 270, + 130 + ], + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 158 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 151 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "EasyCache" + }, + "widgets_values": [ + 0.2, + 0.15, + 0.95, + false + ] + }, + { + "id": 77, + "type": "VAEDecode", + "pos": [ + 500, + 1560 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 125 + }, + { + "name": "vae", + "type": "VAE", + "link": 126 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 123 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "VAEDecode" + }, + "widgets_values": [] + }, + { + "id": 73, + "type": "SaveImage", + "pos": [ + 500, + 1670 + ], + "size": [ + 985.2999877929688, + 1060.3800048828125 + ], + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 123 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 72, + "type": "CheckpointLoaderSimple", + "pos": [ + -690, + 1580 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "slot_index": 0, + "links": [ + 124 + ] + }, + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 1, + "links": [ + 146 + ] + }, + { + "name": "VAE", + "type": "VAE", + "slot_index": 2, + "links": [ + 126 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CheckpointLoaderSimple", + "models": [ + { + "name": "flux1-dev-fp8.safetensors", + "url": "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors?download=true", + "directory": "checkpoints" + } + ] + }, + "widgets_values": [ + "flux1-dev-fp8.safetensors" + ] + }, + { + "id": 76, + "type": "LoraLoaderModelOnly", + "pos": [ + -690, + 1730 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 124 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 144 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoraLoaderModelOnly", + "models": [ + { + "name": "uso-flux1-dit-lora-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors", + "directory": "loras" + } + ] + }, + "widgets_values": [ + "uso-flux1-dit-lora-v1.safetensors", + 1 + ] + }, + { + "id": 74, + "type": "CLIPVisionLoader", + "pos": [ + -690, + 1980 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 135, + 142 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionLoader", + "models": [ + { + "name": "sigclip_vision_patch14_384.safetensors", + "url": 
"https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", + "directory": "clip_vision" + } + ] + }, + "widgets_values": [ + "sigclip_vision_patch14_384.safetensors" + ] + }, + { + "id": 83, + "type": "08624421-a41c-413b-9de7-d68b0b60b667", + "pos": [ + -290, + 2160 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 135 + }, + { + "name": "image", + "type": "IMAGE", + "link": 136 + }, + { + "name": "model", + "type": "MODEL", + "link": 137 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 138 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 158 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 75, + "type": "ModelPatchLoader", + "pos": [ + -687.868896484375, + 1876.2841796875 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL_PATCH", + "type": "MODEL_PATCH", + "links": [ + 138, + 145 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ModelPatchLoader", + "models": [ + { + "name": "uso-flux1-projector-v1.safetensors", + "url": "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors", + "directory": "model_patches" + } + ] + }, + "widgets_values": [ + "uso-flux1-projector-v1.safetensors" + ] + }, + { + "id": 91, + "type": "LoadImage", + "pos": [ + -290, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": { + "collapsed": false + }, + "order": 17, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 136 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "17-17-29.webp.webp", + "image" + ] + }, + { + "id": 89, + "type": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "pos": [ + -627.2552490234375, + 2159.374755859375 + ], + "size": [ + 274.080078125, + 120 + ], + "flags": {}, + "order": 27, + "mode": 4, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 142 + }, + { + "name": "image", + "type": "IMAGE", + "link": 143 + }, + { + "name": "model", + "type": "MODEL", + "link": 144 + }, + { + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 145 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 137 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 88, + "type": "LoadImage", + "pos": [ + -630, + 2330 + ], + "size": [ + 274.080078125, + 314.00006103515625 + ], + "flags": {}, + "order": 18, + "mode": 4, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 143 + ] + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "style5_0.webp.webp", + "image" + ] + }, + { + "id": 90, + "type": "CLIPTextEncode", + "pos": [ + -330, + 1580 + ], + "size": [ + 422.8500061035156, + 164.30999755859375 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": 
"CLIP", + "link": 146 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 131, + 149 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "A line of large words reads \"ComfyUI\"" + ], + "color": "#232", + "bgcolor": "#353" + } + ], + "links": [ + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 45, + 30, + 1, + 6, + 0, + "CLIP" + ], + [ + 46, + 30, + 2, + 8, + 1, + "VAE" + ], + [ + 52, + 31, + 0, + 8, + 0, + "LATENT" + ], + [ + 62, + 30, + 0, + 43, + 0, + "MODEL" + ], + [ + 72, + 6, + 0, + 48, + 0, + "CONDITIONING" + ], + [ + 73, + 48, + 0, + 31, + 2, + "CONDITIONING" + ], + [ + 78, + 30, + 2, + 51, + 1, + "VAE" + ], + [ + 91, + 40, + 0, + 55, + 0, + "CLIP_VISION" + ], + [ + 92, + 53, + 0, + 55, + 1, + "IMAGE" + ], + [ + 94, + 39, + 0, + 55, + 3, + "MODEL_PATCH" + ], + [ + 96, + 55, + 0, + 56, + 2, + "MODEL" + ], + [ + 97, + 40, + 0, + 56, + 0, + "CLIP_VISION" + ], + [ + 98, + 46, + 0, + 56, + 1, + "IMAGE" + ], + [ + 99, + 43, + 0, + 55, + 2, + "MODEL" + ], + [ + 100, + 39, + 0, + 56, + 3, + "MODEL_PATCH" + ], + [ + 102, + 6, + 0, + 57, + 0, + "CONDITIONING" + ], + [ + 103, + 51, + 0, + 57, + 1, + "LATENT" + ], + [ + 104, + 57, + 0, + 31, + 1, + "CONDITIONING" + ], + [ + 123, + 77, + 0, + 73, + 0, + "IMAGE" + ], + [ + 124, + 72, + 0, + 76, + 0, + "MODEL" + ], + [ + 125, + 78, + 0, + 77, + 0, + "LATENT" + ], + [ + 126, + 72, + 2, + 77, + 1, + "VAE" + ], + [ + 129, + 79, + 0, + 78, + 2, + "CONDITIONING" + ], + [ + 130, + 87, + 0, + 78, + 3, + "LATENT" + ], + [ + 131, + 90, + 0, + 79, + 0, + "CONDITIONING" + ], + [ + 135, + 74, + 0, + 83, + 0, + "CLIP_VISION" + ], + [ + 136, + 91, + 0, + 83, + 1, + "IMAGE" + ], + [ + 137, + 89, + 0, + 83, + 2, + "MODEL" + ], + [ + 138, + 75, + 0, + 83, + 3, + "MODEL_PATCH" + ], + [ + 142, + 74, + 0, + 89, + 0, + "CLIP_VISION" + ], + [ + 143, + 88, + 0, + 89, + 1, + "IMAGE" + ], + [ + 144, + 76, + 0, + 89, + 2, + "MODEL" + ], + [ + 145, + 75, + 0, + 89, + 3, + "MODEL_PATCH" + ], + [ + 146, + 72, + 1, + 90, + 0, + "CLIP" + ], + [ + 149, + 90, + 0, + 78, + 1, + "CONDITIONING" + ], + [ + 151, + 92, + 0, + 78, + 0, + "MODEL" + ], + [ + 158, + 83, + 0, + 92, + 0, + "MODEL" + ], + [ + 159, + 56, + 0, + 95, + 0, + "MODEL" + ], + [ + 160, + 95, + 0, + 31, + 0, + "MODEL" + ], + [ + 170, + 47, + 0, + 108, + 0, + "IMAGE" + ], + [ + 171, + 108, + 0, + 51, + 0, + "IMAGE" + ], + [ + 172, + 109, + 0, + 31, + 3, + "LATENT" + ] + ], + "groups": [ + { + "id": 1, + "title": "Step 3 - Style Reference", + "bounding": [ + -350, + 590, + 890, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 2, + "title": "Step 2 - Subject/Identity Image", + "bounding": [ + -710, + 590, + 340, + 600 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 3, + "title": "Step 1 - Load Models", + "bounding": [ + -710, + 30, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 4, + "title": "Step3 - Style Reference", + "bounding": [ + -700, + 2060, + 790, + 650 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 6, + "title": "Step 1 - Load Models", + "bounding": [ + -700, + 1510, + 335, + 541.5999755859375 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 7, + "title": "Step 4 - Image Size", + "bounding": [ + -340, + 1850, + 300, + 200 + ], + 
"color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 8, + "title": "Step 3 - Prompt", + "bounding": [ + -340, + 1510, + 442.8500061035156, + 309.6000061035156 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 9, + "title": "Step 4 - Prompt", + "bounding": [ + -360, + 30, + 442.8500061035156, + 247.91000366210938 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "definitions": { + "subgraphs": [ + { + "id": "581ab310-5783-4e50-b220-4d94035eb469", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + 
"widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "74d27e51-9780-451f-9dde-8bf303d00011", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + 
}, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "805a5f96-6fdd-45a4-a1f3-623234fa734f", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 56, + "lastLinkId": 102, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USO Reference Conditioning", + "inputNode": { + "id": -10, + "bounding": [ + -20, + 283, + 120, + 100 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 560, + 293, + 128.6640625, + 60 + ] + }, + "inputs": [ + { + "id": "d78373ce-7cdc-4e0f-a743-4d024e766376", + "name": "conditioning", + "type": "CONDITIONING", + "linkIds": [ + 66 + ], + "localized_name": "conditioning", + "pos": { + "0": 80, + "1": 303 + } + }, + { + "id": "1eebe27a-c790-4a85-8aa2-3b9f4eeddd00", + "name": "latent", + "type": "LATENT", + "linkIds": [ + 79 + ], + "localized_name": "latent", + "shape": 7, + "pos": { + "0": 80, + "1": 323 + } + }, + { + "id": "09894330-3dcb-4fba-87a5-33c3fb9db843", + "name": "guidance", + "type": "FLOAT", + "linkIds": [ + 102 + ], + "pos": { + "0": 80, + "1": 343 + } + } + ], + "outputs": [ + { + "id": "194badf0-ae11-47cd-a825-d7edd7ca6cc4", + "name": "CONDITIONING", + "type": "CONDITIONING", + "linkIds": [ + 57 + ], + "localized_name": "CONDITIONING", + "pos": { + "0": 580, + "1": 313 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 41, + "type": "FluxKontextMultiReferenceLatentMethod", + "pos": [ + 170, + 320 + ], + "size": [ + 330, + 60 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 64 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 67 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxKontextMultiReferenceLatentMethod" + }, + "widgets_values": [ + "uso" + ] + }, + { + "id": 44, + "type": "ReferenceLatent", + "pos": [ + 190, + 430 + ], + "size": [ + 197.712890625, + 46 + ], + "flags": {}, + "order": 2, + "mode": 4, + "inputs": [ + { + "localized_name": "conditioning", + "name": "conditioning", + "type": "CONDITIONING", + "link": 66 + }, + { + 
"localized_name": "latent", + "name": "latent", + "shape": 7, + "type": "LATENT", + "link": 79 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 64 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "ReferenceLatent" + }, + "widgets_values": [] + }, + { + "id": 35, + "type": "FluxGuidance", + "pos": [ + 160, + 200 + ], + "size": [ + 211.60000610351562, + 58 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "条件", + "name": "conditioning", + "type": "CONDITIONING", + "link": 67 + }, + { + "localized_name": "引导", + "name": "guidance", + "type": "FLOAT", + "widget": { + "name": "guidance" + }, + "link": 102 + } + ], + "outputs": [ + { + "localized_name": "条件", + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 57 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "FluxGuidance" + }, + "widgets_values": [ + 3.5 + ] + } + ], + "groups": [], + "links": [ + { + "id": 67, + "origin_id": 41, + "origin_slot": 0, + "target_id": 35, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 64, + "origin_id": 44, + "origin_slot": 0, + "target_id": 41, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 66, + "origin_id": -10, + "origin_slot": 0, + "target_id": 44, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 79, + "origin_id": -10, + "origin_slot": 1, + "target_id": 44, + "target_slot": 1, + "type": "LATENT" + }, + { + "id": 57, + "origin_id": 35, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "CONDITIONING" + }, + { + "id": 102, + "origin_id": -10, + "origin_slot": 2, + "target_id": 35, + "target_slot": 1, + "type": "FLOAT" + } + ], + "extra": {} + }, + { + "id": "08624421-a41c-413b-9de7-d68b0b60b667", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": "d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, 
+ 78 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + }, + { + "id": "d31e1095-65ee-4ba3-b4d0-a21e493dd0bd", + "version": 1, + "state": { + "lastGroupId": 0, + "lastNodeId": 54, + "lastLinkId": 91, + "lastRerouteId": 0 + }, + "revision": 0, + "config": {}, + "name": "USOStyleReference", + "inputNode": { + "id": -10, + "bounding": [ + -790, + 608, + 120, + 140 + ] + }, + "outputNode": { + "id": -20, + "bounding": [ + 190, + 638, + 120, + 60 + ] + }, + "inputs": [ + { + "id": "7640a5a3-e2cf-4f74-acaf-45dedea514e3", + "name": "clip_vision", + "type": "CLIP_VISION", + "linkIds": [ + 85 + ], + "localized_name": "clip_vision", + "pos": { + "0": -690, + "1": 628 + } + }, + { + "id": "4e819086-d02a-4b6c-8383-e7939729ba47", + "name": "image", + "type": "IMAGE", + "linkIds": [ + 86 + ], + "localized_name": "image", + "pos": { + "0": -690, + "1": 648 + } + }, + { + "id": "2bea204b-da22-43d3-9591-9ef6e66a8169", + "name": "model", + "type": "MODEL", + "linkIds": [ + 88 + ], + "localized_name": "model", + "pos": { + "0": -690, + "1": 668 + } + }, + { + "id": "4effe73e-9aa2-4cc5-acde-5df0cf566455", + "name": "model_patch", + "type": "MODEL_PATCH", + "linkIds": [ + 89 + ], + "localized_name": "model_patch", + "pos": { + "0": -690, + "1": 688 + } + }, + { + "id": 
"d9a5affb-b212-4f50-8591-c3dc535a2e53", + "name": "crop", + "type": "COMBO", + "linkIds": [ + 91 + ], + "pos": { + "0": -690, + "1": 708 + } + } + ], + "outputs": [ + { + "id": "4f7b8185-680f-420c-8138-4138be1fda23", + "name": "MODEL", + "type": "MODEL", + "linkIds": [ + 90 + ], + "localized_name": "MODEL", + "pos": { + "0": 210, + "1": 658 + } + } + ], + "widgets": [], + "nodes": [ + { + "id": 52, + "type": "CLIPVisionEncode", + "pos": [ + -610, + 580 + ], + "size": [ + 290.390625, + 78 + ], + "flags": {}, + "order": 0, + "mode": 4, + "inputs": [ + { + "localized_name": "clip视觉", + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 85 + }, + { + "localized_name": "图像", + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "localized_name": "裁剪", + "name": "crop", + "type": "COMBO", + "widget": { + "name": "crop" + }, + "link": 91 + } + ], + "outputs": [ + { + "localized_name": "CLIP视觉输出", + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": [ + 87 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "CLIPVisionEncode" + }, + "widgets_values": [ + "center" + ] + }, + { + "id": 54, + "type": "USOStyleReference", + "pos": [ + -610, + 710 + ], + "size": [ + 360, + 70 + ], + "flags": {}, + "order": 1, + "mode": 4, + "inputs": [ + { + "localized_name": "model", + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "localized_name": "model_patch", + "name": "model_patch", + "type": "MODEL_PATCH", + "link": 89 + }, + { + "localized_name": "clip_vision_output", + "name": "clip_vision_output", + "type": "CLIP_VISION_OUTPUT", + "link": 87 + } + ], + "outputs": [ + { + "localized_name": "模型", + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.56", + "Node name for S&R": "USOStyleReference" + }, + "widgets_values": [] + } + ], + "groups": [], + "links": [ + { + "id": 87, + "origin_id": 52, + "origin_slot": 0, + "target_id": 54, + "target_slot": 2, + "type": "CLIP_VISION_OUTPUT" + }, + { + "id": 85, + "origin_id": -10, + "origin_slot": 0, + "target_id": 52, + "target_slot": 0, + "type": "CLIP_VISION" + }, + { + "id": 86, + "origin_id": -10, + "origin_slot": 1, + "target_id": 52, + "target_slot": 1, + "type": "IMAGE" + }, + { + "id": 88, + "origin_id": -10, + "origin_slot": 2, + "target_id": 54, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 89, + "origin_id": -10, + "origin_slot": 3, + "target_id": 54, + "target_slot": 1, + "type": "MODEL_PATCH" + }, + { + "id": 90, + "origin_id": 54, + "origin_slot": 0, + "target_id": -20, + "target_slot": 0, + "type": "MODEL" + }, + { + "id": 91, + "origin_id": -10, + "origin_slot": 4, + "target_id": 52, + "target_slot": 2, + "type": "COMBO" + } + ], + "extra": {} + } + ] + }, + "config": {}, + "extra": { + "ds": { + "scale": 0.6355517414158411, + "offset": [ + 973.4793616213951, + -1344.5087905516937 + ] + }, + "frontendVersion": "1.25.11" + }, + "version": 0.4 +} \ No newline at end of file diff --git a/workflow/example6.png b/workflow/example6.png new file mode 100644 index 0000000000000000000000000000000000000000..206e45d9769b5e79614e3ab0ddfc7dde70fc8f2c --- /dev/null +++ b/workflow/example6.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31078ef2daa6ec71e545bbb4b7d35314037a5f17db3f7656821ad35071b29025 +size 1602362 diff --git a/workflow/input.png b/workflow/input.png new file mode 100644 index 
0000000000000000000000000000000000000000..058e469e9ef099a8f1086464c31ff28a520694db --- /dev/null +++ b/workflow/input.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14fca9c1085732a0e44c6eb0a83d9781b9f3f662a75fec0b50ca348631a1a493 +size 1455216 diff --git a/workflow/style5_0.webp.webp b/workflow/style5_0.webp.webp new file mode 100644 index 0000000000000000000000000000000000000000..1c02f7fe712a295f858a666f211d994cecaa7ac1 --- /dev/null +++ b/workflow/style5_0.webp.webp @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d272a0ecb03126503446b00a2152deab2045f89ac2c01f948e1099589d2862 +size 141886