multimodalart and cbensimon committed
Commit b695f49 · verified · 1 Parent(s): 02b00c0

AOT: Load Pre-compiled FLUX.2 blocks from hub (#5)


- AOT: Load Pre-compiled FLUX.2 blocks from hub (e7db332e79b3f00f7252605838483b3d6ec4bcfd)


Co-authored-by: Charles Bensimon <[email protected]>

Files changed (1)
  1. app.py +2 -13
app.py CHANGED
@@ -9,7 +9,6 @@ import spaces
 import torch
 from diffusers import Flux2Pipeline, Flux2Transformer2DModel
 from diffusers import BitsAndBytesConfig as DiffBitsAndBytesConfig
-from optimization import optimize_pipeline_
 import requests
 from PIL import Image
 import json
@@ -81,18 +80,8 @@ pipe = Flux2Pipeline.from_pretrained(
 )
 pipe.to(device)
 
-pipe.transformer.set_attention_backend("_flash_3_hub")
-
-# Optimization runs once at startup
-optimize_pipeline_(
-    pipe,
-    image=[Image.new("RGB", (1024, 1024))],
-    prompt_embeds = remote_text_encoder("prompt").to(device),
-    guidance_scale=2.5,
-    width=1024,
-    height=1024,
-    num_inference_steps=1
-)
+# Pull pre-compiled Flux2 Transformer blocks from HF hub
+spaces.aoti_blocks_load(pipe.transformer, "zerogpu-aoti/FLUX.2", variant="fa3")
 
 def image_to_data_uri(img):
     buffered = io.BytesIO()
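
For context, a minimal sketch of the startup path after this commit, assuming the rest of app.py is unchanged. The checkpoint id and dtype below are placeholders (the real from_pretrained arguments lie outside this hunk); the spaces.aoti_blocks_load call is the one introduced by the diff, and the "fa3" variant presumably explains why the explicit set_attention_backend("_flash_3_hub") line and the in-process compilation could be dropped: the blocks on the zerogpu-aoti/FLUX.2 repo were already compiled ahead of time against the FlashAttention-3 backend.

# Minimal sketch of the new startup path after this commit.
# Assumptions: the checkpoint id and dtype are placeholders; the actual
# from_pretrained(...) arguments in app.py are outside this hunk.
import spaces
import torch
from diffusers import Flux2Pipeline

device = "cuda"

pipe = Flux2Pipeline.from_pretrained(
    "black-forest-labs/FLUX.2-dev",  # placeholder checkpoint id
    torch_dtype=torch.bfloat16,      # placeholder dtype
)
pipe.to(device)

# Replaces the removed startup warm-up (optimize_pipeline_): fetch transformer
# blocks that were ahead-of-time compiled (FlashAttention-3 "fa3" variant)
# from the zerogpu-aoti/FLUX.2 repo and attach them to the in-memory model.
spaces.aoti_blocks_load(pipe.transformer, "zerogpu-aoti/FLUX.2", variant="fa3")

Compared with compiling at every Space startup, loading pre-compiled blocks trades a one-time export step for a much faster cold start, which is the point of the +2/-13 change above.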