Update app.py
app.py CHANGED

@@ -2,7 +2,6 @@ import os
 import subprocess
 import sys
 import io
-from kernels import get_kernel
 import gradio as gr
 import numpy as np
 import random

@@ -38,14 +37,11 @@ def remote_text_encoder(prompts):
     return prompt_embeds

 # Load model
-fa3_kernel = get_kernel("kernels-community/flash-attn3", revision="fake-ops-return-probs")
-
 repo_id = "black-forest-labs/FLUX.2-dev"

 dit = Flux2Transformer2DModel.from_pretrained(
     repo_id,
     subfolder="transformer",
-    attn_implementation=fa3_kernel,
     torch_dtype=torch.bfloat16
 )

@@ -56,16 +52,18 @@ pipe = Flux2Pipeline.from_pretrained(
     torch_dtype=torch.bfloat16
 )
 pipe.to("cuda")
-
-
-
-
-
-
-
-
-
-
+
+pipe.transformer.set_attention_backend("_flash_3_hub")
+
+optimize_pipeline_(
+    pipe,
+    image=[Image.new("RGB", (1024, 1024))],
+    prompt_embeds=remote_text_encoder("prompt").to("cuda"),
+    guidance_scale=2.5,
+    width=1024,
+    height=1024,
+    num_inference_steps=1
+)

 @spaces.GPU(duration=180)
 def infer(prompt, input_images, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=50, guidance_scale=2.5, progress=gr.Progress(track_tqdm=True)):