Update app.py

app.py CHANGED
@@ -12,7 +12,6 @@ import time
 
 # --- Main Configuration ---
 KRYPTO_LORA = {
-    # CORRECTION: The repo name was misspelled (a capital O instead of a zero).
     "repo": "Econogoat/Krypt0_LORA",
     "trigger": "Krypt0",
     "adapter_name": "krypt0"
@@ -28,17 +27,16 @@ if not HF_TOKEN:
     print("WARNING: HF_TOKEN secret is not set. Gated model downloads may fail.")
 
 # --- Model Initialization ---
-device = "cuda" if torch.cuda.is_available() else "cpu"
-print(f"Using device: {device}")
+# CORRECTION: The device is no longer detected here; everything is loaded on the CPU by default.
+print("Loading all models to CPU by default for ZeroGPU compatibility.")
 dtype = torch.bfloat16
 base_model = "black-forest-labs/FLUX.1-dev"
 
-# Load the models onto the detected device
-taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
-good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype, token=HF_TOKEN).to(device)
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1, token=HF_TOKEN).to(device)
-
-print("Models loaded.")
+# CORRECTION: All `.to(device)` calls are removed. The models stay on the CPU at startup.
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
+good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype, token=HF_TOKEN)
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1, token=HF_TOKEN)
+print("Models loaded on CPU.")
 
 # Load the LoRA adapter once on startup
 print(f"Loading on-board LoRA: {KRYPTO_LORA['repo']}")
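Context for this hunk: on a ZeroGPU Space, module-level code runs on a machine without a GPU attached, and torch.cuda.is_available() is typically False at import time, so the old device detection would have picked the CPU anyway; loading explicitly on CPU makes the intent clear and leaves the GPU move to the request path. A minimal sketch of the startup shape the new code adopts (names are illustrative, not the Space's exact code):

import torch
from diffusers import DiffusionPipeline

dtype = torch.bfloat16
# No .to("cuda") at import time: a ZeroGPU Space has no GPU attached yet.
# The pipeline stays on the CPU until a GPU-decorated function moves it.
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype)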
@@ -46,7 +44,7 @@ pipe.load_lora_weights(
     KRYPTO_LORA['repo'],
     low_cpu_mem_usage=True,
     adapter_name=KRYPTO_LORA['adapter_name'],
-    token=HF_TOKEN
+    token=HF_TOKEN
 )
 print("LoRA loaded successfully.")
 
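On the `token=HF_TOKEN` line above: diffusers forwards `token` to the underlying huggingface_hub download, which is what lets the Space fetch gated or private repos using its HF_TOKEN secret. The same idea in isolation, as a sketch (assuming the env var set up earlier in this file):

import os
from diffusers import DiffusionPipeline

HF_TOKEN = os.getenv("HF_TOKEN")  # Space secret; may be None when running locally
# token authenticates the hub download; needed for gated models such as FLUX.1-dev
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", token=HF_TOKEN)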
@@ -72,9 +70,9 @@ def calculate_dimensions(aspect_ratio, resolution):
     height = (height // 64) * 64
     return width, height
 
+# CORRECTION: This function now assumes `pipe` is already on the correct device.
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
-
-    generator = torch.Generator(device=device).manual_seed(seed)
+    generator = torch.Generator(device="cuda").manual_seed(seed)
 
     image_generator = pipe.flux_pipe_call_that_returns_an_iterable_of_images(
         prompt=prompt_mash,
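A consequence of the generator change above: torch.Generator(device="cuda") can only be constructed while CUDA is available, so generate_image is now correct only when called from the GPU-holding path. For local testing, a defensive variant (illustrative, not in the commit) could fall back to CPU:

import torch

def make_generator(seed: int) -> torch.Generator:
    # Hypothetical helper: use a CUDA generator when a GPU is attached, else CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.Generator(device=device).manual_seed(seed)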
@@ -106,21 +104,35 @@ def update_history(new_image, history):
 def run_generation(prompt, lora_scale, cfg_scale, steps, randomize_seed, seed, aspect_ratio, base_resolution, progress=gr.Progress(track_tqdm=True)):
     if not prompt:
         raise gr.Error("Prompt cannot be empty.")
-
-    prompt_mash = f"{KRYPTO_LORA['trigger']}, {prompt}"
-    print("Final prompt:", prompt_mash)
-
-    pipe.set_adapters([KRYPTO_LORA['adapter_name']], adapter_weights=[lora_scale])
-    print(f"Adapter '{KRYPTO_LORA['adapter_name']}' activated with weight {lora_scale}.")
 
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
+    # CORRECTION: It is here, inside the @spaces.GPU function, that the models are moved to the GPU.
+    print("Moving models to GPU for generation...")
+    pipe.to("cuda")
+    good_vae.to("cuda")  # The high-quality VAE must be moved as well
+
+    try:
+        prompt_mash = f"{KRYPTO_LORA['trigger']}, {prompt}"
+        print("Final prompt:", prompt_mash)
+
+        pipe.set_adapters([KRYPTO_LORA['adapter_name']], adapter_weights=[lora_scale])
+        print(f"Adapter '{KRYPTO_LORA['adapter_name']}' activated with weight {lora_scale}.")
+
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
+
+        width, height = calculate_dimensions(aspect_ratio, base_resolution)
+        print(f"Generating a {width}x{height} image.")
 
-    width, height = calculate_dimensions(aspect_ratio, base_resolution)
-    print(f"Generating a {width}x{height} image.")
+        for image, progress_update in generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
+            yield image, seed, progress_update
+
+    finally:
+        # CORRECTION: Good practice: clean up by moving the models back to the CPU after use.
+        print("Moving models back to CPU.")
+        pipe.to("cpu")
+        good_vae.to("cpu")
+        torch.cuda.empty_cache()
 
-    for image, progress_update in generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
-        yield image, seed, progress_update
 
 run_generation.zerogpu = True
 
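The hunk above is the core of the ZeroGPU pattern the commit comments describe: move the models to the GPU inside the GPU-decorated function, do all CUDA work there, and move them back in `finally` so the GPU is released clean even if generation raises. Reduced to a standalone sketch (the decorator comes from the `spaces` package; `run` and its body are illustrative, with `pipe` and `good_vae` assumed loaded on CPU as above):

import spaces
import torch

@spaces.GPU
def run(prompt: str):
    pipe.to("cuda")                    # GPU exists only for the duration of this call
    good_vae.to("cuda")
    try:
        return pipe(prompt).images[0]  # all CUDA work stays inside the decorated call
    finally:
        pipe.to("cpu")                 # hand the GPU back with memory freed
        good_vae.to("cpu")
        torch.cuda.empty_cache()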
@@ -180,7 +192,6 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as app:
         )
 
         # Advanced Settings
-        # CORRECTION: The accordion must be closed by default.
         with gr.Accordion("Advanced Settings", open=False):
             base_resolution = gr.Slider(label="Resolution (longest side)", minimum=768, maximum=1408, step=64, value=1024)
             steps = gr.Slider(label="Generation Steps", minimum=4, maximum=50, step=1, value=20)