import os
import gradio as gr
import torch
from PIL import Image
import spaces
from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
from diffusers.utils import load_image
import random
import time
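
# Note: live_preview_helpers is not a PyPI package; it is expected to be a helper module
# shipped alongside this app.py in the same Space repository.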

# --- Main Configuration ---
KRYPTO_LORA = {
    "repo": "Econogoat/Krypt0_LORA",
    "trigger": "Krypt0",
    "adapter_name": "krypt0"
}

# Get access token from Space secrets
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    print("WARNING: HF_TOKEN secret is not set. Gated model downloads may fail.")

# --- Model Initialization ---
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

dtype = torch.bfloat16
base_model = "black-forest-labs/FLUX.1-dev"

# Load model components
print("Loading model components...")
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype, token=HF_TOKEN).to(device)
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1, token=HF_TOKEN).to(device)
print("Models loaded.")

# Load the LoRA adapter once on startup
print(f"Loading on-board LoRA: {KRYPTO_LORA['repo']}")
pipe.load_lora_weights(
    KRYPTO_LORA['repo'],
    low_cpu_mem_usage=True,
    adapter_name=KRYPTO_LORA['adapter_name'],
    token=HF_TOKEN
)
print("LoRA loaded successfully.")

MAX_SEED = 2**32 - 1

# Monkey-patch the pipeline for live preview
pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
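# __get__(pipe) binds the imported helper to this pipeline instance so it can be called like a
# regular method. It streams intermediate images during denoising (decoded with the fast Tiny
# AutoEncoder set as the pipeline VAE) and presumably decodes the final frame with the
# full-quality VAE passed in as good_vae.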

def calculate_dimensions(aspect_ratio, resolution):
    resolution = int(resolution)
    if aspect_ratio == "Square (1:1)":
        width, height = resolution, resolution
    elif aspect_ratio == "Portrait (9:16)":
        width, height = int(resolution * 9 / 16), resolution
    elif aspect_ratio == "Landscape (16:9)":
        width, height = resolution, int(resolution * 9 / 16)
    elif aspect_ratio == "Ultrawide (21:9)":
        width, height = resolution, int(resolution * 9 / 21)
    else:
        width, height = resolution, resolution
    # Snap both sides down to the nearest multiple of 64
    width = (width // 64) * 64
    height = (height // 64) * 64
    return width, height
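
# Example (for reference): calculate_dimensions("Landscape (16:9)", 1024) returns (1024, 576),
# since int(1024 * 9 / 16) = 576 and both sides are already multiples of 64.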

# @spaces.GPU (assumption: this Space runs on ZeroGPU) requests a GPU for each call;
# without it, the `spaces` import above goes unused.
@spaces.GPU
def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
    pipe.to(device)
    generator = torch.Generator(device=device).manual_seed(seed)
    image_generator = pipe.flux_pipe_call_that_returns_an_iterable_of_images(
        prompt=prompt_mash,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": 1.0},
        output_type="pil",
        good_vae=good_vae,
    )
    final_image = None
    # Stream each intermediate image with an updated progress bar, then yield the final
    # image and hide the bar
    for i, image in enumerate(image_generator):
        final_image = image
        progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {i + 1}; --total: {steps};"></div></div>'
        yield image, gr.update(value=progress_bar, visible=True)
    yield final_image, gr.update(visible=False)

def update_history(new_image, history):
    if new_image is None:
        return history
    if history is None:
        history = []
    history.insert(0, new_image)
    return history
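
# run_generation assembles the final prompt, activates the LoRA adapter at the requested
# weight, resolves the seed and output dimensions, then streams results from generate_image.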
def run_generation(prompt, lora_scale, cfg_scale, steps, randomize_seed, seed, aspect_ratio, base_resolution, progress=gr.Progress(track_tqdm=True)):
    if not prompt:
        raise gr.Error("Prompt cannot be empty.")

    # --- Prompt construction ---
    # Fixed parts wrapped around the user's prompt
    prefix_prompt = f"{KRYPTO_LORA['trigger']}, Krypt0 the white scruffy superdog with a red cape,"
    suffix_prompt = ", This is a cinematic, ultra-high-detail, photorealistic still"

    # Build the final prompt
    user_prompt = prompt  # The prompt entered by the user
    prompt_mash = f"{prefix_prompt} {user_prompt}{suffix_prompt}"
    print("Final prompt sent to model:", prompt_mash)

    pipe.set_adapters([KRYPTO_LORA['adapter_name']], adapter_weights=[lora_scale])
    print(f"Adapter '{KRYPTO_LORA['adapter_name']}' activated with weight {lora_scale}.")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    width, height = calculate_dimensions(aspect_ratio, base_resolution)
    print(f"Generating a {width}x{height} image.")

    for image, progress_update in generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
        yield image, seed, progress_update

# Presumably a hint for the Spaces ZeroGPU integration; harmless outside ZeroGPU.
run_generation.zerogpu = True

# --- User Interface (Gradio) ---
css = '''
#title_container { text-align: center; margin-bottom: 1em; }
#title_line { display: flex; justify-content: center; align-items: center; }
#title_line h1 { font-size: 2.5em; margin: 0; }
#subtitle { font-size: 1.1em; color: #57606a; margin-top: 0.3em; }
.progress-container { width: 100%; height: 30px; background-color: #f0f0f0; border-radius: 15px; overflow: hidden; margin-bottom: 20px; }
.progress-bar { height: 100%; background-color: #4f46e5; width: calc(var(--current) / var(--total) * 100%); transition: width 0.1s ease-in-out; }
'''

with gr.Blocks(css=css, theme=gr.themes.Soft()) as app:
    # --- Header ---
    gr.HTML(
        """
        <div id="title_container">
            <div id="title_line">
                <h1>Krypto Image Generator - beta v1</h1>
            </div>
            <div id="subtitle">
                Powered by $Krypto | @Kryptocoinonsol
            </div>
        </div>
        """
    )
    with gr.Row():
        # --- LEFT COLUMN: CONTROLS ---
        with gr.Column(scale=3):
            # Prompt Controls (simplified)
            with gr.Group():
                prompt = gr.Textbox(
                    label="Prompt",
                    lines=3,
                    placeholder="Krypto the superdog sits in the snow, with snow on his muzzle, looking innocent. It's a medium shot of the dog, and the image creates a friendly atmosphere."
                )
            # Image Shape and Style Controls
            with gr.Group():
                aspect_ratio = gr.Radio(
                    label="Aspect Ratio",
                    choices=["Square (1:1)", "Portrait (9:16)", "Landscape (16:9)", "Ultrawide (21:9)"],
                    value="Square (1:1)"
                )
                lora_scale = gr.Slider(
                    label="Krypt0 Style Strength",
                    minimum=0,
                    maximum=2,
                    step=0.05,
                    value=0.9,
                    info="Controls how strongly the artistic style is applied. Higher values mean a more stylized image."
                )
            # Advanced Settings
            with gr.Accordion("Advanced Settings", open=False):
                base_resolution = gr.Slider(label="Resolution (longest side)", minimum=768, maximum=1408, step=64, value=1024)
                steps = gr.Slider(label="Generation Steps", minimum=4, maximum=50, step=1, value=20)
                cfg_scale = gr.Slider(label="Guidance (CFG Scale)", minimum=1, maximum=10, step=0.5, value=3.5)
                with gr.Row():
                    randomize_seed = gr.Checkbox(True, label="Random Seed")
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            generate_button = gr.Button("Generate", variant="primary")
        # --- RIGHT COLUMN: RESULTS ---
        with gr.Column(scale=2):
            progress_bar = gr.Markdown(elem_id="progress", visible=False)
            result = gr.Image(label="Generated Image", interactive=False, show_share_button=True)
            with gr.Accordion("History", open=False):
                history_gallery = gr.Gallery(label="History", columns=4, object_fit="contain", interactive=False)

    # --- Event Logic ---
    generation_event = gr.on(
        triggers=[generate_button.click, prompt.submit],
        fn=run_generation,
        inputs=[prompt, lora_scale, cfg_scale, steps, randomize_seed, seed, aspect_ratio, base_resolution],
        outputs=[result, seed, progress_bar]
    )
    generation_event.then(
        fn=update_history,
        inputs=[result, history_gallery],
        outputs=history_gallery,
    )
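    # update_history runs only after the generation stream has finished, so it prepends the
    # final image shown in `result` to the history gallery.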

app.queue(max_size=20)
app.launch()