Andre committed · Commit ddc3713 · 1 Parent(s): 0a0ef7b
Update
Files changed:
- .DS_Store +0 -0
- CtB-AI-HP-Python/.DS_Store +0 -0
- app.py +116 -0
- requirements.txt +10 -0
.DS_Store
ADDED
Binary file (6.15 kB)
CtB-AI-HP-Python/.DS_Store
ADDED
Binary file (6.15 kB)
app.py
ADDED
@@ -0,0 +1,116 @@
import os
import random
from huggingface_hub import InferenceClient
from PIL import Image
from datetime import datetime
import gradio as gr

# Retrieve the Hugging Face token from environment variables
api_token = os.getenv("HF_TOKEN")

# List of models with aliases
models = [
    {
        "alias": "FLUX.1-dev",
        "name": "black-forest-labs/FLUX.1-dev"
    }
]

# Initialize the InferenceClient with the default model
client = InferenceClient(models[0]["name"], token=api_token)

# Function to generate castle descriptions based on HP
def generate_castle_description(hp, color):
    if hp == 100:
        return f"a {color} flag, perfectly intact, with lush vegetation surrounding it, birds flying in the sky, and a peaceful atmosphere"
    elif hp >= 80:
        return f"a {color} flag, slightly damaged, with small red fires and smoke visible, cracks starting to appear on the walls, and a tense atmosphere"
    elif hp >= 50:
        return f"a {color} flag, moderately damaged, with larger red fires, smoke billowing from the towers, cracks spreading across the walls, and some structures partially collapsed"
    elif hp >= 30:
        return f"a {color} flag, severely damaged, with heavy red fires, thick smoke, walls crumbling, and significant structural collapse"
    elif hp >= 15:
        return f"a {color} flag, critically damaged, with most structures in ruins, intense red fire and smoke, and only a few recognizable parts of the castle remaining"
    elif hp >= 5:
        return f"a {color} flag, almost destroyed, with only a few recognizable structures still standing, engulfed in red flames, and the castle on the verge of collapse"
    else:
        return f"a {color} flag, completely ruined, with no signs of life, intense red fire and smoke, and the castle reduced to rubble"

# Function to generate the prompt
def generate_prompt(left_hp, right_hp):
    left_desc = generate_castle_description(left_hp, "blue")
    right_desc = generate_castle_description(right_hp, "red")
    return f"A wide fantasy landscape showing two castles. On the left, a castle with {left_desc}, adorned exclusively with large and prominent blue flags flying proudly. On the right, a castle with {right_desc}, adorned exclusively with large and prominent red flags flying proudly. The scene is highly detailed, with a clear contrast between the two castles. The left castle is visibly more damaged than the right castle, with significantly more red fire, smoke, and destruction. The blue flags on the left castle and the red flags on the right castle are clearly visible and distinct, ensuring no overlap in team colors. The fire is always red, regardless of the castle's team."

# Function to generate images based on the HP values
def generate_image(left_hp, right_hp, height, width, num_inference_steps, guidance_scale, seed, randomize_seed):
    # Generate the prompt
    prompt = generate_prompt(left_hp, right_hp)

    try:
        # Randomize the seed if the checkbox is checked
        if randomize_seed:
            seed = random.randint(0, 1000000)

        print(f"Using seed: {seed}")

        # Debug: Indicate that the image is being generated
        print("Generating image... Please wait.")

        # Initialize the InferenceClient with the selected model
        client = InferenceClient(models[0]["name"], token=api_token)

        # Generate the image using the Inference API with parameters
        image = client.text_to_image(
            prompt,
            guidance_scale=guidance_scale,  # Guidance scale
            num_inference_steps=num_inference_steps,  # Number of inference steps
            width=width,  # Width
            height=height,  # Height
            seed=seed  # Random seed
        )
        return image
    except Exception as e:
        return f"An error occurred: {e}"

# Gradio Interface
def generate_interface(left_hp, right_hp, height, width, num_inference_steps, guidance_scale, seed, randomize_seed):
    # Generate the image
    image = generate_image(left_hp, right_hp, height, width, num_inference_steps, guidance_scale, seed, randomize_seed)

    if isinstance(image, str):
        return image  # Return error message
    else:
        # Save the image with a timestamped filename
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_filename = f"{timestamp}_left_{left_hp}_right_{right_hp}.png"
        image.save(output_filename)
        return output_filename

# Gradio UI Components
with gr.Blocks() as demo:
    gr.Markdown("# Castle Image Generator")
    with gr.Row():
        left_hp = gr.Slider(0, 100, value=100, label="Left Castle HP")
        right_hp = gr.Slider(0, 100, value=100, label="Right Castle HP")
    with gr.Row():
        height = gr.Number(value=512, label="Height")
        width = gr.Number(value=1024, label="Width")
    with gr.Row():
        num_inference_steps = gr.Slider(10, 100, value=20, label="Inference Steps")
        guidance_scale = gr.Slider(1.0, 20.0, value=2.0, label="Guidance Scale")
    with gr.Row():
        seed = gr.Number(value=random.randint(0, 1000000), label="Seed")
        randomize_seed = gr.Checkbox(value=True, label="Randomize Seed")
    generate_button = gr.Button("Generate Image")
    output_image = gr.Image(label="Generated Image")

    # Link the button to the function
    generate_button.click(
        generate_interface,
        inputs=[left_hp, right_hp, height, width, num_inference_steps, guidance_scale, seed, randomize_seed],
        outputs=output_image
    )

# Launch the Gradio app
demo.launch()
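For quick experimentation outside the Gradio UI, the same Inference API call made inside generate_image above can be exercised on its own. A minimal sketch, assuming HF_TOKEN is set in the environment and the account has access to black-forest-labs/FLUX.1-dev; the prompt and output filename here are placeholders, not values from the Space:

import os
from huggingface_hub import InferenceClient

# Same model and default parameters as app.py, with a shortened placeholder prompt.
client = InferenceClient("black-forest-labs/FLUX.1-dev", token=os.getenv("HF_TOKEN"))
image = client.text_to_image(
    "A wide fantasy landscape showing two castles, one flying blue flags and one flying red flags",
    guidance_scale=2.0,
    num_inference_steps=20,
    width=1024,
    height=512,
    seed=42,
)
image.save("castles_test.png")  # text_to_image returns a PIL image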
requirements.txt
ADDED
@@ -0,0 +1,10 @@
accelerate
diffusers
invisible_watermark
torch
transformers
xformers
IPython
gradio
huggingface_hub
Pillow
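As a quick check that an environment matches this list before launching app.py, a minimal sketch (import names can differ from the PyPI names, e.g. Pillow is imported as PIL; invisible_watermark and xformers are left out here for that reason):

import importlib

# Print the installed version of each package from requirements.txt that is
# importable under its PyPI name; a missing package raises ImportError.
for name in ["accelerate", "diffusers", "torch", "transformers",
             "gradio", "huggingface_hub", "IPython", "PIL"]:
    module = importlib.import_module(name)
    print(name, getattr(module, "__version__", "unknown"))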