Spaces: Running on Zero
File size: 9,237 Bytes
import gradio as gr
import spaces
import os
import cv2
import numpy as np
import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import DDIMScheduler
from controlnet_aux import CannyDetector
from PIL import Image
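# Runtime dependencies assumed from the imports above (not pinned here):
# gradio, spaces (ZeroGPU), torch, diffusers, controlnet_aux, opencv-python, numpy, Pillow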
class SketchToRealisticFace:
    def __init__(self):
        """Initialize the sketch-to-realistic face pipeline"""
        # Load ControlNet model for Canny edge detection
        self.controlnet = ControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny",
            torch_dtype=torch.float16
        )

        # # Load base SD 1.5 model
        # self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
        #     "runwayml/stable-diffusion-v1-5",
        #     controlnet=self.controlnet,
        #     torch_dtype=torch.float16,
        #     safety_checker=None,
        #     requires_safety_checker=False
        # )

        # Load the Realistic Vision V6.0 B1 checkpoint as the base model
        self.pipe = StableDiffusionControlNetPipeline.from_single_file(
            "https://huggingface.co/NikhilJoson/Realistic_Vision_V6_B1/blob/main/realisticVisionV60B1_v51HyperVAE.safetensors",
            controlnet=self.controlnet,
            torch_dtype=torch.float16,
            safety_checker=None,
            requires_safety_checker=False
        )

        # Use DDIM scheduler for better quality
        self.pipe.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)

        # Move to GPU if available
        if torch.cuda.is_available():
            self.pipe = self.pipe.to("cuda")

        # Initialize Canny detector
        self.canny_detector = CannyDetector()
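        # Note: preprocess_sketch below calls cv2.Canny directly, so this
        # CannyDetector instance is currently unused.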
        # Enable memory efficient attention
        # self.pipe.enable_memory_efficient_attention()

        # Set default parameters
        self.default_prompt = "RAW photo, portrait, 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3"
        self.default_negative_prompt = "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck"
    def preprocess_sketch(self, sketch_image, low_threshold=100, high_threshold=200):
        """Preprocess sketch image to create Canny edge map"""
        image_array = np.array(sketch_image)

        # Convert to grayscale if the input has color channels
        if len(image_array.shape) == 3:
            image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2GRAY)

        # Extract edges and convert back to a 3-channel PIL image for ControlNet
        canny = cv2.Canny(image_array, low_threshold, high_threshold)
        control_image = Image.fromarray(canny).convert("RGB")
        return control_image
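    # Illustrative threshold choice (hypothetical values, not used by the app):
    # a faint pencil sketch often needs lower thresholds to retain enough edges,
    # e.g. self.preprocess_sketch(img, low_threshold=50, high_threshold=150),
    # while clean, high-contrast line art works well with the 100/200 defaults.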
    def generate(self, sketch_image, custom_prompt=None, width=512, height=512, seed=None,
                 num_inference_steps=20, guidance_scale=7.0, controlnet_conditioning_scale=1.0):
        """Generate realistic face from sketch"""
        if sketch_image is None:
            return None, None

        # Use custom prompt if provided, otherwise use default
        prompt = custom_prompt if custom_prompt and custom_prompt.strip() else self.default_prompt

        # Resize sketch to target dimensions
        sketch_image = sketch_image.resize((width, height))

        # Preprocess sketch to create control image
        control_image = self.preprocess_sketch(sketch_image)

        # Set seed for reproducibility ("is not None" so a seed of 0 is honored)
        generator = torch.Generator(device=self.pipe.device).manual_seed(seed) if seed is not None else None

        # Generate image
        with torch.autocast("cuda" if torch.cuda.is_available() else "cpu"):
            result = self.pipe(
                prompt=prompt,
                image=control_image,
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
                controlnet_conditioning_scale=controlnet_conditioning_scale,
                generator=generator,
                width=width,
                height=height,
                negative_prompt=self.default_negative_prompt
            )
        return result.images[0], control_image
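# Illustrative standalone usage of the class (hypothetical file names, not part
# of the app flow):
#   gen = SketchToRealisticFace()
#   face, edges = gen.generate(Image.open("sketch.png"), seed=42)
#   face.save("face.png")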
# Initialize the generator globally
print("Loading model... This may take a few minutes.")
generator = SketchToRealisticFace()
print("Model loaded successfully!")
@spaces.GPU
def generate_face(sketch_image, custom_prompt, seed, num_inference_steps, guidance_scale, controlnet_conditioning_scale):
    """Wrapper function for Gradio interface"""
    try:
        # Convert seed to int if provided ("is not None" so a seed of 0 is honored)
        seed_int = int(seed) if seed is not None else None

        # Generate the realistic face
        realistic_face, control_image = generator.generate(
            sketch_image=sketch_image,
            custom_prompt=custom_prompt,
            seed=seed_int,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            controlnet_conditioning_scale=controlnet_conditioning_scale
        )
        return realistic_face, control_image
    except Exception as e:
        print(f"Error: {str(e)}")
        return None, None
# Create Gradio interface
with gr.Blocks(title="Sketch to Realistic Face Generator", theme=gr.themes.Soft()) as app:
    gr.Markdown(
        """
        # 🎨 Sketch to Realistic Face Generator

        Transform your sketches into realistic faces using Stable Diffusion with ControlNet!

        **Instructions:**
        1. Upload a sketch or drawing of a face
        2. Optionally customize the prompt
        3. Adjust generation parameters (steps, guidance scale, etc.)
        4. Set a seed for reproducible results (optional)
        5. Click "Generate Realistic Face"
        """
    )
    with gr.Row():
        with gr.Column():
            # Input components
            sketch_input = gr.Image(label="Upload Sketch", type="pil", height=400)
            custom_prompt = gr.Textbox(
                label="Custom Prompt (optional)",
                placeholder="Leave empty to use default prompt, or customize: 'portrait of a young person, professional headshot, studio lighting...'",
                lines=3
            )

            with gr.Row():
                seed_input = gr.Number(label="Seed (optional)", placeholder="Enter a number for reproducible results", precision=0)

            # Generation parameters
            gr.Markdown("### 🎛️ Generation Parameters")
            num_inference_steps = gr.Slider(
                minimum=10, maximum=50, value=20, step=1, label="Inference Steps",
                info="More steps = higher quality but slower generation"
            )
            guidance_scale = gr.Slider(
                minimum=1.0, maximum=20.0, value=7.0, step=0.5, label="Guidance Scale",
                info="How closely to follow the prompt (higher = more adherence to prompt)"
            )
            controlnet_conditioning_scale = gr.Slider(
                minimum=0.5, maximum=2.0, value=1.0, step=0.1, label="ControlNet Conditioning Scale",
                info="How strongly to follow the sketch structure"
            )

            generate_btn = gr.Button("🚀 Generate Realistic Face", variant="primary", size="lg")
        with gr.Column():
            # Output components
            with gr.Row():
                realistic_output = gr.Image(label="Generated Realistic Face", height=400)
                control_output = gr.Image(label="Control Image (Canny Edges)", height=400)

    # Show the default prompt used when no custom prompt is given
    gr.Markdown("## 📝 Default Prompt")
    gr.Markdown(f"```{generator.default_prompt}```")

    gr.Markdown(
        """
        ## 💡 Tips:
        - **Inference Steps**: 20-30 steps usually provide good quality. More steps improve quality but increase generation time.
        - **Guidance Scale**: 7.0-12.0 works well. Higher values make the AI follow your prompt more strictly.
        - **ControlNet Scale**: 1.0 is usually a good choice. Lower values give more creative freedom; higher values stick closer to the sketch structure.
        - Upload clear sketches with well-defined facial features.
        - The model works best with front-facing portraits.
        - Use the same seed number to get consistent results.
        - Customize the prompt to specify style, lighting, or other details.
        - The control image shows how your sketch is interpreted as edges.
        """
    )
    # Connect the function to the interface
    generate_btn.click(
        fn=generate_face,
        inputs=[sketch_input, custom_prompt, seed_input, num_inference_steps, guidance_scale, controlnet_conditioning_scale],
        outputs=[realistic_output, control_output]
    )
# Launch the app
if __name__ == "__main__":
    app.launch(server_name="0.0.0.0", server_port=7860, share=True)