import types
import random
import spaces
import logging
import os
from pathlib import Path
from datetime import datetime
import re

import torch
import numpy as np
import torchaudio
from diffusers import AutoencoderKLWan, UniPCMultistepScheduler
from diffusers.utils import export_to_video
from diffusers import AutoModel
import gradio as gr
import tempfile
from huggingface_hub import hf_hub_download
import traceback

# Patch for scaled_dot_product_attention to fix enable_gqa issue
import torch.nn.functional as F

original_sdpa = F.scaled_dot_product_attention

def patched_scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0,
                                          is_causal=False, scale=None, enable_gqa=None):
    # Ignore the enable_gqa argument (not accepted by PyTorch builds that predate it)
    # and forward only the remaining keyword arguments.
    kwargs = {}
    if attn_mask is not None:
        kwargs['attn_mask'] = attn_mask
    if dropout_p != 0.0:
        kwargs['dropout_p'] = dropout_p
    if is_causal:
        kwargs['is_causal'] = is_causal
    if scale is not None:
        kwargs['scale'] = scale
    return original_sdpa(query, key, value, **kwargs)

# Apply the patch
F.scaled_dot_product_attention = patched_scaled_dot_product_attention

from src.pipeline_wan_nag import NAGWanPipeline
from src.transformer_wan_nag import NagWanTransformer3DModel

# MMAudio imports
try:
    import mmaudio
except ImportError:
    os.system("pip install -e .")
    import mmaudio

from mmaudio.eval_utils import (ModelConfig, all_model_cfg, generate as mmaudio_generate,
                                load_video, make_video, setup_eval_logging)
from mmaudio.model.flow_matching import FlowMatching
from mmaudio.model.networks import MMAudio, get_my_mmaudio
from mmaudio.model.sequence_config import SequenceConfig
from mmaudio.model.utils.features_utils import FeaturesUtils

# NAG Video Settings
MOD_VALUE = 32
DEFAULT_DURATION_SECONDS = 4
DEFAULT_STEPS = 4
DEFAULT_SEED = 2025
DEFAULT_H_SLIDER_VALUE = 480
DEFAULT_W_SLIDER_VALUE = 832
NEW_FORMULA_MAX_AREA = 480.0 * 832.0

SLIDER_MIN_H, SLIDER_MAX_H = 128, 896
SLIDER_MIN_W, SLIDER_MAX_W = 128, 896
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 129

DEFAULT_NAG_NEGATIVE_PROMPT = "Static, motionless, still, ugly, bad quality, worst quality, poorly drawn, low resolution, blurry, lack of details"
DEFAULT_AUDIO_NEGATIVE_PROMPT = "music, speech, voice, singing, narration"

# NAG Model Settings
MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
SUB_MODEL_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
SUB_MODEL_FILENAME = "Wan14BT2VFusioniX_fp16_.safetensors"
LORA_REPO_ID = "Kijai/WanVideo_comfy"
LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"

# MMAudio Settings
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
log = logging.getLogger()
device = 'cuda'
dtype = torch.bfloat16
audio_model_config: ModelConfig = all_model_cfg['large_44k_v2']
audio_model_config.download_if_needed()
setup_eval_logging()

# Initialize NAG Video Model
try:
    vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
    wan_path = hf_hub_download(repo_id=SUB_MODEL_ID, filename=SUB_MODEL_FILENAME)
    transformer = NagWanTransformer3DModel.from_single_file(wan_path, torch_dtype=torch.bfloat16)
    pipe = NAGWanPipeline.from_pretrained(
        MODEL_ID, vae=vae, transformer=transformer, torch_dtype=torch.bfloat16
    )
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=5.0)
    pipe.to("cuda")

    pipe.transformer.__class__.attn_processors = NagWanTransformer3DModel.attn_processors
    pipe.transformer.__class__.set_attn_processor = NagWanTransformer3DModel.set_attn_processor
    pipe.transformer.__class__.forward = NagWanTransformer3DModel.forward
    print("NAG Video Model loaded successfully!")
except Exception as e:
    print(f"Error loading NAG Video Model: {e}")
    pipe = None

# Initialize MMAudio Model
def get_mmaudio_model() -> tuple[MMAudio, FeaturesUtils, SequenceConfig]:
    seq_cfg = audio_model_config.seq_cfg

    net: MMAudio = get_my_mmaudio(audio_model_config.model_name).to(device, dtype).eval()
    net.load_weights(torch.load(audio_model_config.model_path, map_location=device, weights_only=True))
    log.info(f'Loaded MMAudio weights from {audio_model_config.model_path}')

    feature_utils = FeaturesUtils(tod_vae_ckpt=audio_model_config.vae_path,
                                  synchformer_ckpt=audio_model_config.synchformer_ckpt,
                                  enable_conditions=True,
                                  mode=audio_model_config.mode,
                                  bigvgan_vocoder_ckpt=audio_model_config.bigvgan_16k_path,
                                  need_vae_encoder=False)
    feature_utils = feature_utils.to(device, dtype).eval()

    return net, feature_utils, seq_cfg

try:
    audio_net, audio_feature_utils, audio_seq_cfg = get_mmaudio_model()
    print("MMAudio Model loaded successfully!")
except Exception as e:
    print(f"Error loading MMAudio Model: {e}")
    audio_net = None

# Convert a video prompt into an audio prompt
def extract_audio_description(video_prompt):
    """Extract or derive an audio-related description from the video prompt."""
    # Keyword-to-sound mapping
    audio_keywords = {
        'car': 'car engine sound, vehicle noise',
        'porsche': 'sports car engine roar, exhaust sound',
        'guitar': 'electric guitar playing, guitar music',
        'concert': 'crowd cheering, live music, applause',
        'motorcycle': 'motorcycle engine sound, motor rumble',
        'highway': 'traffic noise, road ambience',
        'rain': 'rain sounds, water drops',
        'wind': 'wind blowing sound',
        'ocean': 'ocean waves, water sounds',
        'city': 'urban ambience, city traffic sounds',
        'singer': 'singing voice, vocals',
        'crowd': 'crowd noise, people talking',
        'flames': 'fire crackling sound',
        'pyro': 'fire whoosh, flame burst sound',
        'explosion': 'explosion sound, blast',
        'countryside': 'nature ambience, birds chirping',
        'wheat fields': 'wind through grass, rural ambience',
        'engine': 'motor sound, mechanical noise',
        'flat-six engine': 'sports car engine sound',
        'roaring': 'loud engine roar',
        'thunderous': 'loud booming sound',
        'child': 'children playing sounds',
        'running': 'footsteps sound',
        'woman': 'ambient sounds',
        'phone': 'subtle electronic ambience',
        'advertisement': 'modern ambient sounds',
    }

    # Simple keyword-based conversion
    audio_descriptions = []
    lower_prompt = video_prompt.lower()

    for key, value in audio_keywords.items():
        if key in lower_prompt:
            audio_descriptions.append(value)

    # Fallbacks when no keyword matched
    if not audio_descriptions:
        # Check whether the prompt contains an explicit audio description
        if 'sound' in lower_prompt or 'audio' in lower_prompt or 'noise' in lower_prompt:
            # Extract only the audio-related clauses from the prompt
            audio_pattern = r'([^.]*(?:sound|audio|noise|music|voice|roar|rumble)[^.]*)'
            matches = re.findall(audio_pattern, lower_prompt, re.IGNORECASE)
            if matches:
                return ', '.join(matches)
        # Default ambient sound
        return "ambient environmental sounds matching the scene"

    return ', '.join(audio_descriptions)
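# Illustrative example (hypothetical prompt, comment only): with the keyword table
# above, a prompt such as "a porsche driving through the rain" matches the
# 'porsche' and 'rain' entries, so extract_audio_description() returns
# "sports car engine roar, exhaust sound, rain sounds, water drops".
# A prompt with no keyword hit and no sound/audio/noise wording falls back to
# "ambient environmental sounds matching the scene".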
# Audio generation function
@torch.inference_mode()
def add_audio_to_video(video_path, prompt, audio_custom_prompt, audio_negative_prompt,
                       audio_steps, audio_cfg_strength, duration):
    """Generate and add audio to video using MMAudio"""
    if audio_net is None:
        print("MMAudio model not loaded, returning video without audio")
        return video_path

    try:
        # Use the custom audio prompt if given; otherwise derive one from the video prompt
        if audio_custom_prompt and audio_custom_prompt.strip():
            audio_prompt = audio_custom_prompt.strip()
        else:
            audio_prompt = extract_audio_description(prompt)

        print(f"Original prompt: {prompt}")
        print(f"Audio prompt: {audio_prompt}")

        rng = torch.Generator(device=device)
        rng.manual_seed(random.randint(0, 2**32 - 1))  # explicit random seed for audio sampling
        fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=audio_steps)

        video_info = load_video(video_path, duration)
        clip_frames = video_info.clip_frames
        sync_frames = video_info.sync_frames
        duration = video_info.duration_sec
        clip_frames = clip_frames.unsqueeze(0)
        sync_frames = sync_frames.unsqueeze(0)
        audio_seq_cfg.duration = duration
        audio_net.update_seq_lengths(audio_seq_cfg.latent_seq_len, audio_seq_cfg.clip_seq_len,
                                     audio_seq_cfg.sync_seq_len)

        # Enhanced negative prompt
        enhanced_negative = f"{audio_negative_prompt}, distortion, static noise, silence, random beeps"

        audios = mmaudio_generate(clip_frames,
                                  sync_frames,
                                  [audio_prompt],  # use the derived audio prompt
                                  negative_text=[enhanced_negative],
                                  feature_utils=audio_feature_utils,
                                  net=audio_net,
                                  fm=fm,
                                  rng=rng,
                                  cfg_strength=audio_cfg_strength)
        audio = audios.float().cpu()[0]

        # Create video with audio
        video_with_audio_path = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
        make_video(video_info, video_with_audio_path, audio, sampling_rate=audio_seq_cfg.sampling_rate)

        return video_with_audio_path
    except Exception as e:
        print(f"Error in audio generation: {e}")
        traceback.print_exc()
        return video_path

# Combined generation function
def get_duration(prompt, nag_negative_prompt, nag_scale, height, width, duration_seconds,
                 steps, seed, randomize_seed, enable_audio, audio_custom_prompt,
                 audio_negative_prompt, audio_steps, audio_cfg_strength):
    # Calculate total GPU time including audio processing if enabled
    video_duration = int(duration_seconds) * int(steps) * 2.25 + 5
    audio_duration = 30 if enable_audio else 0  # additional time for audio processing
    return video_duration + audio_duration
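# Worked example of the GPU-time estimate above, using this file's defaults
# (duration_seconds=4, steps=4): video time = 4 * 4 * 2.25 + 5 = 41 s, plus
# 30 s when audio generation is enabled, so roughly 71 s is requested from
# @spaces.GPU for a default run.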
@spaces.GPU(duration=get_duration)
def generate_video_with_audio(
    prompt,
    nag_negative_prompt,
    nag_scale,
    height=DEFAULT_H_SLIDER_VALUE,
    width=DEFAULT_W_SLIDER_VALUE,
    duration_seconds=DEFAULT_DURATION_SECONDS,
    steps=DEFAULT_STEPS,
    seed=DEFAULT_SEED,
    randomize_seed=False,
    enable_audio=True,
    audio_custom_prompt="",
    audio_negative_prompt=DEFAULT_AUDIO_NEGATIVE_PROMPT,
    audio_steps=30,
    audio_cfg_strength=4.5,
):
    if pipe is None:
        return None, DEFAULT_SEED

    # Resolve the seed up front so it is defined even if generation fails below
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)

    try:
        # Generate video first
        target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
        target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)

        # e.g. 4 s * 16 fps + 1 = 65 frames, clipped to [8, 129]
        num_frames = np.clip(int(round(int(duration_seconds) * FIXED_FPS) + 1),
                             MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)

        print(f"Generating video with: prompt='{prompt}', resolution={target_w}x{target_h}, frames={num_frames}")

        with torch.inference_mode():
            nag_output_frames_list = pipe(
                prompt=prompt,
                nag_negative_prompt=nag_negative_prompt,
                nag_scale=nag_scale,
                nag_tau=3.5,
                nag_alpha=0.5,
                height=target_h, width=target_w, num_frames=num_frames,
                guidance_scale=0.,
                num_inference_steps=int(steps),
                generator=torch.Generator(device="cuda").manual_seed(current_seed)
            ).frames[0]

        # Save initial video without audio
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
            temp_video_path = tmpfile.name
        export_to_video(nag_output_frames_list, temp_video_path, fps=FIXED_FPS)
        print(f"Video saved to: {temp_video_path}")

        # Add audio if enabled
        if enable_audio:
            try:
                print("Adding audio to video...")
                final_video_path = add_audio_to_video(
                    temp_video_path, prompt, audio_custom_prompt,
                    audio_negative_prompt, audio_steps,
                    audio_cfg_strength, duration_seconds
                )
                # Clean up the silent temp video once the audio version exists
                if os.path.exists(temp_video_path) and final_video_path != temp_video_path:
                    os.remove(temp_video_path)
                print(f"Final video with audio: {final_video_path}")
            except Exception as e:
                log.error(f"Audio generation failed: {e}")
                final_video_path = temp_video_path
        else:
            final_video_path = temp_video_path

        return final_video_path, current_seed

    except Exception as e:
        print(f"Error in video generation: {e}")
        return None, current_seed

# Example generation function - simplified
# (currently not wired to gr.Examples below, which only sets prompt, negative prompt, and NAG scale)
def set_example(prompt, nag_negative_prompt, nag_scale):
    """Set example values in the UI without triggering generation"""
    return (
        prompt,
        nag_negative_prompt,
        nag_scale,
        DEFAULT_H_SLIDER_VALUE,
        DEFAULT_W_SLIDER_VALUE,
        DEFAULT_DURATION_SECONDS,
        DEFAULT_STEPS,
        DEFAULT_SEED,
        True,   # randomize_seed
        True,   # enable_audio
        "",     # audio_custom_prompt
        DEFAULT_AUDIO_NEGATIVE_PROMPT,
        30,     # audio_steps
        4.5     # audio_cfg_strength
    )

# Examples with audio descriptions
examples = [
    ["Midnight highway outside a neon-lit city. A black 1973 Porsche 911 Carrera RS speeds at 120 km/h. Inside, a stylish singer-guitarist sings while driving, vintage sunburst guitar on the passenger seat. Sodium streetlights streak over the hood; RGB panels shift magenta to blue on the driver. Camera: drone dive, Russian-arm low wheel shot, interior gimbal, FPV barrel roll, overhead spiral. Neo-noir palette, rain-slick asphalt reflections, roaring flat-six engine blended with live guitar.", DEFAULT_NAG_NEGATIVE_PROMPT, 11],
    ["Arena rock concert packed with 20 000 fans. A flamboyant lead guitarist in leather jacket and mirrored aviators shreds a cherry-red Flying V on a thrust stage. Pyro flames shoot up on every downbeat, CO₂ jets burst behind. Moving-head spotlights swirl teal and amber, follow-spots rim-light the guitarist's hair. Steadicam 360-orbit, crane shot rising over crowd, ultra-slow-motion pick attack at 1 000 fps. Film-grain teal-orange grade, thunderous crowd roar mixes with screaming guitar solo.", DEFAULT_NAG_NEGATIVE_PROMPT, 11],
    ["Golden-hour countryside road winding through rolling wheat fields. A man and woman ride a vintage café-racer motorcycle, hair and scarf fluttering in the warm breeze. Drone chase shot reveals endless patchwork farmland; low slider along rear wheel captures dust trail. Sun-flare back-lights the riders, lens blooms on highlights. Soft acoustic rock underscore; engine rumble mixed at –8 dB. Warm pastel color grade, gentle film-grain for nostalgic vibe.", DEFAULT_NAG_NEGATIVE_PROMPT, 11],
]

# CSS styling - Fixed for better layout
css = """
/* Right column - video output */
.video-output {
    border-radius: 15px;
    overflow: hidden;
    box-shadow: 0 10px 30px rgba(0, 0, 0, 0.2);
    width: 100% !important;
    height: auto !important;
    min-height: 400px;
}

/* Ensure video container is responsive */
.video-output video {
    width: 100% !important;
    height: auto !important;
    max-height: 600px;
    object-fit: contain;
    display: block;
}

/* Remove any overlay or background from video container */
.video-output > div {
    background: transparent !important;
    padding: 0 !important;
}

/* Remove gradio's default video player overlay */
.video-output .wrap {
    background: transparent !important;
}

/* Ensure no gray overlay on video controls */
.video-output video::-webkit-media-controls-enclosure {
    background: transparent;
}
"""

# Gradio interface - Fixed structure
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    gr.HTML("""
        <div style="text-align: center;">
            <h1>🎬 VEO3 Free</h1>
            <p>Wan2.1-T2V-14B + Fast 4-step with NAG + Automatic Audio Generation</p>
        </div>
    """)

    gr.HTML("""
        <div style="text-align: center;">
            OpenFree badge
            Discord badge
        </div>
    """)

    with gr.Row(equal_height=True):
        with gr.Column(scale=5):
            with gr.Group(elem_classes="prompt-container"):
                prompt = gr.Textbox(
                    label="✨ Video Prompt (also used for audio generation)",
                    placeholder="Describe your video scene in detail...",
                    lines=3,
                    elem_classes="prompt-input"
                )

                with gr.Accordion("🎨 Advanced Video Settings", open=False):
                    nag_negative_prompt = gr.Textbox(
                        label="Video Negative Prompt",
                        value=DEFAULT_NAG_NEGATIVE_PROMPT,
                        lines=2,
                    )
                    nag_scale = gr.Slider(
                        label="NAG Scale",
                        minimum=1.0, maximum=20.0, step=0.25, value=11.0,
                        info="Higher values = stronger guidance"
                    )

            with gr.Group(elem_classes="settings-panel"):
                gr.Markdown("### ⚙️ Video Settings")

                with gr.Row():
                    duration_seconds_input = gr.Slider(
                        minimum=1, maximum=8, step=1,
                        value=DEFAULT_DURATION_SECONDS,
                        label="📱 Duration (seconds)",
                        elem_classes="slider-container"
                    )
                    steps_slider = gr.Slider(
                        minimum=1, maximum=8, step=1,
                        value=DEFAULT_STEPS,
                        label="🔄 Inference Steps",
                        elem_classes="slider-container"
                    )

                with gr.Row():
                    height_input = gr.Slider(
                        minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE,
                        value=DEFAULT_H_SLIDER_VALUE,
                        label=f"📐 Height (×{MOD_VALUE})",
                        elem_classes="slider-container"
                    )
                    width_input = gr.Slider(
                        minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE,
                        value=DEFAULT_W_SLIDER_VALUE,
                        label=f"📐 Width (×{MOD_VALUE})",
                        elem_classes="slider-container"
                    )

                with gr.Row():
                    seed_input = gr.Slider(
                        label="🌱 Seed",
                        minimum=0, maximum=MAX_SEED, step=1,
                        value=DEFAULT_SEED,
                        interactive=True
                    )
                    randomize_seed_checkbox = gr.Checkbox(
                        label="🎲 Random Seed",
                        value=True,
                        interactive=True
                    )

            with gr.Group(elem_classes="audio-settings"):
                gr.Markdown("### 🎵 Audio Generation Settings")

                enable_audio = gr.Checkbox(
                    label="🔊 Enable Automatic Audio Generation",
                    value=True,
                    interactive=True
                )

                with gr.Column(visible=True) as audio_settings_group:
                    audio_custom_prompt = gr.Textbox(
                        label="Custom Audio Prompt (Optional)",
                        placeholder="Leave empty to auto-generate from video prompt, or specify custom audio description (e.g., 'car engine sound, traffic noise')",
                        value="",
                    )
                    audio_negative_prompt = gr.Textbox(
                        label="Audio Negative Prompt",
                        value=DEFAULT_AUDIO_NEGATIVE_PROMPT,
                        placeholder="Elements to avoid in audio",
                    )

                    with gr.Row():
                        audio_steps = gr.Slider(
                            minimum=10, maximum=50, step=5,
                            value=30,
                            label="🎚️ Audio Steps",
                            info="More steps = better quality"
                        )
                        audio_cfg_strength = gr.Slider(
                            minimum=1.0, maximum=10.0, step=0.5,
                            value=4.5,
                            label="🎛️ Audio Guidance",
                            info="Strength of prompt guidance"
                        )

                # Toggle audio settings visibility
                enable_audio.change(
                    fn=lambda x: gr.update(visible=x),
                    inputs=[enable_audio],
                    outputs=[audio_settings_group]
                )

            generate_button = gr.Button(
                "🎬 Generate Video with Audio",
                variant="primary",
                elem_classes="generate-btn"
            )

        with gr.Column(scale=5):
            video_output = gr.Video(
                label="Generated Video with Audio",
                autoplay=True,
                interactive=False,
                elem_classes="video-output",
                height=600
            )

            gr.HTML("""
                <div style="text-align: center;">
                    <p>💡 Tip: For better audio, use Custom Audio Prompt with sound descriptions!</p>
                    <p>🎧 Examples: "car engine sound", "crowd cheering", "nature ambience"</p>
                </div>
            """)

    # Examples section moved outside of columns
    with gr.Row():
        gr.Markdown("### 🎯 Example Prompts")

    gr.Examples(
        examples=examples,
        inputs=[prompt, nag_negative_prompt, nag_scale],
        outputs=None,  # don't connect outputs to avoid index issues
        cache_examples=False
    )

    # Connect UI elements
    ui_inputs = [
        prompt, nag_negative_prompt, nag_scale,
        height_input, width_input,
        duration_seconds_input, steps_slider,
        seed_input, randomize_seed_checkbox,
        enable_audio, audio_custom_prompt, audio_negative_prompt,
        audio_steps, audio_cfg_strength,
    ]

    generate_button.click(
        fn=generate_video_with_audio,
        inputs=ui_inputs,
        outputs=[video_output, seed_input],
    )

if __name__ == "__main__":
    demo.queue().launch()