import spaces
import gradio as gr
import json
import torch
import wavio
from tqdm import tqdm
from huggingface_hub import snapshot_download
from pydub import AudioSegment
from gradio import Markdown
import uuid
from diffusers import DiffusionPipeline, AudioPipelineOutput
from transformers import CLIPTextModel, T5EncoderModel, AutoModel, T5Tokenizer, T5TokenizerFast
from typing import Union
from diffusers.utils.torch_utils import randn_tensor
from TangoFlux import TangoFluxInference
import torchaudio
# Define the description text shown in the UI
description_text = """
# TangoFlux Text-to-Audio Generation

Generate high-quality audio from text descriptions using TangoFlux.

## Instructions:

1. Enter your text description in the prompt box
2. Adjust the generation parameters if desired
3. Click submit to generate audio

## Parameters:

- Steps: Higher values give better quality but take longer
- Guidance Scale: Controls how closely the generation follows the prompt
- Duration: Length of the generated audio in seconds
"""
tangoflux = TangoFluxInference(name="declare-lab/TangoFlux")
@spaces.GPU  # Allocate a ZeroGPU slot for the duration of this call (the Space runs on ZeroGPU)
def gradio_generate(prompt, steps, guidance, duration):
    # Ensure duration has a default value if None
    if duration is None:
        duration = 10
    # Generate audio conditioned on the prompt
    output = tangoflux.generate(prompt, steps=steps, guidance_scale=guidance, duration=duration)
    # Use a unique filename so concurrent requests do not overwrite each other
    filename = f"{uuid.uuid4()}.wav"
    # Trim to the requested duration (the model outputs audio at 44.1 kHz)
    output = output[:, :int(duration * 44100)]
    torchaudio.save(filename, output, 44100)
    return filename
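# Illustrative only (not executed here): for a quick local sanity check without the UI,
# the function can be called directly, e.g.
#   wav_path = gradio_generate("Gentle rain falling on a tin roof", steps=25, guidance=4.5, duration=10)
# which returns the path of the saved 44.1 kHz WAV file.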
# Create custom interface with HTML badges
with gr.Blocks(theme="soft") as gr_interface:
    # Add HTML badges at the top
    gr.HTML(
        """
        <div class='container' style='display:flex; justify-content:center; gap:12px;'>
            <a href="https://huggingface.co/spaces/openfree/Best-AI" target="_blank">
                <img src="https://img.shields.io/static/v1?label=OpenFree&message=BEST%20AI%20Services&color=%230000ff&labelColor=%23000080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="OpenFree badge">
            </a>
            <a href="https://discord.gg/openfreeai" target="_blank">
                <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="Discord badge">
            </a>
        </div>
        """
    )
    # Title and description
    gr.Markdown("# TangoFlux: Super Fast and Faithful Text to Audio Generation with Flow Matching and Clap-Ranked Preference Optimization")
    gr.Markdown(description_text)
    # Input components
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(lines=2, label="Prompt")
            with gr.Row():
                denoising_steps = gr.Slider(minimum=10, maximum=100, value=25, step=5, label="Steps", interactive=True)
                guidance_scale = gr.Slider(minimum=1, maximum=10, value=4.5, step=0.5, label="Guidance Scale", interactive=True)
                duration_scale = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Duration (seconds)", interactive=True)
            submit_btn = gr.Button("Generate Audio", variant="primary")
        with gr.Column():
            output_audio = gr.Audio(label="Generated Audio", type="filepath")
    # Examples
    gr.Examples(
        examples=[
            # [prompt, steps, guidance, duration]
            ["Quiet whispered conversation gradually fading into distant jet engine roar diminishing into silence", 25, 4.5, 10],
            ["Clear sound of bicycle tires crunching on loose gravel and dirt, followed by deep male laughter echoing", 25, 4.5, 10],
            ["Multiple ducks quacking loudly with splashing water and piercing wild animal shriek in background", 25, 4.5, 10],
            ["Powerful ocean waves crashing and receding on sandy beach with distant seagulls", 25, 4.5, 10],
            ["Gentle female voice cooing and baby responding with happy gurgles and giggles", 25, 4.5, 10],
            ["Clear male voice speaking, sharp popping sound, followed by genuine group laughter", 25, 4.5, 10],
            ["Stream of water hitting empty ceramic cup, pitch rising as cup fills up", 25, 4.5, 10],
            ["Massive crowd erupting in thunderous applause and excited cheering", 25, 4.5, 10],
            ["Deep rolling thunder with bright lightning strikes crackling through sky", 25, 4.5, 10],
            ["Aggressive dog barking and distressed cat meowing as racing car roars past at high speed", 25, 4.5, 10],
            ["Peaceful stream bubbling and birds singing, interrupted by sudden explosive gunshot", 25, 4.5, 10],
            ["Man speaking outdoors, goat bleating loudly, metal gate scraping closed, ducks quacking frantically, wind howling into microphone", 25, 4.5, 10],
            ["Series of loud aggressive dog barks echoing", 25, 4.5, 10],
            ["Multiple distinct cat meows at different pitches", 25, 4.5, 10],
            ["Rhythmic wooden table tapping overlaid with steady water pouring sound", 25, 4.5, 10],
            ["Sustained crowd applause with camera clicks and amplified male announcer voice", 25, 4.5, 10],
            ["Two sharp gunshots followed by panicked birds taking flight with rapid wing flaps", 25, 4.5, 10],
            ["Melodic human whistling harmonizing with natural birdsong", 25, 4.5, 10],
            ["Deep rhythmic snoring with clear breathing patterns", 25, 4.5, 10],
            ["Multiple racing engines revving and accelerating with sharp whistle piercing through", 25, 4.5, 10],
        ],
        inputs=[input_text, denoising_steps, guidance_scale, duration_scale],
        outputs=output_audio,
        fn=gradio_generate,
        cache_examples="lazy",
    )
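    # Note: cache_examples="lazy" (Gradio 4.x) generates and caches each example's audio
    # the first time a user selects it, rather than precomputing everything at startup.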
    # Connect the button click to the generation function
    submit_btn.click(
        fn=gradio_generate,
        inputs=[input_text, denoising_steps, guidance_scale, duration_scale],
        outputs=output_audio
    )
# Launch the interface
gr_interface.queue(15).launch()