import uuid

import gradio as gr
import spaces
import torchaudio
from TangoFlux import TangoFluxInference

# Define the description text
description_text = """
# TangoFlux Text-to-Audio Generation
Generate high-quality audio from text descriptions using TangoFlux.

## Instructions:
1. Enter your text description in the prompt box
2. Adjust the generation parameters if desired
3. Click submit to generate audio

## Parameters:
- Steps: Higher values give better quality but take longer
- Guidance Scale: Controls how closely the generation follows the prompt
- Duration: Length of the generated audio in seconds
"""

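# Load the TangoFlux model once at import time; the first run downloads the
# declare-lab/TangoFlux weights from the Hugging Face Hub.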
tangoflux = TangoFluxInference(name="declare-lab/TangoFlux")

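# On Hugging Face ZeroGPU Spaces, @spaces.GPU attaches a GPU to this call;
# duration is the expected runtime in seconds used when scheduling the allocation.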
@spaces.GPU(duration=15)
def gradio_generate(prompt, steps, guidance, duration):
    # Fall back to a 10-second clip if no duration is provided
    if duration is None:
        duration = 10
    # TangoFlux returns a (channels, samples) waveform at 44.1 kHz
    output = tangoflux.generate(prompt, steps=steps, guidance_scale=guidance, duration=duration)
    # Trim to the requested duration and write to a unique file so concurrent
    # requests do not overwrite each other's output
    output = output[:, :int(duration * 44100)]
    filename = f"{uuid.uuid4().hex}.wav"
    torchaudio.save(filename, output, 44100)
    return filename

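# Example of calling the generator directly, e.g. for a local smoke test
# (a sketch; assumes a GPU-backed environment and is not executed by the app):
#   wav_path = gradio_generate("dog barking in the rain", steps=25, guidance=4.5, duration=10)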
# Create custom interface with HTML badges
with gr.Blocks(theme="soft") as gr_interface:
    # Add HTML badges at the top
    gr.HTML(
        """
        <div class='container' style='display:flex; justify-content:center; gap:12px;'>
            <a href="https://huggingface.co/spaces/openfree/Best-AI" target="_blank">
                <img src="https://img.shields.io/static/v1?label=OpenFree&message=BEST%20AI%20Services&color=%230000ff&labelColor=%23000080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="OpenFree badge">
            </a>
            <a href="https://discord.gg/openfreeai" target="_blank">
                <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="Discord badge">
            </a>
        </div>
        """
    )
    
    # Title and description
    gr.Markdown("# TangoFlux: Super Fast and Faithful Text to Audio Generation with Flow Matching and Clap-Ranked Preference Optimization")
    gr.Markdown(description_text)
    
    # Input components
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(lines=2, label="Prompt")
            with gr.Row():
                denoising_steps = gr.Slider(minimum=10, maximum=100, value=25, step=5, label="Steps", interactive=True)
                guidance_scale = gr.Slider(minimum=1, maximum=10, value=4.5, step=0.5, label="Guidance Scale", interactive=True)
                duration_scale = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Duration", interactive=True)
            
            submit_btn = gr.Button("Generate Audio", variant="primary")
        
        with gr.Column():
            output_audio = gr.Audio(label="Generated Audio", type="filepath")
    
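    # cache_examples="lazy" renders each example's audio the first time it is
    # selected rather than pre-generating all of the examples at startup.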
    # Examples
    gr.Examples(
        examples=[
            # [prompt, steps, guidance, duration]
            ["Quiet whispered conversation gradually fading into distant jet engine roar diminishing into silence", 25, 4.5, 10],
            ["Clear sound of bicycle tires crunching on loose gravel and dirt, followed by deep male laughter echoing", 25, 4.5, 10],
            ["Multiple ducks quacking loudly with splashing water and piercing wild animal shriek in background", 25, 4.5, 10],
            ["Powerful ocean waves crashing and receding on sandy beach with distant seagulls", 25, 4.5, 10],             
            ["Gentle female voice cooing and baby responding with happy gurgles and giggles", 25, 4.5, 10],     
            ["Clear male voice speaking, sharp popping sound, followed by genuine group laughter", 25, 4.5, 10],
            ["Stream of water hitting empty ceramic cup, pitch rising as cup fills up", 25, 4.5, 10],
            ["Massive crowd erupting in thunderous applause and excited cheering", 25, 4.5, 10],
            ["Deep rolling thunder with bright lightning strikes crackling through sky", 25, 4.5, 10],
            ["Aggressive dog barking and distressed cat meowing as racing car roars past at high speed", 25, 4.5, 10],
            ["Peaceful stream bubbling and birds singing, interrupted by sudden explosive gunshot", 25, 4.5, 10],
            ["Man speaking outdoors, goat bleating loudly, metal gate scraping closed, ducks quacking frantically, wind howling into microphone", 25, 4.5, 10],
            ["Series of loud aggressive dog barks echoing", 25, 4.5, 10],
            ["Multiple distinct cat meows at different pitches", 25, 4.5, 10],
            ["Rhythmic wooden table tapping overlaid with steady water pouring sound", 25, 4.5, 10],
            ["Sustained crowd applause with camera clicks and amplified male announcer voice", 25, 4.5, 10],
            ["Two sharp gunshots followed by panicked birds taking flight with rapid wing flaps", 25, 4.5, 10],
            ["Melodic human whistling harmonizing with natural birdsong", 25, 4.5, 10],
            ["Deep rhythmic snoring with clear breathing patterns", 25, 4.5, 10],
            ["Multiple racing engines revving and accelerating with sharp whistle piercing through", 25, 4.5, 10],
        ],
        inputs=[input_text, denoising_steps, guidance_scale, duration_scale],
        outputs=output_audio,
        fn=gradio_generate,
        cache_examples="lazy",
    )
    
    # Connect the button click to the generation function
    submit_btn.click(
        fn=gradio_generate,
        inputs=[input_text, denoising_steps, guidance_scale, duration_scale],
        outputs=output_audio
    )

# Launch the interface
gr_interface.queue(15).launch()