Adding new models (read description)
- BandSplit Roformer | FNO by Unwa
- BandSplit Roformer | Karaoke Frazer by becruily
- BandSplit Roformer | SW by jarredou
- MelBand Roformer | Duality v1 by Aname
- models/Roformer/BandSplit/config_BandSplit-Roformer_FNO_by-Unwa.yaml +136 -0
- models/Roformer/BandSplit/config_BandSplit-Roformer_Karaoke_Frazer_by-becruily.yaml +129 -0
- models/Roformer/BandSplit/config_BandSplit-Roformer_SW_by-jarredou.yaml +197 -0
- models/Roformer/BandSplit/model_BandSplit-Roformer_FNO_by-Unwa.ckpt +3 -0
- models/Roformer/BandSplit/model_BandSplit-Roformer_Karaoke_Frazer_by-becruily.ckpt +3 -0
- models/Roformer/BandSplit/model_BandSplit-Roformer_SW_by-jarredou.ckpt +3 -0
- models/Roformer/MelBand/config_MelBand-Roformer_Duality_v1_by-Aname.yaml +72 -0
- models/Roformer/MelBand/model_MelBand-Roformer_Duality_v1_by-Aname.ckpt +3 -0
models/Roformer/BandSplit/config_BandSplit-Roformer_FNO_by-Unwa.yaml
ADDED
@@ -0,0 +1,136 @@
audio:
  chunk_size: 749259
  dim_f: 1024
  dim_t: 1700 # don't work (use in model)
  hop_length: 441 # don't work (use in model)
  n_fft: 2048
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000

model:
  dim: 256
  depth: 12
  stereo: true
  num_stems: 1
  time_transformer_depth: 1
  freq_transformer_depth: 1
  linear_transformer_depth: 0
  freqs_per_bands: !!python/tuple
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 128
    - 129
  dim_head: 64
  heads: 8
  attn_dropout: 0.
  ff_dropout: 0.
  flash_attn: true
  dim_freqs_in: 1025
  stft_n_fft: 2048
  stft_hop_length: 441
  stft_win_length: 2048
  stft_normalized: false
  mask_estimator_depth: 2
  multi_stft_resolution_loss_weight: 1.0
  multi_stft_resolutions_window_sizes: !!python/tuple
    - 4096
    - 2048
    - 1024
    - 512
    - 256
  multi_stft_hop_size: 147
  multi_stft_normalized: False
  mlp_expansion_factor: 4

training:
  batch_size: 2
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments: ['vocals', 'other']
  patience: 3
  reduce_factor: 0.95
  target_instrument: other
  num_epochs: 1000
  num_steps: 1000
  augmentation: false # enable augmentations by audiomentations and pedalboard
  augmentation_type: simple1
  use_mp3_compress: false # Deprecated
  augmentation_mix: true # Mix several stems of the same type with some probability
  augmentation_loudness: true # randomly change loudness of each stem
  augmentation_loudness_type: 1 # Type 1 or 2
  augmentation_loudness_min: 0.5
  augmentation_loudness_max: 1.5
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  # optimizer: prodigy
  optimizer: adam
  # lr: 1.0
  lr: 1.0e-5
  other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
  use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true

inference:
  batch_size: 2
  dim_t: 1700
  num_overlap: 2
  normalize: false
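These configs rely on the `!!python/tuple` YAML tag for `freqs_per_bands` and the multi-STFT window list, so a plain `yaml.safe_load` will reject them. A minimal loading sketch, assuming PyYAML and a trusted config file (`load_config` here is just an illustrative helper, not part of this repo):

```python
import yaml

def load_config(path: str) -> dict:
    # safe_load refuses !!python/tuple; unsafe_load (trusted files only)
    # builds the tuples declared for freqs_per_bands and
    # multi_stft_resolutions_window_sizes.
    with open(path, "r") as f:
        return yaml.unsafe_load(f)

cfg = load_config("models/Roformer/BandSplit/config_BandSplit-Roformer_FNO_by-Unwa.yaml")
print(cfg["audio"]["chunk_size"])            # 749259 samples (~17 s at 44100 Hz)
print(len(cfg["model"]["freqs_per_bands"]))  # 62 frequency bands
```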
models/Roformer/BandSplit/config_BandSplit-Roformer_Karaoke_Frazer_by-becruily.yaml
ADDED
@@ -0,0 +1,129 @@
audio:
  chunk_size: 882000
  dim_f: 1024
  dim_t: 801
  hop_length: 441
  n_fft: 2048
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000

model:
  dim: 256
  depth: 12
  stereo: true
  num_stems: 1
  time_transformer_depth: 1
  freq_transformer_depth: 1
  linear_transformer_depth: 0
  freqs_per_bands: !!python/tuple
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 128
    - 129
  dim_head: 64
  heads: 8
  attn_dropout: 0
  ff_dropout: 0
  flash_attn: true
  dim_freqs_in: 1025
  stft_n_fft: 2048
  stft_hop_length: 512
  stft_win_length: 2048
  stft_normalized: false
  mask_estimator_depth: 2
  multi_stft_resolution_loss_weight: 1.0
  multi_stft_resolutions_window_sizes: !!python/tuple
    - 4096
    - 2048
    - 1024
    - 512
    - 256
  multi_stft_hop_size: 147
  multi_stft_normalized: false
  mlp_expansion_factor: 4

training:
  batch_size: 1
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
    - Vocals
    - Instrumental
  patience: 2
  reduce_factor: 0.95
  target_instrument: Vocals
  num_epochs: 1000
  num_steps: 1000
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  # optimizer: prodigy
  optimizer: adam
  lr: 1.0e-5
  other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
  use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true

inference:
  batch_size: 2
  dim_t: 2001
  num_overlap: 4
  normalize: false
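A quick sanity check for a BandSplit config like the one above is that `freqs_per_bands` covers every STFT bin exactly once: its entries should sum to `stft_n_fft // 2 + 1`, which is also `dim_freqs_in`. A small check, assuming the config dict was loaded as in the earlier sketch:

```python
bands = cfg["model"]["freqs_per_bands"]
expected_bins = cfg["model"]["stft_n_fft"] // 2 + 1   # 2048 // 2 + 1 = 1025

# 24*2 + 12*4 + 8*12 + 8*24 + 8*48 + 128 + 129 = 1025
assert sum(bands) == expected_bins == cfg["model"]["dim_freqs_in"]
```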
models/Roformer/BandSplit/config_BandSplit-Roformer_SW_by-jarredou.yaml
ADDED
@@ -0,0 +1,197 @@
audio:
  chunk_size: 588800 #882000
  dim_f: 1024
  dim_t: 801 # don't work (use in model)
  hop_length: 441 # don't work (use in model)
  n_fft: 2048
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000

model:
  dim: 256
  depth: 12
  stereo: true
  num_stems: 6
  time_transformer_depth: 1
  freq_transformer_depth: 1
  linear_transformer_depth: 0
  freqs_per_bands: !!python/tuple
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 2
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 4
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 12
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 24
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 48
    - 128
    - 129
  dim_head: 64
  heads: 8
  attn_dropout: 0.1
  ff_dropout: 0.1
  flash_attn: true
  dim_freqs_in: 1025
  stft_n_fft: 2048
  stft_hop_length: 512
  stft_win_length: 2048
  stft_normalized: false
  mask_estimator_depth: 2
  multi_stft_resolution_loss_weight: 1.0
  multi_stft_resolutions_window_sizes: !!python/tuple
    - 4096
    - 2048
    - 1024
    - 512
    - 256
  multi_stft_hop_size: 147
  multi_stft_normalized: False
  mlp_expansion_factor: 4
  use_torch_checkpoint: False # it allows to greatly reduce GPU memory consumption during training (not fully tested)
  skip_connection: False # Enable skip connection between transformer blocks - can solve problem with gradients and probably faster training

training:
  batch_size: 2
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments: ['bass', 'drums', 'other', 'vocals', 'guitar', 'piano']
  patience: 3
  reduce_factor: 0.95
  target_instrument: null
  num_epochs: 1000
  num_steps: 1000
  augmentation: false # enable augmentations by audiomentations and pedalboard
  augmentation_type: simple1
  use_mp3_compress: false # Deprecated
  augmentation_mix: true # Mix several stems of the same type with some probability
  augmentation_loudness: true # randomly change loudness of each stem
  augmentation_loudness_type: 1 # Type 1 or 2
  augmentation_loudness_min: 0.5
  augmentation_loudness_max: 1.5
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  # optimizer: prodigy
  optimizer: adam
  # lr: 1.0
  lr: 1.0e-5
  other_fix: false # it's needed for checking on multisong dataset if other is actually instrumental
  use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true

augmentations:
  enable: true # enable or disable all augmentations (to fast disable if needed)
  loudness: true # randomly change loudness of each stem on the range (loudness_min; loudness_max)
  loudness_min: 0.5
  loudness_max: 1.5
  mixup: true # mix several stems of same type with some probability (only works for dataset types: 1, 2, 3)
  mixup_probs: !!python/tuple # 2 additional stems of the same type (1st with prob 0.2, 2nd with prob 0.02)
    - 0.2
    - 0.02
  mixup_loudness_min: 0.5
  mixup_loudness_max: 1.5

  all:
    channel_shuffle: 0.5 # Set 0 or lower to disable
    random_inverse: 0.1 # inverse track (better lower probability)
    random_polarity: 0.5 # polarity change (multiply waveform to -1)

  vocals:
    pitch_shift: 0.1
    pitch_shift_min_semitones: -5
    pitch_shift_max_semitones: 5
    seven_band_parametric_eq: 0.1
    seven_band_parametric_eq_min_gain_db: -9
    seven_band_parametric_eq_max_gain_db: 9
    tanh_distortion: 0.1
    tanh_distortion_min: 0.1
    tanh_distortion_max: 0.7
  bass:
    pitch_shift: 0.1
    pitch_shift_min_semitones: -2
    pitch_shift_max_semitones: 2
    seven_band_parametric_eq: 0.1
    seven_band_parametric_eq_min_gain_db: -3
    seven_band_parametric_eq_max_gain_db: 6
    tanh_distortion: 0.1
    tanh_distortion_min: 0.1
    tanh_distortion_max: 0.5
  drums:
    pitch_shift: 0.1
    pitch_shift_min_semitones: -5
    pitch_shift_max_semitones: 5
    seven_band_parametric_eq: 0.1
    seven_band_parametric_eq_min_gain_db: -9
    seven_band_parametric_eq_max_gain_db: 9
    tanh_distortion: 0.1
    tanh_distortion_min: 0.1
    tanh_distortion_max: 0.6
  other:
    pitch_shift: 0.1
    pitch_shift_min_semitones: -4
    pitch_shift_max_semitones: 4
    gaussian_noise: 0.1
    gaussian_noise_min_amplitude: 0.001
    gaussian_noise_max_amplitude: 0.015
    time_stretch: 0.1
    time_stretch_min_rate: 0.8
    time_stretch_max_rate: 1.25


inference:
  batch_size: 1
  dim_t: 1101
  num_overlap: 2
  normalize: false
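For inference planning it helps to translate the audio settings above into time: `chunk_size` is given in samples, so each processed window lasts `chunk_size / sample_rate` seconds, and this 6-stem model returns one output per instrument for each window. A rough calculation under those assumptions:

```python
sample_rate = 44100
chunk_size = 588800   # audio.chunk_size from the SW config
num_stems = 6         # bass, drums, other, vocals, guitar, piano

chunk_seconds = chunk_size / sample_rate
print(f"window length: {chunk_seconds:.2f} s per chunk")  # ~13.35 s
print(f"stems per chunk: {num_stems}")
```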
models/Roformer/BandSplit/model_BandSplit-Roformer_FNO_by-Unwa.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f35bf6d87b2863372388e85c2d9679e5b7651e5c2ddd23aab1480f7af10b90ca
size 332004435
models/Roformer/BandSplit/model_BandSplit-Roformer_Karaoke_Frazer_by-becruily.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eb90ee24c1154d83fbcfd27e96182f19e061557cc6e4746953125e08c29389f9
size 204436907
models/Roformer/BandSplit/model_BandSplit-Roformer_SW_by-jarredou.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:24e7d35ee9c64415673d3fd33e06a67cac2c103c5df6267ba1576459c775916e
size 699412152
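The `.ckpt` entries above are Git LFS pointer files: the actual weights are fetched by LFS, and each pointer records the expected SHA-256 (`oid`) and byte size. A small verification sketch, assuming the real checkpoint has already been downloaded to the same path (`verify_lfs_object` is an illustrative helper, not part of the repo):

```python
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    # Compare the downloaded file against the oid/size recorded in the LFS pointer.
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(1 << 20), b""):
            digest.update(block)
    return digest.hexdigest() == expected_oid

ok = verify_lfs_object(
    "models/Roformer/BandSplit/model_BandSplit-Roformer_FNO_by-Unwa.ckpt",
    "f35bf6d87b2863372388e85c2d9679e5b7651e5c2ddd23aab1480f7af10b90ca",
    332004435,
)
print(ok)
```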
models/Roformer/MelBand/config_MelBand-Roformer_Duality_v1_by-Aname.yaml
ADDED
@@ -0,0 +1,72 @@
audio:
  chunk_size: 661500
  dim_f: 1024
  dim_t: 1101
  hop_length: 441
  n_fft: 2048
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000

model:
  dim: 384
  depth: 6
  stereo: true
  num_stems: 1
  time_transformer_depth: 1
  freq_transformer_depth: 1
  num_bands: 60
  dim_head: 64
  heads: 8
  attn_dropout: 0
  ff_dropout: 0
  flash_attn: True
  dim_freqs_in: 1025
  sample_rate: 44100 # needed for mel filter bank from librosa
  stft_n_fft: 2048
  stft_hop_length: 441
  stft_win_length: 2048
  stft_normalized: False
  mask_estimator_depth: 2
  multi_stft_resolution_loss_weight: 1.0
  multi_stft_resolutions_window_sizes: !!python/tuple
    - 4096
    - 2048
    - 1024
    - 512
    - 256
  multi_stft_hop_size: 147
  multi_stft_normalized: False

training:
  batch_size: 4
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
    - vocals
    - other
  lr: 1.0e-05
  patience: 2
  reduce_factor: 0.95
  target_instrument: vocals
  num_epochs: 1000
  num_steps: 1000
  augmentation: false # enable augmentations by audiomentations and pedalboard
  augmentation_type: null
  use_mp3_compress: false # Deprecated
  augmentation_mix: false # Mix several stems of the same type with some probability
  augmentation_loudness: false # randomly change loudness of each stem
  augmentation_loudness_type: 1 # Type 1 or 2
  augmentation_loudness_min: 0
  augmentation_loudness_max: 0
  q: 0.95
  coarse_loss_clip: false
  ema_momentum: 0.999
  optimizer: adam
  other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental
  use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true

inference:
  batch_size: 4
  dim_t: 1101
  num_overlap: 4
models/Roformer/MelBand/model_MelBand-Roformer_Duality_v1_by-Aname.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:69c244e76d7142a948f0d41c67e293112ba84f12a28c89d24b22a3fecd9ef79e
size 913043907
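Before wiring any of these checkpoints into a separation pipeline, it can be useful to load the downloaded file with PyTorch and confirm it looks consistent with its config. This is only a generic sketch; whether a given checkpoint stores a raw state dict or wraps it under a "state_dict" key is an assumption to verify per model:

```python
import torch

ckpt_path = "models/Roformer/MelBand/model_MelBand-Roformer_Duality_v1_by-Aname.ckpt"
ckpt = torch.load(ckpt_path, map_location="cpu")

# Some checkpoints are saved as {"state_dict": ...}; others are the state dict itself.
state_dict = ckpt.get("state_dict", ckpt) if isinstance(ckpt, dict) else ckpt

print(f"{len(state_dict)} tensors in checkpoint")
total_params = sum(t.numel() for t in state_dict.values() if hasattr(t, "numel"))
print(f"~{total_params / 1e6:.1f} M parameters")
```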