| import librosa | |
| from model_clap import CLAPEmbedding | |
| from model_meta_voice import MetaVoiceEmbedding | |
| from model_pyannote_embedding import PyannoteEmbedding | |
| from model_w2v_bert import W2VBERTEmbedding | |
| from model_xls import XLSREmbedding | |
| from model_hubert import HuBERTXLEmbedding | |
def test():
    """Smoke-test every speaker-embedding model on one sample file.

    Loads ``sample.wav`` once, then for each model: prints its label,
    instantiates it, extracts a speaker embedding from the waveform,
    and prints the embedding's shape.
    """
    # librosa.load resamples to its 22050 Hz default; the actual rate
    # is passed along so each model can handle resampling itself.
    wav, sr = librosa.load("sample.wav")

    # (label, constructor) pairs — add a new model here to include it
    # in the smoke test.  Labels match the original printed output.
    models = [
        ("XLS-R", XLSREmbedding),
        ("CLAP", CLAPEmbedding),
        ("MetaVoiceSE", MetaVoiceEmbedding),
        ("PyannoteSE", PyannoteEmbedding),
        ("W2VBertSE", W2VBERTEmbedding),
        ("huBERT", HuBERTXLEmbedding),
    ]
    for label, model_cls in models:
        print(label)
        model = model_cls()
        v = model.get_speaker_embedding(wav, sr)
        print(v.shape)
# Script entry point: run the embedding smoke test when executed directly.
if __name__ == '__main__':
    test()