## 安装环境(linux)
下载代码、进入目录、安装依赖
!git clone https://github.com/open-mmlab/Amphion.git
%cd Amphion/
# bash ./models/tts/maskgct/env.sh
!pip install -r requirements.txt -U
!sudo apt-get update && sudo apt-get install espeak-ng
创建 requirements.txt
# sudo apt-get update && sudo apt-get install espeak-ng
setuptools
ruamel.yaml
tqdm
tensorboard
tensorboardX
torch # ==2.0.1
transformers==4.41.1
encodec
black # ==24.1.1
oss2
phonemizer
g2p_en
accelerate # ==0.31.0
funasr
zhconv
zhon
modelscope
# git+https://github.com/lhotse-speech/lhotse
timm
jieba
cn2an
unidecode
cos-python-sdk-v5
pypinyin
jiwer
omegaconf
pyworld
py3langid==0.2.2
LangSegment
onnxruntime
pyopenjtalk
pykakasi
openai-whisper
## 下载模型
# Download the MaskGCT checkpoints needed for inference, plus the
# w2v-bert-2.0 semantic feature extractor, from ModelScope into ./ckpts.
from modelscope import snapshot_download

# Only the checkpoint files used by the inference pipeline are fetched.
maskgct_files = [
    'semantic_codec/model.safetensors',
    'acoustic_codec/model.safetensors',
    'acoustic_codec/model_1.safetensors',
    't2s_model/model.safetensors',
    's2a_model/s2a_model_1layer/model.safetensors',
    's2a_model/s2a_model_full/model.safetensors',
]
snapshot_download('amphion/MaskGCT', allow_patterns=maskgct_files, local_dir='./ckpts')
snapshot_download('AI-ModelScope/w2v-bert-2.0', local_dir='./ckpts/w2v-bert-2.0')
#### 重要!
# 修改 models/tts/maskgct/maskgct_utils.py
# "facebook/w2v-bert-2.0" -> "./ckpts/w2v-bert-2.0"
## 加载模型和权重
# Build every MaskGCT component and load its downloaded weights.
from models.tts.maskgct.maskgct_utils import *
from huggingface_hub import hf_hub_download
# Fix: `import safetensors` alone does NOT import the optional
# `safetensors.torch` submodule, so the `safetensors.torch.load_model`
# calls below would only work if some other library imported it as a
# side effect. Import the submodule explicitly.
import safetensors.torch
import soundfile as sf

# build model on the first GPU
device = torch.device("cuda:0")
cfg_path = "./models/tts/maskgct/config/maskgct.json"
cfg = load_config(cfg_path)
# 1. build semantic model (w2v-bert-2.0) plus its feature-normalization stats
semantic_model, semantic_mean, semantic_std = build_semantic_model(device)
# 2. build semantic codec
semantic_codec = build_semantic_codec(cfg.model.semantic_codec, device)
# 3. build acoustic codec (encoder and decoder have separate checkpoints)
codec_encoder, codec_decoder = build_acoustic_codec(cfg.model.acoustic_codec, device)
# 4. build text-to-semantic (t2s) model
t2s_model = build_t2s_model(cfg.model.t2s_model, device)
# 5. build semantic-to-acoustic (s2a) models: 1-layer variant and full variant
s2a_model_1layer = build_s2a_model(cfg.model.s2a_model.s2a_1layer, device)
s2a_model_full = build_s2a_model(cfg.model.s2a_model.s2a_full, device)

# Checkpoint paths mirror the `allow_patterns` downloaded into ./ckpts above.
semantic_code_ckpt = './ckpts/semantic_codec/model.safetensors'
codec_encoder_ckpt = './ckpts/acoustic_codec/model.safetensors'
codec_decoder_ckpt = './ckpts/acoustic_codec/model_1.safetensors'
t2s_model_ckpt = './ckpts/t2s_model/model.safetensors'
s2a_1layer_ckpt = './ckpts/s2a_model/s2a_model_1layer/model.safetensors'
s2a_full_ckpt = './ckpts/s2a_model/s2a_model_full/model.safetensors'

# Load each checkpoint into its module in place.
for _module, _ckpt in (
    (semantic_codec, semantic_code_ckpt),
    (codec_encoder, codec_encoder_ckpt),
    (codec_decoder, codec_decoder_ckpt),
    (t2s_model, t2s_model_ckpt),
    (s2a_model_1layer, s2a_1layer_ckpt),
    (s2a_model_full, s2a_full_ckpt),
):
    safetensors.torch.load_model(_module, _ckpt)
## 进行测试
# Inference: synthesize `target_text` in the voice of the prompt speaker.
save_path = "test.mp3"
# Reference audio used for voice cloning.
prompt_wav_path = "./models/tts/maskgct/wav/prompt.wav"
# Transcript of the prompt audio (leading space kept as in the original recipe).
prompt_text = " We do not break. We never give in. We never back down."
target_text = "In this paper, we introduce MaskGCT, a fully non-autoregressive TTS model that eliminates the need for explicit alignment information between text and speech supervision."
# Specify the target duration (in seconds). If target_len = None, the pipeline
# uses a simple rule to predict the target duration instead.
target_len = 18
# NOTE(review): all constructor arguments are positional — this order must
# match the MaskGCT_Inference_Pipeline signature in maskgct_utils.
maskgct_inference_pipeline = MaskGCT_Inference_Pipeline(
    semantic_model,
    semantic_codec,
    codec_encoder,
    codec_decoder,
    t2s_model,
    s2a_model_1layer,
    s2a_model_full,
    semantic_mean,
    semantic_std,
    device,
)
# "en", "en" appear to be the prompt and target languages — TODO confirm
# against the maskgct_inference signature.
recovered_audio = maskgct_inference_pipeline.maskgct_inference(
    prompt_wav_path, prompt_text, target_text, "en", "en", target_len=target_len
)
# 24000 is assumed to be the codec's output sample rate — verify against config.
sf.write(save_path, recovered_audio, 24000)
## notebook
# In a notebook, render an inline audio player for the synthesized file.
from IPython.display import display, Audio
display(Audio(save_path))
## 附录
https://github.com/open-mmlab/Amphion