Below is an example of how to use the text-to-speech API in C++. After having created an AILIAVoice instance, and opened the model with ailiaVoiceOpenModelFile, use ailiaVoiceGraphemeToPhoneme to convert the text to phonemes, then use ailiaVoiceInference to perform the text-to-speech conversion, after which it is possible to get the resulting audio waveforms with ailiaVoiceGetWave. When using GPT-SoVITS, provide a reference audio file with ailiaVoiceSetReference before using ailiaVoiceInference. When using GPT-SoVITS v3, open the model with ailiaVoiceOpenGPTSoVITSV3ModelFileA. When using GPT-SoVITS v2-pro, open the model with ailiaVoiceOpenGPTSoVITSV2ProModelFileA. You can also change the CFM sampling steps with ailiaVoiceSetSampleSteps (v3 only). For Chinese, V2/V3/V2-Pro require loading both the G2P_CN and G2PW dictionaries.
#include "ailia_voice_util.h"
#include <stdio.h>
#include <vector>
#include <string>
#include <string.h>
#include "wave_reader.h"
#include "wave_writer.h"
int main(int argc, char *argv[]){
printf("Usage : ailia_voice_sample [tacotron2/gpt-sovits/gpt-sovits-en/gpt-sovits-zh/gpt-sovits-v2/gpt-sovits-v2-en/gpt-sovits-v2-zh/gpt-sovits-v3/gpt-sovits-v3-en/gpt-sovits-v3-zh/gpt-sovits-v2-pro/gpt-sovits-v2-pro-en/gpt-sovits-v2-pro-zh] [input_text]\n");
const char * input_text = "";
const char * lang = "";
const char * model = "tacotron2";
if (argc >= 2){
model = argv[1];
if (!(strcmp(model, "tacotron2") == 0 || strcmp(model, "gpt-sovits") == 0 || strcmp(model, "gpt-sovits-en") == 0 || strcmp(model, "gpt-sovits-zh") == 0 || strcmp(model, "gpt-sovits-v2") == 0 || strcmp(model, "gpt-sovits-v2-en") == 0 || strcmp(model, "gpt-sovits-v2-zh") == 0 || strcmp(model, "gpt-sovits-v3") == 0 || strcmp(model, "gpt-sovits-v3-en") == 0 || strcmp(model, "gpt-sovits-v3-zh") == 0 || strcmp(model, "gpt-sovits-v2-pro") == 0 || strcmp(model, "gpt-sovits-v2-pro-en") == 0 || strcmp(model, "gpt-sovits-v2-pro-zh") == 0)){
printf("model must be tacotron2, gpt-sovits, gpt-sovits-en, gpt-sovits-zh, gpt-sovits-v2, gpt-sovits-v2-en, gpt-sovits-v2-zh, gpt-sovits-v3, gpt-sovits-v3-en, gpt-sovits-v3-zh, gpt-sovits-v2-pro, gpt-sovits-v2-pro-en or gpt-sovits-v2-pro-zh\n");
return -1;
}
}
if (argc >= 3){
input_text = argv[2];
}
if (strcmp(model, "tacotron2") == 0 || strcmp(model, "gpt-sovits-en") == 0 || strcmp(model, "gpt-sovits-v2-en") == 0 || strcmp(model, "gpt-sovits-v3-en") == 0 || strcmp(model, "gpt-sovits-v2-pro-en") == 0){
if (strlen(input_text) == 0){
input_text = u8"Hello world.";
}
lang = "en";
}else if (strcmp(model, "gpt-sovits-zh") == 0 || strcmp(model, "gpt-sovits-v2-zh") == 0 || strcmp(model, "gpt-sovits-v3-zh") == 0 || strcmp(model, "gpt-sovits-v2-pro-zh") == 0){
if (strlen(input_text) == 0){
input_text = u8"你好,世界。今天天气真好。";
}
lang = "zh";
}else{
if (strlen(input_text) == 0){
input_text = u8"こんにちは。今日は新しいAIエンジンであるアイリアSDKを紹介します。";
}
lang = "ja";
}
printf("Model : %s\n", model);
printf("Input text : %s\n", input_text);
printf("Language : %s\n", lang);
AILIAVoice *net;
int env_id = AILIA_ENVIRONMENT_ID_AUTO;
int num_thread = AILIA_MULTITHREAD_AUTO;
int memory_mode = AILIA_MEMORY_REDUCE_CONSTANT | AILIA_MEMORY_REDUCE_CONSTANT_WITH_INPUT_INITIALIZER | AILIA_MEMORY_REUSE_INTERSTAGE;
bool enable_user_dictionary = true;
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceCreate error %d\n", status);
return -1;
}
if (strcmp(model, "gpt-sovits") == 0 || strcmp(model, "gpt-sovits-en") == 0 || strcmp(model, "gpt-sovits-v2") == 0 || strcmp(model, "gpt-sovits-v2-en") == 0 || strcmp(model, "gpt-sovits-v3") == 0 || strcmp(model, "gpt-sovits-v3-en") == 0 || strcmp(model, "gpt-sovits-v2-pro") == 0 || strcmp(model, "gpt-sovits-v2-pro-en") == 0){
if (enable_user_dictionary){
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceSetUserDictionaryFileA error %d\n", status);
return -1;
}
}
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceOpenDictionaryFileA error %d\n", status);
return -1;
}
}
if (strcmp(model, "gpt-sovits-en") == 0 || strcmp(model, "gpt-sovits-v2-en") == 0 || strcmp(model, "gpt-sovits-v3-en") == 0 || strcmp(model, "gpt-sovits-v2-pro-en") == 0){
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceOpenDictionaryFileA g2p_en error %d\n", status);
return -1;
}
}
if (strcmp(model, "gpt-sovits-zh") == 0){
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceOpenDictionaryFileA g2p_cn error %d\n", status);
return -1;
}
}
if (strcmp(model, "gpt-sovits-v2-zh") == 0 || strcmp(model, "gpt-sovits-v3-zh") == 0 || strcmp(model, "gpt-sovits-v2-pro-zh") == 0){
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceOpenDictionaryFileA g2p_cn error %d\n", status);
return -1;
}
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceOpenDictionaryFileA g2pw error %d\n", status);
return -1;
}
}
if (strcmp(model, "tacotron2") == 0){
}else if (strcmp(model, "gpt-sovits-v3") == 0 || strcmp(model, "gpt-sovits-v3-en") == 0 || strcmp(model, "gpt-sovits-v3-zh") == 0){
status =
ailiaVoiceOpenGPTSoVITSV3ModelFileA(net,
"../onnx/gpt-sovits-v3/t2s_encoder.onnx",
"../onnx/gpt-sovits-v3/t2s_fsdec.onnx",
"../onnx/gpt-sovits-v3/t2s_sdec.onnx",
"../onnx/gpt-sovits-v3/cnhubert.onnx",
"../onnx/gpt-sovits-v3/vq_model.onnx",
"../onnx/gpt-sovits-v3/vq_cfm.onnx",
"../onnx/gpt-sovits-v3/bigvgan_model.onnx",
"../onnx/gpt-sovits-v3/chinese-roberta.onnx",
"../onnx/gpt-sovits-v3/vocab.txt");
}else if (strcmp(model, "gpt-sovits-v2-pro") == 0 || strcmp(model, "gpt-sovits-v2-pro-en") == 0 || strcmp(model, "gpt-sovits-v2-pro-zh") == 0){
status =
ailiaVoiceOpenGPTSoVITSV2ProModelFileA(net,
"../onnx/gpt-sovits-v3/t2s_encoder.onnx",
"../onnx/gpt-sovits-v3/t2s_fsdec.onnx",
"../onnx/gpt-sovits-v3/t2s_sdec.opt.onnx",
"../onnx/gpt-sovits-v3/cnhubert.onnx",
"../onnx/gpt-sovits-v2-pro/vits.onnx",
"../onnx/gpt-sovits-v2-pro/sv.onnx",
"../onnx/gpt-sovits-v2-pro/chinese-roberta.onnx",
"../onnx/gpt-sovits-v2-pro/vocab.txt");
}else if (strcmp(model, "gpt-sovits-zh") == 0){
status =
ailiaVoiceOpenGPTSoVITSV1ModelFileA(net,
"../onnx/gpt-sovits-zh/t2s_encoder.onnx",
"../onnx/gpt-sovits-zh/t2s_fsdec.onnx",
"../onnx/gpt-sovits-zh/t2s_sdec.opt3.onnx",
"../onnx/gpt-sovits-zh/vits.onnx",
"../onnx/gpt-sovits-zh/cnhubert.onnx");
}else if (strcmp(model, "gpt-sovits-v2") == 0 || strcmp(model, "gpt-sovits-v2-en") == 0 || strcmp(model, "gpt-sovits-v2-zh") == 0){
status =
ailiaVoiceOpenGPTSoVITSV2ModelFileA(net,
"../onnx/gpt-sovits-v2/t2s_encoder.onnx",
"../onnx/gpt-sovits-v2/t2s_fsdec.onnx",
"../onnx/gpt-sovits-v2/t2s_sdec.onnx",
"../onnx/gpt-sovits-v2/vits.onnx",
"../onnx/gpt-sovits-v2/cnhubert.onnx",
"../onnx/gpt-sovits-v2/chinese-roberta.onnx",
"../onnx/gpt-sovits-v2/vocab.txt");
}else{
status =
ailiaVoiceOpenGPTSoVITSV1ModelFileA(net,
"../onnx/gpt-sovits/t2s_encoder.onnx",
"../onnx/gpt-sovits/t2s_fsdec.onnx",
"../onnx/gpt-sovits/t2s_sdec.opt3.onnx",
"../onnx/gpt-sovits/vits.onnx",
"../onnx/gpt-sovits/cnhubert.onnx");
}
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceOpenModelFileA error %d\n", status);
return -1;
}
if (strcmp(model, "gpt-sovits") == 0 || strcmp(model, "gpt-sovits-en") == 0 || strcmp(model, "gpt-sovits-zh") == 0 || strcmp(model, "gpt-sovits-v2") == 0 || strcmp(model, "gpt-sovits-v2-en") == 0 || strcmp(model, "gpt-sovits-v2-zh") == 0 || strcmp(model, "gpt-sovits-v3") == 0 || strcmp(model, "gpt-sovits-v3-en") == 0 || strcmp(model, "gpt-sovits-v3-zh") == 0 || strcmp(model, "gpt-sovits-v2-pro") == 0 || strcmp(model, "gpt-sovits-v2-pro-en") == 0 || strcmp(model, "gpt-sovits-v2-pro-zh") == 0){
int sampleRate, nChannels, nSamples;
const char *ref_audio = "../onnx/gpt-sovits/reference_audio_girl.wav";
const char *ref_text;
int ref_g2p_type;
ref_text = u8"水をマレーシアから買わなくてはならない。";
std::vector<float> wave = read_wave_file(ref_audio, &sampleRate, &nChannels, &nSamples);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGraphemeToPhoneme error %d\n", status);
return -1;
}
unsigned int len = 0;
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetFeatureLength error %d\n", status);
return -1;
}
std::vector<char> ref_features;
ref_features.resize(len);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetFeatures error %d\n", status);
return -1;
}
printf("Reference Features : %s\n", &ref_features[0]);
status =
ailiaVoiceSetReference(net, &wave[0], wave.size() *
sizeof(
float), nChannels, sampleRate, &ref_features[0]);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceSetReference error %d\n", status);
return -1;
}
}
std::vector<char> features;
if (strcmp(model, "tacotron2") == 0){
}else{
if (strcmp(model, "gpt-sovits") == 0 || strcmp(model, "gpt-sovits-v2") == 0 || strcmp(model, "gpt-sovits-v3") == 0 || strcmp(model, "gpt-sovits-v2-pro") == 0){
}else if (strcmp(model, "gpt-sovits-zh") == 0 || strcmp(model, "gpt-sovits-v2-zh") == 0 || strcmp(model, "gpt-sovits-v3-zh") == 0 || strcmp(model, "gpt-sovits-v2-pro-zh") == 0){
}else{
}
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGraphemeToPhoneme error %d\n", status);
return -1;
}
unsigned int len = 0;
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetFeatureLength error %d\n", status);
return -1;
}
features.resize(len);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetFeatures error %d\n", status);
return -1;
}
printf("Features : %s\n", &features[0]);
}
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceInference error %d\n", status);
return -1;
}
unsigned int samples, channels, sampling_rate;
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetWaveInfo error %d\n", status);
return -1;
}
std::vector<float> buf(samples * channels);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetWave error %d\n", status);
return -1;
}
printf("Wave samples : %d\nWave channles : %d\nWave sampling rate : %d\n", samples, channels, sampling_rate);
write_wave_file("output.wav", buf, sampling_rate);
return 0;
}
#define AILIA_VOICE_G2P_TYPE_GPT_SOVITS_JA
GPT SOVITS Japanese.
Definition: ailia_voice.h:198
int AILIA_API ailiaVoiceGraphemeToPhoneme(struct AILIAVoice *net, const char *utf8, int g2p_type)
Perform g2p.
int AILIA_API ailiaVoiceSetReference(struct AILIAVoice *net, float *buf, unsigned int buf_size, unsigned int channels, unsigned int sampling_rate, const char *features)
Set the waveform and text as references for zero-shot voice synthesis.
#define AILIA_VOICE_DICTIONARY_TYPE_OPEN_JTALK
Format for OpenJTalk.
Definition: ailia_voice.h:43
#define AILIA_VOICE_G2P_TYPE_GPT_SOVITS_EN
GPT SOVITS English.
Definition: ailia_voice.h:187
int AILIA_API ailiaVoiceSetUserDictionaryFileA(struct AILIAVoice *net, const char *dictionary_path, int dictionary_type)
Set user dictionary into a network instance.
int AILIA_API ailiaVoiceOpenGPTSoVITSV1ModelFileA(struct AILIAVoice *net, const char *encoder, const char *decoder1, const char *decoder2, const char *wave, const char *ssl)
Set GPT-SoVITS V1 models into a network instance.
int AILIA_API ailiaVoiceInference(struct AILIAVoice *net, const char *utf8)
Perform inference.
int AILIA_API ailiaVoiceOpenGPTSoVITSV2ProModelFileA(struct AILIAVoice *net, const char *encoder, const char *decoder1, const char *decoder2, const char *ssl, const char *vits, const char *sv, const char *chinese_bert, const char *vocab)
Set GPT-SoVITS V2-Pro models into a network instance.
int AILIA_API ailiaVoiceGetFeatureLength(struct AILIAVoice *net, unsigned int *len)
Gets the size of features. (Include null)
int AILIA_API ailiaVoiceGetWave(struct AILIAVoice *net, float *buf, unsigned int buf_size)
Gets the synthesized waveform (decoded wave data).
int AILIA_API ailiaVoiceGetWaveInfo(struct AILIAVoice *net, unsigned int *samples, unsigned int *channels, unsigned int *sampling_rate)
Gets the information of wave.
#define AILIA_VOICE_G2P_TYPE_GPT_SOVITS_ZH
GPT SOVITS Chinese.
Definition: ailia_voice.h:209
int AILIA_API ailiaVoiceCreate(struct AILIAVoice **net, int env_id, int num_thread, int memory_mode, int flags, AILIAVoiceApiCallback callback, int version)
Creates a Voice instance.
#define AILIA_VOICE_FLAG_NONE
Default flag.
Definition: ailia_voice.h:172
#define AILIA_VOICE_DICTIONARY_TYPE_G2P_CN
Format for G2P_CN.
Definition: ailia_voice.h:65
int AILIA_API ailiaVoiceOpenGPTSoVITSV2ModelFileA(struct AILIAVoice *net, const char *encoder, const char *decoder1, const char *decoder2, const char *wave, const char *ssl, const char *chinese_bert, const char *vocab)
Set GPT-SoVITS V2 models into a network instance.
int AILIA_API ailiaVoiceGetFeatures(struct AILIAVoice *net, char *features, unsigned int len)
Gets the decoded features.
int AILIA_API ailiaVoiceOpenTacotron2ModelFileA(struct AILIAVoice *net, const char *encoder, const char *decoder1, const char *decoder2, const char *wave, int cleaner_type)
Set Tacotron2 models into a network instance.
int AILIA_API ailiaVoiceOpenDictionaryFileA(struct AILIAVoice *net, const char *dictionary_path, int dictionary_type)
Set dictionary into a network instance.
#define AILIA_VOICE_DICTIONARY_TYPE_G2P_EN
Format for G2P_EN.
Definition: ailia_voice.h:54
int AILIA_API ailiaVoiceOpenGPTSoVITSV3ModelFileA(struct AILIAVoice *net, const char *encoder, const char *decoder1, const char *decoder2, const char *ssl, const char *vq, const char *cfm, const char *bigvgan, const char *chinese_bert, const char *vocab)
Set GPT-SoVITS V3 models into a network instance.
#define AILIA_VOICE_API_CALLBACK_VERSION
Struct version.
Definition: ailia_voice.h:260
#define AILIA_VOICE_DICTIONARY_TYPE_G2PW
Format for G2PW (Chinese polyphone disambiguation)
Definition: ailia_voice.h:76
void AILIA_API ailiaVoiceDestroy(struct AILIAVoice *net)
It destroys the Voice instance.
#define AILIA_VOICE_CLEANER_TYPE_BASIC
BasicCleaner.
Definition: ailia_voice.h:146
Definition: ailia_voice.h:263
The userdic.dic created with pyopenjtalk can be loaded by executing the ailiaVoiceSetUserDictionaryFile API before the ailiaVoiceOpenDictionaryFile API.
To use a GPU, specify the GPU's env_id in the env_id argument of ailiaVoiceCreate. By default, AILIA_ENVIRONMENT_ID_AUTO is specified, and inference is performed on the CPU. For how to obtain the GPU's env_id, please refer to ailia_voice_sample.cpp.