ailia_voice  1.1.0.0
API usage

Overview of ailia AI Voice API

Basic usage

Below is an example of how to use the text-to-speech API in C++. After having created an AILIAVoice instance, and opened the model with ailiaVoiceOpenModelFile, use ailiaVoiceGraphemeToPhoneme to convert the text to phonemes, then use ailiaVoiceInference to perform the text-to-speech conversion, after which it is possible to get the resulting audio waveforms with ailiaVoiceGetWave. When using GPT-SoVITS, provide a reference audio file with ailiaVoiceSetReference before using ailiaVoiceInference.

#include "ailia_voice.h"
#include "ailia_voice_util.h"
#include <stdio.h>
#include <vector>
#include <string>
#include <string.h>
#include "wave_reader.h"
#include "wave_writer.h"
int main(int argc, char *argv[]){
AILIAVoiceApiCallback callback = ailiaVoiceUtilGetCallback();
printf("Usage : ailia_voice_sample [tacotron2/gpt-sovits/gpt-sovits-en] [input_text]\n");
const char * input_text = "";
const char * lang = "";
const char * model = "tacotron2";
if (argc >= 2){
model = argv[1];
if (!(strcmp(model, "tacotron2") == 0 || strcmp(model, "gpt-sovits") == 0 || strcmp(model, "gpt-sovits-en") == 0)){
printf("model must be tacotron2 or gpt-sovits\n");
return -1;
}
}
if (argc >= 3){
input_text = argv[2];
}
if (strcmp(model, "tacotron2") == 0 || strcmp(model, "gpt-sovits-en") == 0 ){
if (strlen(input_text) == 0){
input_text = u8"Hello world.";
}
lang = "en";
}else{
if (strlen(input_text) == 0){
input_text = u8"こんにちは。今日は新しいAIエンジンであるアイリアSDKを紹介します。";
}
lang = "ja";
}
printf("Model : %s\n", model);
printf("Input text : %s\n", input_text);
printf("Language : %s\n", lang);
AILIAVoice *net;
int env_id = AILIA_ENVIRONMENT_ID_AUTO;
int num_thread = AILIA_MULTITHREAD_AUTO;
int memory_mode = AILIA_MEMORY_REDUCE_CONSTANT | AILIA_MEMORY_REDUCE_CONSTANT_WITH_INPUT_INITIALIZER | AILIA_MEMORY_REUSE_INTERSTAGE;
int status = ailiaVoiceCreate(&net, env_id, num_thread, memory_mode, AILIA_VOICE_FLAG_NONE, callback, AILIA_VOICE_API_CALLBACK_VERSION);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceCreate error %d\n", status);
return -1;
}
if (strcmp(model, "gpt-sovits") == 0 || strcmp(model, "gpt-sovits-en") == 0){
status = ailiaVoiceOpenDictionaryFileA(net, "./open_jtalk_dic_utf_8-1.11", AILIA_VOICE_DICTIONARY_TYPE_OPEN_JTALK);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceOpenDictionaryFileA error %d\n", status);
return -1;
}
}
if (strcmp(model, "gpt-sovits-en") == 0){
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceOpenDictionaryFileA error %d\n", status);
return -1;
}
}
if (strcmp(model, "tacotron2") == 0){
status = ailiaVoiceOpenModelFileA(net, "../onnx/nvidia/encoder.onnx", "../onnx/nvidia/decoder_iter.onnx", "../onnx/nvidia/postnet.onnx", "../onnx/nvidia/waveglow.onnx", NULL, AILIA_VOICE_MODEL_TYPE_TACOTRON2, AILIA_VOICE_CLEANER_TYPE_BASIC);
}else{
status = ailiaVoiceOpenModelFileA(net, "../onnx/gpt-sovits/t2s_encoder.onnx", "../onnx/gpt-sovits/t2s_fsdec.onnx", "../onnx/gpt-sovits/t2s_sdec.opt.onnx", "../onnx/gpt-sovits/vits.onnx", "../onnx/gpt-sovits/cnhubert.onnx", AILIA_VOICE_MODEL_TYPE_GPT_SOVITS, AILIA_VOICE_CLEANER_TYPE_BASIC);
}
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceOpenModelFileA error %d\n", status);
return -1;
}
if (strcmp(model, "gpt-sovits") == 0 || strcmp(model, "gpt-sovits-en") == 0){
int sampleRate, nChannels, nSamples;
const char *ref_audio = "../onnx/gpt-sovits/reference_audio_girl.wav";
//const char *ref_audio = "../onnx/gpt-sovits/reference_audio_man.wav";
std::vector<float> wave = read_wave_file(ref_audio, &sampleRate, &nChannels, &nSamples);
const char *ref_text = "水をマレーシアから買わなくてはならない。";
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGraphemeToPhoneme error %d\n", status);
return -1;
}
unsigned int len = 0;
status = ailiaVoiceGetFeatureLength(net, &len);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetFeatureLength error %d\n", status);
return -1;
}
std::vector<char> ref_features;
ref_features.resize(len);
status = ailiaVoiceGetFeatures(net, &ref_features[0], len);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetFeatures error %d\n", status);
return -1;
}
printf("Reference Features : %s\n", &ref_features[0]);
status = ailiaVoiceSetReference(net, &wave[0], wave.size() * sizeof(float), nChannels, sampleRate, &ref_features[0]);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceSetReference error %d\n", status);
return -1;
}
}
std::vector<char> features;
if (strcmp(model, "tacotron2") == 0){
status = ailiaVoiceInference(net, input_text);
}else{
if (strcmp(model, "gpt-sovits") == 0){
}else{
}
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGraphemeToPhoneme error %d\n", status);
return -1;
}
unsigned int len = 0;
status = ailiaVoiceGetFeatureLength(net, &len);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetFeatureLength error %d\n", status);
return -1;
}
features.resize(len);
status = ailiaVoiceGetFeatures(net, &features[0], len);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetFeatures error %d\n", status);
return -1;
}
printf("Features : %s\n", &features[0]);
status = ailiaVoiceInference(net, &features[0]);
}
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceInference error %d\n", status);
return -1;
}
unsigned int samples, channels, sampling_rate;
status = ailiaVoiceGetWaveInfo(net, &samples, &channels, &sampling_rate);
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetWaveInfo error %d\n", status);
return -1;
}
std::vector<float> buf(samples * channels);
status = ailiaVoiceGetWave(net, &buf[0], buf.size() * sizeof(float));
if (status != AILIA_STATUS_SUCCESS){
printf("ailiaVoiceGetWave error %d\n", status);
return -1;
}
printf("Wave samples : %d\nWave channles : %d\nWave sampling rate : %d\n", samples, channels, sampling_rate);
write_wave_file("output.wav", buf, sampling_rate);
return 0;
}

GPU usage

In order to use the GPU, pass the env_id corresponding to the GPU as the env_id argument of ailiaVoiceCreate. By default, the value AILIA_ENVIRONMENT_ID_AUTO is used, which performs the inference on the CPU. See ailia_speech_sample.cpp for an example of how to determine the GPU env_id to be passed as the env_id argument.

AILIA_VOICE_MODEL_TYPE_TACOTRON2
#define AILIA_VOICE_MODEL_TYPE_TACOTRON2
Format for Tacotron2.
Definition: ailia_voice.h:69
_AILIAVoiceApiCallback
Definition: ailia_voice.h:193
ailia_voice.h
ailiaVoiceDestroy
void AILIA_API ailiaVoiceDestroy(struct AILIAVoice *net)
It destroys the Voice instance.
ailiaVoiceGetFeatures
int AILIA_API ailiaVoiceGetFeatures(struct AILIAVoice *net, char *features, unsigned int len)
Gets the decoded features.
ailiaVoiceOpenDictionaryFileA
int AILIA_API ailiaVoiceOpenDictionaryFileA(struct AILIAVoice *net, const char *dictionary_path, int dictionary_type)
Set dictionary into a network instance.
ailiaVoiceGetFeatureLength
int AILIA_API ailiaVoiceGetFeatureLength(struct AILIAVoice *net, unsigned int *len)
Gets the size of features. (Includes the null terminator)
ailiaVoiceGetWaveInfo
int AILIA_API ailiaVoiceGetWaveInfo(struct AILIAVoice *net, unsigned int *samples, unsigned int *channels, unsigned int *sampling_rate)
Gets the information of wave.
AILIA_VOICE_FLAG_NONE
#define AILIA_VOICE_FLAG_NONE
Default flag.
Definition: ailia_voice.h:117
ailiaVoiceGraphemeToPhoneme
int AILIA_API ailiaVoiceGraphemeToPhoneme(struct AILIAVoice *net, const char *utf8, int g2p_type)
Perform g2p.
ailiaVoiceInference
int AILIA_API ailiaVoiceInference(struct AILIAVoice *net, const char *utf8)
Perform inference.
AILIA_VOICE_G2P_TYPE_GPT_SOVITS_EN
#define AILIA_VOICE_G2P_TYPE_GPT_SOVITS_EN
GPT SOVITS English.
Definition: ailia_voice.h:132
ailiaVoiceOpenModelFileA
int AILIA_API ailiaVoiceOpenModelFileA(struct AILIAVoice *net, const char *encoder, const char *decoder1, const char *decoder2, const char *wave, const char *ssl, int model_type, int cleaner_type)
Set models into a network instance.
AILIA_VOICE_MODEL_TYPE_GPT_SOVITS
#define AILIA_VOICE_MODEL_TYPE_GPT_SOVITS
Format for GPT-SoVITS.
Definition: ailia_voice.h:80
AILIA_VOICE_CLEANER_TYPE_BASIC
#define AILIA_VOICE_CLEANER_TYPE_BASIC
BasicCleaner.
Definition: ailia_voice.h:91
AILIA_VOICE_DICTIONARY_TYPE_G2P_EN
#define AILIA_VOICE_DICTIONARY_TYPE_G2P_EN
Format for G2P_EN.
Definition: ailia_voice.h:54
ailiaVoiceGetWave
int AILIA_API ailiaVoiceGetWave(struct AILIAVoice *net, float *buf, unsigned int buf_size)
Gets the decoded waveform.
AILIA_VOICE_API_CALLBACK_VERSION
#define AILIA_VOICE_API_CALLBACK_VERSION
Struct version.
Definition: ailia_voice.h:190
ailiaVoiceCreate
int AILIA_API ailiaVoiceCreate(struct AILIAVoice **net, int env_id, int num_thread, int memory_mode, int flags, AILIAVoiceApiCallback callback, int version)
Creates a Voice instance.
ailiaVoiceSetReference
int AILIA_API ailiaVoiceSetReference(struct AILIAVoice *net, float *buf, unsigned int buf_size, unsigned int channels, unsigned int sampling_rate, const char *features)
Set the waveform and text as references for zero-shot voice synthesis.
AILIA_VOICE_DICTIONARY_TYPE_OPEN_JTALK
#define AILIA_VOICE_DICTIONARY_TYPE_OPEN_JTALK
Format for OpenJTalk.
Definition: ailia_voice.h:43
AILIA_VOICE_G2P_TYPE_GPT_SOVITS_JA
#define AILIA_VOICE_G2P_TYPE_GPT_SOVITS_JA
GPT SOVITS Japanese.
Definition: ailia_voice.h:143