#include "sami_core.h" SAMICoreFeatureArray* findWantedFeature(SAMICoreFeatureSet* f_set, SAMICorePropertyId feature_id) { for(int i = 0; i < f_set->numFeatureTypes; ++i) { if(f_set->set[i].featureID == feature_id) { return &(f_set->set[i]); } } return nullptr; } // step 1, create handle SAMICoreHandle handle; SAMICoreExtractorCreateParam param; param.sampleRate = sample_rate; param.numChannel = num_channels; int ret = SAMICoreCreateHandleByIdentify(&handle, SAMICoreIdentify_Extractor_F0Detection,¶m); assert(ret == SAMI_OK); // step 2, create input audio block SAMICoreAudioBuffer in_audio_buffer; in_audio_buffer.numberChannels = num_channels; in_audio_buffer.numberSamples = 0; in_audio_buffer.isInterleave = 0; in_audio_buffer.data = new float*[num_channels]; SAMICoreBlock in_block; in_block.dataType = SAMICoreDataType_AudioBuffer; in_block.numberAudioData = 1; in_block.audioData = &in_audio_buffer; // step 3, process bloock by block for(;hasAudioSamples();) { updateInputBuffer(in_audio_buffer); ret = SAMICoreProcess(handle, &in_block, NULL); assert(ret == SAMI_OK); // step 3.1, get beat tracking results SAMICoreProperty frame_features; SAMICoreGetPropertyById(handle, SAMICorePropertyID_FrameFeatures, &frame_features); SAMICoreFeatureSet* feature_set = (SAMICoreFeatureSet*)(frame_features.data); if(feature_set != NULL) { SAMICoreFeatureArray* feature_result = findWantedFeature(feature_set, SAMICoreIdentify_Extractor_F0); if(feature_result != NULL) { float timestamp = feature_result->array[0].time; float value = feature_result->array[0].values[0]; cout << "time:" << timestamp << ", value:" << value << endl; } SAMICoreDestroyProperty(&frame_features); } } // step 4, remember release resource ret = SAMICoreDestroyHandle(handle); assert(ret == SAMI_OK); delete[] in_audio_buffer.data;
Pass in the sample rate and the number of channels, and create a handle via SAMICoreCreateHandleByIdentify:
```cpp
SAMICoreHandle handle;
SAMICoreExtractorCreateParam param;
param.sampleRate = sample_rate;
param.numChannel = num_channels;

int ret = SAMICoreCreateHandleByIdentify(&handle, SAMICoreIdentify_Extractor_F0Detection, &param);
```
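As a usage sketch, handle creation can be wrapped in a small helper. The concrete sample rate below is illustrative, and the error handling simply mirrors the assert used elsewhere in this guide:

```cpp
#include <cassert>
#include "sami_core.h"

// Minimal sketch: create an F0 extractor for a mono 44.1 kHz stream.
// The concrete values are illustrative; use the parameters of your own audio.
SAMICoreHandle createF0Extractor() {
    SAMICoreExtractorCreateParam param;
    param.sampleRate = 44100;  // illustrative sample rate
    param.numChannel = 1;      // F0 detection processes mono data

    SAMICoreHandle handle;
    int ret = SAMICoreCreateHandleByIdentify(&handle,
                                             SAMICoreIdentify_Extractor_F0Detection,
                                             &param);
    assert(ret == SAMI_OK);    // real code should handle the error instead of asserting
    return handle;
}
```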
Create a SAMICoreAudioBuffer to hold the audio samples, and a SAMICoreBlock to wrap the data to be processed. Note that F0 detection only processes mono audio:
```cpp
SAMICoreAudioBuffer in_buffer;
in_buffer.isInterleave = 0;
in_buffer.numberSamples = 0;
in_buffer.numberChannels = 1;
in_buffer.data = new float*[1];

SAMICoreBlock in_block;
in_block.dataType = SAMICoreDataType_AudioBuffer;
in_block.numberAudioData = 1;
in_block.audioData = &in_buffer;
```
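Since the extractor expects mono input, stereo material has to be downmixed first. A minimal sketch, assuming non-interleaved float channels (the downmixToMono helper below is not part of the SDK, just an illustration):

```cpp
#include <vector>

// Hypothetical helper: average the left and right channels into a mono buffer
// that can then be fed to the F0 extractor.
std::vector<float> downmixToMono(const float* left, const float* right, int numSamples) {
    std::vector<float> mono(numSamples);
    for(int i = 0; i < numSamples; ++i) {
        mono[i] = 0.5f * (left[i] + right[i]);
    }
    return mono;
}
```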
When feeding audio, simply update the data pointer to point at the correct memory; this avoids copying the sample data:
```cpp
for(;hasAudioSamples();) {
    in_buffer.data[0] = new_samples_pointer;
    in_buffer.numberSamples = new_samples_length;

    ret = SAMICoreProcess(handle, &in_block, NULL);
    assert(ret == SAMI_OK);
    // ...
}
```
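As a concrete illustration of this zero-copy update, the hypothetical driver below walks a mono sample vector in fixed-size blocks and only moves the pointer (the 512-sample block size and the processAll / mono_samples names are illustrative, not part of the SDK):

```cpp
#include <cassert>
#include <vector>
#include "sami_core.h"

// Hypothetical driver: walk a mono sample vector in fixed-size blocks and feed
// each block to the extractor by pointing the buffer at the existing memory.
void processAll(SAMICoreHandle handle, SAMICoreBlock& in_block,
                SAMICoreAudioBuffer& in_buffer, std::vector<float>& mono_samples) {
    const size_t kBlockSize = 512;  // illustrative block size
    for(size_t offset = 0; offset < mono_samples.size(); offset += kBlockSize) {
        const size_t remaining = mono_samples.size() - offset;
        // No copy: only the pointer and the sample count are updated.
        in_buffer.data[0] = mono_samples.data() + offset;
        in_buffer.numberSamples = (int)(remaining < kBlockSize ? remaining : kBlockSize);

        int ret = SAMICoreProcess(handle, &in_block, NULL);
        assert(ret == SAMI_OK);
        // ... fetch the per-frame result here, as shown in the next snippet
    }
}
```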
The input is consumed block by block, for example in blocks of 512 samples. After each call to SAMICoreProcess, the results for the current frame can be fetched; if there is not yet enough data to process, feature_set will be nullptr:
```cpp
for(;hasAudioSamples();) {
    // .. process

    // step 4.1, get F0 detection results
    SAMICoreProperty frame_features;
    SAMICoreGetPropertyById(handle, SAMICorePropertyID_FrameFeatures, &frame_features);

    SAMICoreFeatureSet* feature_set = (SAMICoreFeatureSet*)(frame_features.data);
    if(feature_set != NULL) {
        SAMICoreFeatureArray* feature_result = findWantedFeature(feature_set, SAMICoreIdentify_Extractor_F0);
        if(feature_result != NULL) {
            float timestamp = feature_result->array[0].time;
            float value = feature_result->array[0].values[0];
            std::cout << "time:" << timestamp << ", value:" << value << std::endl;
        }
        SAMICoreDestroyProperty(&frame_features);
    }
}
```
In this loop:

- SAMICoreGetPropertyById with SAMICorePropertyID_FrameFeatures fetches all of the algorithm's outputs for the current frame (note: an algorithm may emit several types of features).
- The findWantedFeature helper then selects the feature identified by SAMICoreIdentify_Extractor_F0.
- Each entry in the resulting array carries the event's timestamp and the detected value.
- Remember to call SAMICoreDestroyProperty afterwards to release the property's resources.
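If the per-frame results should be kept instead of printed, the same pattern can accumulate them into a container. A minimal sketch reusing the findWantedFeature helper from the full example above (the F0Point struct and collectCurrentFrame function are illustrative, not part of the SDK):

```cpp
#include <vector>
#include "sami_core.h"

// Illustrative container for one F0 result frame.
struct F0Point {
    float time;
    float value;
};

// Fetch the current frame's F0 result, if any, and append it to `results`.
// Mirrors the retrieval pattern shown above, including the property release.
void collectCurrentFrame(SAMICoreHandle handle, std::vector<F0Point>& results) {
    SAMICoreProperty frame_features;
    SAMICoreGetPropertyById(handle, SAMICorePropertyID_FrameFeatures, &frame_features);

    SAMICoreFeatureSet* feature_set = (SAMICoreFeatureSet*)(frame_features.data);
    if(feature_set == NULL) {
        return;  // not enough data buffered yet
    }

    SAMICoreFeatureArray* feature_result = findWantedFeature(feature_set, SAMICoreIdentify_Extractor_F0);
    if(feature_result != NULL) {
        F0Point p;
        p.time = feature_result->array[0].time;
        p.value = feature_result->array[0].values[0];
        results.push_back(p);
    }
    SAMICoreDestroyProperty(&frame_features);
}
```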
Finally, release the handle:
```cpp
ret = SAMICoreDestroyHandle(handle);
```
In addition, remember to free any memory that was allocated for the audio data (if any). For example:
```cpp
delete[] in_buffer.data;
```
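To guarantee that the handle and the channel-pointer array are released on every exit path, they can be owned by a small RAII guard; a minimal sketch (the ExtractorGuard struct is illustrative, not part of the SDK):

```cpp
#include "sami_core.h"

// Illustrative RAII guard: releases the extractor handle and the channel-pointer
// array automatically when it goes out of scope.
struct ExtractorGuard {
    SAMICoreHandle handle;
    SAMICoreAudioBuffer* buffer;

    ~ExtractorGuard() {
        SAMICoreDestroyHandle(handle);
        if(buffer != NULL && buffer->data != NULL) {
            delete[] buffer->data;  // only the pointer array was allocated with new[]
            buffer->data = NULL;
        }
    }
};
```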