使用ubuntu18.04
选择webrtc_audio_processing v0.3
下载地址
https://gitlab.freedesktop.org/pulseaudio/webrtc-audio-processing/-/tree/master
git clone 完
编译
# Initialise into the build/ directory, for a prefixed install into the
# install/ directory
meson . build -Dprefix=$PWD/install
# Run the actual build
ninja -C build
# Install locally
ninja -C build install
构建过程中 meson 会提示缺少的依赖,按提示安装即可。
注意:Ubuntu 18.04 自带的 meson 0.45 版本过低,配置时会报错;
本项目需要 meson 0.63 以上,需要先安装 Python 3.8,再通过 pip 安装新版 meson。
然后
~/.local/bin/meson . build -Dprefix=$PWD/install
编译完webrtc会有以下文件
就可以把include头文件和.so文件去测试
测试demo
#include <iostream>
#include <vector>
#include <sndfile.hh>
#include "modules/audio_processing/include/audio_processing.h"
#include "api/scoped_refptr.h"
// 读取 WAV 文件
bool ReadWavFile(const std::string& filename, std::vector<float>& audio_data, int& sample_rate) {
SndfileHandle file(filename);
if (file.error()) {
std::cerr << "Error opening file: " << filename << std::endl;
return false;
}
sample_rate = file.samplerate();
audio_data.resize(file.frames() * file.channels());
file.read(audio_data.data(), audio_data.size());
return true;
}
// 写入 WAV 文件
bool WriteWavFile(const std::string& filename, const std::vector<float>& audio_data, int sample_rate, int channels) {
SndfileHandle file(filename, SFM_WRITE, SF_FORMAT_WAV | SF_FORMAT_PCM_16, channels, sample_rate);
if (file.error()) {
std::cerr << "Error creating file: " << filename << std::endl;
return false;
}
file.write(audio_data.data(), audio_data.size());
return true;
}
// Demo: run WebRTC 3A processing (AEC + NS + AGC) over a mono float WAV file.
// Usage: EXE nearIn.wav farIn.wav output.wav
//
// Fixes relative to the original demo:
//  - The far-end (render) signal is now fed to ProcessReverseStream before each
//    near-end chunk; without a reference signal the enabled echo canceller
//    cannot cancel anything.
//  - The loop no longer reads past the end of the buffers on a trailing
//    partial chunk: ProcessStream always consumes num_frames() samples, so the
//    tail shorter than one 10 ms block is passed through unprocessed instead.
//  - The far file's sample rate no longer silently overwrites the near file's;
//    a mismatch is reported as an error.
int main(int argc, char* argv[]) {
    if (argc != 4) {
        printf("Usage: EXE nearIn.wav farIn.wav output.wav\n");
        return 0;
    }
    const char* nearInFile = argv[1];
    const char* farInFile = argv[2];
    const char* outputFile = argv[3];

    std::vector<float> near_data, far_data;
    int sample_rate = 0;
    int far_sample_rate = 0;
    if (!ReadWavFile(nearInFile, near_data, sample_rate) ||
        !ReadWavFile(farInFile, far_data, far_sample_rate)) {
        return -1;
    }
    // Both streams must share one rate; resampling is out of scope for the demo.
    if (far_sample_rate != sample_rate) {
        std::cerr << "Sample rate mismatch: near=" << sample_rate
                  << " far=" << far_sample_rate << std::endl;
        return -1;
    }

    std::cout << "Sample rate: " << sample_rate << std::endl;
    std::cout << "Near data (first 10 samples): ";
    for (size_t i = 0; i < 10 && i < near_data.size(); ++i) {
        std::cout << near_data[i] << " ";
    }
    std::cout << std::endl;

    // Create and configure the AudioProcessing instance.
    rtc::scoped_refptr<webrtc::AudioProcessing> apm = webrtc::AudioProcessingBuilder().Create();
    webrtc::AudioProcessing::Config config;
    config.echo_canceller.enabled = true;    // enable acoustic echo cancellation
    config.noise_suppression.enabled = true; // enable noise suppression
    config.gain_controller1.enabled = true;  // enable automatic gain control
    apm->ApplyConfig(config);

    webrtc::StreamConfig stream_config(sample_rate, 1); // mono
    apm->Initialize(); // must be initialized before processing, otherwise output stays silent

    std::vector<float> output_data(near_data.size());
    const size_t chunk = stream_config.num_frames(); // samples per 10 ms block
    const size_t total = near_data.size();

    // Process only complete 10 ms blocks; ProcessStream always consumes a
    // full block, so including a partial tail would read out of bounds.
    for (size_t i = 0; i + chunk <= total; i += chunk) {
        // Feed the far-end (render) signal first so the AEC has its reference.
        if (i + chunk <= far_data.size()) {
            float* far_channel[1] = { far_data.data() + i };
            apm->ProcessReverseStream(far_channel, stream_config, stream_config, far_channel);
        }
        float* near_channel[1] = { near_data.data() + i };
        float* output_channel[1] = { output_data.data() + i };
        apm->ProcessStream(near_channel, stream_config, stream_config, output_channel);
    }
    // Pass the unprocessable tail (< one block) through unchanged rather than
    // leaving it as silence.
    const size_t processed = (total / chunk) * chunk;
    for (size_t i = processed; i < total; ++i) {
        output_data[i] = near_data[i];
    }

    std::cout << "Output data (first 10 samples): ";
    for (size_t i = 0; i < 10 && i < output_data.size(); ++i) {
        std::cout << output_data[i] << " ";
    }
    std::cout << std::endl;

    if (!WriteWavFile(outputFile, output_data, sample_rate, 1)) {
        return -1;
    }
    std::cout << "3A processing completed. Output saved to " << outputFile << std::endl;
    return 0;
}
下图一:原始音频波形图(处理前)
下图二:经 webrtc 3A 默认配置处理后的波形图(可以看到降噪与回声消除已生效)