Assigning INT_MIN (-2147483648) to an int

This article explains an error encountered in C++ programming where the unary minus operator is applied to an unsigned type: why the problem arises, how it shows up, and how to fix it, with example code showing how to avoid the compiler warning or error.

I recently ran into the following problem while programming:

#include <iostream>

using namespace std;

int main()
{	
	int n = -2147483648;
	//cout << (1 > -2147483648) << endl;
	
	return 0;
}

Compiling with VS2012 reports: error C4146: unary minus operator applied to unsigned type, result still unsigned

Compiling with g++ reports:

test.c:7:2: warning: this decimal constant is unsigned only in ISO C90 [enabled by default]
Searching online, I found that earlier versions of Visual Studio reported this only as a warning, while VS2012 reports it directly as an error, treating it more severely.


In fact, this problem occurs whenever a programmer tries to express the minimum integer value -2147483648. The value cannot be written as the literal -2147483648, because the expression is processed in two steps:

1. The number 2147483648 is evaluated. Because 2147483648 is greater than the maximum int value 2147483647, its type is not int but unsigned int.

2. Unary minus is applied to that value, producing an unsigned result, which happens to be 2147483648.

Because the result of this implicit conversion to an unsigned type can lead to unexpected behavior, the compiler issues a warning, and VS2012 goes further and reports an error.
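These two steps can be reproduced by hand with explicitly unsigned values. A minimal sketch (assuming a 32-bit unsigned int, as on the compilers above; the variable names are only for illustration):

#include <iostream>

using namespace std;

int main()
{
    unsigned int u = 2147483648u;  // step 1: the literal is already an unsigned value
    unsigned int neg = 0u - u;     // step 2: "negating" it wraps around modulo 2^32
    cout << neg << endl;           // prints 2147483648 -- the intended negative value never existed

    return 0;
}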


As for the unexpected behavior, see the commented-out line in the code above: cout << (1 > -2147483648) << endl;

Because -2147483648 is converted to the unsigned value 2147483648, and 1 is also promoted to unsigned in the comparison, the result is 0. My intention with this line was to compare two int values, with an expected result of 1.
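The same conversion explains the comparison result. A minimal sketch, spelling out the operands the compiler effectively compares so the snippet itself compiles cleanly (on a modern 64-bit g++ in C++11 mode the literal may instead become a 64-bit type, and the problem would not reproduce):

#include <iostream>

using namespace std;

int main()
{
    // What the compiler effectively compares: -2147483648 has become the
    // unsigned value 2147483648, and 1 is promoted to unsigned as well.
    cout << (1u > 2147483648u) << endl;  // prints 0

    // What was intended: a signed comparison between two int values.
    int minimum = -2147483647 - 1;
    cout << (1 > minimum) << endl;       // prints 1

    return 0;
}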


How do we avoid the compiler warning, or even the error?

1. Use the INT_MIN macro defined in limits.h. Why does using this macro avoid the diagnostic? Look at its definition:

#define INT_MIN     (-2147483647 - 1) /* minimum (signed) int value */

It avoids the problem by not spelling out the constant -2147483648 directly.

2. Following the same idea, when assigning the minimum int value to an int variable you can also write n = -2147483647 - 1, or, if you feel like being unconventional, n = -2147483646 - 2. As long as the literal -2147483648 never appears, no conversion to unsigned takes place. A corrected version of the program is sketched below.
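For completeness, a corrected version of the original program (a sketch; INT_MIN comes from limits.h, or <climits> in C++):

#include <iostream>
#include <climits>   // defines INT_MIN as (-2147483647 - 1)

using namespace std;

int main()
{
    int n = INT_MIN;            // no diagnostic: the literal -2147483648 never appears
    int m = -2147483647 - 1;    // equivalent hand-written form

    cout << n << " " << m << endl;  // prints -2147483648 -2147483648
    cout << (1 > n) << endl;        // prints 1, as originally intended

    return 0;
}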

