之前写过一篇混音的文章,ffmpeg录制麦克风声音和pc内部声音(如播放歌曲)—混音
但是有瑕疵,主要是因为本地麦克风设备的采样频率是44100,而pc内部声音的采样率是48000,所以当最终输出的文件的采样率为44100时,文件里面的声音听起来会播放的慢一些。故在音频混合前,先将pc内部声音重采样成44100,重采样的转换过程如下:
uint8_t* out_buffer = (uint8_t*)frame_audio_inner_resample->data[0];
int nb = swr_convert(audio_convert_ctx, &out_buffer, dst_nb_samples, (const uint8_t**)frame_audio_inner->data, frame_audio_inner->nb_samples);
这里面用的out_buffer只引用到了data[0],没引用到data[1],主要是因为sample_format设置的是AV_SAMPLE_FMT_S16,非平面模式。
本人在代码里面专门新建了一个线程AudioInnerResampleThreadProc,用于将系统声音从48000重采样到44100,然后将重采样数据入队列,主线程将麦克风声音和重采样的系统声音进行混合,写入文件。
下面是具体代码:
// FfmpegAudioTest.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//
#include <Windows.h>
#include <conio.h>
#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avutil.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif
/* ---- Shared global state (main thread + capture/resample worker threads) ---- */
AVFormatContext *pFormatCtx_AudioInner = NULL;   // demuxer for system ("inner"/loopback) audio capture
AVFormatContext *pFormatCtx_AudioMic = NULL;     // demuxer for microphone capture
AVFormatContext *pFormatCtx_OutputAudio = NULL;  // muxer for the mixed output file
AVCodecContext *pReadCodecContext = NULL;        // decoder for the system-audio stream
AVCodecContext *pReadMicCodecContext = NULL;     // decoder for the microphone stream
int AudioIndex = 0;                              // index of the audio stream in the output file
AVCodecContext *pCodecEncodeCtx_Audio = NULL;    // encoder for the mixed output audio
AVCodec *pCodecEncode_Audio = NULL;
/* FIFOs decouple the capture threads from the mixing loop. */
AVAudioFifo *fifo_audio_inner = NULL;            // raw system-audio samples (48000 Hz)
AVAudioFifo *fifo_audio_mic = NULL;              // raw microphone samples (44100 Hz)
AVAudioFifo *fifo_audio_inner_resample = NULL;   // system audio after resampling to 44100 Hz
SwrContext *audio_convert_ctx = NULL;            // 48000 Hz -> 44100 Hz resampler
uint8_t *picture_buf = NULL, *frame_buf = NULL;
bool bCap = true;                                // capture-loop run flag; cleared to stop the threads
int AudioFrameIndex = 0;
int AudioMicFrameIndex = 0;
int64_t cur_pts_a = 0;                           // running PTS for the system-audio stream
int64_t cur_pts_a_mic = 0;                       // running PTS for the microphone stream
/* amix filter graph: two abuffer sources (inner + mic) -> one abuffersink. */
AVFilterGraph* _filter_graph = NULL;
AVFilterContext* _filter_ctx_src_inner = NULL;
AVFilterContext* _filter_ctx_src_mic = NULL;
AVFilterContext* _filter_ctx_sink = NULL;
/* One critical section per FIFO; each FIFO is shared by exactly two threads. */
CRITICAL_SECTION AudioSection_inner;
CRITICAL_SECTION AudioSection_mic;
CRITICAL_SECTION AudioSection_inner_resample;
DWORD WINAPI AudioInnerCapThreadProc(LPVOID lpParam);      // captures system audio into fifo_audio_inner
DWORD WINAPI AudioInnerResampleThreadProc(LPVOID lpParam); // resamples 48000 -> 44100 into fifo_audio_inner_resample
DWORD WINAPI AudioMicCapThreadProc(LPVOID lpParam);        // captures microphone audio into fifo_audio_mic
/*
 * Local mirror of lavfi's private BufferSourceContext.
 * NOTE(review): this duplicates an FFmpeg-internal struct, presumably so the
 * code can peek inside an abuffer filter's priv data. The field order and
 * types must match the exact FFmpeg build being linked against, or any such
 * access is undefined behavior — fragile across FFmpeg versions; verify.
 */
typedef struct BufferSourceContext {
const AVClass *bscclass;
AVFifoBuffer *fifo;
AVRational time_base; ///< time_base to set in the output link
AVRational frame_rate; ///< frame_rate to set in the output link
unsigned nb_failed_requests;
unsigned warning_limit;
/* video only */
int w, h;
enum AVPixelFormat pix_fmt;
AVRational pixel_aspect;
char *sws_param;
AVBufferRef *hw_frames_ctx;
/* audio only */
int sample_rate;
enum AVSampleFormat sample_fmt;
int channels;
uint64_t channel_layout;
char *channel_layout_str;
int got_format_from_params;
int eof;
} BufferSourceContext;
static char *dup_wchar_to_utf8(const wchar_t *w)
{
char *s = NULL;
int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
s = (char *)av_malloc(l);
if (s)
WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
return s;
}
/*
 * Pick the codec-supported sample rate closest to 44100 Hz.
 * Falls back to 44100 when the codec does not advertise a list
 * (supported_samplerates is a 0-terminated array, or NULL).
 */
static int select_sample_rate(const AVCodec *codec)
{
    const int *rate;
    int best = 0;

    if (!codec->supported_samplerates)
        return 44100;

    for (rate = codec->supported_samplerates; *rate; rate++) {
        if (!best || abs(44100 - *rate) < abs(44100 - best))
            best = *rate;
    }
    return best;
}
/*
 * Select the codec-supported channel layout with the most channels.
 * Falls back to stereo when the codec does not advertise a list
 * (channel_layouts is a 0-terminated array, or NULL).
 *
 * Fix: the original returned the uint64_t layout through an int return
 * type, silently truncating any layout that uses channel bits >= 31.
 * Returning uint64_t is source-compatible at existing C call sites.
 */
static uint64_t select_channel_layout(const AVCodec *codec)
{
    const uint64_t *p;
    uint64_t best_ch_layout = 0;
    int best_nb_channels = 0;

    if (!codec->channel_layouts)
        return AV_CH_LAYOUT_STEREO;
    p = codec->channel_layouts;
    while (*p) {
        int nb_channels = av_get_channel_layout_nb_channels(*p);
        if (nb_channels > best_nb_channels) {
            best_ch_layout = *p;
            best_nb_channels = nb_channels;
        }
        p++;
    }
    return best_ch_layout;
}
int InitFilter(const char* filter_desc)
{
char args_inner[512];
const char* pad_name_inner = "in0";
char args_mic[512];
const char* pad_name_mic = "in1";
AVFilter* filter_src_spk = (AVFilter *)avfilter_get_by_name("abuffer");
AVFilter* filter_src_mic = (AVFilter *)avfilter_get_by_name("abuffer");
AVFilter* filter_sink = (AVFilter *)avfilter_get_by_name("abuffersink");
AVFilterInOut* filter_output_inner = avfilter_inout_alloc();
AVFilterInOut* filter_output_mic = avfilter_inout_alloc();
AVFilterInOut* filter_input = avfilter_inout_alloc();
_filter_graph = avfilter_graph_alloc();
/*
sprintf_s(args_inner, sizeof(args_inner), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%I64x",
pReadCodecContext->time_base.num,
pReadCodecContext->time_base.den,
pReadCodecContext->sample_rate,
av_get_sample_fmt_name((AVSampleFormat)pReadCodecContext->sample_fmt),
pReadCodecContext->

该博客介绍了如何使用ffmpeg解决麦克风和系统声音混合时因采样率不同导致的播放速度不匹配问题。通过重采样将系统声音从48000Hz转换为44100Hz,然后进行音频混合,确保输出文件的正确播放。同时,文章展示了使用C++和ffmpeg库实现这一过程的详细代码,包括音频捕获、重采样、混合和输出。
最低0.47元/天 解锁文章
1519

被折叠的评论
为什么被折叠?



