/*
作者:使徒保罗
邮箱:297329588szh@163.com
声明:您可以自由使用本文代码,如有任何疑问可通过群414742203交流
环境:win10 64位+vs2015
功能:qt调用The Windows Audio Session API (WASAPI),
MMDeviceAPI采集声卡示例代码
最低系统要求:客户端Windows Vista,
服务器Windows Server 2008,
手机Windows Phone 8
参考:https://msdn.microsoft.com/en-us/library/dd316756(v=vs.85).aspx
源码下载: http://download.youkuaiyun.com/download/su_vast/10039364
*/
#include "stdafx.h"
#include <cstdio>
#include <cstring>
#include <MMDeviceAPI.h>
#include <AudioClient.h>
#include <AudioPolicy.h>
// Print the failing HRESULT and jump to the function-local `Exit` cleanup
// label. NOTE: expands to a bare `if` statement — call sites in this file
// deliberately omit the trailing semicolon, so this must NOT be wrapped in
// the usual do{...}while(0); beware of using it in an if/else branch
// (dangling-else hazard).
#define EXIT_ON_ERROR(hres, errstring) \
if(FAILED(hres)) { printf("Exit error: %x. %s\n", hres,errstring);goto Exit;}
// Release a COM interface pointer once and null it to prevent double-Release.
// Same semicolon-less calling convention as EXIT_ON_ERROR.
#define SAFE_RELEASE(punk) \
if((NULL != punk)) \
{ (punk)->Release(); (punk)=NULL; }
char* ConvertLPWSTRToLPSTR(LPWSTR lpwszStrIn);
int main(int argc, char *argv[])
{
IAudioClient * _AudioClient = NULL;
IAudioRenderClient *_RenderClient = NULL;
IMMDevice * _Device = NULL;
IMMDeviceEnumerator * _DeviceEnumerator = NULL;
BYTE* pData = NULL; //渲染缓冲区
HANDLE _AudioSamplesReadyEvent = NULL;
DWORD flags = 0;
UINT32 numFramesAvailable = 0;
UINT32 numFramesPadding = 0;
UINT32 bufferFrameCount = 0;
WAVEFORMATEX * _MixFormat = NULL;
HRESULT hr;
//获取声音文件路径
TCHAR tchExeFullPath[MAX_PATH + 1];
GetModuleFileName(NULL, tchExeFullPath, MAX_PATH+1);
(_tcsrchr(tchExeFullPath, _T('\\')))[1] = 0;
wcscat_s(tchExeFullPath, MAX_PATH + 1, _T("music_stereo_48kHz_16bit.pcm"));
//打开声音文件
FILE* file = NULL;
fopen_s(&file, ConvertLPWSTRToLPSTR(tchExeFullPath), "r+b");
if (NULL == file)
{
printf("open pcm file failed.\n");
goto Exit;
}
//初始化Com库
hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
EXIT_ON_ERROR(hr, "Com init failed")
//创建Com对象IMMDeviceEnumerator
hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, IID_PPV_ARGS(&_DeviceEnumerator));
EXIT_ON_ERROR(hr, "Create IMMDeviceEnumerator object failed")
//获取声音播放设备对象IMMDevice
hr = _DeviceEnumerator->GetDefaultAudioEndpoint(eRender, eMultimedia, &_Device);
EXIT_ON_ERROR(hr, "Create IMMDevice object failed")
//创建Com对象IAudioClient
hr = _Device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, reinterpret_cast<void **>(&_AudioClient));
EXIT_ON_ERROR(hr, "Create IAudioClient object failed")
hr = _AudioClient->GetMixFormat(&_MixFormat);
EXIT_ON_ERROR(hr, "retrieve audio device mixformat failed")
//调整采样设备的采样深度到16位
//https://msdn.microsoft.com/en-us/library/windows/desktop/dd390970(v=vs.85).aspx
//https://msdn.microsoft.com/en-us/library/windows/desktop/dd390971(v=vs.85).aspx
if (WAVE_FORMAT_EXTENSIBLE == _MixFormat->wFormatTag)
{
PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(_MixFormat);
if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
{
pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
pEx->Samples.wValidBitsPerSample = 16;
_MixFormat->wBitsPerSample = 16;
_MixFormat->nBlockAlign = _MixFormat->nChannels * _MixFormat->wBitsPerSample / 8;
_MixFormat->nAvgBytesPerSec = _MixFormat->nBlockAlign * _MixFormat->nSamplesPerSec;
}
}
else if (WAVE_FORMAT_IEEE_FLOAT == _MixFormat->wFormatTag)
{
_MixFormat->wFormatTag = WAVE_FORMAT_PCM;
_MixFormat->wBitsPerSample = 16;
_MixFormat->nBlockAlign = _MixFormat->nChannels*_MixFormat->wBitsPerSample / 8;
_MixFormat->nAvgBytesPerSec = _MixFormat->nBlockAlign*_MixFormat->nSamplesPerSec;
}
//初始化音频引擎
hr = _AudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, 0, 0, _MixFormat, NULL);
EXIT_ON_ERROR(hr, "Initialize audio engine failed")
_AudioSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
if (_AudioSamplesReadyEvent == NULL)
{
printf("Unable to create samples ready event");
return false;
}
hr = _AudioClient->SetEventHandle(_AudioSamplesReadyEvent);
EXIT_ON_ERROR(hr, "Unable to set ready event")
hr = _AudioClient->GetService(IID_PPV_ARGS(&_RenderClient));
EXIT_ON_ERROR(hr, "Unable to get new render client")
//获取实际分配的缓冲区
hr = _AudioClient->GetBufferSize(&bufferFrameCount);
EXIT_ON_ERROR(hr, "Get the actual size of the allocated buffer failed")
//获取初始操作的整个缓冲区
// hr = _RenderClient->GetBuffer(bufferFrameCount, &pData);
EXIT_ON_ERROR(hr, "retrieve render audio buffer frame count failed")
hr = _AudioClient->Start();
EXIT_ON_ERROR(hr, "play audio failed")
while (AUDCLNT_BUFFERFLAGS_SILENT != flags)
{
WaitForSingleObject(_AudioSamplesReadyEvent, INFINITE);
//获取渲染缓冲区还未播放完的数据帧数
_AudioClient->GetCurrentPadding(&numFramesPadding);
if (bufferFrameCount == numFramesPadding) continue;
//获取空余的缓冲区长度,缓冲区的最大长度是可以设置的
numFramesAvailable = bufferFrameCount - numFramesPadding;
//写入数据
hr = _RenderClient->GetBuffer(numFramesAvailable, &pData);
//加载数据到pData
size_t nReadsize = fread_s(pData, numFramesAvailable*_MixFormat->nBlockAlign, sizeof(char), numFramesAvailable*_MixFormat->nBlockAlign, file);
//printf("%s\n", pData);
printf("%d\n", nReadsize);
_RenderClient->ReleaseBuffer(numFramesAvailable, flags);
}
Exit:
SAFE_RELEASE(_DeviceEnumerator)
SAFE_RELEASE(_Device)
SAFE_RELEASE(_DeviceEnumerator)
SAFE_RELEASE(_DeviceEnumerator)
SAFE_RELEASE(_DeviceEnumerator)
CoUninitialize();
return 0;
}
// Convert a wide (UTF-16) string to a heap-allocated ANSI string using the
// system code page (CP_ACP). The result carries two extra NUL bytes (double
// NUL termination). The caller owns the returned buffer and should release
// it with delete[]. Returns NULL when the input is NULL.
char* ConvertLPWSTRToLPSTR(LPWSTR lpwszStrIn)
{
	if (lpwszStrIn == NULL)
		return NULL;

	const int cchIn = wcslen(lpwszStrIn);
	// First call measures the required byte count; reserve two extra bytes
	// for the double NUL termination.
	const int cbOut = WideCharToMultiByte(CP_ACP, 0, lpwszStrIn, cchIn, NULL, 0, 0, 0) + 2;
	char* pszConverted = new char[cbOut];
	if (pszConverted)
	{
		memset(pszConverted, 0x00, cbOut);
		WideCharToMultiByte(CP_ACP, 0, lpwszStrIn, cchIn, pszConverted, cbOut, 0, 0);
	}
	return pszConverted;
}