AudioStream.h
class AudioDevice;

typedef unsigned int app_type_t;

class StreamPrimary {
public:
    StreamPrimary(audio_io_handle_t handle,
                  const std::set<audio_devices_t> &devices,
                  struct audio_config *config);
    virtual ~StreamPrimary();
    uint32_t GetSampleRate();
    uint32_t GetBufferSize();
    audio_format_t GetFormat();
    audio_channel_mask_t GetChannelMask();
    int getPalDeviceIds(const std::set<audio_devices_t> &halDeviceIds, pal_device_id_t* palOutDeviceIds);
    audio_io_handle_t GetHandle();
    int GetUseCase();
    std::mutex write_wait_mutex_;
    std::condition_variable write_condition_;
    std::mutex stream_mutex_;
    bool write_ready_;
    std::mutex drain_wait_mutex_;
    std::condition_variable drain_condition_;
    bool drain_ready_;
    stream_callback_t client_callback;
    void *client_cookie;
    static int GetDeviceAddress(struct str_parms *parms, int *card_id,
                                int *device_num);
    int GetLookupTableIndex(const struct string_to_enum *table,
                            const int table_size, int value);
    bool GetSupportedConfig(bool isOutStream,
                            struct str_parms *query, struct str_parms *reply);
    virtual int RouteStream(const std::set<audio_devices_t>&, bool force_device_switch = false) = 0;
    bool isStarted() { return stream_started_; }

protected:
    struct pal_stream_attributes streamAttributes_;
    pal_stream_handle_t* pal_stream_handle_;
    audio_io_handle_t handle_;
    pal_device_id_t pal_device_id_;
    struct audio_config config_;
    char address_[AUDIO_DEVICE_MAX_ADDRESS_LEN];
    bool stream_started_ = false;
    bool stream_paused_ = false;
    bool stream_flushed_ = false;
    int usecase_;
    struct pal_volume_data *volume_; /* used to cache volume */
    std::map<audio_devices_t, pal_device_id_t> mAndroidDeviceMap;
    int mmap_shared_memory_fd;
    app_type_t app_types_;
    pal_param_device_capability_t *device_cap_query_;
    app_type_t audio_power_app_types_; /* Audio PowerSave */
};
class StreamOutPrimary : public StreamPrimary {
private:
    // Helper function for write to open pal stream & configure.
    ssize_t configurePalOutputStream();
    // Helper method to standby streams upon write failures and sleep for buffer duration.
    ssize_t onWriteError(size_t bytes, ssize_t ret);

protected:
    struct pal_device* mPalOutDevice;

private:
    pal_device_id_t* mPalOutDeviceIds;
    std::set<audio_devices_t> mAndroidOutDevices;
    bool mInitialized;
    /* fixed ear_out aux_out stereo start */
    bool mIsKaraokeMuteOnCombo;
    /* fixed ear_out aux_out stereo end */
    // [offload playspeed
    bool isOffloadUsecase() { return GetUseCase() == USECASE_AUDIO_PLAYBACK_OFFLOAD; }
    bool isOffloadSpeedSupported();
    bool isValidPlaybackRate(const audio_playback_rate_t *playbackRate);
    bool isValidStretchMode(audio_timestretch_stretch_mode_t stretchMode);
    bool isValidFallbackMode(audio_timestretch_fallback_mode_t fallbackMode);
    int setPlaybackRateToPal(const audio_playback_rate_t *playbackRate);
    audio_playback_rate_t mPlaybackRate = AUDIO_PLAYBACK_RATE_INITIALIZER;
    // offload playspeed]

public:
    StreamOutPrimary(audio_io_handle_t handle,
                     const std::set<audio_devices_t>& devices,
                     audio_output_flags_t flags,
                     struct audio_config *config,
                     const char *address,
                     offload_effects_start_output fnp_start_offload_effect,
                     offload_effects_stop_output fnp_stop_offload_effect,
                     visualizer_hal_start_output fnp_visualizer_start_output_,
                     visualizer_hal_stop_output fnp_visualizer_stop_output_);
    ~StreamOutPrimary();
    bool sendGaplessMetadata = true;
    bool isCompressMetadataAvail = false;
    void UpdatemCachedPosition(uint64_t val);
    virtual int Standby();
    int SetVolume(float left, float right);
    int refactorVolumeData(float left, float right);
    uint64_t GetFramesWritten(struct timespec *timestamp);
    virtual int SetParameters(struct str_parms *parms);
    int Pause();
    int Resume();
    int Drain(audio_drain_type_t type);
    int Flush();
    virtual int Start();
    int Stop();
    virtual ssize_t write(const void *buffer, size_t bytes);
    virtual int Open();
    void GetStreamHandle(audio_stream_out** stream);
    uint32_t GetBufferSize();
    uint32_t GetBufferSizeForLowLatency();
    int GetFrames(uint64_t *frames);
    static pal_stream_type_t GetPalStreamType(audio_output_flags_t halStreamFlags,
                                              uint32_t sample_rate,
                                              bool isDeviceAvail);
    static int64_t GetRenderLatency(audio_output_flags_t halStreamFlags);
    int GetOutputUseCase(audio_output_flags_t halStreamFlags);
    int StartOffloadEffects(audio_io_handle_t, pal_stream_handle_t*);
    int StopOffloadEffects(audio_io_handle_t, pal_stream_handle_t*);
    bool CheckOffloadEffectsType(pal_stream_type_t pal_stream_type);
    int StartOffloadVisualizer(audio_io_handle_t, pal_stream_handle_t*);
    int StopOffloadVisualizer(audio_io_handle_t, pal_stream_handle_t*);
    audio_output_flags_t flags_;
    int CreateMmapBuffer(int32_t min_size_frames, struct audio_mmap_buffer_info *info);
    int GetMmapPosition(struct audio_mmap_position *position);
    bool isDeviceAvailable(pal_device_id_t deviceId);
    int RouteStream(const std::set<audio_devices_t>&, bool force_device_switch = false);
    virtual void SetMode(audio_mode_t mode) = 0;
    ssize_t splitAndWriteAudioHapticsStream(const void *buffer, size_t bytes);
    bool period_size_is_plausible_for_low_latency(int period_size);
    source_metadata_t btSourceMetadata;
    std::vector<playback_track_metadata_t> tracks;
    int SetAggregateSourceMetadata(bool voice_active);
    static std::mutex sourceMetadata_mutex_;
    // [offload playback speed
    int getPlaybackRateParameters(audio_playback_rate_t *playbackRate);
    int setPlaybackRateParameters(const audio_playback_rate_t *playbackRate);
    // offload playback speed]

protected:
    struct timespec writeAt;
    int get_compressed_buffer_size();
    int get_pcm_buffer_size();
    int is_direct();
    audio_format_t halInputFormat = AUDIO_FORMAT_DEFAULT;
    audio_format_t halOutputFormat = AUDIO_FORMAT_DEFAULT;
    uint32_t convertBufSize;
    uint32_t fragments_ = 0;
    uint32_t fragment_size_ = 0;
    pal_snd_dec_t palSndDec;
    struct pal_compr_gapless_mdata gaplessMeta = {0, 0};
    uint32_t msample_rate;
    uint16_t mchannels;
    std::shared_ptr<audio_stream_out> stream_;
    uint64_t mBytesWritten; /* total bytes written, not cleared when entering standby */
    uint64_t mCachedPosition = 0; /* cache pcm offload position when entering standby */
    offload_effects_start_output fnp_offload_effect_start_output_ = nullptr;
    offload_effects_stop_output fnp_offload_effect_stop_output_ = nullptr;
    visualizer_hal_start_output fnp_visualizer_start_output_ = nullptr;
    visualizer_hal_stop_output fnp_visualizer_stop_output_ = nullptr;
    void *convertBuffer;
    // Haptics Usecase
    struct pal_stream_attributes hapticsStreamAttributes;
    pal_stream_handle_t* pal_haptics_stream_handle;
    AudioExtn AudExtn;
    struct pal_device* hapticsDevice;
    uint8_t* hapticBuffer;
    size_t hapticsBufSize;
    audio_mode_t _mode;
    int FillHalFnPtrs();
    friend class AudioDevice;
    struct timespec ts_first_write = {0, 0};
};
class StreamInPrimary : public StreamPrimary {
protected:
    struct pal_device* mPalInDevice;

private:
    pal_device_id_t* mPalInDeviceIds;
    std::set<audio_devices_t> mAndroidInDevices;
    bool mInitialized;
    // Helper method to standby streams upon read failures and sleep for buffer duration.
    ssize_t onReadError(size_t bytes, size_t ret);

public:
    StreamInPrimary(audio_io_handle_t handle,
                    const std::set<audio_devices_t> &devices,
                    audio_input_flags_t flags,
                    struct audio_config *config,
                    const char *address,
                    audio_source_t source);
    ~StreamInPrimary();
    int Standby();
    int SetGain(float gain);
    void GetStreamHandle(audio_stream_in** stream);
    virtual int Open();
    int Start();
    int Stop();
    int SetMicMute(bool mute);
    ssize_t read(const void *buffer, size_t bytes);
    uint32_t GetBufferSize();
    uint32_t GetBufferSizeForLowLatencyRecord();
    pal_stream_type_t GetPalStreamType(audio_input_flags_t halStreamFlags,
                                       uint32_t sample_rate);
    int GetInputUseCase(audio_input_flags_t halStreamFlags, audio_source_t source);
    int addRemoveAudioEffect(const struct audio_stream *stream, effect_handle_t effect, bool enable);
    virtual int SetParameters(const char *kvpairs);
    bool getParameters(struct str_parms *query, struct str_parms *reply);
    bool is_st_session;
    audio_input_flags_t flags_;
    int CreateMmapBuffer(int32_t min_size_frames, struct audio_mmap_buffer_info *info);
    int GetMmapPosition(struct audio_mmap_position *position);
    bool isDeviceAvailable(pal_device_id_t deviceId);
    int RouteStream(const std::set<audio_devices_t>& new_devices, bool force_device_switch = false);
    int64_t GetSourceLatency(audio_input_flags_t halStreamFlags);
    uint64_t GetFramesRead(int64_t *time);
    int GetPalDeviceIds(pal_device_id_t *palDevIds, int *numPalDevs);
    sink_metadata_t btSinkMetadata;
    std::vector<record_track_metadata_t> tracks;
    int SetAggregateSinkMetadata(bool voice_active);
    static std::mutex sinkMetadata_mutex_;
    pal_stream_handle_t *pal_vui_handle_;

protected:
    struct timespec readAt;
    uint32_t fragments_ = 0;
    uint32_t fragment_size_ = 0;
    int FillHalFnPtrs();
    std::shared_ptr<audio_stream_in> stream_;
    audio_source_t source_;
    friend class AudioDevice;
    uint64_t mBytesRead = 0; /* total bytes read, not cleared when entering standby */
    // for compress capture usecase
    std::unique_ptr<CompressCapture::CompressAAC> mCompressEncoder;
    bool isECEnabled = false;
    bool isNSEnabled = false;
    bool effects_applied_ = true;
    // ADD: KARAOKE
    bool is_karaoke_on = false;
    int is_karaoke_status = 0;
    bool is_cts_stream = false;
    std::mutex activeStreamMutex;
    // END KARAOKE
    // MIUI ADD: Audio_XiaoAi
    bool is_map_switch = false;
    // END Audio_XiaoAi
};
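The getPalDeviceIds() method and the mAndroidDeviceMap member in StreamPrimary suggest that HAL device IDs are translated into PAL device IDs by a simple map lookup. A minimal sketch of that idea, assuming the map is populated elsewhere and that the function returns the number of IDs written; this is not the actual implementation from AudioStream.cpp, which is not part of this excerpt:

// Hypothetical sketch only; the real getPalDeviceIds() in AudioStream.cpp is not shown here.
int StreamPrimary::getPalDeviceIds(const std::set<audio_devices_t> &halDeviceIds,
                                   pal_device_id_t *palOutDeviceIds) {
    int count = 0;
    for (auto halId : halDeviceIds) {
        auto it = mAndroidDeviceMap.find(halId);    // HAL -> PAL lookup
        if (it != mAndroidDeviceMap.end())
            palOutDeviceIds[count++] = it->second;  // emit the translated PAL id
    }
    return count;  // assumed: number of PAL ids written
}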
AudioStream.cpp
int StreamOutPrimary::Standby() {
    int ret = 0;
    /* fixed ear_out aux_out stereo start */
    std::shared_ptr<AudioDevice> adevice = AudioDevice::GetInstance();
    std::set<audio_devices_t> new_devices;
    /* fixed ear_out aux_out stereo end */

    AHAL_DBG("Enter");
    if (adevice->is_earout_hphl_conflict && mIsKaraokeMuteOnCombo) {
        AHAL_DBG("routestream from combo whs to whs before standby");
        mAndroidOutDevices.erase(AUDIO_DEVICE_OUT_SPEAKER);
        new_devices = mAndroidOutDevices;
        StreamOutPrimary::RouteStream(new_devices, true);
    }

    stream_mutex_.lock();
    if (pal_stream_handle_) {
        if (streamAttributes_.type == PAL_STREAM_PCM_OFFLOAD) {
            /*
             * When SSR happens, the DSP position for PCM offload could be 0,
             * so get the written frames instead. Otherwise, get the rendered frames.
             */
            if (PAL_CARD_STATUS_DOWN(AudioDevice::sndCardState)) {
                struct timespec ts;
                // Release the stream lock, as GetFramesWritten will lock/unlock stream_mutex_.
                stream_mutex_.unlock();
                mCachedPosition = GetFramesWritten(&ts);
                stream_mutex_.lock();
                AHAL_DBG("card is offline, return written frames %lld", (long long)mCachedPosition);
            } else {
                GetFrames(&mCachedPosition);
            }
        }
        ret = pal_stream_stop(pal_stream_handle_);
        if (ret) {
            AHAL_ERR("failed to stop stream.");
            ret = -EINVAL;
        }
        if (usecase_ == USECASE_AUDIO_PLAYBACK_WITH_HAPTICS && pal_haptics_stream_handle) {
            ret = pal_stream_stop(pal_haptics_stream_handle);
            if (ret) {
                AHAL_ERR("failed to stop haptics stream.");
            }
        }
    }
    // (Remaining teardown code is omitted from this excerpt.)
    if (ret)
        ret = -EINVAL;

exit:
    stream_mutex_.unlock();
    AHAL_DBG("Exit ret: %d", ret);
    return ret;
}
StreamOutPrimary::StreamOutPrimary(
        audio_io_handle_t handle,
        const std::set<audio_devices_t> &devices,
        audio_output_flags_t flags,
        struct audio_config *config,
        const char *address __unused,
        offload_effects_start_output start_offload_effect,
        offload_effects_stop_output stop_offload_effect,
        visualizer_hal_start_output visualizer_start_output,
        visualizer_hal_stop_output visualizer_stop_output):
    StreamPrimary(handle, devices, config),
    mAndroidOutDevices(devices),
    flags_(flags),
    btSourceMetadata{0, nullptr}
{
    stream_ = std::shared_ptr<audio_stream_out>(new audio_stream_out());
    std::shared_ptr<AudioDevice> adevice = AudioDevice::GetInstance();
    mInitialized = false;
    /* fixed ear_out aux_out stereo start */
    mIsKaraokeMuteOnCombo = false;
    bool isCombo = false;
    audio_devices_t OutDevices = AudioExtn::get_device_types(mAndroidOutDevices);
    /* fixed ear_out aux_out stereo end */
    pal_stream_handle_ = nullptr;
    pal_haptics_stream_handle = nullptr;
    mPalOutDeviceIds = nullptr;
    mPalOutDevice = nullptr;
    convertBuffer = NULL;
    hapticsDevice = NULL;
    hapticBuffer = NULL;
    hapticsBufSize = 0;
    writeAt.tv_sec = 0;
    writeAt.tv_nsec = 0;
    mBytesWritten = 0;
    int noPalDevices = 0;
    int ret = 0;

    /* Initialize the gaplessMeta value with 0 */
    memset(&gaplessMeta, 0, sizeof(struct pal_compr_gapless_mdata));

    if (!stream_) {
        AHAL_ERR("No memory allocated for stream_");
        throw std::runtime_error("No memory allocated for stream_");
    }
    AHAL_DBG("enter: handle (%x) format(%#x) sample_rate(%d) channel_mask(%#x) devices(%zu) flags(%#x)\
        address(%s)", handle, config->format, config->sample_rate, config->channel_mask,
        mAndroidOutDevices.size(), flags, address);

    // (Remaining initialization is omitted from this excerpt.)
    return;
}
StreamInPrimary::StreamInPrimary(audio_io_handle_t handle,
        const std::set<audio_devices_t> &devices,
        audio_input_flags_t flags,
        struct audio_config *config,
        const char *address __unused,
        audio_source_t source) :
    StreamPrimary(handle, devices, config),
    mAndroidInDevices(devices),
    flags_(flags),
    btSinkMetadata{0, nullptr},
    pal_vui_handle_(nullptr),
    mCompressEncoder(nullptr)
{
    stream_ = std::shared_ptr<audio_stream_in>(new audio_stream_in());
    std::shared_ptr<AudioDevice> adevice = AudioDevice::GetInstance();
    pal_stream_handle_ = NULL;
    mInitialized = false;
    int noPalDevices = 0;
    int ret = 0;
    readAt.tv_sec = 0;
    readAt.tv_nsec = 0;
    void *st_handle = nullptr;
    pal_param_payload *payload = nullptr;

    AHAL_DBG("enter: handle (%x) format(%#x) sample_rate(%d) channel_mask(%#x) devices(%zu) flags(%#x)",
             handle, config->format, config->sample_rate, config->channel_mask,
             mAndroidInDevices.size(), flags);

    if (!(stream_.get())) {
        AHAL_ERR("stream_ new allocation failed");
        goto error;
    }

    // (Remaining initialization is omitted from this excerpt.)
error:
    (void)FillHalFnPtrs();
    AHAL_DBG("Exit");
    return;
}
audio_format_t StreamPrimary::GetFormat() {
    return config_.format;
}

static audio_format_t astream_in_get_format(const struct audio_stream *stream) {
    std::shared_ptr<AudioDevice> adevice = AudioDevice::GetInstance();
    std::shared_ptr<StreamInPrimary> astream_in;

    if (adevice)
        astream_in = adevice->InGetStream((audio_stream_t*)stream);
    else
        AHAL_ERR("unable to get audio device");

    if (astream_in)
        return astream_in->GetFormat();
    else
        return AUDIO_FORMAT_DEFAULT;
}
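By analogy with astream_in_get_format() above, the output-side HAL shims presumably reach StreamOutPrimary the same way. A hedged sketch of how the framework's standby call could land in StreamOutPrimary::Standby(), assuming AudioDevice also exposes an OutGetStream() lookup; the wrapper name astream_out_standby is assumed here, not taken from the excerpt:

// Hedged sketch of the C-shim pattern for the output path; OutGetStream()
// and astream_out_standby() are assumptions mirroring the input-side code above.
static int astream_out_standby(struct audio_stream *stream) {
    std::shared_ptr<AudioDevice> adevice = AudioDevice::GetInstance();
    std::shared_ptr<StreamOutPrimary> astream_out;

    if (adevice)
        astream_out = adevice->OutGetStream((audio_stream_t*)stream);
    else
        AHAL_ERR("unable to get audio device");

    if (!astream_out) {
        AHAL_ERR("unable to get audio output stream");
        return -EINVAL;
    }
    return astream_out->Standby();
}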
Please analyze the code changes above. The modification is implemented in int StreamOutPrimary::Standby(), where the value of is_karaoke_status is obtained and used for a decision.
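One hedged way to do this: is_karaoke_status is a protected member of StreamInPrimary, so StreamOutPrimary cannot read it directly. A small public accessor on StreamInPrimary, plus a lookup through AudioDevice, would let Standby() fetch the value before deciding what to do. The names GetKaraokeStatus() and GetKaraokeInputStream() below are illustrative and do not exist in the code above, and the post does not say which condition the value gates; gating the combo-device reroute is shown purely as an example:

// Hypothetical accessor on StreamInPrimary so other modules can read the
// karaoke state without touching the protected member directly.
int StreamInPrimary::GetKaraokeStatus() {
    std::lock_guard<std::mutex> lock(activeStreamMutex);
    return is_karaoke_status;
}

// Inside StreamOutPrimary::Standby(), before the combo-device reroute.
// GetKaraokeInputStream() is an assumed AudioDevice helper returning the
// StreamInPrimary currently running the karaoke use case (nullptr if none).
int karaoke_status = 0;
if (adevice) {
    std::shared_ptr<StreamInPrimary> in_stream = adevice->GetKaraokeInputStream();
    if (in_stream)
        karaoke_status = in_stream->GetKaraokeStatus();
}
if (adevice->is_earout_hphl_conflict && mIsKaraokeMuteOnCombo && karaoke_status) {
    AHAL_DBG("routestream from combo whs to whs before standby");
    mAndroidOutDevices.erase(AUDIO_DEVICE_OUT_SPEAKER);
    new_devices = mAndroidOutDevices;
    StreamOutPrimary::RouteStream(new_devices, true);
}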