1 /*
2 * Copyright (c) 2023-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #ifndef LOG_TAG
16 #define LOG_TAG "AudioEndpointInner"
17 #endif
18
19 #include "audio_endpoint.h"
20
21 #include <atomic>
22 #include <cinttypes>
23 #include <condition_variable>
24 #include <thread>
25 #include <vector>
26 #include <mutex>
27
28 #include "securec.h"
29
30 #include "audio_errors.h"
31 #include "audio_service_log.h"
32 #include "audio_schedule.h"
33 #include "audio_qosmanager.h"
34 #include "audio_utils.h"
35 #include "bluetooth_renderer_sink.h"
36 #include "fast_audio_renderer_sink.h"
37 #include "fast_audio_capturer_source.h"
38 #include "format_converter.h"
39 #include "i_audio_capturer_source.h"
40 #include "i_stream_manager.h"
41 #include "linear_pos_time_model.h"
42 #include "policy_handler.h"
43 #include "media_monitor_manager.h"
44 #include "audio_log_utils.h"
45 #ifdef DAUDIO_ENABLE
46 #include "remote_fast_audio_renderer_sink.h"
47 #include "remote_fast_audio_capturer_source.h"
48 #endif
49
50 namespace OHOS {
51 namespace AudioStandard {
52 namespace {
53 static constexpr int32_t VOLUME_SHIFT_NUMBER = 16; // 1 << 16 = 65536, max volume
54 static constexpr int64_t RECORD_DELAY_TIME = 4000000; // 4ms
55 static constexpr int64_t RECORD_VOIP_DELAY_TIME = 20000000; // 20ms
56 static constexpr int64_t MAX_SPAN_DURATION_IN_NANO = 100000000; // 100ms
57 static constexpr int64_t PLAYBACK_DELAY_STOP_HDI_TIME = 10000000000; // 10s
58 static constexpr int64_t RECORDER_DELAY_STOP_HDI_TIME = 200000000; // 200ms
59 static constexpr int64_t WAIT_CLIENT_STANDBY_TIME_NS = 1000000000; // 1s
60 static constexpr int64_t DELAY_STOP_HDI_TIME_FOR_ZERO_VOLUME = 4000000000; // 4s
61 static constexpr int32_t SLEEP_TIME_IN_DEFAULT = 400; // 400ms
62 static constexpr int64_t DELTA_TO_REAL_READ_START_TIME = 0; // 0ms
63 const uint16_t GET_MAX_AMPLITUDE_FRAMES_THRESHOLD = 40;
64 static const int32_t HALF_FACTOR = 2;
65 }
66
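// Map AudioSampleFormat to the corresponding HdiAdapterFormat; unsupported formats map to INVALID_WIDTH.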
67 static enum HdiAdapterFormat ConvertToHdiAdapterFormat(AudioSampleFormat format)
68 {
69 enum HdiAdapterFormat adapterFormat;
70 switch (format) {
71 case AudioSampleFormat::SAMPLE_U8:
72 adapterFormat = HdiAdapterFormat::SAMPLE_U8;
73 break;
74 case AudioSampleFormat::SAMPLE_S16LE:
75 adapterFormat = HdiAdapterFormat::SAMPLE_S16;
76 break;
77 case AudioSampleFormat::SAMPLE_S24LE:
78 adapterFormat = HdiAdapterFormat::SAMPLE_S24;
79 break;
80 case AudioSampleFormat::SAMPLE_S32LE:
81 adapterFormat = HdiAdapterFormat::SAMPLE_S32;
82 break;
83 default:
84 adapterFormat = HdiAdapterFormat::INVALID_WIDTH;
85 break;
86 }
87
88 return adapterFormat;
89 }
90
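// Callback receiver for the inner-capture dup stream; status updates and write requests are only traced and logged.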
91 class MockCallbacks : public IStatusCallback, public IWriteCallback {
92 public:
93 explicit MockCallbacks(uint32_t streamIndex);
94 virtual ~MockCallbacks() = default;
95 void OnStatusUpdate(IOperation operation) override;
96 int32_t OnWriteData(size_t length) override;
97 private:
98 uint32_t streamIndex_ = 0;
99 };
100
101 class AudioEndpointInner : public AudioEndpoint {
102 public:
103 AudioEndpointInner(EndpointType type, uint64_t id, const AudioProcessConfig &clientConfig);
104 ~AudioEndpointInner();
105
106 bool Config(const DeviceInfo &deviceInfo) override;
107 bool StartDevice();
108 void HandleStartDeviceFailed();
109 bool StopDevice();
110
111 // Called when an audio process starts.
112 int32_t OnStart(IAudioProcessStream *processStream) override;
113 // Called when an audio process pauses.
114 int32_t OnPause(IAudioProcessStream *processStream) override;
115 // Called when an audio process requests a handle-info update.
116 int32_t OnUpdateHandleInfo(IAudioProcessStream *processStream) override;
117
118 /**
119 * Call LinkProcessStream when a process is first created or when another process is linked to this endpoint.
120 * Cases:
121 * case1: endpointStatus_ = UNLINKED, link a non-running process; UNLINKED-->IDEL, then handled as IDEL
122 * case2: endpointStatus_ = UNLINKED, link a running process; UNLINKED-->IDEL, then handled as IDEL
123 * case3: endpointStatus_ = IDEL, link a non-running process; IDEL-->IDEL
124 * case4: endpointStatus_ = IDEL, link a running process; IDEL-->STARTING-->RUNNING
125 * case5: endpointStatus_ = RUNNING; RUNNING-->RUNNING
126 */
127 int32_t LinkProcessStream(IAudioProcessStream *processStream) override;
128 void LinkProcessStreamExt(IAudioProcessStream *processStream,
129 const std::shared_ptr<OHAudioBuffer>& processBuffer);
130
131 int32_t UnlinkProcessStream(IAudioProcessStream *processStream) override;
132
133 int32_t GetPreferBufferInfo(uint32_t &totalSizeInframe, uint32_t &spanSizeInframe) override;
134
135 void Dump(std::string &dumpString) override;
136
137 std::string GetEndpointName() override;
138 EndpointType GetEndpointType() override
139 {
140 return endpointType_;
141 }
142 int32_t SetVolume(AudioStreamType streamType, float volume) override;
143
144 int32_t ResolveBuffer(std::shared_ptr<OHAudioBuffer> &buffer) override;
145
146 std::shared_ptr<OHAudioBuffer> GetBuffer() override
147 {
148 return dstAudioBuffer_;
149 }
150
151 // for inner-cap
152 bool ShouldInnerCap() override;
153 int32_t EnableFastInnerCap() override;
154 int32_t DisableFastInnerCap() override;
155
156 int32_t InitDupStream();
157
158 EndpointStatus GetStatus() override;
159
160 void Release() override;
161
162 DeviceInfo &GetDeviceInfo() override
163 {
164 return deviceInfo_;
165 }
166
167 DeviceRole GetDeviceRole() override
168 {
169 return deviceInfo_.deviceRole;
170 }
171
172 float GetMaxAmplitude() override;
173 uint32_t GetLinkedProcessCount() override;
174
175 AudioMode GetAudioMode() const final;
176
177 void SetHibernateEndpointRelease(const bool &isHibernate) override;
178 private:
179 AudioProcessConfig GetInnerCapConfig();
180 void StartThread(const IAudioSinkAttr &attr);
181 void MixToDupStream(const std::vector<AudioStreamData> &srcDataList);
182 bool ConfigInputPoint(const DeviceInfo &deviceInfo);
183 int32_t PrepareDeviceBuffer(const DeviceInfo &deviceInfo);
184 int32_t GetAdapterBufferInfo(const DeviceInfo &deviceInfo);
185 void ReSyncPosition();
186 void RecordReSyncPosition();
187 void InitAudiobuffer(bool resetReadWritePos);
188 void ProcessData(const std::vector<AudioStreamData> &srcDataList, const AudioStreamData &dstData);
189 void ProcessSingleData(const AudioStreamData &srcData, const AudioStreamData &dstData, bool applyVol);
190 void HandleZeroVolumeCheckEvent();
191 void HandleRendererDataParams(const AudioStreamData &srcData, const AudioStreamData &dstData, bool applyVol = true);
192 int32_t HandleCapturerDataParams(const BufferDesc &writeBuf, const BufferDesc &readBuf,
193 const BufferDesc &convertedBuffer);
194 void ZeroVolumeCheck(const int32_t vol);
195 int64_t GetPredictNextReadTime(uint64_t posInFrame);
196 int64_t GetPredictNextWriteTime(uint64_t posInFrame);
197 bool PrepareNextLoop(uint64_t curWritePos, int64_t &wakeUpTime);
198 bool RecordPrepareNextLoop(uint64_t curReadPos, int64_t &wakeUpTime);
199
200 /**
201 * @brief Get the current read position in frames and the time when it was read.
202 *
203 * @param frames the read position in frames
204 * @param nanoTime the time in nanoseconds when the device sink starts reading the buffer
205 */
206 bool GetDeviceHandleInfo(uint64_t &frames, int64_t &nanoTime);
207 int32_t GetProcLastWriteDoneInfo(const std::shared_ptr<OHAudioBuffer> processBuffer, uint64_t curWriteFrame,
208 uint64_t &proHandleFrame, int64_t &proHandleTime);
209
210 void CheckStandBy();
211 bool IsAnyProcessRunning();
212 bool CheckAllBufferReady(int64_t checkTime, uint64_t curWritePos);
213 void WaitAllProcessReady(uint64_t curWritePos);
214 bool ProcessToEndpointDataHandle(uint64_t curWritePos);
215 void ProcessToDupStream(std::vector<AudioStreamData> &audioDataList, AudioStreamData &dstStreamData);
216 void GetAllReadyProcessData(std::vector<AudioStreamData> &audioDataList);
217
218 std::string GetStatusStr(EndpointStatus status);
219
220 int32_t WriteToSpecialProcBuf(const std::shared_ptr<OHAudioBuffer> &procBuf, const BufferDesc &readBuf,
221 const BufferDesc &convertedBuffer, bool muteFlag);
222 void WriteToProcessBuffers(const BufferDesc &readBuf);
223 int32_t ReadFromEndpoint(uint64_t curReadPos);
224 bool KeepWorkloopRunning();
225
226 void EndpointWorkLoopFuc();
227 void RecordEndpointWorkLoopFuc();
228
229 // Calling GetMmapHandlePosition over IPC may block for more than one cycle, so call it in another thread.
230 void AsyncGetPosTime();
231 bool DelayStopDevice();
232
233 IMmapAudioRendererSink *GetFastSink(const DeviceInfo &deviceInfo, EndpointType type);
234 IMmapAudioCapturerSource *GetFastSource(const std::string &networkId, EndpointType type, IAudioSourceAttr &attr);
235
236 void InitLatencyMeasurement();
237 void DeinitLatencyMeasurement();
238 void CheckPlaySignal(uint8_t *buffer, size_t bufferSize);
239 void CheckRecordSignal(uint8_t *buffer, size_t bufferSize);
240 void DfxOperation(BufferDesc &buffer, AudioSampleFormat format, AudioChannel channel) const;
241
242 void CheckUpdateState(char *frame, uint64_t replyBytes);
243
244 void ProcessUpdateAppsUidForPlayback();
245 void ProcessUpdateAppsUidForRecord();
246
247 void WriterRenderStreamStandbySysEvent(uint32_t sessionId, int32_t standby);
248 private:
249 static constexpr int64_t ONE_MILLISECOND_DURATION = 1000000; // 1ms
250 static constexpr int64_t THREE_MILLISECOND_DURATION = 3000000; // 3ms
251 static constexpr int64_t WRITE_TO_HDI_AHEAD_TIME = -1000000; // ahead 1ms
252 static constexpr int32_t UPDATE_THREAD_TIMEOUT = 1000; // 1000ms
253 enum ThreadStatus : uint32_t {
254 WAITTING = 0,
255 SLEEPING,
256 INRUNNING
257 };
258 enum FastSinkType {
259 NONE_FAST_SINK = 0,
260 FAST_SINK_TYPE_NORMAL,
261 FAST_SINK_TYPE_REMOTE,
262 FAST_SINK_TYPE_VOIP,
263 FAST_SINK_TYPE_BLUETOOTH
264 };
265 enum FastSourceType {
266 NONE_FAST_SOURCE = 0,
267 FAST_SOURCE_TYPE_NORMAL,
268 FAST_SOURCE_TYPE_REMOTE,
269 FAST_SOURCE_TYPE_VOIP
270 };
271 // SamplingRate EncodingType SampleFormat Channel
272 DeviceInfo deviceInfo_;
273 AudioStreamInfo dstStreamInfo_;
274 EndpointType endpointType_;
275 int32_t id_ = 0;
276 std::mutex listLock_;
277 std::vector<IAudioProcessStream *> processList_;
278 std::vector<std::shared_ptr<OHAudioBuffer>> processBufferList_;
279 AudioProcessConfig clientConfig_;
280
281 std::atomic<bool> isInited_ = false;
282
283 // for inner-cap
284 std::mutex dupMutex_;
285 std::atomic<bool> isInnerCapEnabled_ = false;
286 uint32_t dupStreamIndex_ = 0;
287 std::shared_ptr<MockCallbacks> dupStreamCallback_ = nullptr;
288 std::shared_ptr<IRendererStream> dupStream_ = nullptr;
289 size_t dupBufferSize_ = 0;
290 std::unique_ptr<uint8_t []> dupBuffer_ = nullptr;
291 FILE *dumpC2SDup_ = nullptr; // client to server inner-cap dump file
292 std::string dupDumpName_ = "";
293
294 IMmapAudioRendererSink *fastSink_ = nullptr;
295 IMmapAudioCapturerSource *fastSource_ = nullptr;
296 FastSinkType fastSinkType_ = NONE_FAST_SINK;
297 FastSourceType fastSourceType_ = NONE_FAST_SOURCE;
298
299 LinearPosTimeModel readTimeModel_;
300 LinearPosTimeModel writeTimeModel_;
301
302 int64_t spanDuration_ = 0; // nano second
303 int64_t serverAheadReadTime_ = 0;
304 int dstBufferFd_ = -1; // -1: invalid fd.
305 uint32_t dstTotalSizeInframe_ = 0;
306 uint32_t dstSpanSizeInframe_ = 0;
307 uint32_t dstByteSizePerFrame_ = 0;
308 std::shared_ptr<OHAudioBuffer> dstAudioBuffer_ = nullptr;
309
310 std::atomic<EndpointStatus> endpointStatus_ = INVALID;
311 bool isStarted_ = false;
312 int64_t delayStopTime_ = INT64_MAX;
313 int64_t cacheDelayStopTime_ = INT64_MAX;
314 bool hibernateEndpointRelease_ = false;
315 int64_t delayStopTimeForZeroVolume_ = INT64_MAX;
316
317 std::atomic<ThreadStatus> threadStatus_ = WAITTING;
318 std::thread endpointWorkThread_;
319 std::mutex loopThreadLock_;
320 std::condition_variable workThreadCV_;
321 int64_t lastHandleProcessTime_ = 0;
322
323 std::thread updatePosTimeThread_;
324 std::mutex updateThreadLock_;
325 std::condition_variable updateThreadCV_;
326 std::atomic<bool> stopUpdateThread_ = false;
327
328 std::atomic<uint64_t> posInFrame_ = 0;
329 std::atomic<int64_t> timeInNano_ = 0;
330
331 bool isDeviceRunningInIdel_ = true; // will call start sink when linked.
332 bool needReSyncPosition_ = true;
333 FILE *dumpDcp_ = nullptr;
334 FILE *dumpHdi_ = nullptr;
335 std::string dumpDcpName_ = "";
336 std::string dumpHdiName_ = "";
337 mutable int64_t volumeDataCount_ = 0;
338 std::string logUtilsTag_ = "";
339
340 // for get amplitude
341 float maxAmplitude_ = 0;
342 int64_t lastGetMaxAmplitudeTime_ = 0;
343 int64_t last10FrameStartTime_ = 0;
344 bool startUpdate_ = false;
345 int renderFrameNum_ = 0;
346
347 bool signalDetected_ = false;
348 bool latencyMeasEnabled_ = false;
349 size_t detectedTime_ = 0;
350 std::shared_ptr<SignalDetectAgent> signalDetectAgent_ = nullptr;
351 bool zeroVolumeStopDevice_ = false;
352 bool isVolumeAlreadyZero_ = false;
353 };
354
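// Endpoint key layout: <networkId>_<endpointId>_<deviceRole>_<endpointFlag>; only A2DP devices get a per-device endpointId.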
355 std::string AudioEndpoint::GenerateEndpointKey(DeviceInfo &deviceInfo, int32_t endpointFlag)
356 {
357 // All primary sinks share one endpoint
358 int32_t endpointId = 0;
359 if (deviceInfo.deviceType == DEVICE_TYPE_BLUETOOTH_A2DP) {
360 endpointId = deviceInfo.deviceId;
361 }
362 return deviceInfo.networkId + "_" + std::to_string(endpointId) + "_" +
363 std::to_string(deviceInfo.deviceRole) + "_" + std::to_string(endpointFlag);
364 }
365
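// Local independent output endpoints use AudioEndpointSeparate; all other combinations use AudioEndpointInner.
// The new endpoint is configured for the device before being returned; a failed Config() yields nullptr.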
366 std::shared_ptr<AudioEndpoint> AudioEndpoint::CreateEndpoint(EndpointType type, uint64_t id,
367 const AudioProcessConfig &clientConfig, const DeviceInfo &deviceInfo)
368 {
369 std::shared_ptr<AudioEndpoint> audioEndpoint = nullptr;
370 if (type == EndpointType::TYPE_INDEPENDENT && deviceInfo.deviceRole != INPUT_DEVICE &&
371 deviceInfo.networkId == LOCAL_NETWORK_ID) {
372 audioEndpoint = std::make_shared<AudioEndpointSeparate>(type, id, clientConfig.streamType);
373 } else {
374 audioEndpoint = std::make_shared<AudioEndpointInner>(type, id, clientConfig);
375 }
376 CHECK_AND_RETURN_RET_LOG(audioEndpoint != nullptr, nullptr, "Create AudioEndpoint failed.");
377
378 if (!audioEndpoint->Config(deviceInfo)) {
379 AUDIO_ERR_LOG("Config AudioEndpoint failed.");
380 audioEndpoint = nullptr;
381 }
382 return audioEndpoint;
383 }
384
385 AudioEndpointInner::AudioEndpointInner(EndpointType type, uint64_t id,
386 const AudioProcessConfig &clientConfig) : endpointType_(type), id_(id), clientConfig_(clientConfig)
387 {
388 AUDIO_INFO_LOG("AudioEndpoint type:%{public}d", endpointType_);
389 if (clientConfig_.audioMode == AUDIO_MODE_PLAYBACK) {
390 logUtilsTag_ = "AudioEndpoint::Play";
391 } else {
392 logUtilsTag_ = "AudioEndpoint::Rec";
393 }
394 }
395
396 std::string AudioEndpointInner::GetEndpointName()
397 {
398 return GenerateEndpointKey(deviceInfo_, id_);
399 }
400
401 int32_t AudioEndpointInner::SetVolume(AudioStreamType streamType, float volume)
402 {
403 // No need to set HDI volume in shared stream mode.
404 return SUCCESS;
405 }
406
407 int32_t AudioEndpointInner::ResolveBuffer(std::shared_ptr<OHAudioBuffer> &buffer)
408 {
409 return SUCCESS;
410 }
411
412 MockCallbacks::MockCallbacks(uint32_t streamIndex) : streamIndex_(streamIndex)
413 {
414 AUDIO_INFO_LOG("DupStream %{public}u create MockCallbacks", streamIndex_);
415 }
416
417 void MockCallbacks::OnStatusUpdate(IOperation operation)
418 {
419 AUDIO_INFO_LOG("DupStream %{public}u recv operation: %{public}d", streamIndex_, operation);
420 }
421
422 int32_t MockCallbacks::OnWriteData(size_t length)
423 {
424 Trace trace("DupStream::OnWriteData length " + std::to_string(length));
425 return SUCCESS;
426 }
427
428 bool AudioEndpointInner::ShouldInnerCap()
429 {
430 bool shouldBecapped = false;
431 std::lock_guard<std::mutex> lock(listLock_);
432 for (uint32_t i = 0; i < processList_.size(); i++) {
433 if (processList_[i]->GetInnerCapState()) {
434 shouldBecapped = true;
435 break;
436 }
437 }
438 AUDIO_INFO_LOG("find endpoint inner-cap state: %{public}s", shouldBecapped ? "true" : "false");
439 return shouldBecapped;
440 }
441
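// Build the playback config for the inner-capture dup stream: server pid/uid, the endpoint stream info and STREAM_MUSIC.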
442 AudioProcessConfig AudioEndpointInner::GetInnerCapConfig()
443 {
444 AudioProcessConfig processConfig;
445
446 processConfig.appInfo.appPid = static_cast<int32_t>(getpid());
447 processConfig.appInfo.appUid = static_cast<int32_t>(getuid());
448
449 processConfig.streamInfo = dstStreamInfo_;
450
451 processConfig.audioMode = AUDIO_MODE_PLAYBACK;
452
453 // processConfig.rendererInfo ?
454
455 processConfig.streamType = STREAM_MUSIC;
456
457 return processConfig;
458 }
459
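// Create the dup renderer for inner-capture, register its callbacks, open the dump file and allocate a
// one-span mix buffer. The dup stream is started right away if the endpoint is already rendering.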
460 int32_t AudioEndpointInner::InitDupStream()
461 {
462 std::lock_guard<std::mutex> lock(dupMutex_);
463 CHECK_AND_RETURN_RET_LOG(isInnerCapEnabled_ == false, SUCCESS, "already enabled");
464
465 AudioProcessConfig processConfig = GetInnerCapConfig();
466 int32_t ret = IStreamManager::GetDupPlaybackManager().CreateRender(processConfig, dupStream_);
467 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS && dupStream_ != nullptr, ERR_OPERATION_FAILED, "Failed: %{public}d", ret);
468 dupStreamIndex_ = dupStream_->GetStreamIndex();
469
470 dupStreamCallback_ = std::make_shared<MockCallbacks>(dupStreamIndex_);
471 dupStream_->RegisterStatusCallback(dupStreamCallback_);
472 dupStream_->RegisterWriteCallback(dupStreamCallback_);
473
474 // eg: /data/local/tmp/LocalDevice6_0_c2s_dup_48000_2_1.pcm
475 AudioStreamInfo tempInfo = processConfig.streamInfo;
476 dupDumpName_ = GetEndpointName() + "_c2s_dup_" + std::to_string(tempInfo.samplingRate) + "_" +
477 std::to_string(tempInfo.channels) + "_" + std::to_string(tempInfo.format) + ".pcm";
478 DumpFileUtil::OpenDumpFile(DUMP_SERVER_PARA, dupDumpName_, &dumpC2SDup_);
479
480 AUDIO_INFO_LOG("Dup Renderer %{public}d with Endpoint status: %{public}s", dupStreamIndex_,
481 GetStatusStr(endpointStatus_).c_str());
482 CHECK_AND_RETURN_RET_LOG(endpointStatus_ != INVALID, ERR_ILLEGAL_STATE, "Endpoint is invalid");
483
484 // buffer init
485 dupBufferSize_ = dstSpanSizeInframe_ * dstByteSizePerFrame_; // size of one span in bytes
486 CHECK_AND_RETURN_RET_LOG(dupBufferSize_ < dstAudioBuffer_->GetDataSize(), ERR_OPERATION_FAILED, "Init buffer fail");
487 dupBuffer_ = std::make_unique<uint8_t []>(dupBufferSize_);
488 ret = memset_s(reinterpret_cast<void *>(dupBuffer_.get()), dupBufferSize_, 0, dupBufferSize_);
489 if (ret != EOK) {
490 AUDIO_WARNING_LOG("memset buffer fail, ret %{public}d", ret);
491 }
492
493 if (endpointStatus_ == RUNNING || (endpointStatus_ == IDEL && isDeviceRunningInIdel_)) {
494 int32_t audioId = deviceInfo_.deviceId;
495 AUDIO_INFO_LOG("Endpoint %{public}d is already running, let's start the dup stream", audioId);
496 dupStream_->Start();
497 }
498 // mark enabled last
499 isInnerCapEnabled_ = true;
500 return SUCCESS;
501 }
502
503 int32_t AudioEndpointInner::EnableFastInnerCap()
504 {
505 if (isInnerCapEnabled_) {
506 AUDIO_INFO_LOG("InnerCap is already enabled");
507 return SUCCESS;
508 }
509
510 CHECK_AND_RETURN_RET_LOG(deviceInfo_.deviceRole == OUTPUT_DEVICE, ERR_INVALID_OPERATION, "Not output device!");
511 int32_t ret = InitDupStream();
512 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_OPERATION_FAILED, "Init dup stream failed");
513 return SUCCESS;
514 }
515
516 int32_t AudioEndpointInner::DisableFastInnerCap()
517 {
518 if (deviceInfo_.deviceRole != OUTPUT_DEVICE) {
519 return SUCCESS;
520 }
521 std::lock_guard<std::mutex> lock(dupMutex_);
522 if (!isInnerCapEnabled_) {
523 AUDIO_INFO_LOG("InnerCap is already disabled.");
524 return SUCCESS;
525 }
526 isInnerCapEnabled_ = false;
527 AUDIO_INFO_LOG("Disable dup renderer %{public}d with Endpoint status: %{public}s", dupStreamIndex_,
528 GetStatusStr(endpointStatus_).c_str());
529
530 IStreamManager::GetDupPlaybackManager().ReleaseRender(dupStreamIndex_);
531 dupStream_ = nullptr;
532
533 return SUCCESS;
534 }
535
536 AudioEndpoint::EndpointStatus AudioEndpointInner::GetStatus()
537 {
538 AUDIO_INFO_LOG("AudioEndpoint get status:%{public}s", GetStatusStr(endpointStatus_).c_str());
539 return endpointStatus_.load();
540 }
541
542 void AudioEndpointInner::Release()
543 {
544 // Wait for thread end and then clear other data to avoid using any cleared data in thread.
545 AUDIO_INFO_LOG("Release enter.");
546 if (!isInited_.load()) {
547 AUDIO_WARNING_LOG("already released");
548 return;
549 }
550
551 isInited_.store(false);
552 workThreadCV_.notify_all();
553 if (endpointWorkThread_.joinable()) {
554 AUDIO_DEBUG_LOG("AudioEndpoint join work thread start");
555 endpointWorkThread_.join();
556 AUDIO_DEBUG_LOG("AudioEndpoint join work thread end");
557 }
558
559 stopUpdateThread_.store(true);
560 updateThreadCV_.notify_all();
561 if (updatePosTimeThread_.joinable()) {
562 AUDIO_DEBUG_LOG("AudioEndpoint join update thread start");
563 updatePosTimeThread_.join();
564 AUDIO_DEBUG_LOG("AudioEndpoint join update thread end");
565 }
566
567 if (fastSink_ != nullptr) {
568 fastSink_->DeInit();
569 fastSink_ = nullptr;
570 }
571
572 if (fastSource_ != nullptr) {
573 fastSource_->DeInit();
574 fastSource_ = nullptr;
575 }
576
577 endpointStatus_.store(INVALID);
578
579 if (dstAudioBuffer_ != nullptr) {
580 AUDIO_INFO_LOG("Set device buffer null");
581 dstAudioBuffer_ = nullptr;
582 }
583
584 if (deviceInfo_.deviceRole == OUTPUT_DEVICE && isInnerCapEnabled_) {
585 DisableFastInnerCap();
586 }
587
588 DumpFileUtil::CloseDumpFile(&dumpDcp_);
589 DumpFileUtil::CloseDumpFile(&dumpHdi_);
590 }
591
592 AudioEndpointInner::~AudioEndpointInner()
593 {
594 if (isInited_.load()) {
595 AudioEndpointInner::Release();
596 }
597 AUDIO_INFO_LOG("~AudioEndpoint()");
598 }
599
600 void AudioEndpointInner::Dump(std::string &dumpString)
601 {
602 // dump endpoint stream info
603 dumpString += "Endpoint stream info:\n";
604 AppendFormat(dumpString, " - samplingRate: %d\n", dstStreamInfo_.samplingRate);
605 AppendFormat(dumpString, " - channels: %u\n", dstStreamInfo_.channels);
606 AppendFormat(dumpString, " - format: %u\n", dstStreamInfo_.format);
607 AppendFormat(dumpString, " - sink type: %d\n", fastSinkType_);
608 AppendFormat(dumpString, " - source type: %d\n", fastSourceType_);
609
610 // dump status info
611 AppendFormat(dumpString, " - Current endpoint status: %s\n", GetStatusStr(endpointStatus_).c_str());
612 if (dstAudioBuffer_ != nullptr) {
613 AppendFormat(dumpString, " - Current hdi read position: %" PRIu64 "\n", dstAudioBuffer_->GetCurReadFrame());
614 AppendFormat(dumpString, " - Current hdi write position: %" PRIu64 "\n", dstAudioBuffer_->GetCurWriteFrame());
615 }
616
617 // dump linked process info
618 std::lock_guard<std::mutex> lock(listLock_);
619 AppendFormat(dumpString, " - linked processes: %zu\n", processBufferList_.size());
620 for (auto item : processBufferList_) {
621 AppendFormat(dumpString, " - process read position: %" PRIu64 "\n", item->GetCurReadFrame());
622 AppendFormat(dumpString, " - process write position: %" PRIu64 "\n", item->GetCurWriteFrame());
623 }
624 dumpString += "\n";
625 }
626
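// Capture-side configuration: select and init the fast source with mmap attributes, map the device buffer,
// then spawn the record work loop and the async position-update thread.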
627 bool AudioEndpointInner::ConfigInputPoint(const DeviceInfo &deviceInfo)
628 {
629 AUDIO_INFO_LOG("ConfigInputPoint enter.");
630 IAudioSourceAttr attr = {};
631 attr.sampleRate = dstStreamInfo_.samplingRate;
632 attr.channel = dstStreamInfo_.channels;
633 attr.format = ConvertToHdiAdapterFormat(dstStreamInfo_.format);
634 attr.deviceNetworkId = deviceInfo.networkId.c_str();
635 attr.deviceType = deviceInfo.deviceType;
636 attr.audioStreamFlag = endpointType_ == TYPE_VOIP_MMAP ? AUDIO_FLAG_VOIP_FAST : AUDIO_FLAG_MMAP;
637
638 fastSource_ = GetFastSource(deviceInfo.networkId, endpointType_, attr);
639
640 if (deviceInfo.networkId == LOCAL_NETWORK_ID) {
641 attr.adapterName = "primary";
642 fastSource_ = FastAudioCapturerSource::GetInstance();
643 } else {
644 #ifdef DAUDIO_ENABLE
645 attr.adapterName = "remote";
646 fastSource_ = RemoteFastAudioCapturerSource::GetInstance(deviceInfo.networkId);
647 #endif
648 }
649 CHECK_AND_RETURN_RET_LOG(fastSource_ != nullptr, false, "ConfigInputPoint GetInstance failed.");
650
651 int32_t err = fastSource_->Init(attr);
652 if (err != SUCCESS || !fastSource_->IsInited()) {
653 AUDIO_ERR_LOG("init fast source fail, err %{public}d.", err);
654 fastSource_ = nullptr;
655 return false;
656 }
657 if (PrepareDeviceBuffer(deviceInfo) != SUCCESS) {
658 fastSource_->DeInit();
659 fastSource_ = nullptr;
660 return false;
661 }
662
663 bool ret = writeTimeModel_.ConfigSampleRate(dstStreamInfo_.samplingRate);
664 CHECK_AND_RETURN_RET_LOG(ret != false, false, "Config LinearPosTimeModel failed.");
665
666 endpointStatus_ = UNLINKED;
667 isInited_.store(true);
668 endpointWorkThread_ = std::thread([this] { this->RecordEndpointWorkLoopFuc(); });
669 pthread_setname_np(endpointWorkThread_.native_handle(), "OS_AudioEpLoop");
670
671 updatePosTimeThread_ = std::thread([this] { this->AsyncGetPosTime(); });
672 pthread_setname_np(updatePosTimeThread_.native_handle(), "OS_AudioEpUpdate");
673
674 dumpHdiName_ = "endpoint_hdi_audio_" + std::to_string(attr.sampleRate) + "_"
675 + std::to_string(attr.channel) + "_" + std::to_string(attr.format) + ".pcm";
676 DumpFileUtil::OpenDumpFile(DUMP_SERVER_PARA, DUMP_ENDPOINT_HDI_FILENAME, &dumpHdi_);
677 return true;
678 }
679
680 IMmapAudioCapturerSource *AudioEndpointInner::GetFastSource(const std::string &networkId, EndpointType type,
681 IAudioSourceAttr &attr)
682 {
683 AUDIO_INFO_LOG("Network id %{public}s, endpoint type %{public}d", networkId.c_str(), type);
684 if (networkId != LOCAL_NETWORK_ID) {
685 attr.adapterName = "remote";
686 #ifdef DAUDIO_ENABLE
687 fastSourceType_ = type == AudioEndpoint::TYPE_MMAP ? FAST_SOURCE_TYPE_REMOTE : FAST_SOURCE_TYPE_VOIP;
688 // Distributed only requires a singleton because there won't be both voip and regular fast simultaneously
689 return RemoteFastAudioCapturerSource::GetInstance(networkId);
690 #endif
691 }
692
693 attr.adapterName = "primary";
694 if (type == AudioEndpoint::TYPE_MMAP) {
695 fastSourceType_ = FAST_SOURCE_TYPE_NORMAL;
696 return FastAudioCapturerSource::GetInstance();
697 } else if (type == AudioEndpoint::TYPE_VOIP_MMAP) {
698 fastSourceType_ = FAST_SOURCE_TYPE_VOIP;
699 return FastAudioCapturerSource::GetVoipInstance();
700 }
701 return nullptr;
702 }
703
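// Common tail of playback configuration: mark the endpoint UNLINKED and inited, spawn the playback work loop
// and the async position-update thread, then open the HDI/DCP dump files.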
704 void AudioEndpointInner::StartThread(const IAudioSinkAttr &attr)
705 {
706 endpointStatus_ = UNLINKED;
707 isInited_.store(true);
708 endpointWorkThread_ = std::thread([this] { this->EndpointWorkLoopFuc(); });
709 pthread_setname_np(endpointWorkThread_.native_handle(), "OS_AudioEpLoop");
710
711 updatePosTimeThread_ = std::thread([this] { this->AsyncGetPosTime(); });
712 pthread_setname_np(updatePosTimeThread_.native_handle(), "OS_AudioEpUpdate");
713
714 dumpHdiName_ = "endpoint_hdi_audio_" + std::to_string(attr.sampleRate) + "_"
715 + std::to_string(attr.channel) + "_" + std::to_string(attr.format) + ".pcm";
716 dumpDcpName_ = "endpoint_dcp_audio_" + std::to_string(attr.sampleRate) + "_"
717 + std::to_string(attr.channel) + "_" + std::to_string(attr.format) + ".pcm";
718
719 DumpFileUtil::OpenDumpFile(DUMP_SERVER_PARA, DUMP_ENDPOINT_HDI_FILENAME, &dumpHdi_);
720 DumpFileUtil::OpenDumpFile(DUMP_SERVER_PARA, DUMP_ENDPOINT_DCP_FILENAME, &dumpDcp_);
721 }
722
723 bool AudioEndpointInner::Config(const DeviceInfo &deviceInfo)
724 {
725 AUDIO_INFO_LOG("Config enter, deviceRole %{public}d.", deviceInfo.deviceRole);
726 deviceInfo_ = deviceInfo;
727 bool res = deviceInfo_.audioStreamInfo.CheckParams();
728 CHECK_AND_RETURN_RET_LOG(res, false, "samplingRate or channels size is 0");
729
730 dstStreamInfo_ = {
731 *deviceInfo.audioStreamInfo.samplingRate.rbegin(),
732 deviceInfo.audioStreamInfo.encoding,
733 deviceInfo.audioStreamInfo.format,
734 *deviceInfo.audioStreamInfo.channels.rbegin()
735 };
736 dstStreamInfo_.channelLayout = deviceInfo.audioStreamInfo.channelLayout;
737
738 if (deviceInfo.deviceRole == INPUT_DEVICE) {
739 return ConfigInputPoint(deviceInfo);
740 }
741
742 fastSink_ = GetFastSink(deviceInfo, endpointType_);
743 CHECK_AND_RETURN_RET_LOG(fastSink_ != nullptr, false, "Get fastSink instance failed");
744
745 IAudioSinkAttr attr = {};
746 attr.adapterName = deviceInfo.networkId == LOCAL_NETWORK_ID ? "primary" : "remote";
747 attr.sampleRate = dstStreamInfo_.samplingRate; // 48000hz
748 attr.channel = dstStreamInfo_.channels; // STEREO = 2
749 attr.format = ConvertToHdiAdapterFormat(dstStreamInfo_.format); // SAMPLE_S16LE = 1
750 attr.deviceNetworkId = deviceInfo.networkId.c_str();
751 attr.deviceType = static_cast<int32_t>(deviceInfo.deviceType);
752 attr.audioStreamFlag = endpointType_ == TYPE_VOIP_MMAP ? AUDIO_FLAG_VOIP_FAST : AUDIO_FLAG_MMAP;
753
754 fastSink_->Init(attr);
755 if (!fastSink_->IsInited()) {
756 fastSink_ = nullptr;
757 return false;
758 }
759 if (PrepareDeviceBuffer(deviceInfo) != SUCCESS) {
760 fastSink_->DeInit();
761 fastSink_ = nullptr;
762 return false;
763 }
764
765 float initVolume = 1.0; // init volume to 1.0
766 fastSink_->SetVolume(initVolume, initVolume);
767
768 bool ret = readTimeModel_.ConfigSampleRate(dstStreamInfo_.samplingRate);
769 CHECK_AND_RETURN_RET_LOG(ret != false, false, "Config LinearPosTimeModel failed.");
770 StartThread(attr);
771 return true;
772 }
773
774 IMmapAudioRendererSink *AudioEndpointInner::GetFastSink(const DeviceInfo &deviceInfo, EndpointType type)
775 {
776 AUDIO_INFO_LOG("Network id %{public}s, endpoint type %{public}d", deviceInfo.networkId.c_str(), type);
777 if (deviceInfo.networkId != LOCAL_NETWORK_ID) {
778 #ifdef DAUDIO_ENABLE
779 fastSinkType_ = type == AudioEndpoint::TYPE_MMAP ? FAST_SINK_TYPE_REMOTE : FAST_SINK_TYPE_VOIP;
780 // Distributed only requires a singleton because there won't be both voip and regular fast simultaneously
781 return RemoteFastAudioRendererSink::GetInstance(deviceInfo.networkId);
782 #endif
783 }
784
785 if (deviceInfo.deviceType == DEVICE_TYPE_BLUETOOTH_A2DP && deviceInfo.a2dpOffloadFlag != A2DP_OFFLOAD) {
786 fastSinkType_ = FAST_SINK_TYPE_BLUETOOTH;
787 return BluetoothRendererSink::GetMmapInstance();
788 }
789
790 if (type == AudioEndpoint::TYPE_MMAP) {
791 fastSinkType_ = FAST_SINK_TYPE_NORMAL;
792 return FastAudioRendererSink::GetInstance();
793 } else if (type == AudioEndpoint::TYPE_VOIP_MMAP) {
794 fastSinkType_ = FAST_SINK_TYPE_VOIP;
795 return FastAudioRendererSink::GetVoipInstance();
796 }
797 return nullptr;
798 }
799
800 int32_t AudioEndpointInner::GetAdapterBufferInfo(const DeviceInfo &deviceInfo)
801 {
802 int32_t ret = 0;
803 AUDIO_INFO_LOG("GetAdapterBufferInfo enter, deviceRole %{public}d.", deviceInfo.deviceRole);
804 if (deviceInfo.deviceRole == INPUT_DEVICE) {
805 CHECK_AND_RETURN_RET_LOG(fastSource_ != nullptr, ERR_INVALID_HANDLE,
806 "fast source is null.");
807 ret = fastSource_->GetMmapBufferInfo(dstBufferFd_, dstTotalSizeInframe_, dstSpanSizeInframe_,
808 dstByteSizePerFrame_);
809 } else {
810 CHECK_AND_RETURN_RET_LOG(fastSink_ != nullptr, ERR_INVALID_HANDLE, "fast sink is null.");
811 ret = fastSink_->GetMmapBufferInfo(dstBufferFd_, dstTotalSizeInframe_, dstSpanSizeInframe_,
812 dstByteSizePerFrame_);
813 }
814
815 if (ret != SUCCESS || dstBufferFd_ == -1 || dstTotalSizeInframe_ == 0 || dstSpanSizeInframe_ == 0 ||
816 dstByteSizePerFrame_ == 0) {
817 AUDIO_ERR_LOG("get mmap buffer info fail, ret %{public}d, dstBufferFd %{public}d, \
818 dstTotalSizeInframe %{public}d, dstSpanSizeInframe %{public}d, dstByteSizePerFrame %{public}d.",
819 ret, dstBufferFd_, dstTotalSizeInframe_, dstSpanSizeInframe_, dstByteSizePerFrame_);
820 return ERR_ILLEGAL_STATE;
821 }
822 AUDIO_DEBUG_LOG("end, fd %{public}d.", dstBufferFd_);
823 return SUCCESS;
824 }
825
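// Wrap the sink/source mmap buffer in an OHAudioBuffer (AUDIO_SERVER_ONLY), derive spanDuration_ and
// serverAheadReadTime_ from the span size, then zero the mapped data area.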
826 int32_t AudioEndpointInner::PrepareDeviceBuffer(const DeviceInfo &deviceInfo)
827 {
828 AUDIO_INFO_LOG("enter, deviceRole %{public}d.", deviceInfo.deviceRole);
829 if (dstAudioBuffer_ != nullptr) {
830 AUDIO_INFO_LOG("endpoint buffer is prepared, fd:%{public}d", dstBufferFd_);
831 return SUCCESS;
832 }
833
834 int32_t ret = GetAdapterBufferInfo(deviceInfo);
835 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_OPERATION_FAILED,
836 "get adapter buffer Info fail, ret %{public}d.", ret);
837
838 // spanDuration_ may be slightly less than the exact duration of dstSpanSizeInframe_ due to integer division.
839 spanDuration_ = static_cast<int64_t>(dstSpanSizeInframe_) * AUDIO_NS_PER_SECOND /
840 static_cast<int64_t>(dstStreamInfo_.samplingRate);
841 int64_t temp = spanDuration_ / 5 * 3; // 3/5 spanDuration
842 serverAheadReadTime_ = temp < ONE_MILLISECOND_DURATION ? ONE_MILLISECOND_DURATION : temp; // at least 1ms ahead.
843 AUDIO_DEBUG_LOG("spanDuration %{public}" PRId64" ns, serverAheadReadTime %{public}" PRId64" ns.",
844 spanDuration_, serverAheadReadTime_);
845
846 CHECK_AND_RETURN_RET_LOG(spanDuration_ > 0 && spanDuration_ < MAX_SPAN_DURATION_IN_NANO,
847 ERR_INVALID_PARAM, "mmap span info error, spanDuration %{public}" PRIu64".", spanDuration_);
848 dstAudioBuffer_ = OHAudioBuffer::CreateFromRemote(dstTotalSizeInframe_, dstSpanSizeInframe_, dstByteSizePerFrame_,
849 AUDIO_SERVER_ONLY, dstBufferFd_, OHAudioBuffer::INVALID_BUFFER_FD);
850 CHECK_AND_RETURN_RET_LOG(dstAudioBuffer_ != nullptr && dstAudioBuffer_->GetBufferHolder() ==
851 AudioBufferHolder::AUDIO_SERVER_ONLY, ERR_ILLEGAL_STATE, "create buffer from remote fail.");
852
853 if (dstAudioBuffer_ == nullptr || dstAudioBuffer_->GetStreamStatus() == nullptr) {
854 AUDIO_ERR_LOG("The stream status is null!");
855 return ERR_INVALID_PARAM;
856 }
857
858 dstAudioBuffer_->GetStreamStatus()->store(StreamStatus::STREAM_IDEL);
859
860 // clear data buffer
861 ret = memset_s(dstAudioBuffer_->GetDataBase(), dstAudioBuffer_->GetDataSize(), 0, dstAudioBuffer_->GetDataSize());
862 if (ret != EOK) {
863 AUDIO_WARNING_LOG("memset buffer fail, ret %{public}d, fd %{public}d.", ret, dstBufferFd_);
864 }
865 InitAudiobuffer(true);
866
867 AUDIO_DEBUG_LOG("end, fd %{public}d.", dstBufferFd_);
868 return SUCCESS;
869 }
870
871 void AudioEndpointInner::InitAudiobuffer(bool resetReadWritePos)
872 {
873 CHECK_AND_RETURN_LOG((dstAudioBuffer_ != nullptr), "dst audio buffer is null.");
874 if (resetReadWritePos) {
875 dstAudioBuffer_->ResetCurReadWritePos(0, 0);
876 }
877
878 uint32_t spanCount = dstAudioBuffer_->GetSpanCount();
879 for (uint32_t i = 0; i < spanCount; i++) {
880 SpanInfo *spanInfo = dstAudioBuffer_->GetSpanInfoByIndex(i);
881 CHECK_AND_RETURN_LOG(spanInfo != nullptr, "InitAudiobuffer failed.");
882 if (deviceInfo_.deviceRole == INPUT_DEVICE) {
883 spanInfo->spanStatus = SPAN_WRITE_DONE;
884 } else {
885 spanInfo->spanStatus = SPAN_READ_DONE;
886 }
887 spanInfo->offsetInFrame = 0;
888
889 spanInfo->readStartTime = 0;
890 spanInfo->readDoneTime = 0;
891
892 spanInfo->writeStartTime = 0;
893 spanInfo->writeDoneTime = 0;
894
895 spanInfo->volumeStart = 1 << VOLUME_SHIFT_NUMBER; // 65536, initial full volume
896 spanInfo->volumeEnd = 1 << VOLUME_SHIFT_NUMBER; // 65536, initial full volume
897 spanInfo->isMute = false;
898 }
899 return;
900 }
901
902 int32_t AudioEndpointInner::GetPreferBufferInfo(uint32_t &totalSizeInframe, uint32_t &spanSizeInframe)
903 {
904 totalSizeInframe = dstTotalSizeInframe_;
905 spanSizeInframe = dstSpanSizeInframe_;
906 return SUCCESS;
907 }
908
909 bool AudioEndpointInner::IsAnyProcessRunning()
910 {
911 std::lock_guard<std::mutex> lock(listLock_);
912 bool isRunning = false;
913 for (size_t i = 0; i < processBufferList_.size(); i++) {
914 if (processBufferList_[i]->GetStreamStatus() &&
915 processBufferList_[i]->GetStreamStatus()->load() == STREAM_RUNNING) {
916 isRunning = true;
917 break;
918 }
919 }
920 return isRunning;
921 }
922
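// Capture direction: re-anchor the write-position time model to the current HDI write position and move the
// device buffer read/write cursors to it.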
923 void AudioEndpointInner::RecordReSyncPosition()
924 {
925 AUDIO_INFO_LOG("RecordReSyncPosition enter.");
926 uint64_t curHdiWritePos = 0;
927 int64_t writeTime = 0;
928 CHECK_AND_RETURN_LOG(GetDeviceHandleInfo(curHdiWritePos, writeTime),
929 "get device handle info fail.");
930 AUDIO_DEBUG_LOG("get capturer info, curHdiWritePos %{public}" PRIu64", writeTime %{public}" PRId64".",
931 curHdiWritePos, writeTime);
932 int64_t temp = ClockTime::GetCurNano() - writeTime;
933 if (temp > spanDuration_) {
934 AUDIO_WARNING_LOG("GetDeviceHandleInfo cost long time %{public}" PRId64".", temp);
935 }
936
937 writeTimeModel_.ResetFrameStamp(curHdiWritePos, writeTime);
938 uint64_t nextDstReadPos = curHdiWritePos;
939 uint64_t nextDstWritePos = curHdiWritePos;
940 InitAudiobuffer(false);
941 int32_t ret = dstAudioBuffer_->ResetCurReadWritePos(nextDstReadPos, nextDstWritePos);
942 CHECK_AND_RETURN_LOG(ret == SUCCESS, "ResetCurReadWritePos failed.");
943
944 SpanInfo *nextReadSpan = dstAudioBuffer_->GetSpanInfo(nextDstReadPos);
945 CHECK_AND_RETURN_LOG(nextReadSpan != nullptr, "GetSpanInfo failed.");
946 nextReadSpan->offsetInFrame = nextDstReadPos;
947 nextReadSpan->spanStatus = SpanStatus::SPAN_WRITE_DONE;
948 }
949
950 void AudioEndpointInner::ReSyncPosition()
951 {
952 Trace loopTrace("AudioEndpoint::ReSyncPosition");
953 uint64_t curHdiReadPos = 0;
954 int64_t readTime = 0;
955 bool res = GetDeviceHandleInfo(curHdiReadPos, readTime);
956 CHECK_AND_RETURN_LOG(res, "ReSyncPosition call GetDeviceHandleInfo failed.");
957 int64_t curTime = ClockTime::GetCurNano();
958 int64_t temp = curTime - readTime;
959 if (temp > spanDuration_) {
960 AUDIO_ERR_LOG("GetDeviceHandleInfo may cost long time.");
961 }
962
963 readTimeModel_.ResetFrameStamp(curHdiReadPos, readTime);
964 uint64_t nextDstWritePos = curHdiReadPos + dstSpanSizeInframe_;
965 InitAudiobuffer(false);
966 int32_t ret = dstAudioBuffer_->ResetCurReadWritePos(nextDstWritePos, nextDstWritePos);
967 CHECK_AND_RETURN_LOG(ret == SUCCESS, "ResetCurReadWritePos failed.");
968
969 SpanInfo *nextWriteSpan = dstAudioBuffer_->GetSpanInfo(nextDstWritePos);
970 CHECK_AND_RETURN_LOG(nextWriteSpan != nullptr, "GetSpanInfo failed.");
971 nextWriteSpan->offsetInFrame = nextDstWritePos;
972 nextWriteSpan->spanStatus = SpanStatus::SPAN_READ_DONE;
973 return;
974 }
975
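// Start the HDI source or sink for this endpoint (IDEL -> STARTING -> RUNNING/IDEL), start the dup stream if
// inner-capture is enabled, and wake the work loop so it can re-sync positions.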
976 bool AudioEndpointInner::StartDevice()
977 {
978 AUDIO_INFO_LOG("StartDevice enter.");
979 // how to modify the status while unlinked and started?
980 CHECK_AND_RETURN_RET_LOG(endpointStatus_ == IDEL, false, "Endpoint status is %{public}s",
981 GetStatusStr(endpointStatus_).c_str());
982 endpointStatus_ = STARTING;
983 if ((deviceInfo_.deviceRole == INPUT_DEVICE && (fastSource_ == nullptr || fastSource_->Start() != SUCCESS)) ||
984 (deviceInfo_.deviceRole == OUTPUT_DEVICE && (fastSink_ == nullptr || fastSink_->Start() != SUCCESS))) {
985 HandleStartDeviceFailed();
986 return false;
987 }
988 isStarted_ = true;
989
990 if (isInnerCapEnabled_) {
991 Trace trace("AudioEndpointInner::StartDupStream");
992 std::lock_guard<std::mutex> lock(dupMutex_);
993 if (dupStream_ != nullptr) {
994 dupStream_->Start();
995 }
996 }
997
998 std::unique_lock<std::mutex> lock(loopThreadLock_);
999 needReSyncPosition_ = true;
1000 endpointStatus_ = IsAnyProcessRunning() ? RUNNING : IDEL;
1001 workThreadCV_.notify_all();
1002 AUDIO_DEBUG_LOG("StartDevice out, status is %{public}s", GetStatusStr(endpointStatus_).c_str());
1003 return true;
1004 }
1005
1006 void AudioEndpointInner::HandleStartDeviceFailed()
1007 {
1008 AUDIO_ERR_LOG("Start failed for %{public}d, endpoint type %{public}u, process list size: %{public}zu.",
1009 deviceInfo_.deviceRole, endpointType_, processList_.size());
1010 std::lock_guard<std::mutex> lock(listLock_);
1011 isStarted_ = false;
1012 if (processList_.size() <= 1) { // The endpoint only has the current stream
1013 endpointStatus_ = UNLINKED;
1014 } else {
1015 endpointStatus_ = IDEL;
1016 }
1017 workThreadCV_.notify_all();
1018 }
1019
1020 // will not change state to stopped
1021 bool AudioEndpointInner::DelayStopDevice()
1022 {
1023 AUDIO_INFO_LOG("Status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1024
1025 // Clear data buffer to avoid noise in some case.
1026 if (dstAudioBuffer_ != nullptr) {
1027 int32_t ret = memset_s(dstAudioBuffer_->GetDataBase(), dstAudioBuffer_->GetDataSize(), 0,
1028 dstAudioBuffer_->GetDataSize());
1029 if (ret != EOK) {
1030 AUDIO_WARNING_LOG("reset buffer fail, ret %{public}d.", ret);
1031 }
1032 }
1033
1034 if (isInnerCapEnabled_) {
1035 Trace trace("AudioEndpointInner::StopDupStreamInDelay");
1036 std::lock_guard<std::mutex> lock(dupMutex_);
1037 if (dupStream_ != nullptr) {
1038 dupStream_->Stop();
1039 }
1040 }
1041
1042 if (deviceInfo_.deviceRole == INPUT_DEVICE) {
1043 CHECK_AND_RETURN_RET_LOG(fastSource_ != nullptr && fastSource_->Stop() == SUCCESS,
1044 false, "Source stop failed.");
1045 } else {
1046 CHECK_AND_RETURN_RET_LOG(fastSink_ != nullptr && fastSink_->Stop() == SUCCESS,
1047 false, "Sink stop failed.");
1048 }
1049 isStarted_ = false;
1050 return true;
1051 }
1052
1053 bool AudioEndpointInner::StopDevice()
1054 {
1055 DeinitLatencyMeasurement();
1056
1057 AUDIO_INFO_LOG("StopDevice with status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1058 // todo
1059 endpointStatus_ = STOPPING;
1060 // Clear data buffer to avoid noise in some case.
1061 if (dstAudioBuffer_ != nullptr) {
1062 int32_t ret = memset_s(dstAudioBuffer_->GetDataBase(), dstAudioBuffer_->GetDataSize(), 0,
1063 dstAudioBuffer_->GetDataSize());
1064 AUDIO_INFO_LOG("StopDevice clear buffer ret:%{public}d", ret);
1065 }
1066
1067 if (isInnerCapEnabled_) {
1068 Trace trace("AudioEndpointInner::StopDupStream");
1069 std::lock_guard<std::mutex> lock(dupMutex_);
1070 if (dupStream_ != nullptr) {
1071 dupStream_->Stop();
1072 }
1073 }
1074
1075 if (deviceInfo_.deviceRole == INPUT_DEVICE) {
1076 CHECK_AND_RETURN_RET_LOG(fastSource_ != nullptr && fastSource_->Stop() == SUCCESS,
1077 false, "Source stop failed.");
1078 } else {
1079 CHECK_AND_RETURN_RET_LOG(fastSink_ != nullptr && fastSink_->Stop() == SUCCESS,
1080 false, "Sink stop failed.");
1081 }
1082 endpointStatus_ = STOPPED;
1083 isStarted_ = false;
1084 return true;
1085 }
1086
1087 int32_t AudioEndpointInner::OnStart(IAudioProcessStream *processStream)
1088 {
1089 InitLatencyMeasurement();
1090 AUDIO_PRERELEASE_LOGI("OnStart endpoint status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1091 if (endpointStatus_ == RUNNING) {
1092 AUDIO_INFO_LOG("OnStart find endpoint already in RUNNING.");
1093 return SUCCESS;
1094 }
1095 if (endpointStatus_ == IDEL) {
1096 // call sink start
1097 if (!isStarted_) {
1098 CHECK_AND_RETURN_RET_LOG(StartDevice(), ERR_OPERATION_FAILED, "StartDevice failed");
1099 }
1100 }
1101
1102 endpointStatus_ = RUNNING;
1103 delayStopTime_ = INT64_MAX;
1104 return SUCCESS;
1105 }
1106
1107 int32_t AudioEndpointInner::OnPause(IAudioProcessStream *processStream)
1108 {
1109 AUDIO_PRERELEASE_LOGI("OnPause endpoint status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1110 if (endpointStatus_ == RUNNING) {
1111 endpointStatus_ = IsAnyProcessRunning() ? RUNNING : IDEL;
1112 }
1113 if (endpointStatus_ == IDEL) {
1114 // delay call sink stop when no process running
1115 AUDIO_PRERELEASE_LOGI("OnPause status is IDEL, need delay call stop");
1116 delayStopTime_ = ClockTime::GetCurNano() + ((clientConfig_.audioMode == AUDIO_MODE_PLAYBACK)
1117 ? PLAYBACK_DELAY_STOP_HDI_TIME : RECORDER_DELAY_STOP_HDI_TIME);
1118 if (hibernateEndpointRelease_) {
1119 delayStopTime_ = 0;
1120 }
1121 }
1122 // todo
1123 return SUCCESS;
1124 }
1125
1126 int32_t AudioEndpointInner::GetProcLastWriteDoneInfo(const std::shared_ptr<OHAudioBuffer> processBuffer,
1127 uint64_t curWriteFrame, uint64_t &proHandleFrame, int64_t &proHandleTime)
1128 {
1129 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_INVALID_HANDLE, "Process found but buffer is null");
1130 uint64_t curReadFrame = processBuffer->GetCurReadFrame();
1131 SpanInfo *curWriteSpan = processBuffer->GetSpanInfo(curWriteFrame);
1132 CHECK_AND_RETURN_RET_LOG(curWriteSpan != nullptr, ERR_INVALID_HANDLE,
1133 "curWriteSpan of curWriteFrame %{public}" PRIu64" is null", curWriteFrame);
1134 if (curWriteSpan->spanStatus == SpanStatus::SPAN_WRITE_DONE || curWriteFrame < dstSpanSizeInframe_ ||
1135 curWriteFrame < curReadFrame) {
1136 proHandleFrame = curWriteFrame;
1137 proHandleTime = curWriteSpan->writeDoneTime;
1138 } else {
1139 int32_t ret = GetProcLastWriteDoneInfo(processBuffer, curWriteFrame - dstSpanSizeInframe_,
1140 proHandleFrame, proHandleTime);
1141 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret,
1142 "get process last write done info fail, ret %{public}d.", ret);
1143 }
1144
1145 AUDIO_INFO_LOG("GetProcLastWriteDoneInfo end, curWriteFrame %{public}" PRIu64", proHandleFrame %{public}" PRIu64", "
1146 "proHandleTime %{public}" PRId64".", curWriteFrame, proHandleFrame, proHandleTime);
1147 return SUCCESS;
1148 }
1149
1150 int32_t AudioEndpointInner::OnUpdateHandleInfo(IAudioProcessStream *processStream)
1151 {
1152 Trace trace("AudioEndpoint::OnUpdateHandleInfo");
1153 bool isFind = false;
1154 std::lock_guard<std::mutex> lock(listLock_);
1155 auto processItr = processList_.begin();
1156 while (processItr != processList_.end()) {
1157 if (*processItr != processStream) {
1158 processItr++;
1159 continue;
1160 }
1161 std::shared_ptr<OHAudioBuffer> processBuffer = (*processItr)->GetStreamBuffer();
1162 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_OPERATION_FAILED, "Process found but buffer is null");
1163 uint64_t proHandleFrame = 0;
1164 int64_t proHandleTime = 0;
1165 if (deviceInfo_.deviceRole == INPUT_DEVICE) {
1166 uint64_t curWriteFrame = processBuffer->GetCurWriteFrame();
1167 int32_t ret = GetProcLastWriteDoneInfo(processBuffer, curWriteFrame, proHandleFrame, proHandleTime);
1168 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret,
1169 "get process last write done info fail, ret %{public}d.", ret);
1170 processBuffer->SetHandleInfo(proHandleFrame, proHandleTime);
1171 } else {
1172 // For output device, handle info is updated in CheckAllBufferReady
1173 processBuffer->GetHandleInfo(proHandleFrame, proHandleTime);
1174 }
1175
1176 isFind = true;
1177 break;
1178 }
1179 CHECK_AND_RETURN_RET_LOG(isFind, ERR_OPERATION_FAILED, "Can not find any process to UpdateHandleInfo");
1180 return SUCCESS;
1181 }
1182
1183 int32_t AudioEndpointInner::LinkProcessStream(IAudioProcessStream *processStream)
1184 {
1185 CHECK_AND_RETURN_RET_LOG(processStream != nullptr, ERR_INVALID_PARAM, "IAudioProcessStream is null");
1186 std::shared_ptr<OHAudioBuffer> processBuffer = processStream->GetStreamBuffer();
1187 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_INVALID_PARAM, "processBuffer is null");
1188 CHECK_AND_RETURN_RET_LOG(processBuffer->GetStreamStatus() != nullptr, ERR_INVALID_PARAM, "stream status is null");
1189
1190 CHECK_AND_RETURN_RET_LOG(processList_.size() < MAX_LINKED_PROCESS, ERR_OPERATION_FAILED, "reach link limit.");
1191
1192 AUDIO_INFO_LOG("LinkProcessStream start status is:%{public}s.", GetStatusStr(endpointStatus_).c_str());
1193 processBuffer->SetSessionId(processStream->GetAudioSessionId());
1194 bool needEndpointRunning = processBuffer->GetStreamStatus()->load() == STREAM_RUNNING;
1195
1196 if (endpointStatus_ == STARTING) {
1197 AUDIO_INFO_LOG("LinkProcessStream wait start begin.");
1198 std::unique_lock<std::mutex> lock(loopThreadLock_);
1199 workThreadCV_.wait_for(lock, std::chrono::milliseconds(SLEEP_TIME_IN_DEFAULT), [this] {
1200 return endpointStatus_ != STARTING;
1201 });
1202 AUDIO_DEBUG_LOG("LinkProcessStream wait start end.");
1203 }
1204
1205 if (endpointStatus_ == RUNNING) {
1206 LinkProcessStreamExt(processStream, processBuffer);
1207 return SUCCESS;
1208 }
1209
1210 if (endpointStatus_ == UNLINKED) {
1211 endpointStatus_ = IDEL; // handle push_back in IDEL
1212 if (isDeviceRunningInIdel_) {
1213 CHECK_AND_RETURN_RET_LOG(StartDevice(), ERR_OPERATION_FAILED, "StartDevice failed");
1214 delayStopTime_ = ClockTime::GetCurNano() + ((clientConfig_.audioMode == AUDIO_MODE_PLAYBACK)
1215 ? PLAYBACK_DELAY_STOP_HDI_TIME : RECORDER_DELAY_STOP_HDI_TIME);
1216 }
1217 }
1218
1219 if (endpointStatus_ == IDEL) {
1220 {
1221 std::lock_guard<std::mutex> lock(listLock_);
1222 processList_.push_back(processStream);
1223 processBufferList_.push_back(processBuffer);
1224 }
1225 if (!needEndpointRunning) {
1226 AUDIO_INFO_LOG("LinkProcessStream success, process stream status is not running.");
1227 return SUCCESS;
1228 }
1229 // needEndpointRunning = true
1230 if (isDeviceRunningInIdel_) {
1231 endpointStatus_ = IsAnyProcessRunning() ? RUNNING : IDEL;
1232 } else {
1233 // needEndpointRunning = true & isDeviceRunningInIdel_ = false
1234 // KeepWorkloopRunning will wait on IDEL
1235 CHECK_AND_RETURN_RET_LOG(StartDevice(), ERR_OPERATION_FAILED, "StartDevice failed");
1236 }
1237 AUDIO_INFO_LOG("LinkProcessStream success with status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1238 return SUCCESS;
1239 }
1240
1241 AUDIO_INFO_LOG("LinkProcessStream success with status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1242 return SUCCESS;
1243 }
1244
1245 void AudioEndpointInner::LinkProcessStreamExt(IAudioProcessStream *processStream,
1246 const std::shared_ptr<OHAudioBuffer>& processBuffer)
1247 {
1248 std::lock_guard<std::mutex> lock(listLock_);
1249 processList_.push_back(processStream);
1250 processBufferList_.push_back(processBuffer);
1251 AUDIO_INFO_LOG("LinkProcessStream success in RUNNING.");
1252 }
1253
1254 int32_t AudioEndpointInner::UnlinkProcessStream(IAudioProcessStream *processStream)
1255 {
1256 AUDIO_INFO_LOG("UnlinkProcessStream in status:%{public}s.", GetStatusStr(endpointStatus_).c_str());
1257 CHECK_AND_RETURN_RET_LOG(processStream != nullptr, ERR_INVALID_PARAM, "IAudioProcessStream is null");
1258 std::shared_ptr<OHAudioBuffer> processBuffer = processStream->GetStreamBuffer();
1259 CHECK_AND_RETURN_RET_LOG(processBuffer != nullptr, ERR_INVALID_PARAM, "processBuffer is null");
1260
1261 bool isFind = false;
1262 std::lock_guard<std::mutex> lock(listLock_);
1263 auto processItr = processList_.begin();
1264 auto bufferItr = processBufferList_.begin();
1265 while (processItr != processList_.end()) {
1266 if (*processItr == processStream && *bufferItr == processBuffer) {
1267 processList_.erase(processItr);
1268 processBufferList_.erase(bufferItr);
1269 isFind = true;
1270 break;
1271 } else {
1272 processItr++;
1273 bufferItr++;
1274 }
1275 }
1276 if (processList_.size() == 0) {
1277 StopDevice();
1278 endpointStatus_ = UNLINKED;
1279 }
1280
1281 AUDIO_DEBUG_LOG("UnlinkProcessStream end, %{public}s the process.", (isFind ? "find and remove" : "not find"));
1282 return SUCCESS;
1283 }
1284
1285 void AudioEndpointInner::CheckStandBy()
1286 {
1287 if (endpointStatus_ == RUNNING) {
1288 endpointStatus_ = IsAnyProcessRunning() ? RUNNING : IDEL;
1289 }
1290
1291 if (endpointStatus_ == RUNNING) {
1292 return;
1293 }
1294
1295 AUDIO_INFO_LOG("endpoint status:%{public}s", GetStatusStr(endpointStatus_).c_str());
1296 if (endpointStatus_ == IDEL) {
1297 // delay call sink stop when no process running
1298 AUDIO_INFO_LOG("status is IDEL, need delay call stop");
1299 delayStopTime_ = ClockTime::GetCurNano() + ((clientConfig_.audioMode == AUDIO_MODE_PLAYBACK)
1300 ? PLAYBACK_DELAY_STOP_HDI_TIME : RECORDER_DELAY_STOP_HDI_TIME);
1301 }
1302 }
1303
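// For each linked process: refresh its handle info, push long-silent RUNNING streams into STREAM_STAND_BY,
// and report whether every running stream has a WRITE_DONE span at its current read position.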
1304 bool AudioEndpointInner::CheckAllBufferReady(int64_t checkTime, uint64_t curWritePos)
1305 {
1306 bool isAllReady = true;
1307 bool needCheckStandby = false;
1308 {
1309 // lock list without sleep
1310 std::lock_guard<std::mutex> lock(listLock_);
1311 for (size_t i = 0; i < processBufferList_.size(); i++) {
1312 std::shared_ptr<OHAudioBuffer> tempBuffer = processBufferList_[i];
1313 uint64_t eachCurReadPos = processBufferList_[i]->GetCurReadFrame();
1314 lastHandleProcessTime_ = checkTime;
1315 processBufferList_[i]->SetHandleInfo(eachCurReadPos, lastHandleProcessTime_); // update handle info
1316 if (tempBuffer->GetStreamStatus() &&
1317 tempBuffer->GetStreamStatus()->load() != StreamStatus::STREAM_RUNNING) {
1318 // Process is not running, server will continue to check the same location in the next cycle.
1319 int64_t duration = 5000000; // 5ms
1320 processBufferList_[i]->SetHandleInfo(eachCurReadPos, lastHandleProcessTime_ + duration);
1321 continue; // process not running
1322 }
1323 // Status is RUNNING
1324 int64_t current = ClockTime::GetCurNano();
1325 int64_t lastWrittenTime = tempBuffer->GetLastWrittenTime();
1326 if (current - lastWrittenTime > WAIT_CLIENT_STANDBY_TIME_NS) {
1327 Trace trace("AudioEndpoint::MarkClientStandby");
1328 AUDIO_INFO_LOG("change the status to stand-by, session %{public}u", tempBuffer->GetSessionId());
1329 CHECK_AND_RETURN_RET_LOG(tempBuffer->GetStreamStatus() != nullptr, false, "GetStreamStatus failed");
1330 tempBuffer->GetStreamStatus()->store(StreamStatus::STREAM_STAND_BY);
1331 WriterRenderStreamStandbySysEvent(tempBuffer->GetSessionId(), 1);
1332 needCheckStandby = true;
1333 continue;
1334 }
1335 uint64_t curRead = tempBuffer->GetCurReadFrame();
1336 SpanInfo *curReadSpan = tempBuffer->GetSpanInfo(curRead);
1337 if (curReadSpan == nullptr || curReadSpan->spanStatus != SpanStatus::SPAN_WRITE_DONE) {
1338 AUDIO_DEBUG_LOG("Find one process not ready"); // print uid of the process?
1339 isAllReady = false;
1340 continue;
1341 }
1342 // process Status is RUNNING && buffer status is WRITE_DONE
1343 tempBuffer->SetLastWrittenTime(current);
1344 }
1345 }
1346
1347 if (needCheckStandby) {
1348 CheckStandBy();
1349 }
1350
1351 if (!isAllReady) {
1352 WaitAllProcessReady(curWritePos);
1353 }
1354 return isAllReady;
1355 }
1356
1357 void AudioEndpointInner::WaitAllProcessReady(uint64_t curWritePos)
1358 {
1359 Trace trace("AudioEndpoint::WaitAllProcessReady");
1360 int64_t tempWakeupTime = readTimeModel_.GetTimeOfPos(curWritePos) + WRITE_TO_HDI_AHEAD_TIME;
1361 if (tempWakeupTime - ClockTime::GetCurNano() < ONE_MILLISECOND_DURATION) {
1362 ClockTime::RelativeSleep(ONE_MILLISECOND_DURATION);
1363 } else {
1364 ClockTime::AbsoluteSleep(tempWakeupTime); // sleep to hdi read time ahead 1ms.
1365 }
1366 }
1367
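// Mix the inner-captured 16-bit source buffers into dupBuffer_ with saturation, enqueue the result on the
// dup stream, then clear the mix buffer for the next cycle.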
1368 void AudioEndpointInner::MixToDupStream(const std::vector<AudioStreamData> &srcDataList)
1369 {
1370 Trace trace("AudioEndpointInner::MixToDupStream");
1371 std::lock_guard<std::mutex> lock(dupMutex_);
1372 CHECK_AND_RETURN_LOG(dupBuffer_ != nullptr, "Buffer is not ready");
1373
1374 for (size_t i = 0; i < srcDataList.size(); i++) {
1375 if (!srcDataList[i].isInnerCaped) {
1376 continue;
1377 }
1378 size_t dataLength = dupBufferSize_;
1379 dataLength /= 2; // SAMPLE_S16LE --> 2 bytes per sample
1380 int16_t *dstPtr = reinterpret_cast<int16_t *>(dupBuffer_.get());
1381
1382 for (size_t offset = 0; dataLength > 0; dataLength--) {
1383 int32_t sum = *dstPtr;
1384 sum += *(reinterpret_cast<int16_t *>(srcDataList[i].bufferDesc.buffer) + offset);
1385 *dstPtr = sum > INT16_MAX ? INT16_MAX : (sum < INT16_MIN ? INT16_MIN : sum);
1386 dstPtr++;
1387 offset++;
1388 }
1389 }
1390 BufferDesc temp;
1391 temp.buffer = dupBuffer_.get();
1392 temp.bufLength = dupBufferSize_;
1393 temp.dataLength = dupBufferSize_;
1394
1395 CHECK_AND_RETURN_LOG(dupStream_ != nullptr, "dupStream_ is nullptr");
1396 int32_t ret = dupStream_->EnqueueBuffer(temp);
1397 CHECK_AND_RETURN_LOG(ret == SUCCESS, "EnqueueBuffer failed:%{public}d", ret);
1398
1399 ret = memset_s(reinterpret_cast<void *>(dupBuffer_.get()), dupBufferSize_, 0, dupBufferSize_);
1400 if (ret != EOK) {
1401 AUDIO_WARNING_LOG("memset buffer fail, ret %{public}d", ret);
1402 }
1403 }
1404
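// Mix all ready client spans into the destination span: every S16LE stereo sample is scaled by its stream
// volume in Q16 fixed point (>> VOLUME_SHIFT_NUMBER), summed, and clamped to the int16_t range.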
1405 void AudioEndpointInner::ProcessData(const std::vector<AudioStreamData> &srcDataList, const AudioStreamData &dstData)
1406 {
1407 size_t srcListSize = srcDataList.size();
1408
1409 for (size_t i = 0; i < srcListSize; i++) {
1410 if (srcDataList[i].streamInfo.format != SAMPLE_S16LE || srcDataList[i].streamInfo.channels != STEREO ||
1411 srcDataList[i].bufferDesc.bufLength != dstData.bufferDesc.bufLength ||
1412 srcDataList[i].bufferDesc.dataLength != dstData.bufferDesc.dataLength) {
1413 AUDIO_ERR_LOG("ProcessData failed, streamInfo are different");
1414 return;
1415 }
1416 }
1417
1418 // Assume all sources use the same sample format and span size
1419 CHECK_AND_RETURN_LOG(dstData.streamInfo.format == SAMPLE_S16LE && dstData.streamInfo.channels == STEREO,
1420 "ProcessData failed, streamInfo is not supported");
1421
1422 size_t dataLength = dstData.bufferDesc.dataLength;
1423 dataLength /= 2; // SAMPLE_S16LE--> 2 byte
1424 int16_t *dstPtr = reinterpret_cast<int16_t *>(dstData.bufferDesc.buffer);
1425 for (size_t offset = 0; dataLength > 0; dataLength--) {
1426 int32_t sum = 0;
1427 for (size_t i = 0; i < srcListSize; i++) {
1428 int32_t vol = srcDataList[i].volumeStart; // change to modify volume of each channel
1429 int16_t *srcPtr = reinterpret_cast<int16_t *>(srcDataList[i].bufferDesc.buffer) + offset;
1430 sum += (*srcPtr * static_cast<int64_t>(vol)) >> VOLUME_SHIFT_NUMBER; // 1/65536
1431 ZeroVolumeCheck(vol);
1432 }
1433 offset++;
1434 *dstPtr++ = sum > INT16_MAX ? INT16_MAX : (sum < INT16_MIN ? INT16_MIN : sum);
1435 }
1436 HandleZeroVolumeCheckEvent();
1437 }
1438
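// Once the mixed volume has stayed at zero beyond delayStopTimeForZeroVolume_, stop the fast sink and
// remember that the device was stopped because of zero volume.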
1439 void AudioEndpointInner::HandleZeroVolumeCheckEvent()
1440 {
1441 if (!zeroVolumeStopDevice_ && (ClockTime::GetCurNano() >= delayStopTimeForZeroVolume_)) {
1442 if (isStarted_) {
1443 if (fastSink_ != nullptr && fastSink_->Stop() == SUCCESS) {
1444 AUDIO_INFO_LOG("Volume from non-zero to zero more than 4s, stop device success.");
1445 isStarted_ = false;
1446 } else {
1447 AUDIO_INFO_LOG("Volume from non-zero to zero more than 4s, stop device failed.");
1448 isStarted_ = true;
1449 }
1450 }
1451 zeroVolumeStopDevice_ = true;
1452 }
1453 }
1454
1455
1456 void AudioEndpointInner::HandleRendererDataParams(const AudioStreamData &srcData, const AudioStreamData &dstData,
1457 bool applyVol)
1458 {
1459 if (srcData.streamInfo.encoding != dstData.streamInfo.encoding) {
1460 AUDIO_ERR_LOG("Different encoding formats");
1461 return;
1462 }
1463 if (srcData.streamInfo.format == SAMPLE_S16LE && srcData.streamInfo.channels == STEREO) {
1464 return ProcessSingleData(srcData, dstData, applyVol);
1465 }
1466 if (srcData.streamInfo.format == SAMPLE_S16LE && srcData.streamInfo.channels == MONO) {
1467 CHECK_AND_RETURN_LOG(processList_.size() > 0 && processList_[0] != nullptr, "No available process");
1468 BufferDesc &convertedBuffer = processList_[0]->GetConvertedBuffer();
1469 int32_t ret = FormatConverter::S16MonoToS16Stereo(srcData.bufferDesc, convertedBuffer);
1470 CHECK_AND_RETURN_LOG(ret == SUCCESS, "Convert channel from mono to stereo failed");
1471 AudioStreamData dataAfterProcess = srcData;
1472 dataAfterProcess.bufferDesc = convertedBuffer;
1473 ProcessSingleData(dataAfterProcess, dstData, applyVol);
1474 ret = memset_s(static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength, 0,
1475 convertedBuffer.bufLength);
1476 CHECK_AND_RETURN_LOG(ret == EOK, "memset converted buffer to 0 failed");
1477 }
1478 }
1479
1480 void AudioEndpointInner::ProcessSingleData(const AudioStreamData &srcData, const AudioStreamData &dstData,
1481 bool applyVol)
1482 {
1483 CHECK_AND_RETURN_LOG(dstData.streamInfo.format == SAMPLE_S16LE && dstData.streamInfo.channels == STEREO,
1484 "ProcessData failed, streamInfo is not supported");
1485
1486 size_t dataLength = dstData.bufferDesc.dataLength;
1487 dataLength /= 2; // SAMPLE_S16LE--> 2 byte
1488 int16_t *dstPtr = reinterpret_cast<int16_t *>(dstData.bufferDesc.buffer);
1489 for (size_t offset = 0; dataLength > 0; dataLength--) {
1490 int32_t vol = srcData.volumeStart; // change to modify volume of each channel
1491 int16_t *srcPtr = reinterpret_cast<int16_t *>(srcData.bufferDesc.buffer) + offset;
1492 int32_t sum = applyVol ? (*srcPtr * static_cast<int64_t>(vol)) >> VOLUME_SHIFT_NUMBER : *srcPtr; // 1/65536
1493 ZeroVolumeCheck(vol);
1494 offset++;
1495 *dstPtr++ = sum > INT16_MAX ? INT16_MAX : (sum < INT16_MIN ? INT16_MIN : sum);
1496 }
1497 HandleZeroVolumeCheckEvent();
1498 }
1499
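// Track zero/non-zero volume transitions: entering zero volume arms the delayed device stop above, while
// leaving zero volume restarts the sink if it had been stopped for that reason.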
1500 void AudioEndpointInner::ZeroVolumeCheck(const int32_t vol)
1501 {
1502 if (std::abs(vol - 0) <= std::numeric_limits<float>::epsilon()) {
1503 if (!zeroVolumeStopDevice_ && !isVolumeAlreadyZero_) {
1504 AUDIO_INFO_LOG("Begin zero volume, will stop device.");
1505 delayStopTimeForZeroVolume_ = ClockTime::GetCurNano() + DELAY_STOP_HDI_TIME_FOR_ZERO_VOLUME;
1506 isVolumeAlreadyZero_ = true;
1507 }
1508 } else {
1509 if (zeroVolumeStopDevice_ && !isStarted_) {
1510 if (fastSink_ == nullptr || fastSink_->Start() != SUCCESS) {
1511 AUDIO_INFO_LOG("Volume from zero to non-zero, start device failed.");
1512 isStarted_ = false;
1513 } else {
1514 AUDIO_INFO_LOG("Volume from zero to non-zero, start device success.");
1515 isStarted_ = true;
1516 }
1517 zeroVolumeStopDevice_ = false;
1518 }
1519 isVolumeAlreadyZero_ = false;
1520 delayStopTimeForZeroVolume_ = INT64_MAX;
1521 }
1522 }
1523
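// Collect one ready span from each client: apply the shared system volume where applicable, claim the span
// as SPAN_READING, mute it if requested, and append the resulting stream data to audioDataList.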
1524 // call with listLock_ held
1525 void AudioEndpointInner::GetAllReadyProcessData(std::vector<AudioStreamData> &audioDataList)
1526 {
1527 for (size_t i = 0; i < processBufferList_.size(); i++) {
1528 uint64_t curRead = processBufferList_[i]->GetCurReadFrame();
1529 Trace trace("AudioEndpoint::ReadProcessData->" + std::to_string(curRead));
1530 SpanInfo *curReadSpan = processBufferList_[i]->GetSpanInfo(curRead);
1531 CHECK_AND_CONTINUE_LOG(curReadSpan != nullptr, "GetSpanInfo failed, can not get client curReadSpan");
1532 AudioStreamData streamData;
1533 Volume vol = {true, 1.0f, 0};
1534 AudioStreamType streamType = processList_[i]->GetAudioStreamType();
1535 AudioVolumeType volumeType = VolumeUtils::GetVolumeTypeFromStreamType(streamType);
1536 DeviceType deviceType = PolicyHandler::GetInstance().GetActiveOutPutDevice();
1537 bool muteFlag = processList_[i]->GetMuteFlag();
1538 if (deviceInfo_.networkId == LOCAL_NETWORK_ID &&
1539 !(deviceInfo_.deviceType == DEVICE_TYPE_BLUETOOTH_A2DP && streamType == STREAM_MUSIC &&
1540 PolicyHandler::GetInstance().IsAbsVolumeSupported()) &&
1541 PolicyHandler::GetInstance().GetSharedVolume(volumeType, deviceType, vol)) {
1542 streamData.volumeStart = vol.isMute ? 0 : static_cast<int32_t>(curReadSpan->volumeStart * vol.volumeFloat);
1543 } else {
1544 streamData.volumeStart = curReadSpan->volumeStart;
1545 }
1546 streamData.volumeEnd = curReadSpan->volumeEnd;
1547 streamData.streamInfo = processList_[i]->GetStreamInfo();
1548 streamData.isInnerCaped = processList_[i]->GetInnerCapState();
1549 SpanStatus targetStatus = SpanStatus::SPAN_WRITE_DONE;
1550 if (curReadSpan->spanStatus.compare_exchange_strong(targetStatus, SpanStatus::SPAN_READING)) {
1551 processBufferList_[i]->GetReadbuffer(curRead, streamData.bufferDesc); // check return?
1552 if (muteFlag) {
1553 memset_s(static_cast<void *>(streamData.bufferDesc.buffer), streamData.bufferDesc.bufLength,
1554 0, streamData.bufferDesc.bufLength);
1555 }
1556 CheckPlaySignal(streamData.bufferDesc.buffer, streamData.bufferDesc.bufLength);
1557 audioDataList.push_back(streamData);
1558 curReadSpan->readStartTime = ClockTime::GetCurNano();
1559 DumpFileUtil::WriteDumpFile(dumpDcp_, static_cast<void *>(streamData.bufferDesc.buffer),
1560 streamData.bufferDesc.bufLength);
1561 if (AudioDump::GetInstance().GetVersionType() == BETA_VERSION) {
1562 Media::MediaMonitor::MediaMonitorManager::GetInstance().WriteAudioBuffer(dumpDcpName_,
1563 static_cast<void *>(streamData.bufferDesc.buffer), streamData.bufferDesc.bufLength);
1564 }
1565 }
1566 }
1567 }
1568
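// One playback cycle: gather the ready client spans, mix or convert them into the HDI span at curWritePos,
// optionally feed the inner-capture dup stream, then dump and monitor the written data.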
1569 bool AudioEndpointInner::ProcessToEndpointDataHandle(uint64_t curWritePos)
1570 {
1571 std::lock_guard<std::mutex> lock(listLock_);
1572
1573 std::vector<AudioStreamData> audioDataList;
1574 GetAllReadyProcessData(audioDataList);
1575
1576 AudioStreamData dstStreamData;
1577 dstStreamData.streamInfo = dstStreamInfo_;
1578 int32_t ret = dstAudioBuffer_->GetWriteBuffer(curWritePos, dstStreamData.bufferDesc);
1579 CHECK_AND_RETURN_RET_LOG(((ret == SUCCESS && dstStreamData.bufferDesc.buffer != nullptr)), false,
1580 "GetWriteBuffer failed, ret:%{public}d", ret);
1581
1582 SpanInfo *curWriteSpan = dstAudioBuffer_->GetSpanInfo(curWritePos);
1583 CHECK_AND_RETURN_RET_LOG(curWriteSpan != nullptr, false, "GetSpanInfo failed, can not get curWriteSpan");
1584
1585 dstStreamData.volumeStart = curWriteSpan->volumeStart;
1586 dstStreamData.volumeEnd = curWriteSpan->volumeEnd;
1587
1588 Trace trace("AudioEndpoint::WriteDstBuffer=>" + std::to_string(curWritePos));
1589 // do write work
1590 if (audioDataList.size() == 0) {
1591 memset_s(dstStreamData.bufferDesc.buffer, dstStreamData.bufferDesc.bufLength, 0,
1592 dstStreamData.bufferDesc.bufLength);
1593 } else {
1594 if (endpointType_ == TYPE_VOIP_MMAP && audioDataList.size() == 1) {
1595 HandleRendererDataParams(audioDataList[0], dstStreamData);
1596 } else {
1597 ProcessData(audioDataList, dstStreamData);
1598 }
1599 }
1600
1601 if (isInnerCapEnabled_) {
1602 ProcessToDupStream(audioDataList, dstStreamData);
1603 }
1604
1605 DumpFileUtil::WriteDumpFile(dumpHdi_, static_cast<void *>(dstStreamData.bufferDesc.buffer),
1606 dstStreamData.bufferDesc.bufLength);
1607 DfxOperation(dstStreamData.bufferDesc, dstStreamInfo_.format, dstStreamInfo_.channels);
1608
1609 if (AudioDump::GetInstance().GetVersionType() == BETA_VERSION) {
1610 Media::MediaMonitor::MediaMonitorManager::GetInstance().WriteAudioBuffer(dumpHdiName_,
1611 static_cast<void *>(dstStreamData.bufferDesc.buffer), dstStreamData.bufferDesc.bufLength);
1612 }
1613
1614 CheckUpdateState(reinterpret_cast<char *>(dstStreamData.bufferDesc.buffer),
1615 dstStreamData.bufferDesc.bufLength);
1616
1617 return true;
1618 }
1619
1620 void AudioEndpointInner::ProcessToDupStream(std::vector<AudioStreamData> &audioDataList, AudioStreamData &dstStreamData)
1621 {
1622 Trace trace("AudioEndpointInner::ProcessToDupStream");
1623 if (endpointType_ == TYPE_VOIP_MMAP) {
1624 if (audioDataList.size() == 1 && audioDataList[0].isInnerCaped) {
1625 BufferDesc temp;
1626 temp.buffer = dupBuffer_.get();
1627 temp.bufLength = dupBufferSize_;
1628 temp.dataLength = dupBufferSize_;
1629
1630 dstStreamData.bufferDesc = temp;
1631 HandleRendererDataParams(audioDataList[0], dstStreamData, false);
1632 dupStream_->EnqueueBuffer(temp);
1633 }
1634 } else {
1635 MixToDupStream(audioDataList);
1636 }
1637 }
1638
1639 void AudioEndpointInner::DfxOperation(BufferDesc &buffer, AudioSampleFormat format, AudioChannel channel) const
1640 {
1641 ChannelVolumes vols = VolumeTools::CountVolumeLevel(buffer, format, channel);
1642 if (channel == MONO) {
1643 Trace::Count(logUtilsTag_, vols.volStart[0]);
1644 } else {
1645 Trace::Count(logUtilsTag_, (vols.volStart[0] + vols.volStart[1]) / HALF_FACTOR);
1646 }
1647 AudioLogUtils::ProcessVolumeData(logUtilsTag_, vols, volumeDataCount_);
1648 }
1649
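// Accumulate the max amplitude over GET_MAX_AMPLITUDE_FRAMES_THRESHOLD frames; updating stops automatically
// once no GetMaxAmplitude() call has been observed since the current batch started.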
1650 void AudioEndpointInner::CheckUpdateState(char *frame, uint64_t replyBytes)
1651 {
1652 if (startUpdate_) {
1653 if (renderFrameNum_ == 0) {
1654 last10FrameStartTime_ = ClockTime::GetCurNano();
1655 }
1656 renderFrameNum_++;
1657 maxAmplitude_ = UpdateMaxAmplitude(static_cast<ConvertHdiFormat>(dstStreamInfo_.format),
1658 frame, replyBytes);
1659 if (renderFrameNum_ == GET_MAX_AMPLITUDE_FRAMES_THRESHOLD) {
1660 renderFrameNum_ = 0;
1661 if (last10FrameStartTime_ > lastGetMaxAmplitudeTime_) {
1662 startUpdate_ = false;
1663 maxAmplitude_ = 0;
1664 }
1665 }
1666 }
1667 }
1668
1669 float AudioEndpointInner::GetMaxAmplitude()
1670 {
1671 lastGetMaxAmplitudeTime_ = ClockTime::GetCurNano();
1672 startUpdate_ = true;
1673 return maxAmplitude_;
1674 }
1675
1676 AudioMode AudioEndpointInner::GetAudioMode() const
1677 {
1678 return clientConfig_.audioMode;
1679 }
1680
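// While hibernating, force delayStopTime_ to 0 so the delayed HDI stop can fire immediately; the previous
// value is cached and restored when hibernation ends.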
1681 void AudioEndpointInner::SetHibernateEndpointRelease(const bool &isHibernate)
1682 {
1683 hibernateEndpointRelease_ = isHibernate;
1684 if (isHibernate) {
1685 if (delayStopTime_ == 0) {
1686 AUDIO_INFO_LOG("delayStopTime is 0, don't amend");
1687 return;
1688 }
1689 cacheDelayStopTime_ = delayStopTime_;
1690 delayStopTime_ = 0;
1691 AUDIO_INFO_LOG("amend delayStopTime from %{public}" PRIu64" to %{public}" PRIu64,
1692 cacheDelayStopTime_, delayStopTime_);
1693 } else {
1694 AUDIO_INFO_LOG("amend delayStopTime from %{public}" PRIu64" to %{public}" PRIu64,
1695 delayStopTime_, cacheDelayStopTime_);
1696 delayStopTime_ = cacheDelayStopTime_;
1697 }
1698 }
1699
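// Predict when the HDI will read posInFrame via the linear position-time model; the async updater thread is
// nudged for the first startPeriodCnt spans and then every oneBigPeriodCnt spans, and the model is refreshed
// with the latest (posInFrame_, timeInNano_) sample when its stored frame stamp has drifted.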
1700 int64_t AudioEndpointInner::GetPredictNextReadTime(uint64_t posInFrame)
1701 {
1702 Trace trace("AudioEndpoint::GetPredictNextRead");
1703 uint64_t handleSpanCnt = posInFrame / dstSpanSizeInframe_;
1704 uint32_t startPeriodCnt = 20; // sync each time when start
1705 uint32_t oneBigPeriodCnt = 40; // 200ms
1706 if (handleSpanCnt < startPeriodCnt || handleSpanCnt % oneBigPeriodCnt == 0) {
1707 updateThreadCV_.notify_all();
1708 }
1709 uint64_t readFrame = 0;
1710 int64_t readtime = 0;
1711 if (readTimeModel_.GetFrameStamp(readFrame, readtime)) {
1712 if (readFrame != posInFrame_) {
1713 readTimeModel_.UpdataFrameStamp(posInFrame_, timeInNano_);
1714 }
1715 }
1716
1717 int64_t nextHdiReadTime = readTimeModel_.GetTimeOfPos(posInFrame);
1718 return nextHdiReadTime;
1719 }
1720
1721 int64_t AudioEndpointInner::GetPredictNextWriteTime(uint64_t posInFrame)
1722 {
1723 uint64_t handleSpanCnt = posInFrame / dstSpanSizeInframe_;
1724 uint32_t startPeriodCnt = 20;
1725 uint32_t oneBigPeriodCnt = 40;
1726 if (handleSpanCnt < startPeriodCnt || handleSpanCnt % oneBigPeriodCnt == 0) {
1727 updateThreadCV_.notify_all();
1728 }
1729 uint64_t writeFrame = 0;
1730 int64_t writetime = 0;
1731 if (writeTimeModel_.GetFrameStamp(writeFrame, writetime)) {
1732 if (writeFrame != posInFrame_) {
1733 writeTimeModel_.UpdataFrameStamp(posInFrame_, timeInNano_);
1734 }
1735 }
1736 int64_t nextHdiWriteTime = writeTimeModel_.GetTimeOfPos(posInFrame);
1737 return nextHdiWriteTime;
1738 }
1739
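// Record path: the next wake-up is the predicted HDI write time of the next span plus the record (or VoIP)
// delay, and both the read and write frames of the HDI buffer are advanced by one span.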
1740 bool AudioEndpointInner::RecordPrepareNextLoop(uint64_t curReadPos, int64_t &wakeUpTime)
1741 {
1742 uint64_t nextHandlePos = curReadPos + dstSpanSizeInframe_;
1743 int64_t nextHdiWriteTime = GetPredictNextWriteTime(nextHandlePos);
1744 int64_t tempDelay = endpointType_ == TYPE_VOIP_MMAP ? RECORD_VOIP_DELAY_TIME : RECORD_DELAY_TIME;
1745 int64_t predictWakeupTime = nextHdiWriteTime + tempDelay;
1746 if (predictWakeupTime <= ClockTime::GetCurNano()) {
1747 wakeUpTime = ClockTime::GetCurNano() + ONE_MILLISECOND_DURATION;
1748 AUDIO_ERR_LOG("hdi sent wrong position time");
1749 } else {
1750 wakeUpTime = predictWakeupTime;
1751 }
1752
1753 int32_t ret = dstAudioBuffer_->SetCurWriteFrame(nextHandlePos);
1754 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, false, "set dst buffer write frame fail, ret %{public}d.", ret);
1755 ret = dstAudioBuffer_->SetCurReadFrame(nextHandlePos);
1756 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, false, "set dst buffer read frame fail, ret %{public}d.", ret);
1757
1758 return true;
1759 }
1760
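// Playback path: advance the HDI buffer frames by one span, derive the next wake-up time from the predicted
// HDI read time minus serverAheadReadTime_, and mark every client span that was being read as SPAN_READ_DONE
// (zeroing it for reuse) before moving that client's read frame forward.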
1761 bool AudioEndpointInner::PrepareNextLoop(uint64_t curWritePos, int64_t &wakeUpTime)
1762 {
1763 uint64_t nextHandlePos = curWritePos + dstSpanSizeInframe_;
1764 Trace prepareTrace("AudioEndpoint::PrepareNextLoop " + std::to_string(nextHandlePos));
1765 int64_t nextHdiReadTime = GetPredictNextReadTime(nextHandlePos);
1766 int64_t predictWakeupTime = nextHdiReadTime - serverAheadReadTime_;
1767 if (predictWakeupTime <= ClockTime::GetCurNano()) {
1768 wakeUpTime = ClockTime::GetCurNano() + ONE_MILLISECOND_DURATION;
1769 AUDIO_ERR_LOG("hdi sent wrong position time");
1770 } else {
1771 wakeUpTime = predictWakeupTime;
1772 }
1773
1774 SpanInfo *nextWriteSpan = dstAudioBuffer_->GetSpanInfo(nextHandlePos);
1775 CHECK_AND_RETURN_RET_LOG(nextWriteSpan != nullptr, false, "GetSpanInfo failed, can not get next write span");
1776
1777 int32_t ret1 = dstAudioBuffer_->SetCurWriteFrame(nextHandlePos);
1778 int32_t ret2 = dstAudioBuffer_->SetCurReadFrame(nextHandlePos);
1779 CHECK_AND_RETURN_RET_LOG(ret1 == SUCCESS && ret2 == SUCCESS, false,
1780 "SetCurWriteFrame or SetCurReadFrame failed, ret1:%{public}d ret2:%{public}d", ret1, ret2);
1781 // handle each process buffer's info
1782 int64_t curReadDoneTime = ClockTime::GetCurNano();
1783 {
1784 std::lock_guard<std::mutex> lock(listLock_);
1785 for (size_t i = 0; i < processBufferList_.size(); i++) {
1786 uint64_t eachCurReadPos = processBufferList_[i]->GetCurReadFrame();
1787 SpanInfo *tempSpan = processBufferList_[i]->GetSpanInfo(eachCurReadPos);
1788 CHECK_AND_RETURN_RET_LOG(tempSpan != nullptr, false,
1789 "GetSpanInfo failed, can not get process read span");
1790 SpanStatus targetStatus = SpanStatus::SPAN_READING;
1791 CHECK_AND_RETURN_RET_LOG(processBufferList_[i]->GetStreamStatus() != nullptr, false,
1792 "stream status is null");
1793 if (tempSpan->spanStatus.compare_exchange_strong(targetStatus, SpanStatus::SPAN_READ_DONE)) {
1794 tempSpan->readDoneTime = curReadDoneTime;
1795 BufferDesc bufferReadDone = { nullptr, 0, 0};
1796 processBufferList_[i]->GetReadbuffer(eachCurReadPos, bufferReadDone);
1797 if (bufferReadDone.buffer != nullptr && bufferReadDone.bufLength != 0) {
1798 memset_s(bufferReadDone.buffer, bufferReadDone.bufLength, 0, bufferReadDone.bufLength);
1799 }
1800 processBufferList_[i]->SetCurReadFrame(eachCurReadPos + dstSpanSizeInframe_); // use client span size
1801 } else if (processBufferList_[i]->GetStreamStatus() &&
1802 processBufferList_[i]->GetStreamStatus()->load() == StreamStatus::STREAM_RUNNING) {
1803 AUDIO_DEBUG_LOG("Current %{public}" PRIu64" span not ready:%{public}d", eachCurReadPos, targetStatus);
1804 }
1805 }
1806 }
1807 return true;
1808 }
1809
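// Query the current mmap position from the fast source (record) or fast sink (playback) over IPC and return
// it as a frame count plus a timestamp in nanoseconds.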
1810 bool AudioEndpointInner::GetDeviceHandleInfo(uint64_t &frames, int64_t &nanoTime)
1811 {
1812 Trace trace("AudioEndpoint::GetMmapHandlePosition");
1813 int64_t timeSec = 0;
1814 int64_t timeNanoSec = 0;
1815 int32_t ret = 0;
1816 if (deviceInfo_.deviceRole == INPUT_DEVICE) {
1817 CHECK_AND_RETURN_RET_LOG(fastSource_ != nullptr && fastSource_->IsInited(),
1818 false, "GetDeviceHandleInfo failed: source is not inited.");
1819 // GetMmapHandlePosition is called via IPC.
1820 ret = fastSource_->GetMmapHandlePosition(frames, timeSec, timeNanoSec);
1821 } else {
1822 CHECK_AND_RETURN_RET_LOG(fastSink_ != nullptr && fastSink_->IsInited(),
1823 false, "GetDeviceHandleInfo failed: sink is not inited.");
1824 // GetMmapHandlePosition is called via IPC.
1825 ret = fastSink_->GetMmapHandlePosition(frames, timeSec, timeNanoSec);
1826 }
1827 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, false, "Call adapter GetMmapHandlePosition failed: %{public}d", ret);
1828 trace.End();
1829 nanoTime = timeNanoSec + timeSec * AUDIO_NS_PER_SECOND;
1830 Trace infoTrace("AudioEndpoint::GetDeviceHandleInfo frames=>" + std::to_string(frames) + " " +
1831 std::to_string(nanoTime) + " at " + std::to_string(ClockTime::GetCurNano()));
1832 nanoTime += DELTA_TO_REAL_READ_START_TIME; // global delay in server
1833 return true;
1834 }
1835
1836 void AudioEndpointInner::AsyncGetPosTime()
1837 {
1838 AUDIO_INFO_LOG("AsyncGetPosTime thread start.");
1839 while (!stopUpdateThread_) {
1840 std::unique_lock<std::mutex> lock(updateThreadLock_);
1841 updateThreadCV_.wait_for(lock, std::chrono::milliseconds(UPDATE_THREAD_TIMEOUT));
1842 if (stopUpdateThread_) {
1843 break;
1844 }
1845 if (endpointStatus_ == IDEL && isStarted_ && ClockTime::GetCurNano() > delayStopTime_) {
1846 AUDIO_INFO_LOG("IDEL for too long, let's call hdi stop");
1847 DelayStopDevice();
1848 continue;
1849 }
1850 if (!isStarted_) {
1851 continue;
1852 }
1853 // get signaled, call get pos-time
1854 uint64_t curHdiHandlePos = posInFrame_;
1855 int64_t handleTime = timeInNano_;
1856 if (!GetDeviceHandleInfo(curHdiHandlePos, handleTime)) {
1857 AUDIO_WARNING_LOG("AsyncGetPosTime call GetDeviceHandleInfo failed.");
1858 continue;
1859 }
1860 // cache the latest hdi position and its timestamp
1861 if (posInFrame_ != curHdiHandlePos) {
1862 posInFrame_ = curHdiHandlePos;
1863 timeInNano_ = handleTime;
1864 }
1865 }
1866 }
1867
1868 std::string AudioEndpointInner::GetStatusStr(EndpointStatus status)
1869 {
1870 switch (status) {
1871 case INVALID:
1872 return "INVALID";
1873 case UNLINKED:
1874 return "UNLINKED";
1875 case IDEL:
1876 return "IDEL";
1877 case STARTING:
1878 return "STARTING";
1879 case RUNNING:
1880 return "RUNNING";
1881 case STOPPING:
1882 return "STOPPING";
1883 case STOPPED:
1884 return "STOPPED";
1885 default:
1886 break;
1887 }
1888 return "NO_SUCH_STATUS";
1889 }
1890
1891 bool AudioEndpointInner::KeepWorkloopRunning()
1892 {
1893 EndpointStatus targetStatus = INVALID;
1894 switch (endpointStatus_.load()) {
1895 case RUNNING:
1896 return true;
1897 case IDEL:
1898 if (ClockTime::GetCurNano() > delayStopTime_) {
1899 targetStatus = RUNNING;
1900 break;
1901 }
1902 if (isDeviceRunningInIdel_) {
1903 return true;
1904 }
1905 break;
1906 case UNLINKED:
1907 targetStatus = IDEL;
1908 break;
1909 case STARTING:
1910 targetStatus = RUNNING;
1911 break;
1912 case STOPPING:
1913 targetStatus = STOPPED;
1914 break;
1915 default:
1916 break;
1917 }
1918
1919 // When this returns false, EndpointWorkLoopFuc will loop again immediately, so wait here to avoid an infinite busy loop.
1920 std::unique_lock<std::mutex> lock(loopThreadLock_);
1921 AUDIO_PRERELEASE_LOGI("Status is %{public}s now, wait for %{public}s...", GetStatusStr(endpointStatus_).c_str(),
1922 GetStatusStr(targetStatus).c_str());
1923 threadStatus_ = WAITTING;
1924 workThreadCV_.wait_for(lock, std::chrono::milliseconds(SLEEP_TIME_IN_DEFAULT));
1925 AUDIO_DEBUG_LOG("Wait end. Cur is %{public}s now, target is %{public}s...", GetStatusStr(endpointStatus_).c_str(),
1926 GetStatusStr(targetStatus).c_str());
1927
1928 return false;
1929 }
1930
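// Copy one span of captured data into a client's process buffer: claim the current write span (SPAN_WRITTING),
// fill it (zeroed when muted, format-converted for VoIP endpoints, otherwise plain memcpy), refresh the handle
// info, advance the client's write frame and publish the span as SPAN_WRITE_DONE.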
1931 int32_t AudioEndpointInner::WriteToSpecialProcBuf(const std::shared_ptr<OHAudioBuffer> &procBuf,
1932 const BufferDesc &readBuf, const BufferDesc &convertedBuffer, bool muteFlag)
1933 {
1934 CHECK_AND_RETURN_RET_LOG(procBuf != nullptr, ERR_INVALID_HANDLE, "process buffer is null.");
1935 uint64_t curWritePos = procBuf->GetCurWriteFrame();
1936 Trace trace("AudioEndpoint::WriteProcessData-<" + std::to_string(curWritePos));
1937
1938 int32_t writeAbleSize = procBuf->GetAvailableDataFrames();
1939 if (writeAbleSize <= 0 || static_cast<uint32_t>(writeAbleSize) <= dstSpanSizeInframe_) {
1940 AUDIO_WARNING_LOG("client read too slow: curWritePos:%{public}" PRIu64" writeAbleSize:%{public}d",
1941 curWritePos, writeAbleSize);
1942 return ERR_OPERATION_FAILED;
1943 }
1944
1945 SpanInfo *curWriteSpan = procBuf->GetSpanInfo(curWritePos);
1946 CHECK_AND_RETURN_RET_LOG(curWriteSpan != nullptr, ERR_INVALID_HANDLE,
1947 "get write span info of procBuf fail.");
1948
1949 AUDIO_DEBUG_LOG("process buffer write start, curWritePos %{public}" PRIu64".", curWritePos);
1950 curWriteSpan->spanStatus.store(SpanStatus::SPAN_WRITTING);
1951 curWriteSpan->writeStartTime = ClockTime::GetCurNano();
1952
1953 BufferDesc writeBuf;
1954 int32_t ret = procBuf->GetWriteBuffer(curWritePos, writeBuf);
1955 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret, "get write buffer fail, ret %{public}d.", ret);
1956 if (muteFlag) {
1957 memset_s(static_cast<void *>(writeBuf.buffer), writeBuf.bufLength, 0, writeBuf.bufLength);
1958 } else {
1959 if (endpointType_ == TYPE_VOIP_MMAP) {
1960 ret = HandleCapturerDataParams(writeBuf, readBuf, convertedBuffer);
1961 } else {
1962 ret = memcpy_s(static_cast<void *>(writeBuf.buffer), writeBuf.bufLength,
1963 static_cast<void *>(readBuf.buffer), readBuf.bufLength);
1964 }
1965 }
1966
1967 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memcpy data to process buffer fail, "
1968 "curWritePos %{public}" PRIu64", ret %{public}d.", curWritePos, ret);
1969
1970 curWriteSpan->writeDoneTime = ClockTime::GetCurNano();
1971 procBuf->SetHandleInfo(curWritePos, curWriteSpan->writeDoneTime);
1972 ret = procBuf->SetCurWriteFrame(curWritePos + dstSpanSizeInframe_);
1973 if (ret != SUCCESS) {
1974 AUDIO_WARNING_LOG("set procBuf next write frame fail, ret %{public}d.", ret);
1975 curWriteSpan->spanStatus.store(SpanStatus::SPAN_READ_DONE);
1976 return ERR_OPERATION_FAILED;
1977 }
1978 curWriteSpan->spanStatus.store(SpanStatus::SPAN_WRITE_DONE);
1979 return SUCCESS;
1980 }
1981
1982 int32_t AudioEndpointInner::HandleCapturerDataParams(const BufferDesc &writeBuf, const BufferDesc &readBuf,
1983 const BufferDesc &convertedBuffer)
1984 {
1985 if (clientConfig_.streamInfo.format == SAMPLE_S16LE && clientConfig_.streamInfo.channels == STEREO) {
1986 return memcpy_s(static_cast<void *>(writeBuf.buffer), writeBuf.bufLength,
1987 static_cast<void *>(readBuf.buffer), readBuf.bufLength);
1988 }
1989 if (clientConfig_.streamInfo.format == SAMPLE_S16LE && clientConfig_.streamInfo.channels == MONO) {
1990 int32_t ret = FormatConverter::S16StereoToS16Mono(readBuf, convertedBuffer);
1991 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_WRITE_FAILED, "Convert channel from stereo to mono failed");
1992 ret = memcpy_s(static_cast<void *>(writeBuf.buffer), writeBuf.bufLength,
1993 static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength);
1994 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memcpy_s failed");
1995 ret = memset_s(static_cast<void *>(convertedBuffer.buffer), convertedBuffer.bufLength, 0,
1996 convertedBuffer.bufLength);
1997 CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "memset converted buffer to 0 failed");
1998 return EOK;
1999 }
2000 return ERR_NOT_SUPPORTED;
2001 }
2002
2003 void AudioEndpointInner::WriteToProcessBuffers(const BufferDesc &readBuf)
2004 {
2005 CheckRecordSignal(readBuf.buffer, readBuf.bufLength);
2006 std::lock_guard<std::mutex> lock(listLock_);
2007 for (size_t i = 0; i < processBufferList_.size(); i++) {
2008 CHECK_AND_CONTINUE_LOG(processBufferList_[i] != nullptr,
2009 "process buffer %{public}zu is null.", i);
2010 if (processBufferList_[i]->GetStreamStatus() &&
2011 processBufferList_[i]->GetStreamStatus()->load() != STREAM_RUNNING) {
2012 AUDIO_WARNING_LOG("process buffer %{public}zu not running, stream status %{public}d.",
2013 i, processBufferList_[i]->GetStreamStatus()->load());
2014 continue;
2015 }
2016
2017 int32_t ret = WriteToSpecialProcBuf(processBufferList_[i], readBuf, processList_[i]->GetConvertedBuffer(),
2018 processList_[i]->GetMuteFlag());
2019 CHECK_AND_CONTINUE_LOG(ret == SUCCESS,
2020 "endpoint write to process buffer %{public}zu fail, ret %{public}d.", i, ret);
2021 AUDIO_DEBUG_LOG("endpoint process buffer %{public}zu write success.", i);
2022 }
2023 }
2024
2025 int32_t AudioEndpointInner::ReadFromEndpoint(uint64_t curReadPos)
2026 {
2027 Trace trace("AudioEndpoint::ReadDstBuffer=<" + std::to_string(curReadPos));
2028 AUDIO_DEBUG_LOG("ReadFromEndpoint enter, dstAudioBuffer curReadPos %{public}" PRIu64".", curReadPos);
2029 CHECK_AND_RETURN_RET_LOG(dstAudioBuffer_ != nullptr, ERR_INVALID_HANDLE,
2030 "dst audio buffer is null.");
2031 SpanInfo *curReadSpan = dstAudioBuffer_->GetSpanInfo(curReadPos);
2032 CHECK_AND_RETURN_RET_LOG(curReadSpan != nullptr, ERR_INVALID_HANDLE,
2033 "get source read span info of source adapter fail.");
2034 curReadSpan->readStartTime = ClockTime::GetCurNano();
2035 curReadSpan->spanStatus.store(SpanStatus::SPAN_READING);
2036 BufferDesc readBuf;
2037 int32_t ret = dstAudioBuffer_->GetReadbuffer(curReadPos, readBuf);
2038 CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ret, "get read buffer fail, ret %{public}d.", ret);
2039 DumpFileUtil::WriteDumpFile(dumpHdi_, static_cast<void *>(readBuf.buffer), readBuf.bufLength);
2040 DfxOperation(readBuf, dstStreamInfo_.format, dstStreamInfo_.channels);
2041 if (AudioDump::GetInstance().GetVersionType() == BETA_VERSION) {
2042 Media::MediaMonitor::MediaMonitorManager::GetInstance().WriteAudioBuffer(dumpHdiName_,
2043 static_cast<void *>(readBuf.buffer), readBuf.bufLength);
2044 }
2045 WriteToProcessBuffers(readBuf);
2046 ret = memset_s(readBuf.buffer, readBuf.bufLength, 0, readBuf.bufLength);
2047 if (ret != EOK) {
2048 AUDIO_WARNING_LOG("reset buffer fail, ret %{public}d.", ret);
2049 }
2050 curReadSpan->readDoneTime = ClockTime::GetCurNano();
2051 curReadSpan->spanStatus.store(SpanStatus::SPAN_READ_DONE);
2052 return SUCCESS;
2053 }
2054
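// Record work loop: resync the position when required, read each HDI span and fan it out to the running client
// process buffers, then sleep until the wake-up time computed by RecordPrepareNextLoop.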
2055 void AudioEndpointInner::RecordEndpointWorkLoopFuc()
2056 {
2057 SetThreadQosLevel();
2058 int64_t curTime = 0;
2059 uint64_t curReadPos = 0;
2060 int64_t wakeUpTime = ClockTime::GetCurNano();
2061 AUDIO_INFO_LOG("Record endpoint work loop fuc start.");
2062 while (isInited_.load()) {
2063 if (!KeepWorkloopRunning()) {
2064 continue;
2065 }
2066 threadStatus_ = INRUNNING;
2067 if (needReSyncPosition_) {
2068 RecordReSyncPosition();
2069 wakeUpTime = ClockTime::GetCurNano();
2070 needReSyncPosition_ = false;
2071 continue;
2072 }
2073 curTime = ClockTime::GetCurNano();
2074 Trace loopTrace("Record_loop_trace");
2075 if (curTime - wakeUpTime > THREE_MILLISECOND_DURATION) {
2076 AUDIO_WARNING_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
2077 } else if (curTime - wakeUpTime > ONE_MILLISECOND_DURATION) {
2078 AUDIO_DEBUG_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
2079 }
2080
2081 curReadPos = dstAudioBuffer_->GetCurReadFrame();
2082 CHECK_AND_BREAK_LOG(ReadFromEndpoint(curReadPos) == SUCCESS, "read from endpoint to process service fail.");
2083
2084 bool ret = RecordPrepareNextLoop(curReadPos, wakeUpTime);
2085 CHECK_AND_BREAK_LOG(ret, "PrepareNextLoop failed!");
2086
2087 ProcessUpdateAppsUidForRecord();
2088
2089 loopTrace.End();
2090 threadStatus_ = SLEEPING;
2091 ClockTime::AbsoluteSleep(wakeUpTime);
2092 }
2093 ReSetThreadQosLevel();
2094 }
2095
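// Playback work loop: wait for the clients to finish writing the current span, mix it into the HDI mmap buffer,
// prepare the next span and wake-up time, then sleep until that time.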
2096 void AudioEndpointInner::EndpointWorkLoopFuc()
2097 {
2098 SetThreadQosLevel();
2099 int64_t curTime = 0;
2100 uint64_t curWritePos = 0;
2101 int64_t wakeUpTime = ClockTime::GetCurNano();
2102 AUDIO_INFO_LOG("Endpoint work loop fuc start");
2103 int32_t ret = 0;
2104 while (isInited_.load()) {
2105 if (!KeepWorkloopRunning()) {
2106 continue;
2107 }
2108 ret = 0;
2109 threadStatus_ = INRUNNING;
2110 curTime = ClockTime::GetCurNano();
2111 Trace loopTrace("AudioEndpoint::loop_trace");
2112 if (needReSyncPosition_) {
2113 ReSyncPosition();
2114 wakeUpTime = curTime;
2115 needReSyncPosition_ = false;
2116 continue;
2117 }
2118 if (curTime - wakeUpTime > THREE_MILLISECOND_DURATION) {
2119 AUDIO_WARNING_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
2120 } else if (curTime - wakeUpTime > ONE_MILLISECOND_DURATION) {
2121 AUDIO_DEBUG_LOG("Wake up cost %{public}" PRId64" ms!", (curTime - wakeUpTime) / AUDIO_US_PER_SECOND);
2122 }
2123
2124 // First, wake up at the time the clients may have finished writing and check whether all processes are done.
2125 // If not, sleep again until the latest possible write time.
2126 curWritePos = dstAudioBuffer_->GetCurWriteFrame();
2127 if (!CheckAllBufferReady(wakeUpTime, curWritePos)) {
2128 curTime = ClockTime::GetCurNano();
2129 }
2130
2131 // then do mix & write to hdi buffer and prepare next loop
2132 if (!ProcessToEndpointDataHandle(curWritePos)) {
2133 AUDIO_ERR_LOG("ProcessToEndpointDataHandle failed!");
2134 break;
2135 }
2136
2137 // prepare info of next loop
2138 if (!PrepareNextLoop(curWritePos, wakeUpTime)) {
2139 AUDIO_ERR_LOG("PrepareNextLoop failed!");
2140 break;
2141 }
2142
2143 ProcessUpdateAppsUidForPlayback();
2144
2145 loopTrace.End();
2146 // start sleep
2147 threadStatus_ = SLEEPING;
2148 ClockTime::AbsoluteSleep(wakeUpTime);
2149 }
2150 AUDIO_DEBUG_LOG("Endpoint work loop fuc end, ret %{public}d", ret);
2151 ReSetThreadQosLevel();
2152 }
2153
2154 void AudioEndpointInner::InitLatencyMeasurement()
2155 {
2156 if (!AudioLatencyMeasurement::CheckIfEnabled()) {
2157 return;
2158 }
2159 signalDetectAgent_ = std::make_shared<SignalDetectAgent>();
2160 CHECK_AND_RETURN_LOG(signalDetectAgent_ != nullptr, "LatencyMeas signalDetectAgent_ is nullptr");
2161 signalDetectAgent_->sampleFormat_ = SAMPLE_S16LE;
2162 signalDetectAgent_->formatByteSize_ = GetFormatByteSize(SAMPLE_S16LE);
2163 latencyMeasEnabled_ = true;
2164 signalDetected_ = false;
2165 }
2166
2167 void AudioEndpointInner::DeinitLatencyMeasurement()
2168 {
2169 signalDetectAgent_ = nullptr;
2170 latencyMeasEnabled_ = false;
2171 }
2172
2173 void AudioEndpointInner::CheckPlaySignal(uint8_t *buffer, size_t bufferSize)
2174 {
2175 if (!latencyMeasEnabled_) {
2176 return;
2177 }
2178 CHECK_AND_RETURN_LOG(signalDetectAgent_ != nullptr, "LatencyMeas signalDetectAgent_ is nullptr");
2179 size_t byteSize = static_cast<size_t>(GetFormatByteSize(dstStreamInfo_.format));
2180 size_t newlyCheckedTime = bufferSize / (dstStreamInfo_.samplingRate /
2181 MILLISECOND_PER_SECOND) / (byteSize * sizeof(uint8_t) * dstStreamInfo_.channels);
2182 detectedTime_ += newlyCheckedTime;
2183 if (detectedTime_ >= MILLISECOND_PER_SECOND && signalDetectAgent_->signalDetected_ &&
2184 !signalDetectAgent_->dspTimestampGot_) {
2185 AudioParamKey key = NONE;
2186 std::string condition = "debug_audio_latency_measurement";
2187 std::string dspTime = fastSink_->GetAudioParameter(key, condition);
2188 LatencyMonitor::GetInstance().UpdateDspTime(dspTime);
2189 LatencyMonitor::GetInstance().UpdateSinkOrSourceTime(true,
2190 signalDetectAgent_->lastPeakBufferTime_);
2191 AUDIO_INFO_LOG("LatencyMeas fastSink signal detected");
2192 LatencyMonitor::GetInstance().ShowTimestamp(true);
2193 signalDetectAgent_->dspTimestampGot_ = true;
2194 signalDetectAgent_->signalDetected_ = false;
2195 }
2196 signalDetected_ = signalDetectAgent_->CheckAudioData(buffer, bufferSize);
2197 if (signalDetected_) {
2198 AUDIO_INFO_LOG("LatencyMeas fastSink signal detected");
2199 detectedTime_ = 0;
2200 }
2201 }
2202
2203 void AudioEndpointInner::CheckRecordSignal(uint8_t *buffer, size_t bufferSize)
2204 {
2205 if (!latencyMeasEnabled_) {
2206 return;
2207 }
2208 CHECK_AND_RETURN_LOG(signalDetectAgent_ != nullptr, "LatencyMeas signalDetectAgent_ is nullptr");
2209 signalDetected_ = signalDetectAgent_->CheckAudioData(buffer, bufferSize);
2210 if (signalDetected_) {
2211 AudioParamKey key = NONE;
2212 std::string condition = "debug_audio_latency_measurement";
2213 std::string dspTime = fastSource_->GetAudioParameter(key, condition);
2214 LatencyMonitor::GetInstance().UpdateSinkOrSourceTime(false,
2215 signalDetectAgent_->lastPeakBufferTime_);
2216 LatencyMonitor::GetInstance().UpdateDspTime(dspTime);
2217 AUDIO_INFO_LOG("LatencyMeas fastSource signal detected");
2218 signalDetected_ = false;
2219 }
2220 }
2221
2222 void AudioEndpointInner::ProcessUpdateAppsUidForPlayback()
2223 {
2224 std::vector<int32_t> appsUid;
2225 {
2226 std::lock_guard<std::mutex> lock(listLock_);
2227
2228 appsUid.reserve(processList_.size());
2229 for (auto iProccessStream : processList_) {
2230 appsUid.push_back(iProccessStream->GetAppInfo().appUid);
2231 }
2232 }
2233 CHECK_AND_RETURN_LOG(fastSink_, "fastSink_ is nullptr");
2234 fastSink_->UpdateAppsUid(appsUid);
2235 }
2236
2237 void AudioEndpointInner::ProcessUpdateAppsUidForRecord()
2238 {
2239 std::vector<int32_t> appsUid;
2240 {
2241 std::lock_guard<std::mutex> lock(listLock_);
2242
2243 appsUid.reserve(processList_.size());
2244 for (auto iProccessStream : processList_) {
2245 appsUid.push_back(iProccessStream->GetAppInfo().appUid);
2246 }
2247 }
2248 CHECK_AND_RETURN_LOG(fastSource_, "fastSource_ is nullptr");
2249 fastSource_->UpdateAppsUid(appsUid);
2250 }
2251
2252 void AudioEndpointInner::WriterRenderStreamStandbySysEvent(uint32_t sessionId, int32_t standby)
2253 {
2254 std::shared_ptr<Media::MediaMonitor::EventBean> bean = std::make_shared<Media::MediaMonitor::EventBean>(
2255 Media::MediaMonitor::AUDIO, Media::MediaMonitor::STREAM_STANDBY,
2256 Media::MediaMonitor::BEHAVIOR_EVENT);
2257 bean->Add("STREAMID", static_cast<int32_t>(sessionId));
2258 bean->Add("STANDBY", standby);
2259 Media::MediaMonitor::MediaMonitorManager::GetInstance().WriteLogMsg(bean);
2260 }
2261
2262 uint32_t AudioEndpointInner::GetLinkedProcessCount()
2263 {
2264 std::lock_guard<std::mutex> lock(listLock_);
2265 return processList_.size();
2266 }
2267 } // namespace AudioStandard
2268 } // namespace OHOS
2269