1/*
2 * Copyright (c) 2023 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include "sched/interval.h"
17#include "util/ffrt_facade.h"
18#include "ffrt_trace.h"
19
20namespace ffrt {
21void Deadline::Update(uint64_t deadlineUs)
22{
23    if (deadlineUs != ToUs()) {
24        deadlineNs = deadlineUs < 1 ? 1 : deadlineUs * 1000;
25    }
26
27    absDeadlineNs = deadlineNs + AbsNowNs();
28
29    FFRT_LOGI("Deadline %lu Update %lu Abs %lu", deadlineUs, deadlineNs, absDeadlineNs);
30}
31
32uint64_t Deadline::AbsNowNs()
33{
34    return std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now().time_since_epoch())
35        .count();
36}
37
// Bind a CPU thread group for the given QoS level.
// A qos of qos_inherit carries no concrete level, so there is nothing to
// bind; tg is left unset and a warning is logged.
PerfCtrl::PerfCtrl(const QoS& qos) : qos(qos)
{
    if (qos == qos_inherit) {
        FFRT_LOGW("Invalid Thread Group");
        return;
    }

    tg = FFRTFacade::GetEUInstance().BindTG(DevType::CPU, this->qos);
}
47
48PerfCtrl::~PerfCtrl()
49{
50    if (tg) {
51        tg = nullptr;
52        FFRTFacade::GetEUInstance().UnbindTG(DevType::CPU, qos);
53    }
54}
55
56void PerfCtrl::Update(bool force)
57{
58    if (!force && predUtil == curUtil) {
59        FFRT_LOGW("Predict Util Same as Current Util %lu", predUtil);
60        return;
61    }
62
63    curUtil = predUtil;
64
65    if (tg) {
66        tg->UpdateUitl(curUtil);
67    }
68}
69
70void PerfCtrl::Update(uint64_t deadlineNs, uint64_t load, bool force)
71{
72    if (deadlineNs == 0) {
73        deadlineNs = 1;
74    }
75    predUtil = (load << SCHED_CAPACITY_SHIFT) / deadlineNs;
76    if (predUtil > SCHED_MAX_CAPACITY) {
77        FFRT_LOGW("Predict Util %lu Exceeds Max Capacity", predUtil);
78        predUtil = SCHED_MAX_CAPACITY;
79    }
80
81    FFRT_LOGI("Update Load %lu, Deadline %lu, Util %lu\n", load, deadlineNs, predUtil);
82
83    Update(force);
84}
85
// Record the measured load of a completed interval window into the
// whole-window load predictor.
void IntervalLoadPredictor::UpdateTotalLoad(uint64_t load)
{
    totalLoad.UpdateLoad(load);
}
90
91void IntervalLoadPredictor::UpdateCPLoad(uint64_t load)
92{
93    if (cpLoadIndex + 1 > cpLoad.size()) {
94        cpLoad.resize(cpLoadIndex + 1);
95    }
96
97    cpLoad[cpLoadIndex++].UpdateLoad(load);
98}
99
// Predicted load for the next full interval window.
uint64_t IntervalLoadPredictor::GetTotalLoad()
{
    return totalLoad.GetPredictLoad();
}
104
105uint64_t IntervalLoadPredictor::GetCPLoad()
106{
107    uint64_t load = cpLoad[cpLoadIndex].GetPredictLoad();
108    if (load == 0) {
109        return 0UL;
110    }
111
112    uint64_t predictLoad = totalLoad.GetPredictLoad();
113    return (predictLoad < load) ? 0 : (predictLoad - load);
114}
115
// Construct an interval with its deadline and QoS; the perf controller's
// window is initialized to the full deadline length.
DefaultInterval::DefaultInterval(uint64_t deadlineUs, const QoS& qos) : Interval(deadlineUs, qos), lt(*this), ctrl(qos)
{
    ctrl.SetWindowSize(Ddl().ToNs());
}
120
DefaultInterval::~DefaultInterval()
{
    std::unique_lock lock(mutex);
    // Force the perf request down to zero (1 ns window, 0 load) so the
    // controller stops demanding utilization on behalf of this interval.
    ctrl.Update(1, 0, true);
}
126
// Start a new interval cycle.
// Returns 0 on success, -1 if the interval is already running or the
// per-QoS controller is occupied by another interval.
int DefaultInterval::Begin()
{
    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalBegin);
    std::unique_lock lock(mutex);

    // Re-entrant Begin is rejected: End() must run first.
    if (Enabled()) {
        FFRT_LOGE("interval already begin\n");
        return -1;
    }

    if (ctrl.isBusy()) {
        FFRT_LOGE("qos interval is busy, please retry later\n");
        return -1;
    }

    enabled = true;

    // Start load tracking for the new window.
    lt.Begin();

    // Seed the perf request with the predicted total load over the full
    // deadline window, then rewind the checkpoint index for this cycle.
    ctrl.Update(Ddl().ToNs(), lp.GetTotalLoad(), true);
    lp.ResetCPIndex();

    return 0;
}
151
152void DefaultInterval::Update(uint64_t deadlineUs)
153{
154    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalUpdate);
155    std::unique_lock lock(mutex);
156
157    if (!Enabled()) {
158        return;
159    }
160
161    Ddl().Update(deadlineUs);
162    ctrl.SetWindowSize(Ddl().ToNs());
163}
164
// Finish the current interval cycle: feed the measured load back into the
// total-load predictor, then stop the load tracker. A no-op without a
// matching Begin().
void DefaultInterval::End()
{
    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalEnd);
    std::unique_lock lock(mutex);

    if (!Enabled()) {
        return;
    }

    enabled = false;

    // Record the load before ending the tracker so the measurement covers
    // the whole window.
    lp.UpdateTotalLoad(lt.GetLoad());

    lt.End();
}
180
// Mid-window checkpoint: re-target the perf request to the time remaining
// and the predicted remaining load, then record the load observed so far
// for the next cycle's per-checkpoint prediction.
void DefaultInterval::CheckPoint()
{
    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalCheckPoint);
    std::unique_lock lock(mutex);

    if (!Enabled()) {
        return;
    }

    // NOTE(review): GetCPLoad() runs before UpdateCPLoad(), so on the very
    // first cycle the predictor has no checkpoint history yet — verify that
    // GetCPLoad() tolerates an empty history.
    ctrl.Update(Ddl().LeftNs(), lp.GetCPLoad());
    lp.UpdateCPLoad(lt.GetLoad());
}
193
194void DefaultInterval::Join()
195{
196    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalJoin);
197    std::unique_lock lock(mutex);
198    if (!ctrl.Join()) {
199        FFRT_LOGE("Failed to Join Thread %d", ThreadGroup::GetTID());
200    }
201}
202
203void DefaultInterval::Leave()
204{
205    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalLeave);
206    std::unique_lock lock(mutex);
207    if (!ctrl.Leave()) {
208        FFRT_LOGE("Failed to Leave Thread %d", ThreadGroup::GetTID());
209    }
210}
211
212void DefaultInterval::UpdateTaskSwitch(TaskSwitchState state)
213{
214    FFRT_TRACE_SCOPE(TRACE_LEVEL1, IntervalUpdateTaskSwitch);
215    std::unique_lock lock(mutex);
216
217    switch (state) {
218        case TaskSwitchState::BEGIN:
219            ctrl.Update(true);
220            break;
221        case TaskSwitchState::UPDATE:
222            ctrl.Update();
223            break;
224        case TaskSwitchState::END:
225            ctrl.Clear();
226            ctrl.Update(true);
227            break;
228        default:
229            break;
230    }
231
232    lt.Record(state);
233}
234} // namespace ffrt
235