// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Base kernel power management APIs
 */
25
26 #include <mali_kbase.h>
27 #include <gpu/mali_kbase_gpu_regmap.h>
28 #include <mali_kbase_vinstr.h>
29 #include <mali_kbase_kinstr_prfcnt.h>
30 #include <mali_kbase_hwcnt_context.h>
31
32 #include <mali_kbase_pm.h>
33 #include <backend/gpu/mali_kbase_pm_internal.h>
34
35 #ifdef CONFIG_MALI_ARBITER_SUPPORT
36 #include <arbiter/mali_kbase_arbiter_pm.h>
37 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
38
39 #include <backend/gpu/mali_kbase_clk_rate_trace_mgr.h>
40
/**
 * kbase_pm_powerup - Power up the GPU via the HW access backend.
 * @kbdev: Device pointer.
 * @flags: Backend-specific power-up flags, passed through unmodified.
 *
 * Thin wrapper: all work is delegated to kbase_hwaccess_pm_powerup().
 *
 * Return: the backend's result (0 on success per kernel convention —
 * confirm against the backend's contract).
 */
int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
{
	return kbase_hwaccess_pm_powerup(kbdev, flags);
}
45
/**
 * kbase_pm_halt - Halt GPU power management via the HW access backend.
 * @kbdev: Device pointer.
 *
 * Thin wrapper: all work is delegated to kbase_hwaccess_pm_halt().
 */
void kbase_pm_halt(struct kbase_device *kbdev)
{
	kbase_hwaccess_pm_halt(kbdev);
}
50
/**
 * kbase_pm_context_active - Take a PM active reference unconditionally.
 * @kbdev: Device pointer.
 *
 * Uses KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE, i.e. the caller asserts a
 * suspend cannot be in progress; on that assumption the call cannot be
 * refused, so the return value is deliberately discarded.
 */
void kbase_pm_context_active(struct kbase_device *kbdev)
{
	(void)kbase_pm_context_active_handle_suspend(kbdev,
		KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
}
56
/**
 * kbase_pm_context_active_handle_suspend - Take a PM active reference,
 * honouring any suspend that is in progress.
 * @kbdev:           Device pointer (must not be NULL).
 * @suspend_handler: Policy describing what to do if a suspend is in progress.
 *
 * On success the device's pm.active_count is incremented under the PM lock;
 * the first reference (count 0 -> 1) also powers the GPU up and notifies the
 * clock-rate-trace manager (and, with arbiter support, the arbiter).
 *
 * Return: 0 if the reference was taken; 1 if activation was refused because
 * a suspend is in progress (or, with arbiter support, because the arbiter
 * vetoed activation).
 */
int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev,
	enum kbase_pm_suspend_handler suspend_handler)
{
	int c;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	dev_dbg(kbdev->dev, "%s - reason = %d, pid = %d\n", __func__,
		suspend_handler, current->pid);
	kbase_pm_lock(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	/* The arbiter may refuse activation (non-zero return); treat that
	 * exactly like a refused suspend: drop the lock and report 1.
	 */
	if (kbase_arbiter_pm_ctx_active_handle_suspend(kbdev,
			suspend_handler)) {
		kbase_pm_unlock(kbdev);
		return 1;
	}
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

	if (kbase_pm_is_suspending(kbdev)) {
		switch (suspend_handler) {
		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
			/* May piggy-back on an existing non-zero count, but
			 * must not take the *first* reference while
			 * suspending — that falls through to the refusal.
			 */
			if (kbdev->pm.active_count != 0)
				break;
			fallthrough;
		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
			kbase_pm_unlock(kbdev);
			return 1;

		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
			fallthrough;
		default:
			/* Callers passing NOT_POSSIBLE guarantee no suspend
			 * can be in progress, so reaching here is a bug
			 * (debug builds assert; release builds fall through
			 * and take the reference anyway).
			 */
			KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
			break;
		}
	}
	c = ++kbdev->pm.active_count;
	KBASE_KTRACE_ADD(kbdev, PM_CONTEXT_ACTIVE, NULL, c);

	if (c == 1) {
		/* First context active: Power on the GPU and
		 * any cores requested by the policy
		 */
		kbase_hwaccess_pm_gpu_active(kbdev);
#ifdef CONFIG_MALI_ARBITER_SUPPORT
		kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_REF_EVENT);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
		kbase_clk_rate_trace_manager_gpu_active(kbdev);
	}

	kbase_pm_unlock(kbdev);
	dev_dbg(kbdev->dev, "%s %d\n", __func__, kbdev->pm.active_count);

	return 0;
}
111
112 KBASE_EXPORT_TEST_API(kbase_pm_context_active);
113
/**
 * kbase_pm_context_idle - Drop a PM active reference.
 * @kbdev: Device pointer (must not be NULL).
 *
 * Decrements pm.active_count under the PM lock. When the count reaches
 * zero the GPU is marked idle in the HW access backend and the
 * clock-rate-trace manager, and any waiters on
 * pm.zero_active_count_wait (e.g. a suspend in progress) are woken.
 */
void kbase_pm_context_idle(struct kbase_device *kbdev)
{
	int c;

	KBASE_DEBUG_ASSERT(kbdev != NULL);


	kbase_pm_lock(kbdev);

	c = --kbdev->pm.active_count;
	KBASE_KTRACE_ADD(kbdev, PM_CONTEXT_IDLE, NULL, c);

	/* Going negative means an idle without a matching active — a
	 * refcounting bug in the caller.
	 */
	KBASE_DEBUG_ASSERT(c >= 0);

	if (c == 0) {
		/* Last context has gone idle */
		kbase_hwaccess_pm_gpu_idle(kbdev);
		kbase_clk_rate_trace_manager_gpu_idle(kbdev);

		/* Wake up anyone waiting for this to become 0 (e.g. suspend).
		 * The waiters must synchronize with us by locking the pm.lock
		 * after waiting.
		 */
		wake_up(&kbdev->pm.zero_active_count_wait);
	}

	kbase_pm_unlock(kbdev);
	dev_dbg(kbdev->dev, "%s %d (pid = %d)\n", __func__,
		kbdev->pm.active_count, current->pid);
}
144
145 KBASE_EXPORT_TEST_API(kbase_pm_context_idle);
146
/**
 * kbase_pm_driver_suspend - Suspend the GPU driver.
 * @kbdev: Device pointer (must not be NULL).
 *
 * Sequence: quiesce HW-counter clients, mark the device as suspending,
 * stop job submission (scheduler- and config-dependent), wait for the PM
 * active count to drain to zero, then suspend the HW access backend.
 * On failure of the scheduler or backend suspend, pm.suspending is
 * rolled back so the device returns to its pre-call state.
 *
 * Return: 0 on success (including the benign already-suspending case),
 * -1 if the scheduler or backend refused to suspend.
 */
int kbase_pm_driver_suspend(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	/* Suspend HW counter intermediaries. This blocks until workers and timers
	 * are no longer running.
	 */
	kbase_vinstr_suspend(kbdev->vinstr_ctx);
	kbase_kinstr_prfcnt_suspend(kbdev->kinstr_prfcnt_ctx);

	/* Disable GPU hardware counters.
	 * This call will block until counters are disabled.
	 */
	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);

	/* Re-entrancy guard: a second suspend while one is in flight is a
	 * caller bug (hence WARN_ON) but is treated as success.
	 */
	mutex_lock(&kbdev->pm.lock);
	if (WARN_ON(kbase_pm_is_suspending(kbdev))) {
		mutex_unlock(&kbdev->pm.lock);
		return 0;
	}
	kbdev->pm.suspending = true;
	mutex_unlock(&kbdev->pm.lock);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if) {
		int i;
		unsigned long flags;

		/* Block new submissions and soft-stop every job slot so the
		 * GPU can be handed back to the arbiter.
		 */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbdev->js_data.runpool_irq.submit_allowed = 0;
		kbase_disjoint_state_up(kbdev);
		for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
			kbase_job_slot_softstop(kbdev, i, NULL);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

	/* From now on, the active count will drop towards zero. Sometimes,
	 * it'll go up briefly before going down again. However, once
	 * it reaches zero it will stay there - guaranteeing that we've idled
	 * all pm references
	 */

#if !MALI_USE_CSF
	/* Suspend job scheduler and associated components, so that it releases all
	 * the PM active count references
	 */
	kbasep_js_suspend(kbdev);
#else
	/* CSF scheduler suspend can fail; undo the suspending flag so the
	 * device state stays consistent for the caller.
	 */
	if (kbase_csf_scheduler_pm_suspend(kbdev)) {
		mutex_lock(&kbdev->pm.lock);
		kbdev->pm.suspending = false;
		mutex_unlock(&kbdev->pm.lock);
		return -1;
	}
#endif

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero.
	 */
	dev_dbg(kbdev->dev, ">wait_event - waiting for active_count == 0 (pid = %d)\n",
		current->pid);
	wait_event(kbdev->pm.zero_active_count_wait,
		kbdev->pm.active_count == 0);
	dev_dbg(kbdev->dev, ">wait_event - waiting done\n");

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below
	 */
	if (kbase_hwaccess_pm_suspend(kbdev)) {
		/* Backend refused; roll back the suspending flag. */
		mutex_lock(&kbdev->pm.lock);
		kbdev->pm.suspending = false;
		mutex_unlock(&kbdev->pm.lock);
		return -1;
	}

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if) {
		/* Tell the arbiter state machine this VM has stopped using
		 * the GPU.
		 */
		mutex_lock(&kbdev->pm.arb_vm_state->vm_state_lock);
		kbase_arbiter_pm_vm_stopped(kbdev);
		mutex_unlock(&kbdev->pm.arb_vm_state->vm_state_lock);
	}
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

	return 0;
}
234
/**
 * kbase_pm_driver_resume - Resume the GPU driver after a suspend.
 * @kbdev:         Device pointer.
 * @arb_gpu_start: With arbiter support, true when the resume is driven by
 *                 the arbiter granting the GPU to this VM (selects the
 *                 VM_GPU_GRANTED suspend handler); ignored otherwise.
 *
 * Mirrors kbase_pm_driver_suspend(): resume the backend, take a temporary
 * PM active reference (powering the GPU on if needed), restart the job
 * scheduler (JM builds), drop the reference, then re-enable HW counters
 * and their intermediaries.
 */
void kbase_pm_driver_resume(struct kbase_device *kbdev, bool arb_gpu_start)
{
	unsigned long flags;

	/* MUST happen before any pm_context_active calls occur */
	kbase_hwaccess_pm_resume(kbdev);

	/* Initial active call, to power on the GPU/cores if needed */
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	/* A non-zero return means activation was refused (e.g. the arbiter
	 * vetoed it); abandon the rest of the resume in that case.
	 */
	if (kbase_pm_context_active_handle_suspend(kbdev,
			(arb_gpu_start ?
				KBASE_PM_SUSPEND_HANDLER_VM_GPU_GRANTED :
				KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE)))
		return;
#else
	kbase_pm_context_active(kbdev);
#endif

#if !MALI_USE_CSF
	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run)
	 */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms
	 */
	kbasep_js_resume(kbdev);
#endif

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on
	 */
	kbase_pm_context_idle(kbdev);

	/* Re-enable GPU hardware counters */
#if MALI_USE_CSF
	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
	kbase_csf_scheduler_spin_unlock(kbdev, flags);
#else
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#endif

	/* Resume HW counters intermediaries. */
	kbase_vinstr_resume(kbdev->vinstr_ctx);
	kbase_kinstr_prfcnt_resume(kbdev->kinstr_prfcnt_ctx);
}
285
/**
 * kbase_pm_suspend - OS suspend entry point for the driver.
 * @kbdev: Device pointer.
 *
 * With arbiter support and an arbiter interface present, the suspend is
 * routed through the arbiter as an OS-suspend event (always reported as
 * success); otherwise the driver suspend path runs directly.
 *
 * Return: 0 on success, or the error from kbase_pm_driver_suspend().
 */
int kbase_pm_suspend(struct kbase_device *kbdev)
{
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if) {
		kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_OS_SUSPEND_EVENT);
		return 0;
	}
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

	return kbase_pm_driver_suspend(kbdev);
}
300
kbase_pm_resume(struct kbase_device *kbdev)301 void kbase_pm_resume(struct kbase_device *kbdev)
302 {
303 #ifdef CONFIG_MALI_ARBITER_SUPPORT
304 if (kbdev->arb.arb_if)
305 kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_OS_RESUME_EVENT);
306 else
307 kbase_pm_driver_resume(kbdev, false);
308 #else
309 kbase_pm_driver_resume(kbdev, false);
310 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
311 }
312