/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */


/**
 * @file mali_kbase_pm.c
 * Base kernel power management APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_vinstr.h>

#include <mali_kbase_pm.h>

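/* Power up the GPU. A thin wrapper: the work is done by the hardware-access
 * backend, whose return value is propagated to the caller (presumably 0 on
 * success, following the usual kernel convention). */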
int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
{
	return kbase_hwaccess_pm_powerup(kbdev, flags);
}

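/* Halt the power management framework; delegates directly to the
 * hardware-access backend. */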
void kbase_pm_halt(struct kbase_device *kbdev)
{
	kbase_hwaccess_pm_halt(kbdev);
}

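/* Take a PM active reference unconditionally. The NOT_POSSIBLE suspend
 * handler triggers a debug assertion if a suspend is in progress, so the
 * return value can safely be ignored here. */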
void kbase_pm_context_active(struct kbase_device *kbdev)
{
	(void)kbase_pm_context_active_handle_suspend(kbdev,
			KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
}

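/* Take a PM active reference, with a caller-selected policy for the case
 * where the device is suspending. Returns 0 if the reference was taken,
 * or 1 if it was refused because a suspend is in progress.
 *
 * A hypothetical caller sketch (the retry policy is illustrative, not
 * taken from this driver):
 *
 *	if (kbase_pm_context_active_handle_suspend(kbdev,
 *			KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE))
 *		return -EAGAIN; // suspending: back off and retry later
 *	// ... submit work that requires the GPU to be powered ...
 *	kbase_pm_context_idle(kbdev);
 */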
int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev,
		enum kbase_pm_suspend_handler suspend_handler)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the
	 * decision to power up. Sometimes the event might be missed because
	 * the count is read outside of the mutex, but this is necessary to
	 * get the trace timing correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);
	if (kbase_pm_is_suspending(kbdev)) {
		switch (suspend_handler) {
		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
			/* While suspending, only allow the count to rise if
			 * the GPU is still active; otherwise behave as
			 * DONT_INCREASE and refuse the reference. */
			if (kbdev->pm.active_count != 0)
				break;
			/* FALLTHROUGH */
		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
			mutex_unlock(&kbdev->pm.lock);
			mutex_unlock(&js_devdata->runpool_mutex);
			if (old_count == 0)
				kbase_timeline_pm_handle_event(kbdev,
						KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
			return 1;

		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
			/* FALLTHROUGH */
		default:
			KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
			break;
		}
	}
	c = ++kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	if (c == 1)
		/* First context active: Power on the GPU and any cores
		 * requested by the policy */
		kbase_hwaccess_pm_gpu_active(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	return 0;
}

KBASE_EXPORT_TEST_API(kbase_pm_context_active);

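/* Drop a PM active reference previously taken with
 * kbase_pm_context_active(). When the count reaches zero the GPU is allowed
 * to go idle and any suspend waiters are woken. */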
void kbase_pm_context_idle(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the
	 * decision to power down. Sometimes the event might be missed because
	 * the count is read outside of the mutex, but this is necessary to
	 * get the trace timing correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	c = --kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);

	KBASE_DEBUG_ASSERT(c >= 0);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	if (c == 0) {
		/* Last context has gone idle */
		kbase_hwaccess_pm_gpu_idle(kbdev);

		/* Wake up anyone waiting for this to become 0 (e.g. suspend).
		 * The waiters must synchronize with us by locking the pm.lock
		 * after waiting */
		wake_up(&kbdev->pm.zero_active_count_wait);
	}

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}

KBASE_EXPORT_TEST_API(kbase_pm_context_idle);

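/* System-suspend entry point: quiesce vinstr and the job scheduler, wait
 * for the PM active count to drain to zero, then let the hardware-access
 * backend complete the suspend. */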
void kbase_pm_suspend(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	/* Suspend vinstr.
	 * This call will block until vinstr is suspended. */
	kbase_vinstr_suspend(kbdev->vinstr_ctx);

	mutex_lock(&kbdev->pm.lock);
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
	kbdev->pm.suspending = true;
	mutex_unlock(&kbdev->pm.lock);

	/* From now on, the active count will drop towards zero. Sometimes,
	 * it'll go up briefly before going down again. However, once it
	 * reaches zero it will stay there - guaranteeing that we've idled
	 * all pm references */

	/* Suspend the job scheduler and associated components, so that they
	 * release all their PM active count references */
	kbasep_js_suspend(kbdev);

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when
	 * this reaches zero. */
	wait_event(kbdev->pm.zero_active_count_wait,
			kbdev->pm.active_count == 0);

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below */

	kbase_hwaccess_pm_suspend(kbdev);
}

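/* System-resume entry point: the inverse of kbase_pm_suspend(). The
 * active/idle pair below keeps the GPU powered only for as long as the
 * resume sequence itself needs it. */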
void kbase_pm_resume(struct kbase_device *kbdev)
{
	/* MUST happen before any pm_context_active calls occur */
	kbase_hwaccess_pm_resume(kbdev);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled
	 * in and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start
	 * running atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't
	 * actually need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);

	/* Resume vinstr operation */
	kbase_vinstr_resume(kbdev->vinstr_ctx);
}