/*
 *
 * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */


/*
 * Base kernel context APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_dma_fence.h>
#include <mali_kbase_ctx_sched.h>

/**
 * kbase_create_context() - Create a kernel base context.
 * @kbdev: Kbase device
 * @is_compat: Force creation of a 32-bit context
 *
 * Allocate and initialize a kernel base context.
 *
 * Return: new kbase context on success, NULL on failure
 */
struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat)
{
	struct kbase_context *kctx;
	int err;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* zero-initialized, as a lot of code assumes it's zeroed out on create */
	kctx = vzalloc(sizeof(*kctx));

	if (!kctx)
		goto out;

	/* creating a context is considered a disjoint event */
	kbase_disjoint_event(kbdev);

	kctx->kbdev = kbdev;
	kctx->as_nr = KBASEP_AS_NR_INVALID;
	atomic_set(&kctx->refcount, 0);
	if (is_compat)
		kbase_ctx_flag_set(kctx, KCTX_COMPAT);
#ifdef CONFIG_MALI_TRACE_TIMELINE
	kctx->timeline.owner_tgid = task_tgid_nr(current);
#endif
	atomic_set(&kctx->setup_complete, 0);
	atomic_set(&kctx->setup_in_progress, 0);
	spin_lock_init(&kctx->mm_update_lock);
	kctx->process_mm = NULL;
	atomic_set(&kctx->nonmapped_pages, 0);
	kctx->slots_pullable = 0;
	kctx->tgid = current->tgid;
	kctx->pid = current->pid;

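	/*
	 * Per-context memory pool; the device-wide pool is passed in as the
	 * next (fallback) pool.
	 */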
	err = kbase_mem_pool_init(&kctx->mem_pool,
				  kbdev->mem_pool_max_size_default,
				  kctx->kbdev, &kbdev->mem_pool);
	if (err)
		goto free_kctx;

	err = kbase_mem_evictable_init(kctx);
	if (err)
		goto free_pool;

	atomic_set(&kctx->used_pages, 0);

	err = kbase_jd_init(kctx);
	if (err)
		goto deinit_evictable;

	err = kbasep_js_kctx_init(kctx);
	if (err)
		goto free_jd; /* safe to call kbasep_js_kctx_term in this case */

	err = kbase_event_init(kctx);
	if (err)
		goto free_jd;

	atomic_set(&kctx->drain_pending, 0);

	mutex_init(&kctx->reg_lock);

	INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
	spin_lock_init(&kctx->waiting_soft_jobs_lock);
#ifdef CONFIG_KDS
	INIT_LIST_HEAD(&kctx->waiting_kds_resource);
#endif
	err = kbase_dma_fence_init(kctx);
	if (err)
		goto free_event;

	err = kbase_mmu_init(kctx);
	if (err)
		goto term_dma_fence;

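	/*
	 * Allocate the context's top-level page directory: grow the pool so a
	 * bottom-level page table can be allocated, then try to allocate the
	 * PGD under the MMU lock, retrying until it succeeds.
	 */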
	do {
		err = kbase_mem_pool_grow(&kctx->mem_pool,
					  MIDGARD_MMU_BOTTOMLEVEL);
		if (err)
			goto pgd_no_mem;

		mutex_lock(&kctx->mmu_lock);
		kctx->pgd = kbase_mmu_alloc_pgd(kctx);
		mutex_unlock(&kctx->mmu_lock);
	} while (!kctx->pgd);

	kctx->aliasing_sink_page = kbase_mem_alloc_page(kctx->kbdev);
	if (!kctx->aliasing_sink_page)
		goto no_sink_page;

	init_waitqueue_head(&kctx->event_queue);

	kctx->cookies = KBASE_COOKIE_MASK;

	/* Make sure page 0 is not used... */
	err = kbase_region_tracker_init(kctx);
	if (err)
		goto no_region_tracker;

	err = kbase_sticky_resource_init(kctx);
	if (err)
		goto no_sticky;

	err = kbase_jit_init(kctx);
	if (err)
		goto no_jit;
#ifdef CONFIG_GPU_TRACEPOINTS
	atomic_set(&kctx->jctx.work_id, 0);
#endif
#ifdef CONFIG_MALI_TRACE_TIMELINE
	atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
#endif

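	/* Assign a unique, monotonically increasing id to this context. */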
	kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;

	mutex_init(&kctx->vinstr_cli_lock);

	timer_setup(&kctx->soft_job_timeout,
		    kbasep_soft_job_timeout_worker,
		    0);

	return kctx;

no_jit:
	kbase_gpu_vm_lock(kctx);
	kbase_sticky_resource_term(kctx);
	kbase_gpu_vm_unlock(kctx);
no_sticky:
	kbase_region_tracker_term(kctx);
no_region_tracker:
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
no_sink_page:
	/* VM lock needed for the call to kbase_mmu_free_pgd */
	kbase_gpu_vm_lock(kctx);
	kbase_mmu_free_pgd(kctx);
	kbase_gpu_vm_unlock(kctx);
pgd_no_mem:
	kbase_mmu_term(kctx);
term_dma_fence:
	kbase_dma_fence_term(kctx);
free_event:
	kbase_event_cleanup(kctx);
free_jd:
	/* Safe to call this one even when it wasn't initialized (assuming kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);
	kbase_jd_exit(kctx);
deinit_evictable:
	kbase_mem_evictable_deinit(kctx);
free_pool:
	kbase_mem_pool_term(&kctx->mem_pool);
free_kctx:
	vfree(kctx);
out:
	return NULL;
}
KBASE_EXPORT_SYMBOL(kbase_create_context);
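/*
 * Minimal usage sketch (illustrative only, not part of the driver):
 *
 *	struct kbase_context *kctx = kbase_create_context(kbdev, false);
 *
 *	if (!kctx)
 *		return -ENOMEM;
 *	...
 *	kbase_destroy_context(kctx);
 */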

static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
{
	dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
	kbase_mem_phy_alloc_put(reg->cpu_alloc);
	kbase_mem_phy_alloc_put(reg->gpu_alloc);
	kfree(reg);
}

/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Releases all outstanding regions and frees all resources held by the
 * context.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process. A suspend
	 * won't happen here, because we're in a syscall from a userspace
	 * thread.
	 */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);

#ifdef CONFIG_DEBUG_FS
	/* Removing the rest of the debugfs entries here as we want to keep the
	 * atom debugfs interface alive until all atoms have completed. This
	 * is useful for debugging hung contexts. */
	debugfs_remove_recursive(kctx->kctx_dentry);
#endif

	kbase_event_cleanup(kctx);

	/*
	 * JIT must be terminated before the code below as it must be called
	 * without the region lock being held.
	 * The code above ensures no new JIT allocations can be made by
	 * the time we get to this point of context tear down.
	 */
	kbase_jit_term(kctx);

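	/*
	 * Hold the GPU VM lock while tearing down sticky resources, the page
	 * tables, pending regions and the region tracker.
	 */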
	kbase_gpu_vm_lock(kctx);

	kbase_sticky_resource_term(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);

		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);

		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call this one even when it wasn't initialized (assuming kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	kbase_pm_context_idle(kbdev);

	kbase_dma_fence_term(kctx);

	mutex_lock(&kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
	kbase_ctx_sched_remove_ctx(kctx);
	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
	mutex_unlock(&kbdev->mmu_hw_mutex);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_evictable_deinit(kctx);
	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	vfree(kctx);
}
KBASE_EXPORT_SYMBOL(kbase_destroy_context);

/**
 * kbase_context_set_create_flags - Set creation flags on a context
 * @kctx: Kbase context
 * @flags: Flags to set
 *
 * Return: 0 on success, -EINVAL if invalid flags were passed
 */
int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
{
	int err = 0;
	struct kbasep_js_kctx_info *js_kctx_info;
	unsigned long irq_flags;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	js_kctx_info = &kctx->jctx.sched_info;

	/* Validate flags */
	if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);

	/* Translate the flags */
	if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
		kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);

	/* Latch the initial attributes into the Job Scheduler */
	kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);

	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
out:
	return err;
}
KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags);

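/*
 * Illustrative call (a sketch, assuming the context was just created and the
 * flags come from the BASE_CONTEXT_CREATE_KERNEL_FLAGS set):
 *
 *	err = kbase_context_set_create_flags(kctx,
 *			BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED);
 *	if (err)
 *		... reject the context setup ...
 */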