1 /*
2 *
3 * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
13 *
14 */
15
16
17
18
19
20 #include <mali_kbase.h>
21
22 #if defined(CONFIG_DMA_SHARED_BUFFER)
23 #include <linux/dma-buf.h>
24 #include <asm/cacheflush.h>
25 #endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
26 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
27 #include <mali_kbase_sync.h>
28 #endif
29 #include <linux/dma-mapping.h>
30 #include <mali_base_kernel.h>
31 #include <mali_kbase_hwaccess_time.h>
32 #include <mali_kbase_mem_linux.h>
33 #include <mali_kbase_tlstream.h>
34 #include <linux/version.h>
35 #include <linux/ktime.h>
36 #include <linux/pfn.h>
37 #include <linux/sched.h>
38
39 /* Mask to check cache alignment of data structures */
40 #define KBASE_CACHE_ALIGNMENT_MASK ((1<<L1_CACHE_SHIFT)-1)
41
42 /**
43 * @file mali_kbase_softjobs.c
44 *
45 * This file implements the logic behind software-only jobs that are
46 * executed within the driver rather than being handed over to the GPU.
47 */
48
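/**
 * kbasep_add_waiting_soft_job() - Add a soft job to the context's list of
 *                                 waiting soft jobs
 * @katom: Atom to add
 *
 * The atom is appended to kctx->waiting_soft_jobs under
 * kctx->waiting_soft_jobs_lock.
 */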
49 static void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
50 {
51 struct kbase_context *kctx = katom->kctx;
52 unsigned long lflags;
53
54 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
55 list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
56 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
57 }
58
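/**
 * kbasep_remove_waiting_soft_job() - Remove a soft job from the context's
 *                                    list of waiting soft jobs
 * @katom: Atom to remove
 */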
59 void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
60 {
61 struct kbase_context *kctx = katom->kctx;
62 unsigned long lflags;
63
64 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
65 list_del(&katom->queue);
66 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
67 }
68
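/**
 * kbasep_add_waiting_with_timeout() - Add a soft job to the waiting list and
 *                                     arm the soft job timeout timer
 * @katom: Atom to add
 *
 * Records the atom's start timestamp, adds it to the waiting list and starts
 * kctx->soft_job_timeout if the timer is not already pending.
 */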
69 static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
70 {
71 struct kbase_context *kctx = katom->kctx;
72
73 /* Record the start time of this atom so we can cancel it at
74 * the right time.
75 */
76 katom->start_timestamp = ktime_get();
77
78 /* Add the atom to the waiting list before the timer is
79 * (re)started to make sure that it gets processed.
80 */
81 kbasep_add_waiting_soft_job(katom);
82
83 /* Schedule a timeout for this atom if the timer is not already running */
84 if (!timer_pending(&kctx->soft_job_timeout)) {
85 int timeout_ms = atomic_read(
86 &kctx->kbdev->js_data.soft_job_timeout_ms);
87 mod_timer(&kctx->soft_job_timeout,
88 jiffies + msecs_to_jiffies(timeout_ms));
89 }
90 }
91
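/**
 * kbasep_read_soft_event_status() - Read the status byte of a soft event
 * @kctx:   Context owning the event
 * @evt:    GPU VA of the soft event status byte
 * @status: Output location for the status that was read
 *
 * Return: 0 on success, -EFAULT if the event address could not be mapped.
 */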
92 static int kbasep_read_soft_event_status(
93 struct kbase_context *kctx, u64 evt, unsigned char *status)
94 {
95 unsigned char *mapped_evt;
96 struct kbase_vmap_struct map;
97
98 mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
99 if (!mapped_evt)
100 return -EFAULT;
101
102 *status = *mapped_evt;
103
104 kbase_vunmap(kctx, &map);
105
106 return 0;
107 }
108
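/**
 * kbasep_write_soft_event_status() - Write the status byte of a soft event
 * @kctx:       Context owning the event
 * @evt:        GPU VA of the soft event status byte
 * @new_status: Either BASE_JD_SOFT_EVENT_SET or BASE_JD_SOFT_EVENT_RESET
 *
 * Return: 0 on success, -EINVAL for an invalid status value or -EFAULT if
 * the event address could not be mapped.
 */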
109 static int kbasep_write_soft_event_status(
110 struct kbase_context *kctx, u64 evt, unsigned char new_status)
111 {
112 unsigned char *mapped_evt;
113 struct kbase_vmap_struct map;
114
115 if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
116 (new_status != BASE_JD_SOFT_EVENT_RESET))
117 return -EINVAL;
118
119 mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
120 if (!mapped_evt)
121 return -EFAULT;
122
123 *mapped_evt = new_status;
124
125 kbase_vunmap(kctx, &map);
126
127 return 0;
128 }
129
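/**
 * kbase_dump_cpu_gpu_time() - Process a CPU/GPU timestamp dump soft job
 * @katom: Atom describing the dump request; katom->jc holds the GPU VA of a
 *         struct base_dump_cpu_gpu_counters to fill in
 *
 * If the device is suspending, the atom is queued on the suspended soft jobs
 * list and retried on resume.
 *
 * Return: 0 if the atom has completed, non-zero if it was queued.
 */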
130 static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
131 {
132 struct kbase_vmap_struct map;
133 void *user_result;
134 struct timespec64 ts;
135 struct base_dump_cpu_gpu_counters data;
136 u64 system_time;
137 u64 cycle_counter;
138 u64 jc = katom->jc;
139 struct kbase_context *kctx = katom->kctx;
140 int pm_active_err;
141
142 memset(&data, 0, sizeof(data));
143
144 /* Take the PM active reference as late as possible - otherwise, it could
145 * delay suspend until we process the atom (which may be at the end of a
146 * long chain of dependencies). */
147 pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
148 if (pm_active_err) {
149 struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
150
151 /* We're suspended - queue this on the list of suspended jobs
152 * Use dep_item[1], because dep_item[0] was previously in use
153 * for 'waiting_soft_jobs'.
154 */
155 mutex_lock(&js_devdata->runpool_mutex);
156 list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
157 mutex_unlock(&js_devdata->runpool_mutex);
158
159 /* Also add this atom to the list of waiting soft jobs */
160 kbasep_add_waiting_soft_job(katom);
161
162 return pm_active_err;
163 }
164
165 kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
166 &ts);
167
168 kbase_pm_context_idle(kctx->kbdev);
169
170 data.sec = ts.tv_sec;
171 data.usec = ts.tv_nsec / 1000;
172 data.system_time = system_time;
173 data.cycle_counter = cycle_counter;
174
175 /* Assume this atom will be cancelled until we know otherwise */
176 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
177
178 /* GPU_WR access is checked on the range for returning the result to
179 * userspace for the following reasons:
180 * - security: this is currently how imported user buffers are checked.
181 * - the userspace DDK is guaranteed to assume the region was mapped as GPU_WR. */
182 user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR, &map);
183 if (!user_result)
184 return 0;
185
186 memcpy(user_result, &data, sizeof(data));
187
188 kbase_vunmap(kctx, &map);
189
190 /* Atom was fine - mark it as done */
191 katom->event_code = BASE_JD_EVENT_DONE;
192
193 return 0;
194 }
195
196 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
197 /* Called by the explicit fence mechanism when a fence wait has completed */
198 void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
199 {
200 struct kbase_context *kctx = katom->kctx;
201
202 mutex_lock(&kctx->jctx.lock);
203 kbasep_remove_waiting_soft_job(katom);
204 kbase_finish_soft_job(katom);
205 if (jd_done_nolock(katom, NULL))
206 kbase_js_sched_all(kctx->kbdev);
207 mutex_unlock(&kctx->jctx.lock);
208 }
209 #endif
210
211 static void kbasep_soft_event_complete_job(struct work_struct *work)
212 {
213 struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
214 work);
215 struct kbase_context *kctx = katom->kctx;
216 int resched;
217
218 mutex_lock(&kctx->jctx.lock);
219 resched = jd_done_nolock(katom, NULL);
220 mutex_unlock(&kctx->jctx.lock);
221
222 if (resched)
223 kbase_js_sched_all(kctx->kbdev);
224 }
225
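/**
 * kbasep_complete_triggered_soft_events() - Complete soft event wait atoms
 *                                           triggered by a set event
 * @kctx: Context owning the waiting atoms
 * @evt:  GPU VA of the soft event that was set
 *
 * Every BASE_JD_REQ_SOFT_EVENT_WAIT atom waiting on @evt is completed on the
 * context's job_done_wq. The soft job timeout timer is cancelled once no
 * waiting jobs remain that need it.
 */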
226 void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
227 {
228 int cancel_timer = 1;
229 struct list_head *entry, *tmp;
230 unsigned long lflags;
231
232 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
233 list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
234 struct kbase_jd_atom *katom = list_entry(
235 entry, struct kbase_jd_atom, queue);
236
237 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
238 case BASE_JD_REQ_SOFT_EVENT_WAIT:
239 if (katom->jc == evt) {
240 list_del(&katom->queue);
241
242 katom->event_code = BASE_JD_EVENT_DONE;
243 INIT_WORK(&katom->work,
244 kbasep_soft_event_complete_job);
245 queue_work(kctx->jctx.job_done_wq,
246 &katom->work);
247 } else {
248 /* There are still other waiting jobs, we cannot
249 * cancel the timer yet.
250 */
251 cancel_timer = 0;
252 }
253 break;
254 #ifdef CONFIG_MALI_FENCE_DEBUG
255 case BASE_JD_REQ_SOFT_FENCE_WAIT:
256 /* Keep the timer running if fence debug is enabled and
257 * there are waiting fence jobs.
258 */
259 cancel_timer = 0;
260 break;
261 #endif
262 }
263 }
264
265 if (cancel_timer)
266 del_timer(&kctx->soft_job_timeout);
267 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
268 }
269
270 #ifdef CONFIG_MALI_FENCE_DEBUG
271 static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
272 {
273 struct kbase_context *kctx = katom->kctx;
274 struct device *dev = kctx->kbdev->dev;
275 int i;
276
277 for (i = 0; i < 2; i++) {
278 struct kbase_jd_atom *dep;
279
280 list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
281 if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
282 dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
283 continue;
284
285 if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
286 == BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
287 /* Found blocked trigger fence. */
288 struct kbase_sync_fence_info info;
289
290 if (!kbase_sync_fence_in_info_get(dep, &info)) {
291 dev_warn(dev,
292 "\tVictim trigger atom %d fence [%p] %s: %s\n",
293 kbase_jd_atom_id(kctx, dep),
294 info.fence,
295 info.name,
296 kbase_sync_status_string(info.status));
297 }
298 }
299
300 kbase_fence_debug_check_atom(dep);
301 }
302 }
303 }
304
305 static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
306 {
307 struct kbase_context *kctx = katom->kctx;
308 struct device *dev = katom->kctx->kbdev->dev;
309 int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
310 unsigned long lflags;
311 struct kbase_sync_fence_info info;
312
313 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
314
315 if (kbase_sync_fence_in_info_get(katom, &info)) {
316 /* Fence must have signaled just after timeout. */
317 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
318 return;
319 }
320
321 dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n",
322 kctx->tgid, kctx->id,
323 kbase_jd_atom_id(kctx, katom),
324 info.fence, timeout_ms);
325 dev_warn(dev, "\tGuilty fence [%p] %s: %s\n",
326 info.fence, info.name,
327 kbase_sync_status_string(info.status));
328
329 /* Search for blocked trigger atoms */
330 kbase_fence_debug_check_atom(katom);
331
332 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
333
334 kbase_sync_fence_in_dump(katom);
335 }
336
337 struct kbase_fence_debug_work {
338 struct kbase_jd_atom *katom;
339 struct work_struct work;
340 };
341
342 static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
343 {
344 struct kbase_fence_debug_work *w = container_of(work,
345 struct kbase_fence_debug_work, work);
346 struct kbase_jd_atom *katom = w->katom;
347 struct kbase_context *kctx = katom->kctx;
348
349 mutex_lock(&kctx->jctx.lock);
350 kbase_fence_debug_wait_timeout(katom);
351 mutex_unlock(&kctx->jctx.lock);
352
353 kfree(w);
354 }
355
356 static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
357 {
358 struct kbase_fence_debug_work *work;
359 struct kbase_context *kctx = katom->kctx;
360
361 /* Enqueue fence debug worker. Use job_done_wq to get
362 * the debug prints ordered with job completion.
363 */
364 work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
365 /* Ignore allocation failure. */
366 if (work) {
367 work->katom = katom;
368 INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
369 queue_work(kctx->jctx.job_done_wq, &work->work);
370 }
371 }
372 #endif /* CONFIG_MALI_FENCE_DEBUG */
373
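/**
 * kbasep_soft_job_timeout_worker() - Timer callback for waiting soft jobs
 * @t: Timer embedded in the kbase context (kctx->soft_job_timeout)
 *
 * Cancels soft event waits that have exceeded soft_job_timeout_ms and, when
 * fence debug is enabled, dumps information about fence waits that are still
 * blocked. The timer is restarted if any job has not yet timed out.
 */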
374 void kbasep_soft_job_timeout_worker(struct timer_list *t)
375 {
376 struct kbase_context *kctx = from_timer(kctx, t, soft_job_timeout);
377 u32 timeout_ms = (u32)atomic_read(
378 &kctx->kbdev->js_data.soft_job_timeout_ms);
379 struct timer_list *timer = &kctx->soft_job_timeout;
380 ktime_t cur_time = ktime_get();
381 bool restarting = false;
382 unsigned long lflags;
383 struct list_head *entry, *tmp;
384
385 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
386 list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
387 struct kbase_jd_atom *katom = list_entry(entry,
388 struct kbase_jd_atom, queue);
389 s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
390 katom->start_timestamp));
391
392 if (elapsed_time < (s64)timeout_ms) {
393 restarting = true;
394 continue;
395 }
396
397 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
398 case BASE_JD_REQ_SOFT_EVENT_WAIT:
399 /* Take it out of the list to ensure that it
400 * will be cancelled in all cases
401 */
402 list_del(&katom->queue);
403
404 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
405 INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
406 queue_work(kctx->jctx.job_done_wq, &katom->work);
407 break;
408 #ifdef CONFIG_MALI_FENCE_DEBUG
409 case BASE_JD_REQ_SOFT_FENCE_WAIT:
410 kbase_fence_debug_timeout(katom);
411 break;
412 #endif
413 }
414 }
415
416 if (restarting)
417 mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
418 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
419 }
420
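/**
 * kbasep_soft_event_wait() - Process a soft event wait job
 * @katom: Atom to process; katom->jc holds the GPU VA of the event
 *
 * Return: 0 if the atom has completed (event already set or the status could
 * not be read), 1 if the atom must keep waiting.
 */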
421 static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
422 {
423 struct kbase_context *kctx = katom->kctx;
424 unsigned char status;
425
426 /* The status of this soft-job is stored in jc */
427 if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
428 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
429 return 0;
430 }
431
432 if (status == BASE_JD_SOFT_EVENT_SET)
433 return 0; /* Event already set, nothing to do */
434
435 kbasep_add_waiting_with_timeout(katom);
436
437 return 1;
438 }
439
440 static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
441 unsigned char new_status)
442 {
443 /* Complete jobs waiting on the same event */
444 struct kbase_context *kctx = katom->kctx;
445
446 if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
447 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
448 return;
449 }
450
451 if (new_status == BASE_JD_SOFT_EVENT_SET)
452 kbasep_complete_triggered_soft_events(kctx, katom->jc);
453 }
454
455 /**
456 * kbase_soft_event_update() - Update soft event state
457 * @kctx: Pointer to context
458 * @event: Event to update
459 * @new_status: New status value of event
460 *
461 * Update the event, and wake up any atoms waiting for the event.
462 *
463 * Return: 0 on success, a negative error code on failure.
464 */
465 int kbase_soft_event_update(struct kbase_context *kctx,
466 u64 event,
467 unsigned char new_status)
468 {
469 int err = 0;
470
471 mutex_lock(&kctx->jctx.lock);
472
473 if (kbasep_write_soft_event_status(kctx, event, new_status)) {
474 err = -ENOENT;
475 goto out;
476 }
477
478 if (new_status == BASE_JD_SOFT_EVENT_SET)
479 kbasep_complete_triggered_soft_events(kctx, event);
480
481 out:
482 mutex_unlock(&kctx->jctx.lock);
483
484 return err;
485 }
486
487 static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
488 {
489 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
490 if (jd_done_nolock(katom, NULL))
491 kbase_js_sched_all(katom->kctx->kbdev);
492 }
493
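/**
 * struct kbase_debug_copy_buffer - Per-buffer state for a debug copy job
 * @size:            Size of the user buffer in bytes
 * @pages:           Pinned pages backing the user buffer
 * @nr_pages:        Number of pages in @pages
 * @offset:          Offset of the buffer within the first page
 * @gpu_alloc:       External resource to copy from
 * @extres_pages:    Pinned pages of the external resource, used for user
 *                   buffer imports
 * @nr_extres_pages: Number of pages in @extres_pages
 */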
494 struct kbase_debug_copy_buffer {
495 size_t size;
496 struct page **pages;
497 int nr_pages;
498 size_t offset;
499 struct kbase_mem_phy_alloc *gpu_alloc;
500
501 struct page **extres_pages;
502 int nr_extres_pages;
503 };
504
505 static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
506 {
507 struct page **pages = buffer->extres_pages;
508 int nr_pages = buffer->nr_extres_pages;
509
510 if (pages) {
511 int i;
512
513 for (i = 0; i < nr_pages; i++) {
514 struct page *pg = pages[i];
515
516 if (pg)
517 put_page(pg);
518 }
519 kfree(pages);
520 }
521 }
522
523 static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
524 {
525 struct kbase_debug_copy_buffer *buffers =
526 (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
527 unsigned int i;
528 unsigned int nr = katom->nr_extres;
529
530 if (!buffers)
531 return;
532
533 kbase_gpu_vm_lock(katom->kctx);
534 for (i = 0; i < nr; i++) {
535 int p;
536 struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc;
537
538 if (!buffers[i].pages)
539 break;
540 for (p = 0; p < buffers[i].nr_pages; p++) {
541 struct page *pg = buffers[i].pages[p];
542
543 if (pg)
544 put_page(pg);
545 }
546 kfree(buffers[i].pages);
547 if (gpu_alloc) {
548 switch (gpu_alloc->type) {
549 case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
550 {
551 free_user_buffer(&buffers[i]);
552 break;
553 }
554 default:
555 /* Nothing to be done. */
556 break;
557 }
558 kbase_mem_phy_alloc_put(gpu_alloc);
559 }
560 }
561 kbase_gpu_vm_unlock(katom->kctx);
562 kfree(buffers);
563
564 katom->jc = 0;
565 }
566
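/**
 * kbase_debug_copy_prepare() - Set up a debug copy soft job
 * @katom: Atom to prepare; katom->jc holds a user pointer to an array of
 *         katom->nr_extres struct base_jd_debug_copy_buffer entries
 *
 * Pins the destination user buffers, looks up and takes references on the
 * source external resources, and replaces katom->jc with a kernel allocated
 * array of struct kbase_debug_copy_buffer.
 *
 * Return: 0 on success, a negative error code on failure.
 */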
567 static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
568 {
569 struct kbase_debug_copy_buffer *buffers;
570 struct base_jd_debug_copy_buffer *user_buffers = NULL;
571 unsigned int i;
572 unsigned int nr = katom->nr_extres;
573 int ret = 0;
574 void __user *user_structs = (void __user *)(uintptr_t)katom->jc;
575
576 if (!user_structs)
577 return -EINVAL;
578
579 buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
580 if (!buffers) {
581 ret = -ENOMEM;
582 katom->jc = 0;
583 goto out_cleanup;
584 }
585 katom->jc = (u64)(uintptr_t)buffers;
586
587 user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
588
589 if (!user_buffers) {
590 ret = -ENOMEM;
591 goto out_cleanup;
592 }
593
594 ret = copy_from_user(user_buffers, user_structs,
595 sizeof(*user_buffers)*nr);
596 if (ret)
597 goto out_cleanup;
598
599 for (i = 0; i < nr; i++) {
600 u64 addr = user_buffers[i].address;
601 u64 page_addr = addr & PAGE_MASK;
602 u64 end_page_addr = addr + user_buffers[i].size - 1;
603 u64 last_page_addr = end_page_addr & PAGE_MASK;
604 int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
605 int pinned_pages;
606 struct kbase_va_region *reg;
607 struct base_external_resource user_extres;
608
609 if (!addr)
610 continue;
611
612 buffers[i].nr_pages = nr_pages;
613 buffers[i].offset = addr & ~PAGE_MASK;
614 if (buffers[i].offset >= PAGE_SIZE) {
615 ret = -EINVAL;
616 goto out_cleanup;
617 }
618 buffers[i].size = user_buffers[i].size;
619
620 buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *),
621 GFP_KERNEL);
622 if (!buffers[i].pages) {
623 ret = -ENOMEM;
624 goto out_cleanup;
625 }
626
627 pinned_pages = get_user_pages_fast(page_addr,
628 nr_pages,
629 1, /* Write */
630 buffers[i].pages);
631 if (pinned_pages < 0) {
632 ret = pinned_pages;
633 goto out_cleanup;
634 }
635 if (pinned_pages != nr_pages) {
636 ret = -EINVAL;
637 goto out_cleanup;
638 }
639
640 user_extres = user_buffers[i].extres;
641 if (user_extres.ext_resource == 0ULL) {
642 ret = -EINVAL;
643 goto out_cleanup;
644 }
645
646 kbase_gpu_vm_lock(katom->kctx);
647 reg = kbase_region_tracker_find_region_enclosing_address(
648 katom->kctx, user_extres.ext_resource &
649 ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
650
651 if (NULL == reg || NULL == reg->gpu_alloc ||
652 (reg->flags & KBASE_REG_FREE)) {
653 ret = -EINVAL;
654 goto out_unlock;
655 }
656
657 buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
658 buffers[i].nr_extres_pages = reg->nr_pages;
659
660 if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
661 dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not of same size as the external resource to copy.\n");
662
663 switch (reg->gpu_alloc->type) {
664 case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
665 {
666 struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
667 unsigned long nr_pages =
668 alloc->imported.user_buf.nr_pages;
669
670 if (alloc->imported.user_buf.mm != current->mm) {
671 ret = -EINVAL;
672 goto out_unlock;
673 }
674 buffers[i].extres_pages = kcalloc(nr_pages,
675 sizeof(struct page *), GFP_KERNEL);
676 if (!buffers[i].extres_pages) {
677 ret = -ENOMEM;
678 goto out_unlock;
679 }
680
681 ret = get_user_pages_fast(
682 alloc->imported.user_buf.address,
683 nr_pages, 0,
684 buffers[i].extres_pages);
685 if (ret != nr_pages)
686 goto out_unlock;
687 ret = 0;
688 break;
689 }
690 case KBASE_MEM_TYPE_IMPORTED_UMP:
691 {
692 dev_warn(katom->kctx->kbdev->dev,
693 "UMP is not supported for debug_copy jobs\n");
694 ret = -EINVAL;
695 goto out_unlock;
696 }
697 default:
698 /* Nothing to be done. */
699 break;
700 }
701 kbase_gpu_vm_unlock(katom->kctx);
702 }
703 kfree(user_buffers);
704
705 return ret;
706
707 out_unlock:
708 kbase_gpu_vm_unlock(katom->kctx);
709
710 out_cleanup:
711 kfree(buffers);
712 kfree(user_buffers);
713
714 /* Frees the memory allocated for the debug copy buffers, including
715 * their members, and sets katom->jc to 0 */
716 kbase_debug_copy_finish(katom);
717 return ret;
718 }
719
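/**
 * kbase_mem_copy_from_extres_page() - Copy one external resource page into
 *                                     the destination user buffer
 * @kctx:           Context owning the buffers
 * @extres_page:    Mapped source page
 * @pages:          Destination pages
 * @nr_pages:       Number of destination pages
 * @target_page_nr: Current destination page index, updated on return
 * @offset:         Offset of the destination buffer within a page
 * @to_copy:        Remaining bytes to copy, updated on return
 *
 * Because of @offset, a source page may straddle two destination pages, so
 * up to two destination pages are written per call.
 */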
720 static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
721 void *extres_page, struct page **pages, unsigned int nr_pages,
722 unsigned int *target_page_nr, size_t offset, size_t *to_copy)
723 {
724 void *target_page = kmap(pages[*target_page_nr]);
725 size_t chunk = PAGE_SIZE-offset;
726
727 lockdep_assert_held(&kctx->reg_lock);
728
729 if (!target_page) {
730 *target_page_nr += 1;
731 dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
732 return;
733 }
734
735 chunk = min(chunk, *to_copy);
736
737 memcpy(target_page + offset, extres_page, chunk);
738 *to_copy -= chunk;
739
740 kunmap(pages[*target_page_nr]);
741
742 *target_page_nr += 1;
743 if (*target_page_nr >= nr_pages)
744 return;
745
746 target_page = kmap(pages[*target_page_nr]);
747 if (!target_page) {
748 *target_page_nr += 1;
749 dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
750 return;
751 }
752
753 KBASE_DEBUG_ASSERT(target_page);
754
755 chunk = min(offset, *to_copy);
756 memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
757 *to_copy -= chunk;
758
759 kunmap(pages[*target_page_nr]);
760 }
761
762 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
763 static void *dma_buf_kmap_page(struct kbase_mem_phy_alloc *gpu_alloc,
764 unsigned long page_num, struct page **page)
765 {
766 struct sg_table *sgt = gpu_alloc->imported.umm.sgt;
767 struct sg_page_iter sg_iter;
768 unsigned long page_index = 0;
769
770 if (WARN_ON(gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
771 return NULL;
772
773 if (!sgt)
774 return NULL;
775
776 if (WARN_ON(page_num >= gpu_alloc->nents))
777 return NULL;
778
779 for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
780 if (page_index == page_num) {
781 *page = sg_page_iter_page(&sg_iter);
782
783 return kmap(*page);
784 }
785 page_index++;
786 }
787
788 return NULL;
789 }
790 #endif
791
792 static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
793 struct kbase_debug_copy_buffer *buf_data)
794 {
795 unsigned int i;
796 unsigned int target_page_nr = 0;
797 struct page **pages = buf_data->pages;
798 u64 offset = buf_data->offset;
799 size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
800 size_t to_copy = min(extres_size, buf_data->size);
801 struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
802 int ret = 0;
803
804 KBASE_DEBUG_ASSERT(pages != NULL);
805
806 kbase_gpu_vm_lock(kctx);
807 if (!gpu_alloc) {
808 ret = -EINVAL;
809 goto out_unlock;
810 }
811
812 switch (gpu_alloc->type) {
813 case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
814 {
815 for (i = 0; i < buf_data->nr_extres_pages; i++) {
816 struct page *pg = buf_data->extres_pages[i];
817 void *extres_page = kmap(pg);
818
819 if (extres_page)
820 kbase_mem_copy_from_extres_page(kctx,
821 extres_page, pages,
822 buf_data->nr_pages,
823 &target_page_nr,
824 offset, &to_copy);
825
826 kunmap(pg);
827 if (target_page_nr >= buf_data->nr_pages)
828 break;
829 }
830 break;
831 }
832 break;
833 #ifdef CONFIG_DMA_SHARED_BUFFER
834 case KBASE_MEM_TYPE_IMPORTED_UMM: {
835 struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;
836
837 KBASE_DEBUG_ASSERT(dma_buf != NULL);
838 KBASE_DEBUG_ASSERT(dma_buf->size ==
839 buf_data->nr_extres_pages * PAGE_SIZE);
840
841 ret = dma_buf_begin_cpu_access(dma_buf,
842 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
843 0, buf_data->nr_extres_pages*PAGE_SIZE,
844 #endif
845 DMA_FROM_DEVICE);
846 if (ret)
847 goto out_unlock;
848
849 for (i = 0; i < buf_data->nr_extres_pages; i++) {
850
851 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
852 struct page *pg;
853 void *extres_page = dma_buf_kmap_page(gpu_alloc, i, &pg);
854 #else
855 void *extres_page = dma_buf_kmap(dma_buf, i);
856 #endif
857
858 if (extres_page)
859 kbase_mem_copy_from_extres_page(kctx,
860 extres_page, pages,
861 buf_data->nr_pages,
862 &target_page_nr,
863 offset, &to_copy);
864
865 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
866 kunmap(pg);
867 #else
868 dma_buf_kunmap(dma_buf, i, extres_page);
869 #endif
870 if (target_page_nr >= buf_data->nr_pages)
871 break;
872 }
873 dma_buf_end_cpu_access(dma_buf,
874 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
875 0, buf_data->nr_extres_pages*PAGE_SIZE,
876 #endif
877 DMA_FROM_DEVICE);
878 break;
879 }
880 #endif
881 default:
882 ret = -EINVAL;
883 }
884 out_unlock:
885 kbase_gpu_vm_unlock(kctx);
886 return ret;
887
888 }
889
890 static int kbase_debug_copy(struct kbase_jd_atom *katom)
891 {
892 struct kbase_debug_copy_buffer *buffers =
893 (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
894 unsigned int i;
895
896 for (i = 0; i < katom->nr_extres; i++) {
897 int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);
898
899 if (res)
900 return res;
901 }
902
903 return 0;
904 }
905
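/**
 * kbase_jit_allocate_prepare() - Copy in and validate a JIT allocation request
 * @katom: Atom to prepare; katom->jc holds a user pointer to a
 *         struct base_jit_alloc_info
 *
 * The info structure is copied into kernel memory, basic checks are applied
 * (non-zero ID, commit_pages <= va_pages, 8-byte aligned gpu_alloc_addr),
 * katom->jc is replaced with a pointer to the kernel copy and the atom is
 * added to kctx->jit_atoms_head.
 *
 * Return: 0 on success, a negative error code on failure.
 */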
906 static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
907 {
908 __user void *data = (__user void *)(uintptr_t) katom->jc;
909 struct base_jit_alloc_info *info;
910 struct kbase_context *kctx = katom->kctx;
911 int ret;
912
913 /* Fail the job if there is no info structure */
914 if (!data) {
915 ret = -EINVAL;
916 goto fail;
917 }
918
919 /* Copy the information for safe access and future storage */
920 info = kzalloc(sizeof(*info), GFP_KERNEL);
921 if (!info) {
922 ret = -ENOMEM;
923 goto fail;
924 }
925
926 if (copy_from_user(info, data, sizeof(*info)) != 0) {
927 ret = -EINVAL;
928 goto free_info;
929 }
930
931 /* If the ID is zero then fail the job */
932 if (info->id == 0) {
933 ret = -EINVAL;
934 goto free_info;
935 }
936
937 /* Sanity check that the PA fits within the VA */
938 if (info->va_pages < info->commit_pages) {
939 ret = -EINVAL;
940 goto free_info;
941 }
942
943 /* Ensure the GPU address is correctly aligned */
944 if ((info->gpu_alloc_addr & 0x7) != 0) {
945 ret = -EINVAL;
946 goto free_info;
947 }
948
949 /* Replace the user pointer with our kernel allocated info structure */
950 katom->jc = (u64)(uintptr_t) info;
951 katom->jit_blocked = false;
952
953 lockdep_assert_held(&kctx->jctx.lock);
954 list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
955
956 /*
957 * Note:
958 * The provided info->gpu_alloc_addr isn't validated here as
959 * userland can cache allocations which means that even
960 * though the region is valid it doesn't represent the
961 * same thing it used to.
962 *
963 * Complete validation of va_pages, commit_pages and extent
964 * isn't done here as it will be done during the call to
965 * kbase_mem_alloc.
966 */
967 return 0;
968
969 free_info:
970 kfree(info);
971 fail:
972 katom->jc = 0;
973 return ret;
974 }
975
976 static u8 kbase_jit_free_get_id(struct kbase_jd_atom *katom)
977 {
978 if (WARN_ON(katom->core_req != BASE_JD_REQ_SOFT_JIT_FREE))
979 return 0;
980
981 return (u8) katom->jc;
982 }
983
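/**
 * kbase_jit_allocate_process() - Perform a JIT allocation
 * @katom: Atom to process
 *
 * Creates the JIT region and writes its GPU address to the location given by
 * info->gpu_alloc_addr. If the allocation fails but a JIT free for an active
 * allocation was submitted earlier, the atom is queued on
 * kctx->jit_pending_alloc and retried when that free completes.
 *
 * Return: 0 if the atom has completed, 1 if it is blocked waiting for a free.
 */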
984 static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
985 {
986 struct kbase_context *kctx = katom->kctx;
987 struct base_jit_alloc_info *info;
988 struct kbase_va_region *reg;
989 struct kbase_vmap_struct mapping;
990 u64 *ptr, new_addr;
991
992 if (katom->jit_blocked) {
993 list_del(&katom->queue);
994 katom->jit_blocked = false;
995 }
996
997 info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
998
999 /* The JIT ID is still in use so fail the allocation */
1000 if (kctx->jit_alloc[info->id]) {
1001 katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1002 return 0;
1003 }
1004
1005 /* Create a JIT allocation */
1006 reg = kbase_jit_allocate(kctx, info);
1007 if (!reg) {
1008 struct kbase_jd_atom *jit_atom;
1009 bool can_block = false;
1010
1011 lockdep_assert_held(&kctx->jctx.lock);
1012
1013 jit_atom = list_first_entry(&kctx->jit_atoms_head,
1014 struct kbase_jd_atom, jit_node);
1015
1016 list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) {
1017 if (jit_atom == katom)
1018 break;
1019 if (jit_atom->core_req == BASE_JD_REQ_SOFT_JIT_FREE) {
1020 u8 free_id = kbase_jit_free_get_id(jit_atom);
1021
1022 if (free_id && kctx->jit_alloc[free_id]) {
1023 /* A JIT free which is active and
1024 * submitted before this atom
1025 */
1026 can_block = true;
1027 break;
1028 }
1029 }
1030 }
1031
1032 if (!can_block) {
1033 /* Mark the allocation so we know it's in use even if
1034 * the allocation itself fails.
1035 */
1036 kctx->jit_alloc[info->id] =
1037 (struct kbase_va_region *) -1;
1038
1039 katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1040 return 0;
1041 }
1042
1043 /* There are pending frees for an active allocation
1044 * so we should wait to see whether they free the memory.
1045 * Add to the beginning of the list to ensure that the atom is
1046 * processed only once in kbase_jit_free_finish
1047 */
1048 list_add(&katom->queue, &kctx->jit_pending_alloc);
1049 katom->jit_blocked = true;
1050
1051 return 1;
1052 }
1053
1054 /*
1055 * Write the address of the JIT allocation to the user provided
1056 * GPU allocation.
1057 */
1058 ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
1059 &mapping);
1060 if (!ptr) {
1061 /*
1062 * Leave the allocation "live" as the JIT free job will be
1063 * submitted anyway.
1064 */
1065 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1066 return 0;
1067 }
1068
1069 new_addr = reg->start_pfn << PAGE_SHIFT;
1070 *ptr = new_addr;
1071 KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(
1072 katom, info->gpu_alloc_addr, new_addr);
1073 kbase_vunmap(kctx, &mapping);
1074
1075 katom->event_code = BASE_JD_EVENT_DONE;
1076
1077 /*
1078 * Bind it to the user provided ID. Do this last so we can check for
1079 * the JIT free racing this JIT alloc job.
1080 */
1081 kctx->jit_alloc[info->id] = reg;
1082
1083 return 0;
1084 }
1085
1086 static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
1087 {
1088 struct base_jit_alloc_info *info;
1089
1090 lockdep_assert_held(&katom->kctx->jctx.lock);
1091
1092 /* Remove atom from jit_atoms_head list */
1093 list_del(&katom->jit_node);
1094
1095 if (katom->jit_blocked) {
1096 list_del(&katom->queue);
1097 katom->jit_blocked = false;
1098 }
1099
1100 info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
1101 /* Free the info structure */
1102 kfree(info);
1103 }
1104
1105 static int kbase_jit_free_prepare(struct kbase_jd_atom *katom)
1106 {
1107 struct kbase_context *kctx = katom->kctx;
1108
1109 lockdep_assert_held(&kctx->jctx.lock);
1110 list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
1111
1112 return 0;
1113 }
1114
1115 static void kbase_jit_free_process(struct kbase_jd_atom *katom)
1116 {
1117 struct kbase_context *kctx = katom->kctx;
1118 u8 id = kbase_jit_free_get_id(katom);
1119
1120 /*
1121 * If the ID is zero or it is not in use yet then fail the job.
1122 */
1123 if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
1124 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1125 return;
1126 }
1127
1128 /*
1129 * If the ID is valid but the allocation request failed, still succeed
1130 * this soft job but don't try to free the allocation.
1131 */
1132 if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
1133 kbase_jit_free(kctx, kctx->jit_alloc[id]);
1134
1135 kctx->jit_alloc[id] = NULL;
1136 }
1137
1138 static void kbasep_jit_free_finish_worker(struct work_struct *work)
1139 {
1140 struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
1141 work);
1142 struct kbase_context *kctx = katom->kctx;
1143 int resched;
1144
1145 mutex_lock(&kctx->jctx.lock);
1146 kbase_finish_soft_job(katom);
1147 resched = jd_done_nolock(katom, NULL);
1148 mutex_unlock(&kctx->jctx.lock);
1149
1150 if (resched)
1151 kbase_js_sched_all(kctx->kbdev);
1152 }
1153
1154 static void kbase_jit_free_finish(struct kbase_jd_atom *katom)
1155 {
1156 struct list_head *i, *tmp;
1157 struct kbase_context *kctx = katom->kctx;
1158
1159 lockdep_assert_held(&kctx->jctx.lock);
1160 /* Remove this atom from the kctx->jit_atoms_head list */
1161 list_del(&katom->jit_node);
1162
1163 list_for_each_safe(i, tmp, &kctx->jit_pending_alloc) {
1164 struct kbase_jd_atom *pending_atom = list_entry(i,
1165 struct kbase_jd_atom, queue);
1166 if (kbase_jit_allocate_process(pending_atom) == 0) {
1167 /* Atom has completed */
1168 INIT_WORK(&pending_atom->work,
1169 kbasep_jit_free_finish_worker);
1170 queue_work(kctx->jctx.job_done_wq, &pending_atom->work);
1171 }
1172 }
1173 }
1174
1175 static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
1176 {
1177 __user struct base_external_resource_list *user_ext_res;
1178 struct base_external_resource_list *ext_res;
1179 u64 count = 0;
1180 size_t copy_size;
1181 int ret;
1182
1183 user_ext_res = (__user struct base_external_resource_list *)
1184 (uintptr_t) katom->jc;
1185
1186 /* Fail the job if there is no info structure */
1187 if (!user_ext_res) {
1188 ret = -EINVAL;
1189 goto fail;
1190 }
1191
1192 if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
1193 ret = -EINVAL;
1194 goto fail;
1195 }
1196
1197 /* Is the number of external resources in range? */
1198 if (!count || count > BASE_EXT_RES_COUNT_MAX) {
1199 ret = -EINVAL;
1200 goto fail;
1201 }
1202
1203 /* Copy the information for safe access and future storage */
1204 copy_size = sizeof(*ext_res);
1205 copy_size += sizeof(struct base_external_resource) * (count - 1);
1206 ext_res = kzalloc(copy_size, GFP_KERNEL);
1207 if (!ext_res) {
1208 ret = -ENOMEM;
1209 goto fail;
1210 }
1211
1212 if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
1213 ret = -EINVAL;
1214 goto free_info;
1215 }
1216
1217 /*
1218 * Overwrite the count with the first value read, in case it was changed
1219 * after the fact.
1220 */
1221 ext_res->count = count;
1222
1223 /*
1224 * Replace the user pointer with our kernel allocated
1225 * ext_res structure.
1226 */
1227 katom->jc = (u64)(uintptr_t) ext_res;
1228
1229 return 0;
1230
1231 free_info:
1232 kfree(ext_res);
1233 fail:
1234 return ret;
1235 }
1236
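/**
 * kbase_ext_res_process() - Map or unmap external resources for an atom
 * @katom: Atom holding the kernel copy of the resource list in katom->jc
 * @map:   True to acquire (map) the resources, false to release (unmap) them
 *
 * On a failed map, every resource acquired so far is released again and the
 * atom fails with BASE_JD_EVENT_JOB_INVALID.
 */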
1237 static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
1238 {
1239 struct base_external_resource_list *ext_res;
1240 int i;
1241 bool failed = false;
1242
1243 ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1244 if (!ext_res)
1245 goto failed_jc;
1246
1247 kbase_gpu_vm_lock(katom->kctx);
1248
1249 for (i = 0; i < ext_res->count; i++) {
1250 u64 gpu_addr;
1251
1252 gpu_addr = ext_res->ext_res[i].ext_resource &
1253 ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1254 if (map) {
1255 if (!kbase_sticky_resource_acquire(katom->kctx,
1256 gpu_addr))
1257 goto failed_loop;
1258 } else
1259 if (!kbase_sticky_resource_release(katom->kctx, NULL,
1260 gpu_addr))
1261 failed = true;
1262 }
1263
1264 /*
1265 * In the case of unmap we continue unmapping the remaining resources
1266 * even if one fails, but we always report failure if _any_ unmap
1267 * request fails.
1268 */
1269 if (failed)
1270 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1271 else
1272 katom->event_code = BASE_JD_EVENT_DONE;
1273
1274 kbase_gpu_vm_unlock(katom->kctx);
1275
1276 return;
1277
1278 failed_loop:
1279 while (--i >= 0) {
1280 u64 gpu_addr;
1281
1282 gpu_addr = ext_res->ext_res[i].ext_resource &
1283 ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1284
1285 kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
1286 }
1287
1288 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1289 kbase_gpu_vm_unlock(katom->kctx);
1290
1291 failed_jc:
1292 return;
1293 }
1294
1295 static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
1296 {
1297 struct base_external_resource_list *ext_res;
1298
1299 ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1300 /* Free the info structure */
1301 kfree(ext_res);
1302 }
1303
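/**
 * kbase_process_soft_job() - Execute a soft job
 * @katom: Atom to process
 *
 * Dispatches on the soft job type in katom->core_req.
 *
 * Return: 0 if the atom has completed, non-zero if it is still waiting
 * (e.g. on a fence or soft event) and will be completed later.
 */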
1304 int kbase_process_soft_job(struct kbase_jd_atom *katom)
1305 {
1306 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1307 case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1308 return kbase_dump_cpu_gpu_time(katom);
1309
1310 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1311 case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1312 katom->event_code = kbase_sync_fence_out_trigger(katom,
1313 katom->event_code == BASE_JD_EVENT_DONE ?
1314 0 : -EFAULT);
1315 break;
1316 case BASE_JD_REQ_SOFT_FENCE_WAIT:
1317 {
1318 int ret = kbase_sync_fence_in_wait(katom);
1319
1320 if (ret == 1) {
1321 #ifdef CONFIG_MALI_FENCE_DEBUG
1322 kbasep_add_waiting_with_timeout(katom);
1323 #else
1324 kbasep_add_waiting_soft_job(katom);
1325 #endif
1326 }
1327 return ret;
1328 }
1329 #endif
1330
1331 case BASE_JD_REQ_SOFT_REPLAY:
1332 return kbase_replay_process(katom);
1333 case BASE_JD_REQ_SOFT_EVENT_WAIT:
1334 return kbasep_soft_event_wait(katom);
1335 case BASE_JD_REQ_SOFT_EVENT_SET:
1336 kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
1337 break;
1338 case BASE_JD_REQ_SOFT_EVENT_RESET:
1339 kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
1340 break;
1341 case BASE_JD_REQ_SOFT_DEBUG_COPY:
1342 {
1343 int res = kbase_debug_copy(katom);
1344
1345 if (res)
1346 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1347 break;
1348 }
1349 case BASE_JD_REQ_SOFT_JIT_ALLOC:
1350 return kbase_jit_allocate_process(katom);
1351 case BASE_JD_REQ_SOFT_JIT_FREE:
1352 kbase_jit_free_process(katom);
1353 break;
1354 case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1355 kbase_ext_res_process(katom, true);
1356 break;
1357 case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1358 kbase_ext_res_process(katom, false);
1359 break;
1360 }
1361
1362 /* Atom is complete */
1363 return 0;
1364 }
1365
1366 void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
1367 {
1368 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1369 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1370 case BASE_JD_REQ_SOFT_FENCE_WAIT:
1371 kbase_sync_fence_in_cancel_wait(katom);
1372 break;
1373 #endif
1374 case BASE_JD_REQ_SOFT_EVENT_WAIT:
1375 kbasep_soft_event_cancel_job(katom);
1376 break;
1377 default:
1378 /* This soft-job doesn't support cancellation! */
1379 KBASE_DEBUG_ASSERT(0);
1380 }
1381 }
1382
1383 int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
1384 {
1385 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1386 case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1387 {
1388 if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
1389 return -EINVAL;
1390 }
1391 break;
1392 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1393 case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1394 {
1395 struct base_fence fence;
1396 int fd;
1397
1398 if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1399 return -EINVAL;
1400
1401 fd = kbase_sync_fence_out_create(katom,
1402 fence.basep.stream_fd);
1403 if (fd < 0)
1404 return -EINVAL;
1405
1406 fence.basep.fd = fd;
1407 if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
1408 kbase_sync_fence_out_remove(katom);
1409 kbase_sync_fence_close_fd(fd);
1410 fence.basep.fd = -EINVAL;
1411 return -EINVAL;
1412 }
1413 }
1414 break;
1415 case BASE_JD_REQ_SOFT_FENCE_WAIT:
1416 {
1417 struct base_fence fence;
1418 int ret;
1419
1420 if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1421 return -EINVAL;
1422
1423 /* Get a reference to the fence object */
1424 ret = kbase_sync_fence_in_from_fd(katom,
1425 fence.basep.fd);
1426 if (ret < 0)
1427 return ret;
1428
1429 #ifdef CONFIG_MALI_DMA_FENCE
1430 /*
1431 * Set KCTX_NO_IMPLICIT_SYNC in the context the first
1432 * time a soft fence wait job is observed. This
1433 * prevents the implicit dma-buf fences from conflicting with
1434 * the Android native sync fences.
1435 */
1436 if (!kbase_ctx_flag(katom->kctx, KCTX_NO_IMPLICIT_SYNC))
1437 kbase_ctx_flag_set(katom->kctx, KCTX_NO_IMPLICIT_SYNC);
1438 #endif /* CONFIG_MALI_DMA_FENCE */
1439 }
1440 break;
1441 #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
1442 case BASE_JD_REQ_SOFT_JIT_ALLOC:
1443 return kbase_jit_allocate_prepare(katom);
1444 case BASE_JD_REQ_SOFT_REPLAY:
1445 break;
1446 case BASE_JD_REQ_SOFT_JIT_FREE:
1447 return kbase_jit_free_prepare(katom);
1448 case BASE_JD_REQ_SOFT_EVENT_WAIT:
1449 case BASE_JD_REQ_SOFT_EVENT_SET:
1450 case BASE_JD_REQ_SOFT_EVENT_RESET:
1451 if (katom->jc == 0)
1452 return -EINVAL;
1453 break;
1454 case BASE_JD_REQ_SOFT_DEBUG_COPY:
1455 return kbase_debug_copy_prepare(katom);
1456 case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1457 return kbase_ext_res_prepare(katom);
1458 case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1459 return kbase_ext_res_prepare(katom);
1460 default:
1461 /* Unsupported soft-job */
1462 return -EINVAL;
1463 }
1464 return 0;
1465 }
1466
1467 void kbase_finish_soft_job(struct kbase_jd_atom *katom)
1468 {
1469 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1470 case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1471 /* Nothing to do */
1472 break;
1473 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1474 case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1475 /* If fence has not yet been signaled, do it now */
1476 kbase_sync_fence_out_trigger(katom, katom->event_code ==
1477 BASE_JD_EVENT_DONE ? 0 : -EFAULT);
1478 break;
1479 case BASE_JD_REQ_SOFT_FENCE_WAIT:
1480 /* Release katom's reference to fence object */
1481 kbase_sync_fence_in_remove(katom);
1482 break;
1483 #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
1484 case BASE_JD_REQ_SOFT_DEBUG_COPY:
1485 kbase_debug_copy_finish(katom);
1486 break;
1487 case BASE_JD_REQ_SOFT_JIT_ALLOC:
1488 kbase_jit_allocate_finish(katom);
1489 break;
1490 case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1491 kbase_ext_res_finish(katom);
1492 break;
1493 case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1494 kbase_ext_res_finish(katom);
1495 break;
1496 case BASE_JD_REQ_SOFT_JIT_FREE:
1497 kbase_jit_free_finish(katom);
1498 break;
1499 }
1500 }
1501
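/**
 * kbase_resume_suspended_soft_jobs() - Re-run soft jobs deferred by suspend
 * @kbdev: Device being resumed
 *
 * Moves the device's suspended_soft_jobs_list onto a local list and processes
 * each atom again, completing those that finish immediately.
 */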
1502 void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
1503 {
1504 LIST_HEAD(local_suspended_soft_jobs);
1505 struct kbase_jd_atom *tmp_iter;
1506 struct kbase_jd_atom *katom_iter;
1507 struct kbasep_js_device_data *js_devdata;
1508 bool resched = false;
1509
1510 KBASE_DEBUG_ASSERT(kbdev);
1511
1512 js_devdata = &kbdev->js_data;
1513
1514 /* Move out the entire list */
1515 mutex_lock(&js_devdata->runpool_mutex);
1516 list_splice_init(&js_devdata->suspended_soft_jobs_list,
1517 &local_suspended_soft_jobs);
1518 mutex_unlock(&js_devdata->runpool_mutex);
1519
1520 /*
1521 * Each atom must be detached from the list and run separately -
1522 * it could be re-added to the old list, but this is unlikely.
1523 */
1524 list_for_each_entry_safe(katom_iter, tmp_iter,
1525 &local_suspended_soft_jobs, dep_item[1]) {
1526 struct kbase_context *kctx = katom_iter->kctx;
1527
1528 mutex_lock(&kctx->jctx.lock);
1529
1530 /* Remove from the global list */
1531 list_del(&katom_iter->dep_item[1]);
1532 /* Remove from the context's list of waiting soft jobs */
1533 kbasep_remove_waiting_soft_job(katom_iter);
1534
1535 if (kbase_process_soft_job(katom_iter) == 0) {
1536 kbase_finish_soft_job(katom_iter);
1537 resched |= jd_done_nolock(katom_iter, NULL);
1538 } else {
1539 KBASE_DEBUG_ASSERT((katom_iter->core_req &
1540 BASE_JD_REQ_SOFT_JOB_TYPE)
1541 != BASE_JD_REQ_SOFT_REPLAY);
1542 }
1543
1544 mutex_unlock(&kctx->jctx.lock);
1545 }
1546
1547 if (resched)
1548 kbase_js_sched_all(kbdev);
1549 }
1550