1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2011-2021 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 #include <mali_kbase.h>
23 
24 #include <linux/dma-buf.h>
25 #include <asm/cacheflush.h>
26 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
27 #include <mali_kbase_sync.h>
28 #endif
29 #include <linux/dma-mapping.h>
30 #include <uapi/gpu/arm/bifrost/mali_base_kernel.h>
31 #include <mali_kbase_hwaccess_time.h>
32 #include <mali_kbase_kinstr_jm.h>
33 #include <mali_kbase_mem_linux.h>
34 #include <tl/mali_kbase_tracepoints.h>
35 #include <mali_linux_trace.h>
36 #include <linux/version.h>
37 #include <linux/ktime.h>
38 #include <linux/pfn.h>
39 #include <linux/sched.h>
40 #include <linux/kernel.h>
41 #include <linux/cache.h>
42 
43 #if !MALI_USE_CSF
/**
 * DOC: This file implements the logic behind software-only jobs that are
 * executed within the driver rather than being handed over to the GPU.
 */
48 
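/**
 * kbasep_add_waiting_soft_job() - Add an atom to the context's list of
 *                                 waiting soft jobs
 * @katom: Atom to add
 */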
static void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
50 {
51 	struct kbase_context *kctx = katom->kctx;
52 	unsigned long lflags;
53 
54 	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
55 	list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
56 	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
57 }
58 
void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
60 {
61 	struct kbase_context *kctx = katom->kctx;
62 	unsigned long lflags;
63 
64 	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
65 	list_del(&katom->queue);
66 	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
67 }
68 
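/**
 * kbasep_add_waiting_with_timeout() - Add an atom to the waiting list and
 *                                     arm the soft job timeout timer
 * @katom: Atom to add
 *
 * The atom's start time is recorded and the context's soft job timeout
 * timer is started if it is not already pending.
 */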
static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
70 {
71 	struct kbase_context *kctx = katom->kctx;
72 
	/* Record the start time of this atom so we can cancel it at
	 * the right time.
	 */
76 	katom->start_timestamp = ktime_get();
77 
78 	/* Add the atom to the waiting list before the timer is
79 	 * (re)started to make sure that it gets processed.
80 	 */
81 	kbasep_add_waiting_soft_job(katom);
82 
	/* Schedule a timeout for this atom if the timer is not already running */
84 	if (!timer_pending(&kctx->soft_job_timeout)) {
85 		int timeout_ms = atomic_read(
86 				&kctx->kbdev->js_data.soft_job_timeout_ms);
87 		mod_timer(&kctx->soft_job_timeout,
88 			  jiffies + msecs_to_jiffies(timeout_ms));
89 	}
90 }
91 
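/**
 * kbasep_read_soft_event_status() - Read the status byte of a soft event
 * @kctx:   Pointer to context
 * @evt:    GPU address of the soft event
 * @status: Pointer to where the event status is written
 *
 * Return: 0 on success, -EFAULT if the event could not be mapped.
 */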
static int kbasep_read_soft_event_status(
		struct kbase_context *kctx, u64 evt, unsigned char *status)
94 {
95 	unsigned char *mapped_evt;
96 	struct kbase_vmap_struct map;
97 
98 	mapped_evt = kbase_vmap_prot(kctx, evt, sizeof(*mapped_evt),
99 				     KBASE_REG_CPU_RD, &map);
100 	if (!mapped_evt)
101 		return -EFAULT;
102 
103 	*status = *mapped_evt;
104 
105 	kbase_vunmap(kctx, &map);
106 
107 	return 0;
108 }
109 
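/**
 * kbasep_write_soft_event_status() - Write the status byte of a soft event
 * @kctx:       Pointer to context
 * @evt:        GPU address of the soft event
 * @new_status: New status of the event, either BASE_JD_SOFT_EVENT_SET or
 *              BASE_JD_SOFT_EVENT_RESET
 *
 * Return: 0 on success, -EINVAL for an invalid status value or -EFAULT if
 *         the event could not be mapped.
 */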
static int kbasep_write_soft_event_status(
		struct kbase_context *kctx, u64 evt, unsigned char new_status)
112 {
113 	unsigned char *mapped_evt;
114 	struct kbase_vmap_struct map;
115 
116 	if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
117 	    (new_status != BASE_JD_SOFT_EVENT_RESET))
118 		return -EINVAL;
119 
120 	mapped_evt = kbase_vmap_prot(kctx, evt, sizeof(*mapped_evt),
121 				     KBASE_REG_CPU_WR, &map);
122 	if (!mapped_evt)
123 		return -EFAULT;
124 
125 	*mapped_evt = new_status;
126 
127 	kbase_vunmap(kctx, &map);
128 
129 	return 0;
130 }
131 
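/**
 * kbase_dump_cpu_gpu_time() - Perform a CPU/GPU timestamp dump soft job
 * @katom: Atom to process
 *
 * Samples the GPU cycle counter and system time and writes the result to
 * the user buffer referenced by the atom's jc field. If the GPU is
 * suspending, the atom is instead queued to be processed on resume.
 *
 * Return: 0 when the atom has been processed, otherwise the error returned
 *         by kbase_pm_context_active_handle_suspend().
 */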
static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
133 {
134 	struct kbase_vmap_struct map;
135 	void *user_result;
136 	struct timespec64 ts;
137 	struct base_dump_cpu_gpu_counters data;
138 	u64 system_time = 0ULL;
139 	u64 cycle_counter;
140 	u64 jc = katom->jc;
141 	struct kbase_context *kctx = katom->kctx;
142 	int pm_active_err;
143 
144 	memset(&data, 0, sizeof(data));
145 
	/* Take the PM active reference as late as possible - otherwise, it could
	 * delay suspend until we process the atom (which may be at the end of a
	 * long chain of dependencies).
	 */
150 #ifdef CONFIG_MALI_ARBITER_SUPPORT
151 	atomic_inc(&kctx->kbdev->pm.gpu_users_waiting);
152 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
153 	pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
154 	if (pm_active_err) {
155 		struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
156 
		/* We're suspended - queue this on the list of suspended jobs.
		 * Use dep_item[1], because dep_item[0] was previously in use
		 * for 'waiting_soft_jobs'.
		 */
161 		mutex_lock(&js_devdata->runpool_mutex);
162 		list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
163 		mutex_unlock(&js_devdata->runpool_mutex);
164 
		/* Also add this to the list of waiting soft jobs */
166 		kbasep_add_waiting_soft_job(katom);
167 
168 		return pm_active_err;
169 	}
170 #ifdef CONFIG_MALI_ARBITER_SUPPORT
171 	else
172 		atomic_dec(&kctx->kbdev->pm.gpu_users_waiting);
173 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
174 
175 	kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
176 									&ts);
177 
178 	kbase_pm_context_idle(kctx->kbdev);
179 
180 	data.sec = ts.tv_sec;
181 	data.usec = ts.tv_nsec / 1000;
182 	data.system_time = system_time;
183 	data.cycle_counter = cycle_counter;
184 
185 	/* Assume this atom will be cancelled until we know otherwise */
186 	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
187 
	/* GPU_WR access is checked on the range for returning the result to
	 * userspace for the following reasons:
	 * - security, this is currently how imported user buffers are checked.
	 * - the userspace DDK is guaranteed to assume the region was mapped as
	 *   GPU_WR
	 */
193 	user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR, &map);
194 	if (!user_result)
195 		return 0;
196 
197 	memcpy(user_result, &data, sizeof(data));
198 
199 	kbase_vunmap(kctx, &map);
200 
201 	/* Atom was fine - mark it as done */
202 	katom->event_code = BASE_JD_EVENT_DONE;
203 
204 	return 0;
205 }
206 
207 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
208 /* Called by the explicit fence mechanism when a fence wait has completed */
void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
210 {
211 	struct kbase_context *kctx = katom->kctx;
212 
213 	mutex_lock(&kctx->jctx.lock);
214 	kbasep_remove_waiting_soft_job(katom);
215 	kbase_finish_soft_job(katom);
216 	if (jd_done_nolock(katom, NULL))
217 		kbase_js_sched_all(kctx->kbdev);
218 	mutex_unlock(&kctx->jctx.lock);
219 }
220 #endif
221 
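/**
 * kbasep_soft_event_complete_job() - Work item used to complete a soft
 *                                    event atom from workqueue context
 * @work: Work item embedded in the atom
 */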
static void kbasep_soft_event_complete_job(struct work_struct *work)
223 {
224 	struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
225 			work);
226 	struct kbase_context *kctx = katom->kctx;
227 	int resched;
228 
229 	mutex_lock(&kctx->jctx.lock);
230 	resched = jd_done_nolock(katom, NULL);
231 	mutex_unlock(&kctx->jctx.lock);
232 
233 	if (resched)
234 		kbase_js_sched_all(kctx->kbdev);
235 }
236 
void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
238 {
239 	int cancel_timer = 1;
240 	struct list_head *entry, *tmp;
241 	unsigned long lflags;
242 
243 	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
244 	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
245 		struct kbase_jd_atom *katom = list_entry(
246 				entry, struct kbase_jd_atom, queue);
247 
248 		switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
249 		case BASE_JD_REQ_SOFT_EVENT_WAIT:
250 			if (katom->jc == evt) {
251 				list_del(&katom->queue);
252 
253 				katom->event_code = BASE_JD_EVENT_DONE;
254 				INIT_WORK(&katom->work,
255 					  kbasep_soft_event_complete_job);
256 				queue_work(kctx->jctx.job_done_wq,
257 					   &katom->work);
258 			} else {
259 				/* There are still other waiting jobs, we cannot
260 				 * cancel the timer yet.
261 				 */
262 				cancel_timer = 0;
263 			}
264 			break;
265 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
266 		case BASE_JD_REQ_SOFT_FENCE_WAIT:
267 			/* Keep the timer running if fence debug is enabled and
268 			 * there are waiting fence jobs.
269 			 */
270 			cancel_timer = 0;
271 			break;
272 #endif
273 		}
274 	}
275 
276 	if (cancel_timer)
277 		del_timer(&kctx->soft_job_timeout);
278 	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
279 }
280 
281 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
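/**
 * kbase_fence_debug_check_atom() - Report fence trigger atoms blocked by
 *                                  @katom
 * @katom: Atom whose dependent atoms are inspected
 *
 * Recursively walks the atoms that depend on @katom and warns about any
 * incomplete fence trigger soft jobs found among them, since these cannot
 * signal their fence until @katom completes.
 */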
static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
283 {
284 	struct kbase_context *kctx = katom->kctx;
285 	struct device *dev = kctx->kbdev->dev;
286 	int i;
287 
288 	for (i = 0; i < 2; i++) {
289 		struct kbase_jd_atom *dep;
290 
291 		list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
292 			if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
293 			    dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
294 				continue;
295 
296 			if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
297 					== BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
298 				/* Found blocked trigger fence. */
299 				struct kbase_sync_fence_info info;
300 
301 				if (!kbase_sync_fence_in_info_get(dep, &info)) {
302 					dev_warn(dev,
303 						 "\tVictim trigger atom %d fence [%pK] %s: %s\n",
304 						 kbase_jd_atom_id(kctx, dep),
305 						 info.fence,
306 						 info.name,
307 						 kbase_sync_status_string(info.status));
				}
309 			}
310 
311 			kbase_fence_debug_check_atom(dep);
312 		}
313 	}
314 }
315 
static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
317 {
318 	struct kbase_context *kctx = katom->kctx;
319 	struct device *dev = katom->kctx->kbdev->dev;
320 	int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
321 	unsigned long lflags;
322 	struct kbase_sync_fence_info info;
323 
324 	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
325 
326 	if (kbase_sync_fence_in_info_get(katom, &info)) {
327 		/* Fence must have signaled just after timeout. */
328 		spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
329 		return;
330 	}
331 
332 	dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%pK] after %dms\n",
333 		 kctx->tgid, kctx->id,
334 		 kbase_jd_atom_id(kctx, katom),
335 		 info.fence, timeout_ms);
336 	dev_warn(dev, "\tGuilty fence [%pK] %s: %s\n",
337 		 info.fence, info.name,
338 		 kbase_sync_status_string(info.status));
339 
340 	/* Search for blocked trigger atoms */
341 	kbase_fence_debug_check_atom(katom);
342 
343 	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
344 
345 	kbase_sync_fence_in_dump(katom);
346 }
347 
348 struct kbase_fence_debug_work {
349 	struct kbase_jd_atom *katom;
350 	struct work_struct work;
351 };
352 
static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
354 {
355 	struct kbase_fence_debug_work *w = container_of(work,
356 			struct kbase_fence_debug_work, work);
357 	struct kbase_jd_atom *katom = w->katom;
358 	struct kbase_context *kctx = katom->kctx;
359 
360 	mutex_lock(&kctx->jctx.lock);
361 	kbase_fence_debug_wait_timeout(katom);
362 	mutex_unlock(&kctx->jctx.lock);
363 
364 	kfree(w);
365 }
366 
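/**
 * kbase_fence_debug_timeout() - Dump debug information for a fence wait
 *                               atom that has timed out
 * @katom: Fence wait atom that timed out
 *
 * The dump runs from a worker queued on the context's job_done_wq so that
 * the debug output is ordered with job completion.
 */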
static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
368 {
369 	struct kbase_fence_debug_work *work;
370 	struct kbase_context *kctx = katom->kctx;
371 
372 	/* Enqueue fence debug worker. Use job_done_wq to get
373 	 * debug print ordered with job completion.
374 	 */
375 	work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
376 	/* Ignore allocation failure. */
377 	if (work) {
378 		work->katom = katom;
379 		INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
380 		queue_work(kctx->jctx.job_done_wq, &work->work);
381 	}
382 }
383 #endif /* CONFIG_MALI_BIFROST_FENCE_DEBUG */
384 
void kbasep_soft_job_timeout_worker(struct timer_list *timer)
386 {
387 	struct kbase_context *kctx = container_of(timer, struct kbase_context,
388 			soft_job_timeout);
389 	u32 timeout_ms = (u32)atomic_read(
390 			&kctx->kbdev->js_data.soft_job_timeout_ms);
391 	ktime_t cur_time = ktime_get();
392 	bool restarting = false;
393 	unsigned long lflags;
394 	struct list_head *entry, *tmp;
395 
396 	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
397 	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
398 		struct kbase_jd_atom *katom = list_entry(entry,
399 				struct kbase_jd_atom, queue);
400 		s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
401 					katom->start_timestamp));
402 
403 		if (elapsed_time < (s64)timeout_ms) {
404 			restarting = true;
405 			continue;
406 		}
407 
408 		switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
409 		case BASE_JD_REQ_SOFT_EVENT_WAIT:
410 			/* Take it out of the list to ensure that it
411 			 * will be cancelled in all cases
412 			 */
413 			list_del(&katom->queue);
414 
415 			katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
416 			INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
417 			queue_work(kctx->jctx.job_done_wq, &katom->work);
418 			break;
419 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
420 		case BASE_JD_REQ_SOFT_FENCE_WAIT:
421 			kbase_fence_debug_timeout(katom);
422 			break;
423 #endif
424 		}
425 	}
426 
427 	if (restarting)
428 		mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
429 	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
430 }
431 
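/**
 * kbasep_soft_event_wait() - Process a soft event wait atom
 * @katom: Atom to process
 *
 * Return: 0 if the event is already set or its status could not be read
 *         (the atom is then cancelled), 1 if the atom has been queued to
 *         wait for the event.
 */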
static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
433 {
434 	struct kbase_context *kctx = katom->kctx;
435 	unsigned char status;
436 
437 	/* The status of this soft-job is stored in jc */
438 	if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
439 		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
440 		return 0;
441 	}
442 
443 	if (status == BASE_JD_SOFT_EVENT_SET)
444 		return 0; /* Event already set, nothing to do */
445 
446 	kbasep_add_waiting_with_timeout(katom);
447 
448 	return 1;
449 }
450 
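/**
 * kbasep_soft_event_update_locked() - Update a soft event from a soft job
 *                                     atom
 * @katom:      Atom performing the update
 * @new_status: New status value of the event
 *
 * The caller is expected to hold the context's jctx.lock (hence the _locked
 * suffix). If the event is set, any atoms waiting on it are completed; if
 * the event cannot be updated, the atom is cancelled.
 */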
static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
				     unsigned char new_status)
453 {
454 	/* Complete jobs waiting on the same event */
455 	struct kbase_context *kctx = katom->kctx;
456 
457 	if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
458 		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
459 		return;
460 	}
461 
462 	if (new_status == BASE_JD_SOFT_EVENT_SET)
463 		kbasep_complete_triggered_soft_events(kctx, katom->jc);
464 }
465 
466 /**
467  * kbase_soft_event_update() - Update soft event state
468  * @kctx: Pointer to context
469  * @event: Event to update
470  * @new_status: New status value of event
471  *
472  * Update the event, and wake up any atoms waiting for the event.
473  *
474  * Return: 0 on success, a negative error code on failure.
475  */
int kbase_soft_event_update(struct kbase_context *kctx,
			     u64 event,
			     unsigned char new_status)
479 {
480 	int err = 0;
481 
482 	mutex_lock(&kctx->jctx.lock);
483 
484 	if (kbasep_write_soft_event_status(kctx, event, new_status)) {
485 		err = -ENOENT;
486 		goto out;
487 	}
488 
489 	if (new_status == BASE_JD_SOFT_EVENT_SET)
490 		kbasep_complete_triggered_soft_events(kctx, event);
491 
492 out:
493 	mutex_unlock(&kctx->jctx.lock);
494 
495 	return err;
496 }
497 
static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
499 {
500 	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
501 	if (jd_done_nolock(katom, NULL))
502 		kbase_js_sched_all(katom->kctx->kbdev);
503 }
504 
static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
506 {
507 	struct kbase_debug_copy_buffer *buffers = katom->softjob_data;
508 	unsigned int i;
509 	unsigned int nr = katom->nr_extres;
510 
511 	if (!buffers)
512 		return;
513 
514 	kbase_gpu_vm_lock(katom->kctx);
515 	for (i = 0; i < nr; i++) {
516 		int p;
517 		struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc;
518 
519 		if (!buffers[i].pages)
520 			break;
521 		for (p = 0; p < buffers[i].nr_pages; p++) {
522 			struct page *pg = buffers[i].pages[p];
523 
524 			if (pg)
525 				put_page(pg);
526 		}
527 		if (buffers[i].is_vmalloc)
528 			vfree(buffers[i].pages);
529 		else
530 			kfree(buffers[i].pages);
531 		if (gpu_alloc) {
532 			switch (gpu_alloc->type) {
533 			case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
534 			{
535 				kbase_free_user_buffer(&buffers[i]);
536 				break;
537 			}
538 			default:
539 				/* Nothing to be done. */
540 				break;
541 			}
542 			kbase_mem_phy_alloc_put(gpu_alloc);
543 		}
544 	}
545 	kbase_gpu_vm_unlock(katom->kctx);
546 	kfree(buffers);
547 
548 	katom->softjob_data = NULL;
549 }
550 
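/**
 * kbase_debug_copy_prepare() - Prepare a debug copy soft job
 * @katom: Atom to prepare
 *
 * Copies the buffer descriptors from user space, pins the destination user
 * pages and takes references on the external resources that will be copied
 * from when the job runs.
 *
 * Return: 0 on success, otherwise a negative error code.
 */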
static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
552 {
553 	struct kbase_debug_copy_buffer *buffers;
554 	struct base_jd_debug_copy_buffer *user_buffers = NULL;
555 	unsigned int i;
556 	unsigned int nr = katom->nr_extres;
557 	int ret = 0;
558 	void __user *user_structs = (void __user *)(uintptr_t)katom->jc;
559 
560 	if (!user_structs)
561 		return -EINVAL;
562 
563 	buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
564 	if (!buffers) {
565 		ret = -ENOMEM;
566 		goto out_cleanup;
567 	}
568 	katom->softjob_data = buffers;
569 
570 	user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
571 
572 	if (!user_buffers) {
573 		ret = -ENOMEM;
574 		goto out_cleanup;
575 	}
576 
577 	ret = copy_from_user(user_buffers, user_structs,
578 			sizeof(*user_buffers)*nr);
579 	if (ret) {
580 		ret = -EFAULT;
581 		goto out_cleanup;
582 	}
583 
584 	for (i = 0; i < nr; i++) {
585 		u64 addr = user_buffers[i].address;
586 		u64 page_addr = addr & PAGE_MASK;
587 		u64 end_page_addr = addr + user_buffers[i].size - 1;
588 		u64 last_page_addr = end_page_addr & PAGE_MASK;
589 		int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
590 		int pinned_pages;
591 		struct kbase_va_region *reg;
592 		struct base_external_resource user_extres;
593 
594 		if (!addr)
595 			continue;
596 
597 		if (last_page_addr < page_addr) {
598 			ret = -EINVAL;
599 			goto out_cleanup;
600 		}
601 
602 		buffers[i].nr_pages = nr_pages;
603 		buffers[i].offset = addr & ~PAGE_MASK;
604 		if (buffers[i].offset >= PAGE_SIZE) {
605 			ret = -EINVAL;
606 			goto out_cleanup;
607 		}
608 		buffers[i].size = user_buffers[i].size;
609 
610 		if (nr_pages > (KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD /
611 				sizeof(struct page *))) {
612 			buffers[i].is_vmalloc = true;
613 			buffers[i].pages = vzalloc(nr_pages *
614 					sizeof(struct page *));
615 		} else {
616 			buffers[i].is_vmalloc = false;
617 			buffers[i].pages = kcalloc(nr_pages,
618 					sizeof(struct page *), GFP_KERNEL);
619 		}
620 
621 		if (!buffers[i].pages) {
622 			ret = -ENOMEM;
623 			goto out_cleanup;
624 		}
625 
626 		pinned_pages = get_user_pages_fast(page_addr,
627 					nr_pages,
628 					1, /* Write */
629 					buffers[i].pages);
630 		if (pinned_pages < 0) {
631 			/* get_user_pages_fast has failed - page array is not
632 			 * valid. Don't try to release any pages.
633 			 */
634 			buffers[i].nr_pages = 0;
635 
636 			ret = pinned_pages;
637 			goto out_cleanup;
638 		}
639 		if (pinned_pages != nr_pages) {
640 			/* Adjust number of pages, so that we only attempt to
641 			 * release pages in the array that we know are valid.
642 			 */
643 			buffers[i].nr_pages = pinned_pages;
644 
645 			ret = -EINVAL;
646 			goto out_cleanup;
647 		}
648 
649 		user_extres = user_buffers[i].extres;
650 		if (user_extres.ext_resource == 0ULL) {
651 			ret = -EINVAL;
652 			goto out_cleanup;
653 		}
654 
655 		kbase_gpu_vm_lock(katom->kctx);
656 		reg = kbase_region_tracker_find_region_enclosing_address(
657 				katom->kctx, user_extres.ext_resource &
658 				~BASE_EXT_RES_ACCESS_EXCLUSIVE);
659 
660 		if (kbase_is_region_invalid_or_free(reg) ||
661 		    reg->gpu_alloc == NULL) {
662 			ret = -EINVAL;
663 			goto out_unlock;
664 		}
665 
666 		buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
667 		buffers[i].nr_extres_pages = reg->nr_pages;
668 
669 		if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
670 			dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not of same size as the external resource to copy.\n");
671 
672 		switch (reg->gpu_alloc->type) {
673 		case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
674 		{
675 			struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
676 			unsigned long nr_pages =
677 				alloc->imported.user_buf.nr_pages;
678 
679 			if (alloc->imported.user_buf.mm != current->mm) {
680 				ret = -EINVAL;
681 				goto out_unlock;
682 			}
683 			buffers[i].extres_pages = kcalloc(nr_pages,
684 					sizeof(struct page *), GFP_KERNEL);
685 			if (!buffers[i].extres_pages) {
686 				ret = -ENOMEM;
687 				goto out_unlock;
688 			}
689 
690 			ret = get_user_pages_fast(
691 					alloc->imported.user_buf.address,
692 					nr_pages, 0,
693 					buffers[i].extres_pages);
694 			if (ret != nr_pages) {
695 				/* Adjust number of pages, so that we only
696 				 * attempt to release pages in the array that we
697 				 * know are valid.
698 				 */
699 				if (ret < 0)
700 					buffers[i].nr_extres_pages = 0;
701 				else
702 					buffers[i].nr_extres_pages = ret;
703 
704 				goto out_unlock;
705 			}
706 			ret = 0;
707 			break;
708 		}
709 		default:
710 			/* Nothing to be done. */
711 			break;
712 		}
713 		kbase_gpu_vm_unlock(katom->kctx);
714 	}
715 	kfree(user_buffers);
716 
717 	return ret;
718 
719 out_unlock:
720 	kbase_gpu_vm_unlock(katom->kctx);
721 
722 out_cleanup:
	/* Frees the memory allocated for the kbase_debug_copy_buffer array,
	 * including its members, and clears katom->softjob_data
	 */
726 	kbase_debug_copy_finish(katom);
727 	kfree(user_buffers);
728 
729 	return ret;
730 }
731 #endif /* !MALI_USE_CSF */
732 
733 #if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
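/**
 * dma_buf_kmap_page() - Map a single page of an imported dma-buf allocation
 * @gpu_alloc: Imported UMM allocation backing the dma-buf
 * @page_num:  Index of the page to map
 * @page:      Output parameter for the page that was mapped
 *
 * Used in place of dma_buf_kmap(), which is not available on kernel 5.6
 * and later.
 *
 * Return: Kernel virtual address of the mapped page, or NULL on failure.
 */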
static void *dma_buf_kmap_page(struct kbase_mem_phy_alloc *gpu_alloc,
	unsigned long page_num, struct page **page)
736 {
737 	struct sg_table *sgt = gpu_alloc->imported.umm.sgt;
738 	struct sg_page_iter sg_iter;
739 	unsigned long page_index = 0;
740 
741 	if (WARN_ON(gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
742 		return NULL;
743 
744 	if (!sgt)
745 		return NULL;
746 
747 	if (WARN_ON(page_num >= gpu_alloc->nents))
748 		return NULL;
749 
750 	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
751 		if (page_index == page_num) {
752 			*page = sg_page_iter_page(&sg_iter);
753 
754 			return kmap(*page);
755 		}
756 		page_index++;
757 	}
758 
759 	return NULL;
760 }
761 #endif
762 
int kbase_mem_copy_from_extres(struct kbase_context *kctx,
		struct kbase_debug_copy_buffer *buf_data)
765 {
766 	unsigned int i;
767 	unsigned int target_page_nr = 0;
768 	struct page **pages = buf_data->pages;
769 	u64 offset = buf_data->offset;
770 	size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
771 	size_t to_copy = min(extres_size, buf_data->size);
772 	struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
773 	int ret = 0;
774 	size_t dma_to_copy;
775 
776 	KBASE_DEBUG_ASSERT(pages != NULL);
777 
778 	kbase_gpu_vm_lock(kctx);
779 	if (!gpu_alloc) {
780 		ret = -EINVAL;
781 		goto out_unlock;
782 	}
783 
784 	switch (gpu_alloc->type) {
785 	case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
786 	{
787 		for (i = 0; i < buf_data->nr_extres_pages &&
788 				target_page_nr < buf_data->nr_pages; i++) {
789 			struct page *pg = buf_data->extres_pages[i];
790 			void *extres_page = kmap(pg);
791 
792 			if (extres_page) {
793 				ret = kbase_mem_copy_to_pinned_user_pages(
794 						pages, extres_page, &to_copy,
795 						buf_data->nr_pages,
796 						&target_page_nr, offset);
797 				kunmap(pg);
798 				if (ret)
799 					goto out_unlock;
800 			}
801 		}
802 	}
803 	break;
804 	case KBASE_MEM_TYPE_IMPORTED_UMM: {
805 		struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;
806 
807 		KBASE_DEBUG_ASSERT(dma_buf != NULL);
808 		if (dma_buf->size > buf_data->nr_extres_pages * PAGE_SIZE)
809 			dev_warn(kctx->kbdev->dev, "External resources buffer size mismatch");
810 
811 		dma_to_copy = min(dma_buf->size,
812 			(size_t)(buf_data->nr_extres_pages * PAGE_SIZE));
813 		ret = dma_buf_begin_cpu_access(dma_buf,
814 #if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
815 					       0, dma_to_copy,
816 #endif
817 					       DMA_FROM_DEVICE);
818 		if (ret)
819 			goto out_unlock;
820 
821 		for (i = 0; i < dma_to_copy/PAGE_SIZE &&
822 				target_page_nr < buf_data->nr_pages; i++) {
823 #if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
824 			struct page *pg;
825 			void *extres_page = dma_buf_kmap_page(gpu_alloc, i, &pg);
826 #else
827 			void *extres_page = dma_buf_kmap(dma_buf, i);
828 #endif
829 			if (extres_page) {
830 				ret = kbase_mem_copy_to_pinned_user_pages(
831 						pages, extres_page, &to_copy,
832 						buf_data->nr_pages,
833 						&target_page_nr, offset);
834 
835 #if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
836 				kunmap(pg);
837 #else
838 				dma_buf_kunmap(dma_buf, i, extres_page);
839 #endif
840 				if (ret)
841 					break;
842 			}
843 		}
844 		dma_buf_end_cpu_access(dma_buf,
845 #if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
846 				       0, dma_to_copy,
847 #endif
848 				       DMA_FROM_DEVICE);
849 		break;
850 	}
851 	default:
852 		ret = -EINVAL;
853 	}
854 out_unlock:
855 	kbase_gpu_vm_unlock(kctx);
856 	return ret;
857 }
858 
859 #if !MALI_USE_CSF
static int kbase_debug_copy(struct kbase_jd_atom *katom)
861 {
862 	struct kbase_debug_copy_buffer *buffers = katom->softjob_data;
863 	unsigned int i;
864 
865 	if (WARN_ON(!buffers))
866 		return -EINVAL;
867 
868 	for (i = 0; i < katom->nr_extres; i++) {
869 		int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);
870 
871 		if (res)
872 			return res;
873 	}
874 
875 	return 0;
876 }
877 #endif /* !MALI_USE_CSF */
878 
879 #define KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT ((u32)0x7)
880 
int kbasep_jit_alloc_validate(struct kbase_context *kctx,
					struct base_jit_alloc_info *info)
883 {
884 	int j;
885 	/* If the ID is zero, then fail the job */
886 	if (info->id == 0)
887 		return -EINVAL;
888 
889 	/* Sanity check that the PA fits within the VA */
890 	if (info->va_pages < info->commit_pages)
891 		return -EINVAL;
892 
893 	/* Ensure the GPU address is correctly aligned */
894 	if ((info->gpu_alloc_addr & KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT) != 0)
895 		return -EINVAL;
896 
897 	/* Interface version 2 (introduced with kernel driver version 11.5)
898 	 * onward has padding and a flags member to validate.
899 	 *
900 	 * Note: To support earlier versions the extra bytes will have been set
901 	 * to 0 by the caller.
902 	 */
903 
904 	/* Check padding is all zeroed */
905 	for (j = 0; j < sizeof(info->padding); j++) {
906 		if (info->padding[j] != 0)
907 			return -EINVAL;
908 	}
909 
910 	/* Only valid flags shall be set */
911 	if (info->flags & ~(BASE_JIT_ALLOC_VALID_FLAGS))
912 		return -EINVAL;
913 
914 #if !MALI_JIT_PRESSURE_LIMIT_BASE
915 	/* If just-in-time memory allocation pressure limit feature is disabled,
916 	 * heap_info_gpu_addr must be zeroed-out
917 	 */
918 	if (info->heap_info_gpu_addr)
919 		return -EINVAL;
920 #endif
921 
922 #if !MALI_USE_CSF
923 	/* If BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE is set, heap_info_gpu_addr
924 	 * cannot be 0
925 	 */
926 	if ((info->flags & BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE) &&
927 			!info->heap_info_gpu_addr)
928 		return -EINVAL;
929 #endif /* !MALI_USE_CSF */
930 
931 	return 0;
932 }
933 
934 #if !MALI_USE_CSF
935 
936 /*
937  * Sizes of user data to copy for each just-in-time memory interface version
938  *
939  * In interface version 2 onwards this is the same as the struct size, allowing
940  * copying of arrays of structures from userspace.
941  *
942  * In interface version 1 the structure size was variable, and hence arrays of
943  * structures cannot be supported easily, and were not a feature present in
944  * version 1 anyway.
945  */
946 static const size_t jit_info_copy_size_for_jit_version[] = {
947 	/* in jit_version 1, the structure did not have any end padding, hence
948 	 * it could be a different size on 32 and 64-bit clients. We therefore
949 	 * do not copy past the last member
950 	 */
951 	[1] = offsetofend(struct base_jit_alloc_info_10_2, id),
952 	[2] = sizeof(struct base_jit_alloc_info_11_5),
953 	[3] = sizeof(struct base_jit_alloc_info)
954 };
955 
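/**
 * kbase_jit_allocate_prepare() - Prepare a JIT allocation soft job
 * @katom: Atom to prepare
 *
 * Copies and validates the array of base_jit_alloc_info structures from
 * user space, then adds the atom to the context's list of JIT atoms.
 *
 * Return: 0 on success, otherwise a negative error code.
 */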
static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
957 {
958 	__user u8 *data = (__user u8 *)(uintptr_t) katom->jc;
959 	struct base_jit_alloc_info *info;
960 	struct kbase_context *kctx = katom->kctx;
961 	struct kbase_device *kbdev = kctx->kbdev;
962 	u32 count;
963 	int ret;
964 	u32 i;
965 	size_t jit_info_user_copy_size;
966 
967 	WARN_ON(kctx->jit_version >=
968 		ARRAY_SIZE(jit_info_copy_size_for_jit_version));
969 	jit_info_user_copy_size =
970 			jit_info_copy_size_for_jit_version[kctx->jit_version];
971 	WARN_ON(jit_info_user_copy_size > sizeof(*info));
972 
973 	/* For backwards compatibility, and to prevent reading more than 1 jit
974 	 * info struct on jit version 1
975 	 */
976 	if (katom->nr_extres == 0 || kctx->jit_version == 1)
977 		katom->nr_extres = 1;
978 	count = katom->nr_extres;
979 
980 	/* Sanity checks */
981 	if (!data || count > kctx->jit_max_allocations ||
982 			count > ARRAY_SIZE(kctx->jit_alloc)) {
983 		ret = -EINVAL;
984 		goto fail;
985 	}
986 
987 	/* Copy the information for safe access and future storage */
988 	info = kmalloc_array(count, sizeof(*info), GFP_KERNEL);
989 	if (!info) {
990 		ret = -ENOMEM;
991 		goto fail;
992 	}
993 
994 	katom->softjob_data = info;
995 
996 	for (i = 0; i < count; i++, info++, data += jit_info_user_copy_size) {
997 		if (copy_from_user(info, data, jit_info_user_copy_size) != 0) {
998 			ret = -EINVAL;
999 			goto free_info;
1000 		}
1001 		/* Clear any remaining bytes when user struct is smaller than
1002 		 * kernel struct. For jit version 1, this also clears the
1003 		 * padding bytes
1004 		 */
1005 		memset(((u8 *)info) + jit_info_user_copy_size, 0,
1006 				sizeof(*info) - jit_info_user_copy_size);
1007 
1008 		ret = kbasep_jit_alloc_validate(kctx, info);
1009 		if (ret)
1010 			goto free_info;
1011 		KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO(
1012 			kbdev, katom, info->va_pages, info->commit_pages,
1013 			info->extension, info->id, info->bin_id,
1014 			info->max_allocations, info->flags, info->usage_id);
1015 	}
1016 
1017 	katom->jit_blocked = false;
1018 
1019 	lockdep_assert_held(&kctx->jctx.lock);
1020 	list_add_tail(&katom->jit_node, &kctx->jctx.jit_atoms_head);
1021 
1022 	/*
1023 	 * Note:
1024 	 * The provided info->gpu_alloc_addr isn't validated here as
1025 	 * userland can cache allocations which means that even
1026 	 * though the region is valid it doesn't represent the
1027 	 * same thing it used to.
1028 	 *
1029 	 * Complete validation of va_pages, commit_pages and extension
1030 	 * isn't done here as it will be done during the call to
1031 	 * kbase_mem_alloc.
1032 	 */
1033 	return 0;
1034 
1035 free_info:
1036 	kfree(katom->softjob_data);
1037 	katom->softjob_data = NULL;
1038 fail:
1039 	return ret;
1040 }
1041 
static u8 *kbase_jit_free_get_ids(struct kbase_jd_atom *katom)
1043 {
1044 	if (WARN_ON((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) !=
1045 				BASE_JD_REQ_SOFT_JIT_FREE))
1046 		return NULL;
1047 
1048 	return (u8 *) katom->softjob_data;
1049 }
1050 
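/**
 * kbase_jit_add_to_pending_alloc_list() - Queue an atom whose JIT
 *                                         allocation is blocked
 * @katom: Atom to queue
 *
 * The jit_pending_alloc list is kept sorted by ascending atom age.
 */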
static void kbase_jit_add_to_pending_alloc_list(struct kbase_jd_atom *katom)
1052 {
1053 	struct kbase_context *kctx = katom->kctx;
1054 	struct list_head *target_list_head = NULL;
1055 	struct kbase_jd_atom *entry;
1056 
1057 	list_for_each_entry(entry, &kctx->jctx.jit_pending_alloc, queue) {
1058 		if (katom->age < entry->age) {
1059 			target_list_head = &entry->queue;
1060 			break;
1061 		}
1062 	}
1063 
1064 	if (target_list_head == NULL)
1065 		target_list_head = &kctx->jctx.jit_pending_alloc;
1066 
1067 	list_add_tail(&katom->queue, target_list_head);
1068 }
1069 
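/**
 * kbase_jit_allocate_process() - Perform the JIT allocations for an atom
 * @katom: Atom to process
 *
 * Attempts to create every JIT allocation described by the atom. If an
 * allocation fails but an earlier, still active JIT free atom may release
 * memory, the set is rolled back and the atom is blocked until the pending
 * frees complete.
 *
 * Return: 0 if the atom has completed (its event code indicates the
 *         outcome), 1 if the atom is blocked waiting for pending frees.
 */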
static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
1071 {
1072 	struct kbase_context *kctx = katom->kctx;
1073 	struct kbase_device *kbdev = kctx->kbdev;
1074 	struct base_jit_alloc_info *info;
1075 	struct kbase_va_region *reg;
1076 	struct kbase_vmap_struct mapping;
1077 	u64 *ptr, new_addr;
1078 	u32 count = katom->nr_extres;
1079 	u32 i;
1080 	bool ignore_pressure_limit = false;
1081 
1082 	trace_sysgraph(SGR_SUBMIT, kctx->id,
1083 			kbase_jd_atom_id(kctx, katom));
1084 
1085 	if (katom->jit_blocked) {
1086 		list_del(&katom->queue);
1087 		katom->jit_blocked = false;
1088 	}
1089 
1090 	info = katom->softjob_data;
1091 	if (WARN_ON(!info)) {
1092 		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1093 		return 0;
1094 	}
1095 
1096 	for (i = 0; i < count; i++, info++) {
1097 		/* The JIT ID is still in use so fail the allocation */
1098 		if (kctx->jit_alloc[info->id]) {
1099 			katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1100 			return 0;
1101 		}
1102 	}
1103 
1104 #if MALI_JIT_PRESSURE_LIMIT_BASE
1105 	/*
1106 	 * If this is the only JIT_ALLOC atom in-flight or if JIT pressure limit
1107 	 * is disabled at the context scope, then bypass JIT pressure limit
1108 	 * logic in kbase_jit_allocate().
1109 	 */
1110 	if (!kbase_ctx_flag(kctx, KCTX_JPL_ENABLED)
1111 		|| (kctx->jit_current_allocations == 0)) {
1112 		ignore_pressure_limit = true;
1113 	}
1114 #else
1115 	ignore_pressure_limit = true;
1116 #endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
1117 
1118 	for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
1119 		if (kctx->jit_alloc[info->id]) {
1120 			/* The JIT ID is duplicated in this atom. Roll back
1121 			 * previous allocations and fail.
1122 			 */
1123 			u32 j;
1124 
1125 			info = katom->softjob_data;
1126 			for (j = 0; j < i; j++, info++) {
1127 				kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
1128 				kctx->jit_alloc[info->id] =
1129 						KBASE_RESERVED_REG_JIT_ALLOC;
1130 			}
1131 
1132 			katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1133 			return 0;
1134 		}
1135 
1136 		/* Create a JIT allocation */
1137 		reg = kbase_jit_allocate(kctx, info, ignore_pressure_limit);
1138 		if (!reg) {
1139 			struct kbase_jd_atom *jit_atom;
1140 			bool can_block = false;
1141 
1142 			lockdep_assert_held(&kctx->jctx.lock);
1143 
1144 			list_for_each_entry(jit_atom, &kctx->jctx.jit_atoms_head, jit_node) {
1145 				if (jit_atom == katom)
1146 					break;
1147 
1148 				if ((jit_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
1149 						BASE_JD_REQ_SOFT_JIT_FREE) {
1150 					u8 *free_ids = kbase_jit_free_get_ids(jit_atom);
1151 
1152 					if (free_ids && *free_ids &&
1153 						kctx->jit_alloc[*free_ids]) {
1154 						/* A JIT free which is active and
1155 						 * submitted before this atom
1156 						 */
1157 						can_block = true;
1158 						break;
1159 					}
1160 				}
1161 			}
1162 
1163 			if (!can_block) {
1164 				/* Mark the failed allocation as well as the
1165 				 * other un-attempted allocations in the set,
1166 				 * so we know they are in use even if the
1167 				 * allocation itself failed.
1168 				 */
1169 				for (; i < count; i++, info++) {
1170 					kctx->jit_alloc[info->id] =
1171 						KBASE_RESERVED_REG_JIT_ALLOC;
1172 				}
1173 
1174 				katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1175 				dev_warn_ratelimited(kbdev->dev, "JIT alloc softjob failed: atom id %d\n",
1176 						     kbase_jd_atom_id(kctx, katom));
1177 				return 0;
1178 			}
1179 
1180 			/* There are pending frees for an active allocation
1181 			 * so we should wait to see whether they free the
1182 			 * memory. Add to the list of atoms for which JIT
1183 			 * allocation is pending.
1184 			 */
1185 			kbase_jit_add_to_pending_alloc_list(katom);
1186 			katom->jit_blocked = true;
1187 
1188 			/* Rollback, the whole set will be re-attempted */
1189 			while (i-- > 0) {
1190 				info--;
1191 				kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
1192 				kctx->jit_alloc[info->id] = NULL;
1193 			}
1194 
1195 			return 1;
1196 		}
1197 
1198 		/* Bind it to the user provided ID. */
1199 		kctx->jit_alloc[info->id] = reg;
1200 	}
1201 
1202 	for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
1203 		u64 entry_mmu_flags = 0;
1204 		/*
1205 		 * Write the address of the JIT allocation to the user provided
1206 		 * GPU allocation.
1207 		 */
1208 		ptr = kbase_vmap_prot(kctx, info->gpu_alloc_addr, sizeof(*ptr),
1209 				KBASE_REG_CPU_WR, &mapping);
1210 		if (!ptr) {
1211 			/*
1212 			 * Leave the allocations "live" as the JIT free atom
1213 			 * will be submitted anyway.
1214 			 */
1215 			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1216 			return 0;
1217 		}
1218 
1219 		reg = kctx->jit_alloc[info->id];
1220 		new_addr = reg->start_pfn << PAGE_SHIFT;
1221 		*ptr = new_addr;
1222 
1223 #if defined(CONFIG_MALI_VECTOR_DUMP)
1224 		/*
1225 		 * Retrieve the mmu flags for JIT allocation
1226 		 * only if dumping is enabled
1227 		 */
1228 		entry_mmu_flags = kbase_mmu_create_ate(kbdev,
1229 			(struct tagged_addr){ 0 }, reg->flags,
1230 			 MIDGARD_MMU_BOTTOMLEVEL, kctx->jit_group_id);
1231 #endif
1232 
1233 		KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(
1234 			kbdev, katom, info->gpu_alloc_addr, new_addr,
1235 			info->flags, entry_mmu_flags, info->id,
1236 			info->commit_pages, info->extension, info->va_pages);
1237 		kbase_vunmap(kctx, &mapping);
1238 
1239 		kbase_trace_jit_report_gpu_mem(kctx, reg,
1240 				KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
1241 	}
1242 
1243 	katom->event_code = BASE_JD_EVENT_DONE;
1244 
1245 	return 0;
1246 }
1247 
static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
1249 {
1250 	struct base_jit_alloc_info *info;
1251 
1252 	lockdep_assert_held(&katom->kctx->jctx.lock);
1253 
1254 	if (WARN_ON(!katom->softjob_data))
1255 		return;
1256 
1257 	/* Remove atom from jit_atoms_head list */
1258 	list_del(&katom->jit_node);
1259 
1260 	if (katom->jit_blocked) {
1261 		list_del(&katom->queue);
1262 		katom->jit_blocked = false;
1263 	}
1264 
1265 	info = katom->softjob_data;
1266 	/* Free the info structure */
1267 	kfree(info);
1268 }
1269 
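/**
 * kbase_jit_free_prepare() - Prepare a JIT free soft job
 * @katom: Atom to prepare
 *
 * Copies the list of JIT allocation IDs to free from user space, or takes a
 * single ID from the atom's jc field for older interface versions, and adds
 * the atom to the context's list of JIT atoms.
 *
 * Return: 0 on success, otherwise a negative error code.
 */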
static int kbase_jit_free_prepare(struct kbase_jd_atom *katom)
1271 {
1272 	struct kbase_context *kctx = katom->kctx;
1273 	struct kbase_device *kbdev = kctx->kbdev;
1274 	__user void *data = (__user void *)(uintptr_t) katom->jc;
1275 	u8 *ids;
1276 	u32 count = MAX(katom->nr_extres, 1);
1277 	u32 i;
1278 	int ret;
1279 
1280 	/* Sanity checks */
1281 	if (count > ARRAY_SIZE(kctx->jit_alloc)) {
1282 		ret = -EINVAL;
1283 		goto fail;
1284 	}
1285 
1286 	/* Copy the information for safe access and future storage */
1287 	ids = kmalloc_array(count, sizeof(*ids), GFP_KERNEL);
1288 	if (!ids) {
1289 		ret = -ENOMEM;
1290 		goto fail;
1291 	}
1292 
1293 	lockdep_assert_held(&kctx->jctx.lock);
1294 	katom->softjob_data = ids;
1295 
1296 	/* For backwards compatibility */
1297 	if (katom->nr_extres) {
1298 		/* Fail the job if there is no list of ids */
1299 		if (!data) {
1300 			ret = -EINVAL;
1301 			goto free_info;
1302 		}
1303 
1304 		if (copy_from_user(ids, data, sizeof(*ids)*count) != 0) {
1305 			ret = -EINVAL;
1306 			goto free_info;
1307 		}
1308 	} else {
1309 		katom->nr_extres = 1;
1310 		*ids = (u8)katom->jc;
1311 	}
1312 	for (i = 0; i < count; i++)
1313 		KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO(kbdev, katom, ids[i]);
1314 
1315 	list_add_tail(&katom->jit_node, &kctx->jctx.jit_atoms_head);
1316 
1317 	return 0;
1318 
1319 free_info:
1320 	kfree(katom->softjob_data);
1321 	katom->softjob_data = NULL;
1322 fail:
1323 	return ret;
1324 }
1325 
static void kbase_jit_free_process(struct kbase_jd_atom *katom)
1327 {
1328 	struct kbase_context *kctx = katom->kctx;
1329 	u8 *ids = kbase_jit_free_get_ids(katom);
1330 	u32 count = katom->nr_extres;
1331 	u32 i;
1332 
1333 	if (ids == NULL) {
1334 		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1335 		return;
1336 	}
1337 
1338 	for (i = 0; i < count; i++, ids++) {
1339 		/*
1340 		 * If the ID is zero or it is not in use yet then fail the job.
1341 		 */
1342 		if ((*ids == 0) || (kctx->jit_alloc[*ids] == NULL)) {
1343 			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1344 			return;
1345 		}
1346 	}
1347 }
1348 
static void kbasep_jit_finish_worker(struct work_struct *work)
1350 {
1351 	struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
1352 			work);
1353 	struct kbase_context *kctx = katom->kctx;
1354 	int resched;
1355 
1356 	mutex_lock(&kctx->jctx.lock);
1357 	kbase_finish_soft_job(katom);
1358 	resched = jd_done_nolock(katom, NULL);
1359 	mutex_unlock(&kctx->jctx.lock);
1360 
1361 	if (resched)
1362 		kbase_js_sched_all(kctx->kbdev);
1363 }
1364 
void kbase_jit_retry_pending_alloc(struct kbase_context *kctx)
1366 {
1367 	LIST_HEAD(jit_pending_alloc_list);
1368 	struct list_head *i, *tmp;
1369 
1370 	list_splice_tail_init(&kctx->jctx.jit_pending_alloc,
1371 		&jit_pending_alloc_list);
1372 
1373 	list_for_each_safe(i, tmp, &jit_pending_alloc_list) {
1374 		struct kbase_jd_atom *pending_atom = list_entry(i,
1375 				struct kbase_jd_atom, queue);
1376 		KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(kctx->kbdev, pending_atom);
1377 		kbase_kinstr_jm_atom_sw_start(pending_atom);
1378 		if (kbase_jit_allocate_process(pending_atom) == 0) {
1379 			/* Atom has completed */
1380 			INIT_WORK(&pending_atom->work,
1381 					kbasep_jit_finish_worker);
1382 			queue_work(kctx->jctx.job_done_wq, &pending_atom->work);
1383 		}
1384 		KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(kctx->kbdev, pending_atom);
1385 		kbase_kinstr_jm_atom_sw_stop(pending_atom);
1386 	}
1387 }
1388 
static void kbase_jit_free_finish(struct kbase_jd_atom *katom)
1390 {
1391 	struct kbase_context *kctx = katom->kctx;
1392 	u8 *ids;
1393 	size_t j;
1394 
1395 	lockdep_assert_held(&kctx->jctx.lock);
1396 
1397 	ids = kbase_jit_free_get_ids(katom);
	if (WARN_ON(ids == NULL))
		return;
1401 
1402 	/* Remove this atom from the jit_atoms_head list */
1403 	list_del(&katom->jit_node);
1404 
1405 	for (j = 0; j != katom->nr_extres; ++j) {
1406 		if ((ids[j] != 0) && (kctx->jit_alloc[ids[j]] != NULL)) {
			/*
			 * If the ID is valid but the allocation request
			 * failed, still succeed this soft job but don't try
			 * to free the allocation.
			 */
1412 			if (kctx->jit_alloc[ids[j]] !=
1413 					KBASE_RESERVED_REG_JIT_ALLOC) {
1414 				KBASE_TLSTREAM_TL_JIT_USEDPAGES(kctx->kbdev,
1415 					kctx->jit_alloc[ids[j]]->
1416 					gpu_alloc->nents, ids[j]);
1417 				kbase_jit_free(kctx, kctx->jit_alloc[ids[j]]);
1418 			}
1419 			kctx->jit_alloc[ids[j]] = NULL;
1420 		}
1421 	}
1422 	/* Free the list of ids */
1423 	kfree(ids);
1424 
1425 	kbase_jit_retry_pending_alloc(kctx);
1426 }
1427 
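/**
 * kbase_ext_res_prepare() - Prepare an external resource map/unmap soft job
 * @katom: Atom to prepare
 *
 * Copies the list of external resources from user space so it can be
 * processed when the job runs.
 *
 * Return: 0 on success, otherwise a negative error code.
 */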
static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
1429 {
1430 	__user struct base_external_resource_list *user_ext_res;
1431 	struct base_external_resource_list *ext_res;
1432 	u64 count = 0;
1433 	size_t copy_size;
1434 
1435 	user_ext_res = (__user struct base_external_resource_list *)
1436 			(uintptr_t) katom->jc;
1437 
1438 	/* Fail the job if there is no info structure */
1439 	if (!user_ext_res)
1440 		return -EINVAL;
1441 
1442 	if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0)
1443 		return -EINVAL;
1444 
1445 	/* Is the number of external resources in range? */
1446 	if (!count || count > BASE_EXT_RES_COUNT_MAX)
1447 		return -EINVAL;
1448 
1449 	/* Copy the information for safe access and future storage */
1450 	copy_size = sizeof(*ext_res);
1451 	copy_size += sizeof(struct base_external_resource) * (count - 1);
1452 	ext_res = memdup_user(user_ext_res, copy_size);
1453 	if (IS_ERR(ext_res))
1454 		return PTR_ERR(ext_res);
1455 
	/*
	 * Overwrite the count with the first value in case it was changed
	 * after the fact.
	 */
1460 	ext_res->count = count;
1461 
1462 	katom->softjob_data = ext_res;
1463 
1464 	return 0;
1465 }
1466 
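/**
 * kbase_ext_res_process() - Map or unmap the atom's external resources
 * @katom: Atom to process
 * @map:   True to acquire the sticky resources, false to release them
 *
 * If acquiring a resource fails, any resources already acquired by this
 * atom are released and the atom is marked as invalid.
 */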
static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
1468 {
1469 	struct base_external_resource_list *ext_res;
1470 	int i;
1471 	bool failed = false;
1472 
1473 	ext_res = katom->softjob_data;
1474 	if (!ext_res)
1475 		goto failed_jc;
1476 
1477 	kbase_gpu_vm_lock(katom->kctx);
1478 
1479 	for (i = 0; i < ext_res->count; i++) {
1480 		u64 gpu_addr;
1481 
1482 		gpu_addr = ext_res->ext_res[i].ext_resource &
1483 				~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1484 		if (map) {
1485 			if (!kbase_sticky_resource_acquire(katom->kctx,
1486 					gpu_addr))
1487 				goto failed_loop;
1488 		} else
1489 			if (!kbase_sticky_resource_release_force(katom->kctx, NULL,
1490 					gpu_addr))
1491 				failed = true;
1492 	}
1493 
	/*
	 * In the case of unmap we continue unmapping the remaining resources
	 * on failure, but will always report failure if _any_ unmap request
	 * fails.
	 */
1499 	if (failed)
1500 		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1501 	else
1502 		katom->event_code = BASE_JD_EVENT_DONE;
1503 
1504 	kbase_gpu_vm_unlock(katom->kctx);
1505 
1506 	return;
1507 
1508 failed_loop:
1509 	while (i > 0) {
1510 		u64 const gpu_addr = ext_res->ext_res[i - 1].ext_resource &
1511 				~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1512 
1513 		kbase_sticky_resource_release_force(katom->kctx, NULL, gpu_addr);
1514 
1515 		--i;
1516 	}
1517 
1518 	katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1519 	kbase_gpu_vm_unlock(katom->kctx);
1520 
1521 failed_jc:
1522 	return;
1523 }
1524 
static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
1526 {
1527 	struct base_external_resource_list *ext_res;
1528 
1529 	ext_res = katom->softjob_data;
1530 	/* Free the info structure */
1531 	kfree(ext_res);
1532 }
1533 
int kbase_process_soft_job(struct kbase_jd_atom *katom)
1535 {
1536 	int ret = 0;
1537 	struct kbase_context *kctx = katom->kctx;
1538 	struct kbase_device *kbdev = kctx->kbdev;
1539 
1540 	KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(kbdev, katom);
1541 	kbase_kinstr_jm_atom_sw_start(katom);
1542 
1543 	trace_sysgraph(SGR_SUBMIT, kctx->id,
1544 			kbase_jd_atom_id(kctx, katom));
1545 
1546 	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1547 	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1548 		ret = kbase_dump_cpu_gpu_time(katom);
1549 		break;
1550 
1551 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1552 	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1553 		katom->event_code = kbase_sync_fence_out_trigger(katom,
1554 				katom->event_code == BASE_JD_EVENT_DONE ?
1555 								0 : -EFAULT);
1556 		break;
1557 	case BASE_JD_REQ_SOFT_FENCE_WAIT:
1558 	{
1559 		ret = kbase_sync_fence_in_wait(katom);
1560 
1561 		if (ret == 1) {
1562 #ifdef CONFIG_MALI_BIFROST_FENCE_DEBUG
1563 			kbasep_add_waiting_with_timeout(katom);
1564 #else
1565 			kbasep_add_waiting_soft_job(katom);
1566 #endif
1567 		}
1568 		break;
1569 	}
1570 #endif
1571 	case BASE_JD_REQ_SOFT_EVENT_WAIT:
1572 		ret = kbasep_soft_event_wait(katom);
1573 		break;
1574 	case BASE_JD_REQ_SOFT_EVENT_SET:
1575 		kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
1576 		break;
1577 	case BASE_JD_REQ_SOFT_EVENT_RESET:
1578 		kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
1579 		break;
1580 	case BASE_JD_REQ_SOFT_DEBUG_COPY:
1581 	{
1582 		int res = kbase_debug_copy(katom);
1583 
1584 		if (res)
1585 			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1586 		break;
1587 	}
1588 	case BASE_JD_REQ_SOFT_JIT_ALLOC:
1589 		ret = kbase_jit_allocate_process(katom);
1590 		break;
1591 	case BASE_JD_REQ_SOFT_JIT_FREE:
1592 		kbase_jit_free_process(katom);
1593 		break;
1594 	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1595 		kbase_ext_res_process(katom, true);
1596 		break;
1597 	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1598 		kbase_ext_res_process(katom, false);
1599 		break;
1600 	}
1601 
1602 	/* Atom is complete */
1603 	KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(kbdev, katom);
1604 	kbase_kinstr_jm_atom_sw_stop(katom);
1605 	return ret;
1606 }
1607 
void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
1609 {
1610 	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1611 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1612 	case BASE_JD_REQ_SOFT_FENCE_WAIT:
1613 		kbase_sync_fence_in_cancel_wait(katom);
1614 		break;
1615 #endif
1616 	case BASE_JD_REQ_SOFT_EVENT_WAIT:
1617 		kbasep_soft_event_cancel_job(katom);
1618 		break;
1619 	default:
1620 		/* This soft-job doesn't support cancellation! */
1621 		KBASE_DEBUG_ASSERT(0);
1622 	}
1623 }
1624 
int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
1626 {
1627 	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1628 	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1629 		{
1630 			if (!IS_ALIGNED(katom->jc, cache_line_size()))
1631 				return -EINVAL;
1632 		}
1633 		break;
1634 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1635 	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1636 		{
1637 			struct base_fence fence;
1638 			int fd;
1639 
1640 			if (copy_from_user(&fence,
1641 					   (__user void *)(uintptr_t)katom->jc,
1642 					   sizeof(fence)) != 0)
1643 				return -EINVAL;
1644 
1645 			fd = kbase_sync_fence_out_create(katom,
1646 							 fence.basep.stream_fd);
1647 			if (fd < 0)
1648 				return -EINVAL;
1649 
1650 			fence.basep.fd = fd;
1651 			if (copy_to_user((__user void *)(uintptr_t)katom->jc,
1652 					 &fence, sizeof(fence)) != 0) {
1653 				kbase_sync_fence_out_remove(katom);
				/* fd should have been closed here, but there's
				 * no good way of doing that. Since
				 * copy_to_user() very rarely fails, and the fd
				 * will get closed on process termination, this
				 * won't be a problem.
				 */
1660 				fence.basep.fd = -EINVAL;
1661 				return -EINVAL;
1662 			}
1663 		}
1664 		break;
1665 	case BASE_JD_REQ_SOFT_FENCE_WAIT:
1666 		{
1667 			struct base_fence fence;
1668 			int ret;
1669 
1670 			if (copy_from_user(&fence,
1671 					   (__user void *)(uintptr_t)katom->jc,
1672 					   sizeof(fence)) != 0)
1673 				return -EINVAL;
1674 
1675 			/* Get a reference to the fence object */
1676 			ret = kbase_sync_fence_in_from_fd(katom,
1677 							  fence.basep.fd);
1678 			if (ret < 0)
1679 				return ret;
1680 
1681 #ifdef CONFIG_MALI_BIFROST_DMA_FENCE
			/*
			 * Set KCTX_NO_IMPLICIT_SYNC in the context the first
			 * time a soft fence wait job is observed. This will
			 * prevent the implicit dma-buf fences from conflicting
			 * with the Android native sync fences.
			 */
1688 			if (!kbase_ctx_flag(katom->kctx, KCTX_NO_IMPLICIT_SYNC))
1689 				kbase_ctx_flag_set(katom->kctx, KCTX_NO_IMPLICIT_SYNC);
1690 #endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
1691 		}
1692 		break;
1693 #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
1694 	case BASE_JD_REQ_SOFT_JIT_ALLOC:
1695 		return kbase_jit_allocate_prepare(katom);
1696 	case BASE_JD_REQ_SOFT_JIT_FREE:
1697 		return kbase_jit_free_prepare(katom);
1698 	case BASE_JD_REQ_SOFT_EVENT_WAIT:
1699 	case BASE_JD_REQ_SOFT_EVENT_SET:
1700 	case BASE_JD_REQ_SOFT_EVENT_RESET:
1701 		if (katom->jc == 0)
1702 			return -EINVAL;
1703 		break;
1704 	case BASE_JD_REQ_SOFT_DEBUG_COPY:
1705 		return kbase_debug_copy_prepare(katom);
1706 	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1707 		return kbase_ext_res_prepare(katom);
1708 	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1709 		return kbase_ext_res_prepare(katom);
1710 	default:
1711 		/* Unsupported soft-job */
1712 		return -EINVAL;
1713 	}
1714 	return 0;
1715 }
1716 
void kbase_finish_soft_job(struct kbase_jd_atom *katom)
1718 {
1719 	trace_sysgraph(SGR_COMPLETE, katom->kctx->id,
1720 			kbase_jd_atom_id(katom->kctx, katom));
1721 
1722 	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1723 	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1724 		/* Nothing to do */
1725 		break;
1726 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
1727 	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1728 		/* If fence has not yet been signaled, do it now */
1729 		kbase_sync_fence_out_trigger(katom, katom->event_code ==
1730 				BASE_JD_EVENT_DONE ? 0 : -EFAULT);
1731 		break;
1732 	case BASE_JD_REQ_SOFT_FENCE_WAIT:
1733 		/* Release katom's reference to fence object */
1734 		kbase_sync_fence_in_remove(katom);
1735 		break;
1736 #endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
1737 	case BASE_JD_REQ_SOFT_DEBUG_COPY:
1738 		kbase_debug_copy_finish(katom);
1739 		break;
1740 	case BASE_JD_REQ_SOFT_JIT_ALLOC:
1741 		kbase_jit_allocate_finish(katom);
1742 		break;
1743 	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1744 		kbase_ext_res_finish(katom);
1745 		break;
1746 	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1747 		kbase_ext_res_finish(katom);
1748 		break;
1749 	case BASE_JD_REQ_SOFT_JIT_FREE:
1750 		kbase_jit_free_finish(katom);
1751 		break;
1752 	}
1753 }
1754 
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
1756 {
1757 	LIST_HEAD(local_suspended_soft_jobs);
1758 	struct kbase_jd_atom *tmp_iter;
1759 	struct kbase_jd_atom *katom_iter;
1760 	struct kbasep_js_device_data *js_devdata;
1761 	bool resched = false;
1762 
1763 	KBASE_DEBUG_ASSERT(kbdev);
1764 
1765 	js_devdata = &kbdev->js_data;
1766 
1767 	/* Move out the entire list */
1768 	mutex_lock(&js_devdata->runpool_mutex);
1769 	list_splice_init(&js_devdata->suspended_soft_jobs_list,
1770 			&local_suspended_soft_jobs);
1771 	mutex_unlock(&js_devdata->runpool_mutex);
1772 
	/*
	 * Each atom must be detached from the list and run separately -
	 * it could be re-added to the old list, but this is unlikely
	 */
1777 	list_for_each_entry_safe(katom_iter, tmp_iter,
1778 			&local_suspended_soft_jobs, dep_item[1]) {
1779 		struct kbase_context *kctx = katom_iter->kctx;
1780 
1781 		mutex_lock(&kctx->jctx.lock);
1782 
1783 		/* Remove from the global list */
1784 		list_del(&katom_iter->dep_item[1]);
1785 		/* Remove from the context's list of waiting soft jobs */
1786 		kbasep_remove_waiting_soft_job(katom_iter);
1787 
1788 		if (kbase_process_soft_job(katom_iter) == 0) {
1789 			kbase_finish_soft_job(katom_iter);
1790 			resched |= jd_done_nolock(katom_iter, NULL);
1791 #ifdef CONFIG_MALI_ARBITER_SUPPORT
1792 			atomic_dec(&kbdev->pm.gpu_users_waiting);
1793 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
1794 		}
1795 		mutex_unlock(&kctx->jctx.lock);
1796 	}
1797 
1798 	if (resched)
1799 		kbase_js_sched_all(kbdev);
1800 }
1801 #endif /* !MALI_USE_CSF */
1802