/*
 *
 * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/**
 * @file mali_kbase_js.h
 * Job Scheduler APIs.
 */

#ifndef _KBASE_JS_H_
#define _KBASE_JS_H_

#include "mali_kbase_js_defs.h"
#include "mali_kbase_context.h"
#include "mali_kbase_defs.h"
#include "mali_kbase_debug.h"

#include "mali_kbase_js_ctx_attr.h"

/**
 * @addtogroup base_api
 * @{
 */

/**
 * @addtogroup base_kbase_api
 * @{
 */

/**
 * @addtogroup kbase_js Job Scheduler Internal APIs
 * @{
 *
 * These APIs are Internal to KBase.
 */

/**
 * @brief Initialize the Job Scheduler
 *
 * The struct kbasep_js_device_data sub-structure of \a kbdev must be zero
 * initialized before passing to the kbasep_js_devdata_init() function. This is
 * to give efficient error path code.
 */
int kbasep_js_devdata_init(struct kbase_device * const kbdev);

/**
 * @brief Halt the Job Scheduler.
 *
 * It is safe to call this on \a kbdev even if the kbasep_js_device_data
 * sub-structure was never initialized or failed initialization, to give
 * efficient error-path code.
 *
 * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev
 * must be zero initialized before passing to the kbasep_js_devdata_init()
 * function. This is to give efficient error path code.
 *
 * It is a Programming Error to call this whilst there are still kbase_context
 * structures registered with this scheduler.
 */
void kbasep_js_devdata_halt(struct kbase_device *kbdev);

/**
 * @brief Terminate the Job Scheduler
 *
 * It is safe to call this on \a kbdev even if the kbasep_js_device_data
 * sub-structure was never initialized or failed initialization, to give
 * efficient error-path code.
 *
 * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev
 * must be zero initialized before passing to the kbasep_js_devdata_init()
 * function. This is to give efficient error path code.
 *
 * It is a Programming Error to call this whilst there are still kbase_context
 * structures registered with this scheduler.
 */
void kbasep_js_devdata_term(struct kbase_device *kbdev);

/**
 * @brief Initialize the Scheduling Component of a struct kbase_context on the
 * Job Scheduler.
 *
 * This effectively registers a struct kbase_context with a Job Scheduler.
 *
 * It does not register any jobs owned by the struct kbase_context with the
 * scheduler. Those must be separately registered by kbasep_js_add_job().
 *
 * The struct kbase_context must be zero initialized before passing to the
 * kbasep_js_kctx_init() function. This is to give efficient error path code.
 */
int kbasep_js_kctx_init(struct kbase_context * const kctx);

/**
 * @brief Terminate the Scheduling Component of a struct kbase_context on the
 * Job Scheduler
 *
 * This effectively de-registers a struct kbase_context from its Job Scheduler.
 *
 * It is safe to call this on a struct kbase_context that has never had or
 * failed initialization of its jctx.sched_info member, to give efficient
 * error-path code.
 *
 * For this to work, the struct kbase_context must be zero initialized before
 * passing to the kbasep_js_kctx_init() function.
 *
 * It is a Programming Error to call this whilst there are still jobs
 * registered with this context.
 */
void kbasep_js_kctx_term(struct kbase_context *kctx);

/**
 * @brief Add a job chain to the Job Scheduler, and take necessary actions to
 * schedule the context/run the job.
 *
 * This atomically does the following:
 * - Update the numbers of jobs information
 * - Add the job to the run pool if necessary (part of init_job)
 *
 * Once this is done, then an appropriate action is taken:
 * - If the ctx is scheduled, it attempts to start the next job (which might be
 * this added job)
 * - Otherwise, and if this is the first job on the context, it enqueues it on
 * the Policy Queue
 *
 * The Policy's Queue can be updated by this in the following ways:
 * - In the above case that this is the first job on the context
 * - If the context is high priority and the context is not scheduled, then it
 * could cause the Policy to schedule out a low-priority context, allowing
 * this context to be scheduled in.
 *
 * If the context is already scheduled on the RunPool, then adding a job to it
 * is guaranteed not to update the Policy Queue. And so, the caller is
 * guaranteed to not need to try scheduling a context from the Run Pool - it
 * can safely assert that the result is false.
 *
 * It is a programming error to have more than U32_MAX jobs in flight at a time.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold hwaccess_lock (as this will be obtained internally)
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used
 * internally).
 *
 * @return true indicates that the Policy Queue was updated, and so the
 * caller will need to try scheduling a context onto the Run Pool.
 * @return false indicates that no updates were made to the Policy Queue,
 * so no further action is required from the caller. This is \b always returned
 * when the context is currently scheduled.
 */
bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom);
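
/*
 * Illustrative usage sketch (not part of the driver): honouring the return
 * value of kbasep_js_add_job(). The variables kctx and katom are assumed to
 * be a valid, already-initialized context and atom. A true return means the
 * Policy Queue changed, so the caller kicks the scheduler:
 *
 *   if (kbasep_js_add_job(kctx, katom))
 *           kbase_js_sched_all(kctx->kbdev);
 */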

/**
 * @brief Remove a job chain from the Job Scheduler, except for its 'retained
 * state'.
 *
 * Completely removing a job requires several calls:
 * - kbasep_js_copy_atom_retained_state(), to capture the 'retained state' of
 * the atom
 * - kbasep_js_remove_job(), to partially remove the atom from the Job
 * Scheduler
 * - kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the
 * remaining state held as part of the job having been run.
 *
 * In the common case of atoms completing normally, this set of actions is
 * more efficient for spinlock purposes than having kbasep_js_remove_job()
 * handle all of the actions.
 *
 * In the case of cancelling atoms, it is easier to call
 * kbasep_js_remove_cancelled_job(), which handles all the necessary actions.
 *
 * It is a programming error to call this when:
 * - \a atom is not a job belonging to kctx.
 * - \a atom has already been removed from the Job Scheduler.
 * - \a atom is still in the runpool
 *
 * Do not use this for removing jobs being killed by kbase_jd_cancel() - use
 * kbasep_js_remove_cancelled_job() instead.
 *
 * The following locking conditions are made on the caller:
 * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 */
void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom);
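
/*
 * Illustrative sketch (not driver code) of the complete removal sequence
 * described above, using the retained-state helpers declared later in this
 * file. kbdev, kctx and katom are assumed valid, and the locks documented
 * above are assumed to be held/not held as required:
 *
 *   struct kbasep_js_atom_retained_state retained;
 *
 *   kbasep_js_atom_retained_state_copy(&retained, katom);
 *   kbasep_js_remove_job(kbdev, kctx, katom);
 *   kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
 *                                                          &retained);
 */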

/**
 * @brief Completely remove a job chain from the Job Scheduler, in the case
 * where the job chain was cancelled.
 *
 * This is a variant of kbasep_js_remove_job() that takes care of removing all
 * of the retained state too. This is generally useful for cancelled atoms,
 * which need not be handled in an optimal way.
 *
 * It is a programming error to call this when:
 * - \a atom is not a job belonging to kctx.
 * - \a atom has already been removed from the Job Scheduler.
 * - \a atom is still in the runpool:
 *   - it is not being killed with kbasep_jd_cancel()
 *
 * The following locking conditions are made on the caller:
 * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold the hwaccess_lock, (as this will be obtained
 * internally)
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could
 * be obtained internally)
 *
 * @return true indicates that ctx attributes have changed and the caller
 * should call kbase_js_sched_all() to try to run more jobs
 * @return false otherwise
 */
bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
                                    struct kbase_context *kctx,
                                    struct kbase_jd_atom *katom);

/**
 * @brief Refcount a context as being busy, preventing it from being scheduled
 * out.
 *
 * @note This function can safely be called from IRQ context.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold mmu_hw_mutex and hwaccess_lock, because they will be
 * used internally.
 *
 * @return value != false if the retain succeeded, and the context will not be
 * scheduled out.
 * @return false if the retain failed (because the context is being/has been
 * scheduled out).
 */
bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
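
/*
 * Illustrative sketch (not driver code): the retain/release pattern used to
 * keep a context resident while it is being inspected. kbdev and kctx are
 * assumed valid; kbasep_js_runpool_release_ctx() is declared further down in
 * this file:
 *
 *   if (kbasep_js_runpool_retain_ctx(kbdev, kctx)) {
 *           inspect_scheduled_context(kctx);
 *           kbasep_js_runpool_release_ctx(kbdev, kctx);
 *   }
 *
 * where inspect_scheduled_context() is a hypothetical stand-in for whatever
 * work requires the context to stay scheduled in.
 */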

/**
 * @brief Refcount a context as being busy, preventing it from being scheduled
 * out.
 *
 * @note This function can safely be called from IRQ context.
 *
 * The following locks must be held by the caller:
 * - mmu_hw_mutex, hwaccess_lock
 *
 * @return value != false if the retain succeeded, and the context will not be
 * scheduled out.
 * @return false if the retain failed (because the context is being/has been
 * scheduled out).
 */
bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx);

/**
 * @brief Lookup a context in the Run Pool based upon its current address space
 * and ensure that it stays scheduled in.
 *
 * The context is refcounted as being busy to prevent it from scheduling
 * out. It must be released with kbasep_js_runpool_release_ctx() when it is no
 * longer required to stay scheduled in.
 *
 * @note This function can safely be called from IRQ context.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 * If the hwaccess_lock is already held, then the caller should use
 * kbasep_js_runpool_lookup_ctx_nolock() instead.
 *
 * @return a valid struct kbase_context on success, which has been refcounted
 * as being busy.
 * @return NULL on failure, indicating that no context was found in \a as_nr
 */
struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr);

/**
 * @brief Handle the requeuing/killing of a context that was evicted from the
 * policy queue or runpool.
 *
 * This should be used whenever handing off a context that has been evicted
 * from the policy queue or the runpool:
 * - If the context is not dying and has jobs, it gets re-added to the policy
 * queue
 * - Otherwise, it is not added
 *
 * In addition, if the context is dying the jobs are killed asynchronously.
 *
 * In all cases, the Power Manager active reference is released
 * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true. \a
 * has_pm_ref must be set to false whenever the context was not previously in
 * the runpool and does not hold a Power Manager active refcount. Note that
 * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an
 * active refcount even though they weren't in the runpool.
 *
 * The following locking conditions are made on the caller:
 * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
 * obtained internally)
 */
void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, bool has_pm_ref);

/**
 * @brief Release a refcount of a context being busy, allowing it to be
 * scheduled out.
 *
 * When the refcount reaches zero, the context \em might be scheduled out
 * (depending on whether the Scheduling Policy has deemed it so, or if it has
 * run out of jobs).
 *
 * If the context does get scheduled out, then the following actions will be
 * taken as part of descheduling a context:
 * - For the context being descheduled:
 * - If the context is in the process of dying (all the jobs are being
 * removed from it), then descheduling also kills off any jobs remaining in the
 * context.
 * - If the context is not dying, and any jobs remain after descheduling the
 * context then it is re-enqueued to the Policy's Queue.
 * - Otherwise, the context is still known to the scheduler, but remains absent
 * from the Policy Queue until a job is next added to it.
 * - In all descheduling cases, the Power Manager active reference (obtained
 * during kbasep_js_try_schedule_head_ctx()) is released
 * (kbase_pm_context_idle()).
 *
 * Whilst the context is being descheduled, this also handles actions that
 * cause more atoms to be run:
 * - Attempt submitting atoms when the Context Attributes on the Runpool have
 * changed. This is because the context being scheduled out could mean that
 * there are more opportunities to run atoms.
 * - Attempt submitting to a slot that was previously blocked due to affinity
 * restrictions. This is usually only necessary when releasing a context
 * happens as part of completing a previous job, but is harmless nonetheless.
 * - Attempt scheduling in a new context (if one is available), and if
 * necessary, running a job from that new context.
 *
 * Unlike retaining a context in the runpool, this function \b cannot be called
 * from IRQ context.
 *
 * It is a programming error to call this on a \a kctx that is not currently
 * scheduled, or that already has a zero refcount.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
 * obtained internally)
 * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
 * obtained internally)
 */
void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);

/**
 * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional
 * actions from completing an atom.
 *
 * This is usually called as part of completing an atom and releasing the
 * refcount on the context held by the atom.
 *
 * Therefore, the extra actions carried out are part of handling actions queued
 * on a completed atom, namely:
 * - Releasing the atom's context attributes
 * - Retrying the submission on a particular slot, because we couldn't submit
 * on that slot from an IRQ handler.
 *
 * The locking conditions of this function are the same as those for
 * kbasep_js_runpool_release_ctx()
 */
void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);

/**
 * @brief Variant of kbasep_js_runpool_release_ctx() that assumes that
 * kbasep_js_device_data::runpool_mutex and
 * kbasep_js_kctx_info::ctx::jsctx_mutex are held by the caller, and does not
 * attempt to schedule new contexts.
 */
void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
                                          struct kbase_context *kctx);

/**
 * @brief Schedule in a privileged context
 *
 * This schedules a context in regardless of the context priority.
 * If the runpool is full, a context will be forced out of the runpool and the
 * function will wait for the new context to be scheduled in.
 * The context will be kept scheduled in (and the corresponding address space
 * reserved) until kbasep_js_release_privileged_ctx() is called.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
 * obtained internally)
 * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used
 * internally).
 * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it
 * will be used internally.
 */
void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);

/**
 * @brief Release a privileged context, allowing it to be scheduled out.
 *
 * See kbasep_js_runpool_release_ctx() for potential side effects.
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
 * obtained internally)
 */
void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
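
/*
 * Illustrative sketch (not driver code): the privileged schedule/release pair
 * brackets an operation that needs the context (and its address space) to
 * remain resident. kbdev and kctx are assumed valid, and
 * do_privileged_work() is a hypothetical stand-in for that operation:
 *
 *   kbasep_js_schedule_privileged_ctx(kbdev, kctx);
 *   do_privileged_work(kctx);
 *   kbasep_js_release_privileged_ctx(kbdev, kctx);
 */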

/**
 * @brief Try to submit the next job on each slot
 *
 * The following locks may be used:
 * - kbasep_js_device_data::runpool_mutex
 * - hwaccess_lock
 */
void kbase_js_try_run_jobs(struct kbase_device *kbdev);

/**
 * @brief Suspend the job scheduler during a Power Management Suspend event.
 *
 * Causes all contexts to be removed from the runpool, and prevents any
 * contexts from (re)entering the runpool.
 *
 * This does not handle suspending the one privileged context: the caller must
 * instead do this by suspending the GPU HW Counter Instrumentation.
 *
 * This will eventually cause all Power Management active references held by
 * contexts on the runpool to be released, without running any more atoms.
 *
 * The caller must then wait for all Power Management active refcounts to
 * become zero before completing the suspend.
 *
 * The emptying mechanism may take some time to complete, since it can wait for
 * jobs to complete naturally instead of forcing them to end quickly. However,
 * this is bounded by the Job Scheduler's Job Timeouts. Hence, this
 * function is guaranteed to complete in a finite time.
 */
void kbasep_js_suspend(struct kbase_device *kbdev);

/**
 * @brief Resume the Job Scheduler after a Power Management Resume event.
 *
 * This restores the actions from kbasep_js_suspend():
 * - Schedules contexts back into the runpool
 * - Resumes running atoms on the GPU
 */
void kbasep_js_resume(struct kbase_device *kbdev);
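
/*
 * Illustrative sketch (not driver code) of how the suspend/resume pair might
 * be wired into a device's Power Management path. The callback names and the
 * surrounding handling (including waiting for the Power Management active
 * refcounts to reach zero, which happens elsewhere) are assumptions made for
 * the example:
 *
 *   static int example_pm_suspend(struct kbase_device *kbdev)
 *   {
 *           kbasep_js_suspend(kbdev);
 *           return 0;
 *   }
 *
 *   static void example_pm_resume(struct kbase_device *kbdev)
 *   {
 *           kbasep_js_resume(kbdev);
 *   }
 */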

/**
 * @brief Submit an atom to the job scheduler.
 *
 * The atom is enqueued on the context's ringbuffer. The caller must have
 * ensured that all dependencies can be represented in the ringbuffer.
 *
 * Caller must hold jctx->lock
 *
 * @param[in] kctx  Context pointer
 * @param[in] katom Pointer to the atom to submit
 *
 * @return Whether the context needs to be enqueued.
 */
bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
                                  struct kbase_jd_atom *katom);

/**
 * jsctx_ll_flush_to_rb() - Pushes atoms from the linked list to ringbuffer.
 * @kctx: Context Pointer
 * @prio: Priority (specifies the queue together with js).
 * @js:   Job slot (specifies the queue together with prio).
 *
 * Pushes all possible atoms from the linked list to the ringbuffer. The
 * number of atoms is limited by the free space in the ringbuffer and by the
 * number of available atoms in the linked list.
 */
void jsctx_ll_flush_to_rb(struct kbase_context *kctx, int prio, int js);

/**
 * @brief Pull an atom from a context in the job scheduler for execution.
 *
 * The atom will not be removed from the ringbuffer at this stage.
 *
 * The HW access lock must be held when calling this function.
 *
 * @param[in] kctx Context to pull from
 * @param[in] js   Job slot to pull from
 * @return Pointer to an atom, or NULL if there are no atoms for this
 * slot that can be currently run.
 */
struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js);

/**
 * @brief Return an atom to the job scheduler ringbuffer.
 *
 * An atom is 'unpulled' if execution is stopped but intended to be returned to
 * later. The most common reason for this is that the atom has been
 * soft-stopped.
 *
 * Note that if multiple atoms are to be 'unpulled', they must be returned in
 * the reverse order to which they were originally pulled. It is a programming
 * error to return atoms in any other order.
 *
 * The HW access lock must be held when calling this function.
 *
 * @param[in] kctx  Context pointer
 * @param[in] katom Pointer to the atom to unpull
 */
void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom);
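
/*
 * Illustrative sketch (not driver code) of the pull/unpull ordering rule
 * described above: atoms unpulled after a stop must be returned in the
 * reverse order to which they were pulled. kctx and js are assumed valid and
 * the HW access lock is assumed to be held:
 *
 *   struct kbase_jd_atom *first = kbase_js_pull(kctx, js);
 *   struct kbase_jd_atom *second = kbase_js_pull(kctx, js);
 *
 *   if (second)
 *           kbase_js_unpull(kctx, second);
 *   if (first)
 *           kbase_js_unpull(kctx, first);
 */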

/**
 * @brief Complete an atom from jd_done_worker(), removing it from the job
 * scheduler ringbuffer.
 *
 * If the atom failed then all dependee atoms marked for failure propagation
 * will also fail.
 *
 * @param[in] kctx  Context pointer
 * @param[in] katom Pointer to the atom to complete
 * @return true if the context is now idle (no jobs pulled)
 *         false otherwise
 */
bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
                               struct kbase_jd_atom *katom);

/**
 * @brief Complete an atom.
 *
 * Most of the work required to complete an atom will be performed by
 * jd_done_worker().
 *
 * The HW access lock must be held when calling this function.
 *
 * @param[in] katom         Pointer to the atom to complete
 * @param[in] end_timestamp The time that the atom completed (may be NULL)
 *
 * Return: Atom that has now been unblocked and can now be run, or NULL if none
 */
struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
                                             ktime_t *end_timestamp);

/**
 * @brief Submit atoms from all available contexts.
 *
 * This will attempt to submit as many jobs as possible to the provided job
 * slots. It will exit when either all job slots are full, or all contexts have
 * been used.
 *
 * @param[in] kbdev   Device pointer
 * @param[in] js_mask Mask of job slots to submit to
 */
void kbase_js_sched(struct kbase_device *kbdev, int js_mask);
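
/*
 * Illustrative note (not driver code): js_mask is a bitmask of slot numbers,
 * so submitting only to slot js, or to slots 0 and 2, might look like:
 *
 *   kbase_js_sched(kbdev, 1 << js);
 *   kbase_js_sched(kbdev, (1 << 0) | (1 << 2));
 *
 * kbase_js_sched_all(), defined later in this file, builds the mask covering
 * every slot reported by the GPU properties.
 */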

/**
 * kbase_js_zap_context - Attempt to deschedule a context that is being
 * destroyed
 * @kctx: Context pointer
 *
 * This will attempt to remove a context from any internal job scheduler queues
 * and perform any other actions to ensure a context will not be submitted
 * from.
 *
 * If the context is currently scheduled, then the caller must wait for all
 * pending jobs to complete before taking any further action.
 */
void kbase_js_zap_context(struct kbase_context *kctx);

/**
 * @brief Validate an atom
 *
 * This will determine whether the atom can be scheduled onto the GPU. Atoms
 * with invalid combinations of core requirements will be rejected.
 *
 * @param[in] kbdev Device pointer
 * @param[in] katom Atom to validate
 * @return true if atom is valid
 *         false otherwise
 */
bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
                            struct kbase_jd_atom *katom);

/**
 * kbase_js_set_timeouts - update all JS timeouts with user specified data
 * @kbdev: Device pointer
 *
 * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is
 * set to a positive number then that becomes the new value used; if a timeout
 * is set to a negative number then the default value is used instead.
 */
void kbase_js_set_timeouts(struct kbase_device *kbdev);

/*
 * Helpers follow
 */

/**
 * @brief Check that a context is allowed to submit jobs on this policy
 *
 * The purpose of this abstraction is to hide the underlying data size, and
 * wrap up the long repeated line of code.
 *
 * As with any bool, never test the return value for equality with true.
 *
 * The caller must hold hwaccess_lock.
 */
static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
{
        u16 test_bit;

        /* Ensure context really is scheduled in */
        KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
        KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

        test_bit = (u16) (1u << kctx->as_nr);

        return (bool) (js_devdata->runpool_irq.submit_allowed & test_bit);
}

/**
 * @brief Allow a context to submit jobs on this policy
 *
 * The purpose of this abstraction is to hide the underlying data size, and
 * wrap up the long repeated line of code.
 *
 * The caller must hold hwaccess_lock.
 */
static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
{
        u16 set_bit;

        /* Ensure context really is scheduled in */
        KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
        KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

        set_bit = (u16) (1u << kctx->as_nr);

        dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);

        js_devdata->runpool_irq.submit_allowed |= set_bit;
}

/**
 * @brief Prevent a context from submitting more jobs on this policy
 *
 * The purpose of this abstraction is to hide the underlying data size, and
 * wrap up the long repeated line of code.
 *
 * The caller must hold hwaccess_lock.
 */
static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
{
        u16 clear_bit;
        u16 clear_mask;

        /* Ensure context really is scheduled in */
        KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
        KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

        clear_bit = (u16) (1u << kctx->as_nr);
        clear_mask = ~clear_bit;

        dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);

        js_devdata->runpool_irq.submit_allowed &= clear_mask;
}
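
/*
 * Illustrative sketch (not driver code): these helpers are intended to be
 * used with the hwaccess_lock held, for example when temporarily blocking
 * submission for a context. The field names below match their use elsewhere
 * in this file, but treat the surrounding code as an assumption:
 *
 *   unsigned long flags;
 *
 *   spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *   if (kbasep_js_is_submit_allowed(&kbdev->js_data, kctx))
 *           kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);
 *   spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */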

/**
 * @brief Manage the 'retry_submit_on_slot' part of a kbase_jd_atom
 */
static inline void kbasep_js_clear_job_retry_submit(struct kbase_jd_atom *atom)
{
        atom->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
}

/**
 * Mark a slot as requiring resubmission by carrying that information on a
 * completing atom.
 *
 * @note This can ASSERT in debug builds if the submit slot has been set to
 * something other than the current value for \a js. This is because you might
 * be unintentionally stopping more jobs being submitted on the old submit
 * slot, and that might cause a scheduling-hang.
 *
 * @note If you can guarantee that the atoms for the original slot will be
 * submitted on some other slot, then call kbasep_js_clear_job_retry_submit()
 * first to silence the ASSERT.
 */
static inline void kbasep_js_set_job_retry_submit_slot(struct kbase_jd_atom *atom, int js)
{
        KBASE_DEBUG_ASSERT(0 <= js && js <= BASE_JM_MAX_NR_SLOTS);
        KBASE_DEBUG_ASSERT((atom->retry_submit_on_slot ==
                                KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID)
                        || (atom->retry_submit_on_slot == js));

        atom->retry_submit_on_slot = js;
}

/**
 * Create an initial 'invalid' atom retained state, that requires no
 * atom-related work to be done on releasing with
 * kbasep_js_runpool_release_ctx_and_katom_retained_state()
 */
static inline void kbasep_js_atom_retained_state_init_invalid(struct kbasep_js_atom_retained_state *retained_state)
{
        retained_state->event_code = BASE_JD_EVENT_NOT_STARTED;
        retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID;
        retained_state->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
}

/**
 * Copy atom state that can be made available after jd_done_nolock() is called
 * on that atom.
 */
static inline void kbasep_js_atom_retained_state_copy(struct kbasep_js_atom_retained_state *retained_state, const struct kbase_jd_atom *katom)
{
        retained_state->event_code = katom->event_code;
        retained_state->core_req = katom->core_req;
        retained_state->retry_submit_on_slot = katom->retry_submit_on_slot;
        retained_state->sched_priority = katom->sched_priority;
        retained_state->device_nr = katom->device_nr;
}

/**
 * @brief Determine whether an atom has finished (given its retained state),
 * and so should be given back to userspace/removed from the system.
 *
 * Reasons for an atom not finishing include:
 * - Being soft-stopped (and so, the atom should be resubmitted sometime later)
 *
 * @param[in] katom_retained_state the retained state of the atom to check
 * @return false if the atom has not finished
 * @return !=false if the atom has finished
 */
static inline bool kbasep_js_has_atom_finished(const struct kbasep_js_atom_retained_state *katom_retained_state)
{
        return (bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED &&
                       katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT);
}

/**
 * @brief Determine whether a struct kbasep_js_atom_retained_state is valid
 *
 * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates
 * that the code should just ignore it.
 *
 * @param[in] katom_retained_state the atom's retained state to check
 * @return false if the retained state is invalid, and can be ignored
 * @return !=false if the retained state is valid
 */
static inline bool kbasep_js_atom_retained_state_is_valid(const struct kbasep_js_atom_retained_state *katom_retained_state)
{
        return (bool) (katom_retained_state->core_req !=
                       KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
}

static inline bool kbasep_js_get_atom_retry_submit_slot(const struct kbasep_js_atom_retained_state *katom_retained_state, int *res)
{
        int js = katom_retained_state->retry_submit_on_slot;

        *res = js;
        return (bool) (js >= 0);
}
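
/*
 * Illustrative sketch (not driver code): pairing the retry-slot setter above
 * with kbasep_js_get_atom_retry_submit_slot(). katom, kbdev and js are
 * assumed valid, and retained is assumed to be a local
 * struct kbasep_js_atom_retained_state:
 *
 *   int retry_js;
 *
 *   kbasep_js_set_job_retry_submit_slot(katom, js);
 *   kbasep_js_atom_retained_state_copy(&retained, katom);
 *   if (kbasep_js_get_atom_retry_submit_slot(&retained, &retry_js))
 *           kbase_js_sched(kbdev, 1 << retry_js);
 */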

/**
 * @brief Variant of kbasep_js_runpool_lookup_ctx() that can be used when the
 * context is guaranteed to be already previously retained.
 *
 * It is a programming error to supply the \a as_nr of a context that has not
 * been previously retained/has a busy refcount of zero. The only exception is
 * when there is no ctx in \a as_nr (NULL returned).
 *
 * The following locking conditions are made on the caller:
 * - it must \em not hold the hwaccess_lock, because it will be used internally.
 *
 * @return a valid struct kbase_context on success, with a refcount that is
 * guaranteed to be non-zero and unmodified by this function.
 * @return NULL on failure, indicating that no context was found in \a as_nr
 */
static inline struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr)
{
        struct kbase_context *found_kctx;

        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);

        found_kctx = kbdev->as_to_kctx[as_nr];
        KBASE_DEBUG_ASSERT(found_kctx == NULL ||
                        atomic_read(&found_kctx->refcount) > 0);

        return found_kctx;
}

/*
 * The following locking conditions are made on the caller:
 * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - The caller must hold the kbasep_js_device_data::runpool_mutex
 */
static inline void kbase_js_runpool_inc_context_count(
                struct kbase_device *kbdev,
                struct kbase_context *kctx)
{
        struct kbasep_js_device_data *js_devdata;
        struct kbasep_js_kctx_info *js_kctx_info;

        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        js_devdata = &kbdev->js_data;
        js_kctx_info = &kctx->jctx.sched_info;

        lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
        lockdep_assert_held(&js_devdata->runpool_mutex);

        /* Track total contexts */
        KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
        ++(js_devdata->nr_all_contexts_running);

        if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
                /* Track contexts that can submit jobs */
                KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running <
                                S8_MAX);
                ++(js_devdata->nr_user_contexts_running);
        }
}

/*
 * The following locking conditions are made on the caller:
 * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
 * - The caller must hold the kbasep_js_device_data::runpool_mutex
 */
static inline void kbase_js_runpool_dec_context_count(
                struct kbase_device *kbdev,
                struct kbase_context *kctx)
{
        struct kbasep_js_device_data *js_devdata;
        struct kbasep_js_kctx_info *js_kctx_info;

        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        js_devdata = &kbdev->js_data;
        js_kctx_info = &kctx->jctx.sched_info;

        lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
        lockdep_assert_held(&js_devdata->runpool_mutex);

        /* Track total contexts */
        --(js_devdata->nr_all_contexts_running);
        KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);

        if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
                /* Track contexts that can submit jobs */
                --(js_devdata->nr_user_contexts_running);
                KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
        }
}


/**
 * @brief Submit atoms from all available contexts to all job slots.
 *
 * This will attempt to submit as many jobs as possible. It will exit when
 * either all job slots are full, or all contexts have been used.
 *
 * @param[in] kbdev Device pointer
 */
static inline void kbase_js_sched_all(struct kbase_device *kbdev)
{
        kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
}

extern const int
kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS];

extern const base_jd_prio
kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT];

/**
 * kbasep_js_atom_prio_to_sched_prio(): - Convert atom priority (base_jd_prio)
 * to relative ordering
 * @atom_prio: Priority ID to translate.
 *
 * Atom priority values for @ref base_jd_prio cannot be compared directly to
 * find out which are higher or lower.
 *
 * This function will convert base_jd_prio values for successively lower
 * priorities into a monotonically increasing sequence. That is, the lower the
 * base_jd_prio priority, the higher the value produced by this function. This
 * is in accordance with how the rest of the kernel treats priority.
 *
 * The mapping is 1:1 and the size of the valid input range is the same as the
 * size of the valid output range, i.e.
 * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS
 *
 * Note: This must be kept in sync with BASE_JD_PRIO_<...> definitions
 *
 * Return: On success: a value in the inclusive range
 *         0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1. On failure:
 *         KBASE_JS_ATOM_SCHED_PRIO_INVALID
 */
static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio)
{
        if (atom_prio >= BASE_JD_NR_PRIO_LEVELS)
                return KBASE_JS_ATOM_SCHED_PRIO_INVALID;

        return kbasep_js_atom_priority_to_relative[atom_prio];
}

static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio)
{
        unsigned int prio_idx;

        KBASE_DEBUG_ASSERT(0 <= sched_prio
                        && sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT);

        prio_idx = (unsigned int)sched_prio;

        return kbasep_js_relative_priority_to_atom[prio_idx];
}
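
/*
 * Illustrative sketch (not driver code): converting an atom priority into the
 * scheduler's relative ordering and back again. atom_prio is assumed to hold
 * one of the BASE_JD_PRIO_<...> levels supplied by userspace; the round trip
 * is lossless because the mapping is 1:1:
 *
 *   int sched_prio = kbasep_js_atom_prio_to_sched_prio(atom_prio);
 *
 *   if (sched_prio != KBASE_JS_ATOM_SCHED_PRIO_INVALID)
 *           atom_prio = kbasep_js_sched_prio_to_atom_prio(sched_prio);
 */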

/** @} *//* end group kbase_js */
/** @} *//* end group base_kbase_api */
/** @} *//* end group base_api */

#endif /* _KBASE_JS_H_ */