/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_H_
#define _KBASE_H_

#include <mali_malisw.h>

#include <mali_kbase_debug.h>

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#if (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE)
#include <linux/sched/mm.h>
#endif
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

#include <uapi/gpu/arm/bifrost/mali_base_kernel.h>
#include <mali_kbase_linux.h>

/*
 * Include mali_kbase_defs.h first as this provides types needed by other local
 * header files.
 */
#include "mali_kbase_defs.h"

#include "debug/mali_kbase_debug_ktrace.h"
#include "context/mali_kbase_context.h"
#include "mali_kbase_strings.h"
#include "mali_kbase_mem_lowlevel.h"
#include "mali_kbase_utility.h"
#include "mali_kbase_mem.h"
#include "mmu/mali_kbase_mmu.h"
#include "mali_kbase_gpu_memory_debugfs.h"
#include "mali_kbase_mem_profile_debugfs.h"
#include "mali_kbase_gpuprops.h"
#include <uapi/gpu/arm/bifrost/mali_kbase_ioctl.h>
#if !MALI_USE_CSF
#include "mali_kbase_debug_job_fault.h"
#include "mali_kbase_jd_debugfs.h"
#include "mali_kbase_jm.h"
#include "mali_kbase_js.h"
#endif /* !MALI_USE_CSF */

#include "ipa/mali_kbase_ipa.h"

#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
#include <trace/events/gpu.h>
#endif

#include "mali_linux_trace.h"

#if MALI_USE_CSF
#include "csf/mali_kbase_csf.h"
#endif

#ifndef u64_to_user_ptr
/* Introduced in Linux v4.6 */
#define u64_to_user_ptr(x) ((void __user *)(uintptr_t)(x))
#endif
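
/*
 * Example (illustrative sketch, not part of the driver API): ioctl payloads
 * carry user-space pointers as u64 fields, which are converted back to
 * __user pointers with u64_to_user_ptr() before use. The struct, field and
 * variable names below are hypothetical.
 *
 *	struct hypothetical_args args;
 *	void __user *uptr;
 *
 *	uptr = u64_to_user_ptr(args.user_buffer_addr);
 *	if (copy_from_user(kbuf, uptr, args.size))
 *		return -EFAULT;
 */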

#if MALI_USE_CSF
/* Physical memory group ID for CSF user I/O.
 */
#define KBASE_MEM_GROUP_CSF_IO BASE_MEM_GROUP_DEFAULT

/* Physical memory group ID for CSF firmware.
 */
#define KBASE_MEM_GROUP_CSF_FW BASE_MEM_GROUP_DEFAULT
#endif

/* Physical memory group ID for a special page which can alias several regions.
 */
#define KBASE_MEM_GROUP_SINK BASE_MEM_GROUP_DEFAULT

/*
 * Kernel-side Base (KBase) APIs
 */

struct kbase_device *kbase_device_alloc(void);
/*
 * Note: the configuration attributes member of kbdev needs to have been
 * set up before calling kbase_device_init.
 */

int kbase_device_misc_init(struct kbase_device *kbdev);
void kbase_device_misc_term(struct kbase_device *kbdev);
void kbase_device_free(struct kbase_device *kbdev);
int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);

/* Needed for gator integration and for reporting vsync information */
struct kbase_device *kbase_find_device(int minor);
void kbase_release_device(struct kbase_device *kbdev);

/**
 * kbase_context_get_unmapped_area() - get an address range which is currently
 *                                     unmapped.
 * @kctx: A kernel base context (which has its own GPU address space).
 * @addr: CPU mapped address (set to 0 since MAP_FIXED mappings are not
 *        allowed; the Mali GPU driver decides the mapping).
 * @len: Length of the address range.
 * @pgoff: Page offset within the GPU address space of the kbase context.
 * @flags: Flags for the allocation.
 *
 * Finds the unmapped address range which satisfies requirements specific to
 * GPU and those provided by the call parameters.
 *
 * 1) Requirement for allocations greater than 2MB:
 * - alignment offset is set to 2MB and the alignment mask to 2MB decremented
 * by 1.
 *
 * 2) Requirements imposed for the shader memory alignment:
 * - alignment is decided by the number of GPU pc bits which can be read from
 * GPU properties of the device associated with this kbase context; alignment
 * offset is set to this value in bytes and the alignment mask to the offset
 * decremented by 1.
 * - allocations must not be at 4GB boundaries. Such cases are indicated
 * by the flag KBASE_REG_GPU_NX not being set (check the flags of the kbase
 * region). 4GB boundaries can be checked against @ref BASE_MEM_MASK_4GB.
 *
 * 3) Requirements imposed for tiler memory alignment, cases indicated by
 * the flag @ref KBASE_REG_TILER_ALIGN_TOP (check the flags of the kbase
 * region):
 * - alignment offset is set to the difference between the kbase region
 * extension (converted from the original value in pages to bytes) and the kbase
 * region initial_commit (also converted from the original value in pages to
 * bytes); alignment mask is set to the kbase region extension in bytes and
 * decremented by 1.
 *
 * Return: if successful, address of the unmapped area aligned as required;
 *         error code (negative) in case of failure.
 */
unsigned long kbase_context_get_unmapped_area(struct kbase_context *kctx,
		const unsigned long addr, const unsigned long len,
		const unsigned long pgoff, const unsigned long flags);
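
/*
 * Illustrative sketch (not part of the driver API) of the alignment
 * arithmetic described above for case 1, allocations larger than 2MB.
 * The variable names are hypothetical:
 *
 *	unsigned long align_offset = SZ_2M;
 *	unsigned long align_mask = SZ_2M - 1;
 *
 * The address returned for such an allocation then satisfies
 * (addr & align_mask) == (align_offset & align_mask), i.e. it is 2MB
 * aligned.
 */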


int assign_irqs(struct kbase_device *kbdev);

int kbase_sysfs_init(struct kbase_device *kbdev);
void kbase_sysfs_term(struct kbase_device *kbdev);


int kbase_protected_mode_init(struct kbase_device *kbdev);
void kbase_protected_mode_term(struct kbase_device *kbdev);

/**
 * kbase_device_pm_init() - Performs power management initialization and
 * verifies the device tree configuration.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: 0 if successful, otherwise a standard Linux error code
 */
int kbase_device_pm_init(struct kbase_device *kbdev);

/**
 * kbase_device_pm_term() - Performs power management deinitialization and
 * frees resources.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Cleans up all the resources.
 */
void kbase_device_pm_term(struct kbase_device *kbdev);


int power_control_init(struct kbase_device *kbdev);
void power_control_term(struct kbase_device *kbdev);

#if IS_ENABLED(CONFIG_DEBUG_FS)
void kbase_device_debugfs_term(struct kbase_device *kbdev);
int kbase_device_debugfs_init(struct kbase_device *kbdev);
#else /* CONFIG_DEBUG_FS */
static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
{
	return 0;
}

static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
#endif /* CONFIG_DEBUG_FS */

int registers_map(struct kbase_device *kbdev);
void registers_unmap(struct kbase_device *kbdev);

int kbase_device_coherency_init(struct kbase_device *kbdev);


#if !MALI_USE_CSF
int kbase_jd_init(struct kbase_context *kctx);
void kbase_jd_exit(struct kbase_context *kctx);

/**
 * kbase_jd_submit - Submit atoms to the job dispatcher
 *
 * @kctx: The kbase context to submit to
 * @user_addr: The address in user space of the struct base_jd_atom array
 * @nr_atoms: The number of atoms in the array
 * @stride: sizeof(struct base_jd_atom)
 * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6)
 *
 * Return: 0 on success or a negative error code on failure
 */
int kbase_jd_submit(struct kbase_context *kctx,
		void __user *user_addr, u32 nr_atoms, u32 stride,
		bool uk6_atom);
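
/*
 * Illustrative sketch of how an ioctl handler might forward a submission to
 * kbase_jd_submit(), recovering the user-space atom array from a u64 field
 * with u64_to_user_ptr(). The struct and field names are assumptions, not
 * taken from this header:
 *
 *	struct hypothetical_job_submit *submit;
 *
 *	return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr),
 *			       submit->nr_atoms, submit->stride, false);
 */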

/**
 * kbase_jd_done_worker - Handle a job completion
 * @data: a &struct work_struct
 *
 * This function requeues the job from the runpool (if it was soft-stopped or
 * removed from NEXT registers).
 *
 * Removes it from the system if it finished/failed/was cancelled.
 *
 * Resolves dependencies to add dependent jobs to the context, potentially
 * starting them if necessary (which may add more references to the context).
 *
 * Releases the reference to the context from the no-longer-running job.
 *
 * Handles retrying submission outside of IRQ context if it failed from within
 * IRQ context.
 */
void kbase_jd_done_worker(struct work_struct *data);

void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
		kbasep_js_atom_done_code done_code);
void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
void kbase_jd_zap_context(struct kbase_context *kctx);
bool jd_done_nolock(struct kbase_jd_atom *katom,
		struct list_head *completed_jobs_ctx);
void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);

/**
 * kbase_job_done - Process completed jobs from job interrupt
 * @kbdev: Pointer to the kbase device.
 * @done: Bitmask of done or failed jobs, from JOB_IRQ_STAT register
 *
 * This function processes the completed, or failed, jobs from the GPU job
 * slots, for the bits set in the @done bitmask.
 *
 * The hwaccess_lock must be held when calling this function.
 */
void kbase_job_done(struct kbase_device *kbdev, u32 done);

/**
 * kbase_job_slot_ctx_priority_check_locked() - Check for lower priority atoms
 *                                              and soft stop them
 * @kctx: Pointer to context to check.
 * @katom: Pointer to priority atom.
 *
 * Atoms from @kctx on the same job slot as @katom, which have lower priority
 * than @katom will be soft stopped and put back in the queue, so that atoms
 * with higher priority can run.
 *
 * The hwaccess_lock must be held when calling this function.
 */
void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
				struct kbase_jd_atom *katom);

/**
 * kbase_job_slot_softstop_start_rp() - Soft-stop the atom at the start
 *                                      of a renderpass.
 * @kctx: Pointer to a kernel base context.
 * @reg:  Reference of a growable GPU memory region in the same context.
 *        Takes ownership of the reference if successful.
 *
 * Used to switch to incremental rendering if we have nearly run out of
 * virtual address space in a growable memory region and the atom currently
 * executing on a job slot is the tiler job chain at the start of a renderpass.
 *
 * Return: 0 if successful, otherwise a negative error code.
 */
int kbase_job_slot_softstop_start_rp(struct kbase_context *kctx,
		struct kbase_va_region *reg);

void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
		struct kbase_jd_atom *target_katom);
void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
		struct kbase_jd_atom *target_katom, u32 sw_flags);
void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
		struct kbase_jd_atom *target_katom);
void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
		base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom);
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
		struct kbase_jd_atom *target_katom);

#endif /* !MALI_USE_CSF */

void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
#if !MALI_USE_CSF
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
#endif /* !MALI_USE_CSF */
int kbase_event_pending(struct kbase_context *ctx);
int kbase_event_init(struct kbase_context *kctx);
void kbase_event_close(struct kbase_context *kctx);
void kbase_event_cleanup(struct kbase_context *kctx);
void kbase_event_wakeup(struct kbase_context *kctx);

/**
 * kbasep_jit_alloc_validate() - Validate the JIT allocation info.
 *
 * @kctx:	Pointer to the kbase context within which the JIT
 *		allocation is to be validated.
 * @info:	Pointer to the struct base_jit_alloc_info
 *		which is to be validated.
 *
 * Return: 0 if the JIT allocation is valid; negative error code otherwise
 */
int kbasep_jit_alloc_validate(struct kbase_context *kctx,
					struct base_jit_alloc_info *info);

/**
 * kbase_jit_retry_pending_alloc() - Retry blocked just-in-time memory
 *                                   allocations.
 *
 * @kctx:	Pointer to the kbase context within which the just-in-time
 *		memory allocations are to be retried.
 */
void kbase_jit_retry_pending_alloc(struct kbase_context *kctx);

/**
 * kbase_free_user_buffer() - Free memory allocated for struct
 *		@kbase_debug_copy_buffer.
 *
 * @buffer:	Pointer to the memory location allocated for the object
 *		of the type struct @kbase_debug_copy_buffer.
 */
static inline void kbase_free_user_buffer(
		struct kbase_debug_copy_buffer *buffer)
{
	struct page **pages = buffer->extres_pages;
	int nr_pages = buffer->nr_extres_pages;

	if (pages) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *pg = pages[i];

			if (pg)
				put_page(pg);
		}
		kfree(pages);
	}
}

/**
 * kbase_mem_copy_from_extres() - Copy from external resources.
 *
 * @kctx:	kbase context within which the copying is to take place.
 * @buf_data:	Pointer to the information about external resources:
 *		pages pertaining to the external resource, number of
 *		pages to copy.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int kbase_mem_copy_from_extres(struct kbase_context *kctx,
		struct kbase_debug_copy_buffer *buf_data);
#if !MALI_USE_CSF
int kbase_process_soft_job(struct kbase_jd_atom *katom);
int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
void kbase_finish_soft_job(struct kbase_jd_atom *katom);
void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom);
#endif
int kbase_soft_event_update(struct kbase_context *kctx,
			    u64 event,
			    unsigned char new_status);

void kbasep_soft_job_timeout_worker(struct timer_list *timer);
void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
#endif /* !MALI_USE_CSF */

void kbasep_as_do_poke(struct work_struct *work);

/**
 * kbase_pm_is_suspending - Check whether a system suspend is in progress,
 *                          or the system has already been suspended.
 * @kbdev: The kbase device structure for the device
 *
 * The caller should ensure that either kbdev->pm.active_count_lock is held, or
 * a dmb was executed recently (to ensure the value is most
 * up-to-date). However, without a lock the value could change afterwards.
 *
 * Return: false if a suspend is not in progress, true otherwise
 */
static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
{
	return kbdev->pm.suspending;
}
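
/*
 * Illustrative sketch (not part of the driver API): a caller that must not
 * start new work once a system suspend has begun can bail out early. The
 * surrounding function and its error code are hypothetical:
 *
 *	if (kbase_pm_is_suspending(kbdev))
 *		return -EAGAIN;
 */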

#ifdef CONFIG_MALI_ARBITER_SUPPORT
/**
 * kbase_pm_is_gpu_lost - Check whether a GPU lost event is in progress
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Indicates whether a GPU lost event has been received and jobs are no longer
 * being scheduled.
 *
 * Return: true if the GPU has been lost, false otherwise
 */
static inline bool kbase_pm_is_gpu_lost(struct kbase_device *kbdev)
{
	return (atomic_read(&kbdev->pm.gpu_lost) == 0 ? false : true);
}

/**
 * kbase_pm_set_gpu_lost - Set or clear the GPU lost state
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @gpu_lost: true to activate the GPU lost state, false to deactivate it
 *
 * Puts the power management code into the GPU lost state or takes it out of
 * the state.  Once in the GPU lost state, new GPU jobs will no longer be
 * scheduled.
 */
static inline void kbase_pm_set_gpu_lost(struct kbase_device *kbdev,
	bool gpu_lost)
{
	const int new_val = (gpu_lost ? 1 : 0);
	const int cur_val = atomic_xchg(&kbdev->pm.gpu_lost, new_val);

	if (new_val != cur_val)
		KBASE_KTRACE_ADD(kbdev, ARB_GPU_LOST, NULL, new_val);
}
#endif

/**
 * kbase_pm_is_active - Determine whether the GPU is active
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This takes into account whether there is an active context reference.
 *
 * Return: true if the GPU is active, false otherwise
 */
static inline bool kbase_pm_is_active(struct kbase_device *kbdev)
{
	return kbdev->pm.active_count > 0;
}

/**
 * kbase_pm_metrics_start - Start the utilization metrics timer
 * @kbdev: Pointer to the kbase device for which to start the utilization
 *         metrics calculation thread.
 *
 * Start the timer that drives the metrics calculation and runs the custom
 * DVFS.
 */
void kbase_pm_metrics_start(struct kbase_device *kbdev);

/**
 * kbase_pm_metrics_stop - Stop the utilization metrics timer
 * @kbdev: Pointer to the kbase device for which to stop the utilization
 *         metrics calculation thread.
 *
 * Stop the timer that drives the metrics calculation and runs the custom
 * DVFS.
 */
void kbase_pm_metrics_stop(struct kbase_device *kbdev);

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
/**
 * kbase_pm_handle_runtime_suspend - Handle the runtime suspend of GPU
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function is called from the runtime suspend callback function to save
 * the HW state and power down the GPU, if the GPU was in sleep mode.
 * It performs the following steps:
 * - Powers up the L2 cache and re-activates the MCU.
 * - Suspends the CSGs.
 * - Halts the MCU.
 * - Powers down the L2 cache.
 * - Invokes the power_off callback to power down the GPU.
 *
 * Return: 0 if the GPU was already powered down or no error was encountered
 * in the power down, otherwise an error code.
 */
int kbase_pm_handle_runtime_suspend(struct kbase_device *kbdev);

/**
 * kbase_pm_force_mcu_wakeup_after_sleep - Force the wake up of MCU from sleep
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function forces the wake up of the MCU from sleep state and waits for
 * the MCU to become active.
 * It usually gets called from the runtime suspend callback function.
 * It also gets called from the GPU reset handler, at the time of system
 * suspend, or when the user tries to terminate or suspend an on-slot group.
 *
 * Note: the @gpu_wakeup_override flag that forces the reactivation of the MCU
 *       is set by this function and it is the caller's responsibility to
 *       clear the flag.
 *
 * Return: 0 if the wake up was successful.
 */
int kbase_pm_force_mcu_wakeup_after_sleep(struct kbase_device *kbdev);
#endif

#if !MALI_USE_CSF
/**
 * kbase_jd_atom_id - Return the atom's ID, as was originally supplied by
 *                    userspace in base_jd_atom::atom_number
 * @kctx:  KBase context pointer
 * @katom: Atom for which to return the ID
 *
 * Return: the atom's ID.
 */
static inline int kbase_jd_atom_id(struct kbase_context *kctx,
				   const struct kbase_jd_atom *katom)
{
	int result;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(katom);
	KBASE_DEBUG_ASSERT(katom->kctx == kctx);

	result = katom - &kctx->jctx.atoms[0];
	KBASE_DEBUG_ASSERT(result >= 0 && result <= BASE_JD_ATOM_COUNT);
	return result;
}

/**
 * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
 * @kctx: Context pointer
 * @id:   ID of atom to retrieve
 *
 * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
 */
static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
		struct kbase_context *kctx, int id)
{
	return &kctx->jctx.atoms[id];
}
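
/*
 * Illustrative sketch: kbase_jd_atom_id() and kbase_jd_atom_from_id() are
 * inverses of each other for atoms belonging to the same context, so the
 * following (hypothetical) round trip holds:
 *
 *	int id = kbase_jd_atom_id(kctx, katom);
 *	struct kbase_jd_atom *same = kbase_jd_atom_from_id(kctx, id);
 *
 * Here same == katom, provided 0 <= id < BASE_JD_ATOM_COUNT.
 */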
#endif /* !MALI_USE_CSF */

/**
 * kbase_disjoint_init - Initialize the disjoint state
 *
 * The disjoint event count and state are both set to zero.
 *
 * Disjoint functions usage:
 *
 * The disjoint event count should be incremented whenever a disjoint event occurs.
 *
 * There are several cases which are regarded as disjoint behavior. Rather than just increment
 * the counter during disjoint events we also increment the counter when jobs may be affected
 * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
 *
 * Disjoint state is entered during GPU reset. Increasing the disjoint state also increases
 * the count of disjoint events.
 *
 * The disjoint state is then used to increase the count of disjoint events during job submission
 * and job completion. Any atom submitted or completed while the disjoint state is greater than
 * zero is regarded as a disjoint event.
 *
 * The disjoint event counter is also incremented immediately whenever a job is soft stopped
 * and during context creation.
 *
 * @kbdev: The kbase device
 */
void kbase_disjoint_init(struct kbase_device *kbdev);

/**
 * kbase_disjoint_event - Increase the count of disjoint events.
 * Called when a disjoint event has happened.
 *
 * @kbdev: The kbase device
 */
void kbase_disjoint_event(struct kbase_device *kbdev);

/**
 * kbase_disjoint_event_potential - Increase the count of disjoint events
 * only if the GPU is in a disjoint state
 *
 * This should be called when something happens which could be disjoint if the GPU
 * is in a disjoint state. The state refcount keeps track of this.
 *
 * @kbdev: The kbase device
 */
void kbase_disjoint_event_potential(struct kbase_device *kbdev);

/**
 * kbase_disjoint_event_get - Return the count of disjoint events
 *
 * @kbdev: The kbase device
 *
 * Return: the count of disjoint events
 */
u32 kbase_disjoint_event_get(struct kbase_device *kbdev);

/**
 * kbase_disjoint_state_up - Increment the refcount state indicating that
 * the GPU is in a disjoint state.
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 * Eventually, after the disjoint state has completed, @ref kbase_disjoint_state_down
 * should be called.
 *
 * @kbdev: The kbase device
 */
void kbase_disjoint_state_up(struct kbase_device *kbdev);

/**
 * kbase_disjoint_state_down - Decrement the refcount state
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 *
 * Called after @ref kbase_disjoint_state_up once the disjoint state is over.
 *
 * @kbdev: The kbase device
 */
void kbase_disjoint_state_down(struct kbase_device *kbdev);
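
/*
 * Illustrative sketch (not part of the driver API) of the usage pattern
 * described above: bracket a disruptive operation, such as a GPU reset,
 * with the up/down calls so that jobs submitted or completed in between
 * are counted as disjoint events. The reset helper name is hypothetical:
 *
 *	kbase_disjoint_state_up(kbdev);
 *	do_hypothetical_gpu_reset(kbdev);
 *	kbase_disjoint_state_down(kbdev);
 *
 * While the state refcount is non-zero, kbase_disjoint_event_potential()
 * increments the event count; once it drops back to zero, only explicit
 * kbase_disjoint_event() calls do.
 */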

/**
 * kbase_device_pcm_dev_init() - Initialize the priority control manager device
 *
 * @kbdev: Pointer to the structure for the kbase device
 *
 * Pointer to the priority control manager device is retrieved from the device
 * tree and a reference is taken on the module implementing the callbacks for
 * priority control manager operations.
 *
 * Return: 0 if successful, or an error code on failure
 */
int kbase_device_pcm_dev_init(struct kbase_device *const kbdev);

/**
 * kbase_device_pcm_dev_term() - Performs priority control manager device
 *                               deinitialization.
 *
 * @kbdev: Pointer to the structure for the kbase device
 *
 * Reference is released on the module implementing the callbacks for priority
 * control manager operations.
 */
void kbase_device_pcm_dev_term(struct kbase_device *const kbdev);

/*
 * If a job is soft stopped and the number of contexts is >= this value
 * it is reported as a disjoint event
 */
#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2

#if !defined(UINT64_MAX)
	#define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#endif

#endif /* _KBASE_H_ */