1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /*
3 *
4 * (C) COPYRIGHT 2011-2021 ARM Limited. All rights reserved.
5 *
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
9 * of such GNU license.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, you can access it online at
18 * http://www.gnu.org/licenses/gpl-2.0.html.
19 *
20 */
21
22 /**
 * DOC: Definitions (types, defines, etc.) common to Kbase. They are placed here
24 * to allow the hierarchy of header files to work.
25 */
26
27 #ifndef _KBASE_DEFS_H_
28 #define _KBASE_DEFS_H_
29
30 #include <mali_kbase_config.h>
31 #include <mali_base_hwconfig_features.h>
32 #include <mali_base_hwconfig_issues.h>
33 #include <mali_kbase_mem_lowlevel.h>
34 #include <mmu/mali_kbase_mmu_hw.h>
35 #include <backend/gpu/mali_kbase_instr_defs.h>
36 #include <mali_kbase_pm.h>
37 #include <mali_kbase_gpuprops_types.h>
38 #if MALI_USE_CSF
39 #include <mali_kbase_hwcnt_backend_csf.h>
40 #else
41 #include <mali_kbase_hwcnt_backend_jm.h>
42 #endif
43 #include <protected_mode_switcher.h>
44
45 #include <linux/atomic.h>
46 #include <linux/mempool.h>
47 #include <linux/slab.h>
48 #include <linux/file.h>
49 #include <linux/sizes.h>
50
51
52 #if defined(CONFIG_SYNC)
53 #include <sync.h>
54 #else
55 #include "mali_kbase_fence_defs.h"
56 #endif
57
58 #if IS_ENABLED(CONFIG_DEBUG_FS)
59 #include <linux/debugfs.h>
60 #endif /* CONFIG_DEBUG_FS */
61
62 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
63 #include <linux/devfreq.h>
64 #endif /* CONFIG_MALI_BIFROST_DEVFREQ */
65
66 #ifdef CONFIG_MALI_ARBITER_SUPPORT
67 #include <arbiter/mali_kbase_arbiter_defs.h>
68 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
69
70 #include <linux/clk.h>
71 #include <linux/regulator/consumer.h>
72 #include <linux/memory_group_manager.h>
73 #include <soc/rockchip/rockchip_opp_select.h>
74
75 #include "debug/mali_kbase_debug_ktrace_defs.h"
76
77 /** Number of milliseconds before we time out on a GPU soft/hard reset */
78 #define RESET_TIMEOUT 500
79
80 /**
81 * The maximum number of Job Slots to support in the Hardware.
82 *
83 * You can optimize this down if your target devices will only ever support a
84 * small number of job slots.
85 */
86 #define BASE_JM_MAX_NR_SLOTS 3
87
88 /**
89 * The maximum number of Address Spaces to support in the Hardware.
90 *
91 * You can optimize this down if your target devices will only ever support a
92 * small number of Address Spaces
93 */
94 #define BASE_MAX_NR_AS 16
95
96 /* mmu */
97 #define MIDGARD_MMU_LEVEL(x) (x)
98
99 #define MIDGARD_MMU_TOPLEVEL MIDGARD_MMU_LEVEL(0)
100
101 #define MIDGARD_MMU_BOTTOMLEVEL MIDGARD_MMU_LEVEL(3)
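
/*
 * Example (illustrative only): the GPU page tables form a 4-level tree, so a
 * full table walk visits every level from the top level (0) down to the
 * bottom level (3), e.g.:
 *
 *	for (level = MIDGARD_MMU_TOPLEVEL; level <= MIDGARD_MMU_BOTTOMLEVEL; level++)
 *		walk_one_level(mmut, level);
 *
 * where walk_one_level() is a hypothetical per-level handler.
 */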
102
103 #define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW | KBASE_REG_GPU_WR)
104
/** Value of kbase_context::as_nr that indicates the context has no valid address space assigned */
106 #define KBASEP_AS_NR_INVALID (-1)
107
108 /**
 * Maximum size in bytes of an MMU lock region, expressed as a base-2 logarithm
110 */
111 #define KBASE_LOCK_REGION_MAX_SIZE_LOG2 (48) /* 256 TB */
112
113 /**
 * Minimum size in bytes of an MMU lock region, expressed as a base-2 logarithm
115 */
116 #define KBASE_LOCK_REGION_MIN_SIZE_LOG2 (15) /* 32 kB */
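
/*
 * Example (illustrative only): a lock region size stored as a logarithm
 * converts to a size in bytes as (1ULL << log2); with the limits above this
 * gives 1ULL << 15 = 32 KiB and 1ULL << 48 = 256 TiB, e.g.:
 *
 *	u64 lock_region_bytes = 1ULL << KBASE_LOCK_REGION_MIN_SIZE_LOG2;
 */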
117
118 /**
119 * Maximum number of GPU memory region zones
120 */
121 #define KBASE_REG_ZONE_MAX 4ul
122
123 #include "mali_kbase_hwaccess_defs.h"
124
125 /* Maximum number of pages of memory that require a permanent mapping, per
126 * kbase_context
127 */
128 #define KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES ((32 * 1024ul * 1024ul) >> \
129 PAGE_SHIFT)
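
/*
 * Example (illustrative only): with 4 KiB pages (PAGE_SHIFT == 12) the limit
 * above evaluates to (32 * 1024 * 1024) >> 12 = 8192 pages, i.e. 32 MiB of
 * permanently mapped memory per context.
 */
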
130 /* Minimum threshold period for hwcnt dumps between different hwcnt virtualizer
131 * clients, to reduce undesired system load.
132 * If a virtualizer client requests a dump within this threshold period after
133 * some other client has performed a dump, a new dump won't be performed and
134 * the accumulated counter values for that client will be returned instead.
135 */
136 #define KBASE_HWCNT_GPU_VIRTUALIZER_DUMP_THRESHOLD_NS (200 * NSEC_PER_USEC)
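
/*
 * Example (illustrative only; variable and helper names are hypothetical):
 * with the threshold above (200 microseconds), a virtualizer client dump
 * request that arrives too soon after the previous dump can be served from
 * the accumulated values instead of triggering a new dump:
 *
 *	if (now_ns - last_dump_ns < KBASE_HWCNT_GPU_VIRTUALIZER_DUMP_THRESHOLD_NS)
 *		return use_accumulated_values(client);
 */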
137
138 #if MALI_USE_CSF
/* The buffer count of the CSF hwcnt backend ring buffer, which is used when
 * the CSF hwcnt backend allocates the ring buffer to communicate with the CSF
 * firmware for HWC dump samples.
 * To meet the hardware requirement, this number MUST be a power of two;
 * otherwise, creation of the CSF hwcnt backend will fail.
 */
145 #define KBASE_HWCNT_BACKEND_CSF_RING_BUFFER_COUNT (128)
146 #endif
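
/*
 * Example (illustrative only): the power-of-two requirement above can be
 * checked with the kernel's is_power_of_2() helper, e.g.:
 *
 *	if (!is_power_of_2(KBASE_HWCNT_BACKEND_CSF_RING_BUFFER_COUNT))
 *		return -EINVAL;
 */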
147
148 /* Maximum number of clock/regulator pairs that may be referenced by
149 * the device node.
150 * This is dependent on support for of_property_read_u64_array() in the
151 * kernel.
 * Note that the number of clocks may exceed the number of regulators,
 * as mentioned in power_control_init().
154 */
155 #define BASE_MAX_NR_CLOCKS_REGULATORS (4)
156
157 /* Forward declarations */
158 struct kbase_context;
159 struct kbase_device;
160 struct kbase_as;
161 struct kbase_mmu_setup;
162 struct kbase_kinstr_jm;
163
164 /**
 * struct kbase_io_access - holds information about one register access
 *
 * @addr: register address; the lowest bit indicates the access type
 *        (0 = read, 1 = write)
168 * @value: value written or read
169 */
170 struct kbase_io_access {
171 uintptr_t addr;
172 u32 value;
173 };
174
175 /**
176 * struct kbase_io_history - keeps track of all recent register accesses
177 *
178 * @enabled: true if register accesses are recorded, false otherwise
179 * @lock: spinlock protecting kbase_io_access array
 * @count: number of register accesses recorded so far
181 * @size: number of elements in kbase_io_access array
182 * @buf: array of kbase_io_access
183 */
184 struct kbase_io_history {
185 bool enabled;
186
187 spinlock_t lock;
188 size_t count;
189 u16 size;
190 struct kbase_io_access *buf;
191 };
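
/*
 * Example (illustrative sketch only, not the driver's actual implementation;
 * locking is shown simplified): recording one write access in the history,
 * with the lowest bit of @addr marking the access as a write:
 *
 *	spin_lock(&h->lock);
 *	if (h->enabled) {
 *		h->buf[h->count % h->size].addr = (uintptr_t)reg_addr | 1;
 *		h->buf[h->count % h->size].value = value;
 *		h->count++;
 *	}
 *	spin_unlock(&h->lock);
 */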
192
193 /**
194 * struct kbase_debug_copy_buffer - information about the buffer to be copied.
195 *
196 * @size: size of the buffer in bytes
197 * @pages: pointer to an array of pointers to the pages which contain
198 * the buffer
199 * @is_vmalloc: true if @pages was allocated with vzalloc. false if @pages was
200 * allocated with kcalloc
201 * @nr_pages: number of pages
202 * @offset: offset into the pages
203 * @gpu_alloc: pointer to physical memory allocated by the GPU
204 * @extres_pages: array of pointers to the pages containing external resources
205 * for this buffer
206 * @nr_extres_pages: number of pages in @extres_pages
207 */
208 struct kbase_debug_copy_buffer {
209 size_t size;
210 struct page **pages;
211 bool is_vmalloc;
212 int nr_pages;
213 size_t offset;
214 struct kbase_mem_phy_alloc *gpu_alloc;
215
216 struct page **extres_pages;
217 int nr_extres_pages;
218 };
219
220 struct kbase_device_info {
221 u32 features;
222 };
223
224 struct kbase_mmu_setup {
225 u64 transtab;
226 u64 memattr;
227 u64 transcfg;
228 };
229
230 /**
231 * struct kbase_fault - object containing data relating to a page or bus fault.
232 * @addr: Records the faulting address.
233 * @extra_addr: Records the secondary fault address.
234 * @status: Records the fault status as reported by Hw.
235 * @protected_mode: Flag indicating whether the fault occurred in protected mode
236 * or not.
237 */
238 struct kbase_fault {
239 u64 addr;
240 u64 extra_addr;
241 u32 status;
242 bool protected_mode;
243 };
244
245 /**
246 * struct kbase_mmu_table - object representing a set of GPU page tables
247 * @mmu_teardown_pages: Buffer of 4 Pages in size, used to cache the entries
248 * of top & intermediate level page tables to avoid
249 * repeated calls to kmap_atomic during the MMU teardown.
250 * @mmu_lock: Lock to serialize the accesses made to multi level GPU
251 * page tables
252 * @pgd: Physical address of the page allocated for the top
253 * level page table of the context, this is used for
254 * MMU HW programming as the address translation will
255 * start from the top level page table.
256 * @group_id: A memory group ID to be passed to a platform-specific
257 * memory group manager.
258 * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
259 * @kctx: If this set of MMU tables belongs to a context then
260 * this is a back-reference to the context, otherwise
261 * it is NULL
262 */
263 struct kbase_mmu_table {
264 u64 *mmu_teardown_pages;
265 struct mutex mmu_lock;
266 phys_addr_t pgd;
267 u8 group_id;
268 struct kbase_context *kctx;
269 };
270
271 /**
272 * struct kbase_reg_zone - Information about GPU memory region zones
273 * @base_pfn: Page Frame Number in GPU virtual address space for the start of
274 * the Zone
275 * @va_size_pages: Size of the Zone in pages
276 *
 * Tracks information about a zone set up with KBASE_REG_ZONE() and related
 * macros.
 * In future, this could also store the &rb_root structures that are currently
 * in &kbase_context and &kbase_csf_device.
280 */
281 struct kbase_reg_zone {
282 u64 base_pfn;
283 u64 va_size_pages;
284 };
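
/*
 * Example (illustrative only): the exclusive end of a zone, in GPU PFNs,
 * follows directly from the two fields:
 *
 *	u64 zone_end_pfn = zone->base_pfn + zone->va_size_pages;
 */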
285
286 #if MALI_USE_CSF
287 #include "csf/mali_kbase_csf_defs.h"
288 #else
289 #include "jm/mali_kbase_jm_defs.h"
290 #endif
291
static inline int kbase_as_has_bus_fault(struct kbase_as *as,
					 struct kbase_fault *fault)
{
	return (fault == &as->bf_data);
}
297
static inline int kbase_as_has_page_fault(struct kbase_as *as,
					  struct kbase_fault *fault)
{
	return (fault == &as->pf_data);
}
303
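/*
 * Example (illustrative only; the handlers are hypothetical): the helpers
 * above let fault handling code dispatch on which fault a given
 * &struct kbase_fault describes:
 *
 *	if (kbase_as_has_bus_fault(as, fault))
 *		handle_bus_fault(as, fault);
 *	else if (kbase_as_has_page_fault(as, fault))
 *		handle_page_fault(as, fault);
 */
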
304 /**
305 * struct kbasep_mem_device - Data stored per device for memory allocation
306 *
307 * @used_pages: Tracks usage of OS shared memory. Updated when OS memory is
308 * allocated/freed.
309 * @ir_threshold: Fraction of the maximum size of an allocation that grows
310 * on GPU page fault that can be used before the driver
311 * switches to incremental rendering, in 1/256ths.
312 * 0 means disabled.
313 */
314 struct kbasep_mem_device {
315 atomic_t used_pages;
316 atomic_t ir_threshold;
317 };
318
319 struct kbase_clk_rate_listener;
320
321 /**
322 * typedef kbase_clk_rate_listener_on_change_t() - Frequency change callback
323 *
324 * @listener: Clock frequency change listener.
325 * @clk_index: Index of the clock for which the change has occurred.
 * @clk_rate_hz: Clock frequency (Hz).
 *
 * A callback invoked when the clock rate changes. The function must not
 * sleep, and no clock rate trace manager functions may be called from it,
 * as the manager's lock is held when it is invoked.
331 */
332 typedef void
333 kbase_clk_rate_listener_on_change_t(struct kbase_clk_rate_listener *listener,
334 u32 clk_index, u32 clk_rate_hz);
335
336 /**
337 * struct kbase_clk_rate_listener - Clock frequency listener
338 *
339 * @node: List node.
340 * @notify: Callback to be called when GPU frequency changes.
341 */
342 struct kbase_clk_rate_listener {
343 struct list_head node;
344 kbase_clk_rate_listener_on_change_t *notify;
345 };
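
/*
 * Example (illustrative only; the listener body is hypothetical): defining a
 * clock rate listener that could be added to the trace manager's @listeners
 * list. The callback must not sleep and must not call back into the clock
 * rate trace manager:
 *
 *	static void my_rate_change(struct kbase_clk_rate_listener *listener,
 *				   u32 clk_index, u32 clk_rate_hz)
 *	{
 *		pr_debug("clk %u now %u Hz\n", clk_index, clk_rate_hz);
 *	}
 *
 *	static struct kbase_clk_rate_listener my_listener = {
 *		.notify = my_rate_change,
 *	};
 */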
346
347 /**
348 * struct kbase_clk_rate_trace_manager - Data stored per device for GPU clock
349 * rate trace manager.
350 *
351 * @gpu_idle: Tracks the idle state of GPU.
352 * @clks: Array of pointer to structures storing data for every
353 * enumerated GPU clock.
354 * @clk_rate_trace_ops: Pointer to the platform specific GPU clock rate trace
355 * operations.
356 * @gpu_clk_rate_trace_write: Pointer to the function that would emit the
357 * tracepoint for the clock rate change.
 * @listeners: List of attached listeners.
359 * @lock: Lock to serialize the actions of GPU clock rate trace
360 * manager.
361 */
362 struct kbase_clk_rate_trace_manager {
363 bool gpu_idle;
364 struct kbase_clk_data *clks[BASE_MAX_NR_CLOCKS_REGULATORS];
365 struct kbase_clk_rate_trace_op_conf *clk_rate_trace_ops;
366 struct list_head listeners;
367 spinlock_t lock;
368 };
369
370 /**
371 * struct kbase_pm_device_data - Data stored per device for power management.
372 * @lock: The lock protecting Power Management structures accessed outside of
373 * IRQ.
374 * This lock must also be held whenever the GPU is being powered on or
375 * off.
376 * @active_count: The reference count of active contexts on this device. Note
377 * that some code paths keep shaders/the tiler powered whilst this is 0.
378 * Use kbase_pm_is_active() instead to check for such cases.
 * @suspending: Flag set when the device is suspending or suspended.
380 * @runtime_active: Flag to track if the GPU is in runtime suspended or active
381 * state. This ensures that runtime_put and runtime_get
382 * functions are called in pairs. For example if runtime_get
383 * has already been called from the power_on callback, then
384 * the call to it from runtime_gpu_active callback can be
385 * skipped.
 * @gpu_lost: Flag indicating that the GPU has been lost.
387 * This structure contains data for the power management framework. There
388 * is one instance of this structure per device in the system.
389 * @zero_active_count_wait: Wait queue set when active_count == 0
 * @resume_wait: Wait queue to wait on during the system resume of the GPU device.
391 * @debug_core_mask: Bit masks identifying the available shader cores that are
392 * specified via sysfs. One mask per job slot.
393 * @debug_core_mask_all: Bit masks identifying the available shader cores that
394 * are specified via sysfs.
395 * @callback_power_runtime_init: Callback for initializing the runtime power
396 * management. Return 0 on success, else error code
397 * @callback_power_runtime_term: Callback for terminating the runtime power
398 * management.
399 * @dvfs_period: Time in milliseconds between each dvfs sample
400 * @backend: KBase PM backend data
401 * @arb_vm_state: The state of the arbiter VM machine
402 * @gpu_users_waiting: Used by virtualization to notify the arbiter that there
403 * are users waiting for the GPU so that it can request and resume the
404 * driver.
405 * @clk_rtm: The state of the GPU clock rate trace manager
406 */
407 struct kbase_pm_device_data {
408 struct mutex lock;
409 int active_count;
410 bool suspending;
411 #if MALI_USE_CSF
412 bool runtime_active;
413 #endif
414 #ifdef CONFIG_MALI_ARBITER_SUPPORT
415 atomic_t gpu_lost;
416 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
417 wait_queue_head_t zero_active_count_wait;
418 wait_queue_head_t resume_wait;
419
420 #if MALI_USE_CSF
421 u64 debug_core_mask;
422 #else
423 /* One mask per job slot. */
424 u64 debug_core_mask[BASE_JM_MAX_NR_SLOTS];
425 u64 debug_core_mask_all;
426 #endif /* MALI_USE_CSF */
427
428 int (*callback_power_runtime_init)(struct kbase_device *kbdev);
429 void (*callback_power_runtime_term)(struct kbase_device *kbdev);
430 u32 dvfs_period;
431 struct kbase_pm_backend_data backend;
432 #ifdef CONFIG_MALI_ARBITER_SUPPORT
433 struct kbase_arbiter_vm_state *arb_vm_state;
434 atomic_t gpu_users_waiting;
435 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
436 struct kbase_clk_rate_trace_manager clk_rtm;
437 };
438
439 /**
440 * struct kbase_mem_pool - Page based memory pool for kctx/kbdev
441 * @kbdev: Kbase device where memory is used
442 * @cur_size: Number of free pages currently in the pool (may exceed
443 * @max_size in some corner cases)
444 * @max_size: Maximum number of free pages in the pool
445 * @order: order = 0 refers to a pool of 4 KB pages
446 * order = 9 refers to a pool of 2 MB pages (2^9 * 4KB = 2 MB)
447 * @group_id: A memory group ID to be passed to a platform-specific
448 * memory group manager, if present. Immutable.
449 * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
450 * @pool_lock: Lock protecting the pool - must be held when modifying
451 * @cur_size and @page_list
452 * @page_list: List of free pages in the pool
453 * @reclaim: Shrinker for kernel reclaim of free pages
454 * @next_pool: Pointer to next pool where pages can be allocated when this
455 * pool is empty. Pages will spill over to the next pool when
456 * this pool is full. Can be NULL if there is no next pool.
457 * @dying: true if the pool is being terminated, and any ongoing
458 * operations should be abandoned
459 * @dont_reclaim: true if the shrinker is forbidden from reclaiming memory from
 * this pool, e.g. during a grow operation
461 */
462 struct kbase_mem_pool {
463 struct kbase_device *kbdev;
464 size_t cur_size;
465 size_t max_size;
466 u8 order;
467 u8 group_id;
468 spinlock_t pool_lock;
469 struct list_head page_list;
470 struct shrinker reclaim;
471
472 struct kbase_mem_pool *next_pool;
473
474 bool dying;
475 bool dont_reclaim;
476 };
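
/*
 * Example (illustrative only): the amount of memory currently held by a pool
 * follows from @order and @cur_size, since each entry is a 2^order page
 * group (order 0 = 4 KiB, order 9 = 2 MiB with 4 KiB base pages):
 *
 *	size_t bytes = pool->cur_size << (PAGE_SHIFT + pool->order);
 */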
477
478 /**
479 * struct kbase_mem_pool_group - a complete set of physical memory pools.
480 *
481 * Memory pools are used to allow efficient reallocation of previously-freed
482 * physical pages. A pair of memory pools is initialized for each physical
483 * memory group: one for 4 KiB pages and one for 2 MiB pages. These arrays
484 * should be indexed by physical memory group ID, the meaning of which is
485 * defined by the systems integrator.
486 *
487 * @small: Array of objects containing the state for pools of 4 KiB size
488 * physical pages.
489 * @large: Array of objects containing the state for pools of 2 MiB size
490 * physical pages.
491 */
492 struct kbase_mem_pool_group {
493 struct kbase_mem_pool small[MEMORY_GROUP_MANAGER_NR_GROUPS];
494 struct kbase_mem_pool large[MEMORY_GROUP_MANAGER_NR_GROUPS];
495 };
496
497 /**
498 * struct kbase_mem_pool_config - Initial configuration for a physical memory
499 * pool
500 *
501 * @max_size: Maximum number of free pages that the pool can hold.
502 */
503 struct kbase_mem_pool_config {
504 size_t max_size;
505 };
506
507 /**
508 * struct kbase_mem_pool_group_config - Initial configuration for a complete
509 * set of physical memory pools
510 *
511 * This array should be indexed by physical memory group ID, the meaning
512 * of which is defined by the systems integrator.
513 *
514 * @small: Array of initial configuration for pools of 4 KiB pages.
515 * @large: Array of initial configuration for pools of 2 MiB pages.
516 */
517 struct kbase_mem_pool_group_config {
518 struct kbase_mem_pool_config small[MEMORY_GROUP_MANAGER_NR_GROUPS];
519 struct kbase_mem_pool_config large[MEMORY_GROUP_MANAGER_NR_GROUPS];
520 };
521
522 /**
523 * struct kbase_devfreq_opp - Lookup table for converting between nominal OPP
524 * frequency, real frequencies and core mask
525 * @real_freqs: Real GPU frequencies.
526 * @opp_volts: OPP voltages.
527 * @opp_freq: Nominal OPP frequency
528 * @core_mask: Shader core mask
529 */
530 struct kbase_devfreq_opp {
531 u64 opp_freq;
532 u64 core_mask;
533 u64 real_freqs[BASE_MAX_NR_CLOCKS_REGULATORS];
534 u32 opp_volts[BASE_MAX_NR_CLOCKS_REGULATORS];
535 };
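
/*
 * Example (illustrative sketch only): a lookup over the devfreq_table of
 * &struct kbase_device, matching a nominal OPP frequency to its entry so the
 * real frequencies and core mask can be programmed:
 *
 *	for (i = 0; i < kbdev->num_opps; i++)
 *		if (kbdev->devfreq_table[i].opp_freq == nominal_freq)
 *			return &kbdev->devfreq_table[i];
 */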
536
537 /* MMU mode flags */
538 #define KBASE_MMU_MODE_HAS_NON_CACHEABLE (1ul << 0) /* Has NON_CACHEABLE MEMATTR */
539
540 /**
541 * struct kbase_mmu_mode - object containing pointer to methods invoked for
542 * programming the MMU, as per the MMU mode supported
543 * by Hw.
 * @update: enable & set up/configure one of the GPU address spaces.
 * @get_as_setup: retrieve the configuration of one of the GPU address spaces.
 * @disable_as: disable one of the GPU address spaces.
547 * @pte_to_phy_addr: retrieve the physical address encoded in the page table entry.
548 * @ate_is_valid: check if the pte is a valid address translation entry
549 * encoding the physical address of the actual mapped page.
550 * @pte_is_valid: check if the pte is a valid entry encoding the physical
551 * address of the next lower level page table.
552 * @entry_set_ate: program the pte to be a valid address translation entry to
553 * encode the physical address of the actual page being mapped.
554 * @entry_set_pte: program the pte to be a valid entry to encode the physical
555 * address of the next lower level page table and also update
556 * the number of valid entries.
557 * @entry_invalidate: clear out or invalidate the pte.
558 * @get_num_valid_entries: returns the number of valid entries for a specific pgd.
559 * @set_num_valid_entries: sets the number of valid entries for a specific pgd
560 * @flags: bitmask of MMU mode flags. Refer to KBASE_MMU_MODE_ constants.
561 */
562 struct kbase_mmu_mode {
563 void (*update)(struct kbase_device *kbdev,
564 struct kbase_mmu_table *mmut,
565 int as_nr);
566 void (*get_as_setup)(struct kbase_mmu_table *mmut,
567 struct kbase_mmu_setup * const setup);
568 void (*disable_as)(struct kbase_device *kbdev, int as_nr);
569 phys_addr_t (*pte_to_phy_addr)(u64 entry);
570 int (*ate_is_valid)(u64 ate, int level);
571 int (*pte_is_valid)(u64 pte, int level);
572 void (*entry_set_ate)(u64 *entry, struct tagged_addr phy,
573 unsigned long flags, int level);
574 void (*entry_set_pte)(u64 *pgd, u64 vpfn, phys_addr_t phy);
575 void (*entry_invalidate)(u64 *entry);
576 unsigned int (*get_num_valid_entries)(u64 *pgd);
577 void (*set_num_valid_entries)(u64 *pgd,
578 unsigned int num_of_valid_entries);
579 unsigned long flags;
580 };
581
582 struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
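
/*
 * Example (illustrative only; variable names are illustrative): MMU
 * programming is done through the method table rather than by calling a
 * particular MMU mode directly; in the driver the table is normally reached
 * through kbdev->mmu_mode, e.g.:
 *
 *	struct kbase_mmu_mode const *mode = kbase_mmu_mode_get_aarch64();
 *
 *	if (mode->ate_is_valid(entry, MIDGARD_MMU_BOTTOMLEVEL))
 *		phys = mode->pte_to_phy_addr(entry);
 */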
583
584 #define DEVNAME_SIZE 16
585
586 /**
587 * enum kbase_devfreq_work_type - The type of work to perform in the devfreq
588 * suspend/resume worker.
 * @DEVFREQ_WORK_NONE: Initialisation state.
590 * @DEVFREQ_WORK_SUSPEND: Call devfreq_suspend_device().
591 * @DEVFREQ_WORK_RESUME: Call devfreq_resume_device().
592 */
593 enum kbase_devfreq_work_type {
594 DEVFREQ_WORK_NONE,
595 DEVFREQ_WORK_SUSPEND,
596 DEVFREQ_WORK_RESUME
597 };
598
599 /**
600 * struct kbase_devfreq_queue_info - Object representing an instance for managing
601 * the queued devfreq suspend/resume works.
602 * @workq: Workqueue for devfreq suspend/resume requests
603 * @work: Work item for devfreq suspend & resume
604 * @req_type: Requested work type to be performed by the devfreq
605 * suspend/resume worker
 * @acted_type: Work type that has been acted on by the worker, i.e. the
 *              internally recorded state of the suspend/resume request.
608 */
609 struct kbase_devfreq_queue_info {
610 struct workqueue_struct *workq;
611 struct work_struct work;
612 enum kbase_devfreq_work_type req_type;
613 enum kbase_devfreq_work_type acted_type;
614 };
615
616 /**
 * struct kbase_process - Object representing a kbase process, instantiated
 *                        when the first kbase context is created for that process.
619 * @tgid: Thread group ID.
620 * @total_gpu_pages: Total gpu pages allocated across all the contexts
621 * of this process, it accounts for both native allocations
622 * and dma_buf imported allocations.
623 * @kctx_list: List of kbase contexts created for the process.
624 * @kprcs_node: Node to a rb_tree, kbase_device will maintain a rb_tree
625 * based on key tgid, kprcs_node is the node link to
626 * &struct_kbase_device.process_root.
627 * @dma_buf_root: RB tree of the dma-buf imported allocations, imported
628 * across all the contexts created for this process.
629 * Used to ensure that pages of allocation are accounted
630 * only once for the process, even if the allocation gets
631 * imported multiple times for the process.
632 */
633 struct kbase_process {
634 pid_t tgid;
635 size_t total_gpu_pages;
636 struct list_head kctx_list;
637
638 struct rb_node kprcs_node;
639 struct rb_root dma_buf_root;
640 };
641
642 /**
643 * struct kbase_device - Object representing an instance of GPU platform device,
644 * allocated from the probe method of mali driver.
645 * @hw_quirks_sc: Configuration to be used for the shader cores as per
646 * the HW issues present in the GPU.
647 * @hw_quirks_tiler: Configuration to be used for the Tiler as per the HW
648 * issues present in the GPU.
649 * @hw_quirks_mmu: Configuration to be used for the MMU as per the HW
650 * issues present in the GPU.
651 * @hw_quirks_gpu: Configuration to be used for the Job Manager or CSF/MCU
652 * subsystems as per the HW issues present in the GPU.
653 * @entry: Links the device instance to the global list of GPU
654 * devices. The list would have as many entries as there
655 * are GPU device instances.
656 * @dev: Pointer to the kernel's generic/base representation
657 * of the GPU platform device.
658 * @mdev: Pointer to the miscellaneous device registered to
659 * provide Userspace access to kernel driver through the
660 * device file /dev/malixx.
661 * @reg_start: Base address of the region in physical address space
662 * where GPU registers have been mapped.
663 * @reg_size: Size of the region containing GPU registers
664 * @reg: Kernel virtual address of the region containing GPU
665 * registers, using which Driver will access the registers.
666 * @irqs: Array containing IRQ resource info for 3 types of
667 * interrupts : Job scheduling, MMU & GPU events (like
668 * power management, cache etc.)
669 * @irqs.irq: irq number
670 * @irqs.flags: irq flags
671 * @clocks: Pointer to the input clock resources referenced by
672 * the GPU device node.
673 * @scmi_clk: Pointer to the input scmi clock resources
674 * @nr_clocks: Number of clocks set in the clocks array.
675 * @regulators: Pointer to the structs corresponding to the
676 * regulators referenced by the GPU device node.
677 * @nr_regulators: Number of regulators set in the regulators array.
678 * @opp_table: Pointer to the device OPP structure maintaining the
679 * link to OPPs attached to a device. This is obtained
680 * after setting regulator names for the device.
 * @devname: string containing the name used for the GPU device instance;
 *           the miscellaneous device is registered using the same name.
683 * @id: Unique identifier for the device, indicates the number of
684 * devices which have been created so far.
685 * @model: Pointer, valid only when Driver is compiled to not access
686 * the real GPU Hw, to the dummy model which tries to mimic
687 * to some extent the state & behavior of GPU Hw in response
688 * to the register accesses made by the Driver.
689 * @irq_slab: slab cache for allocating the work items queued when
690 * model mimics raising of IRQ to cause an interrupt on CPU.
691 * @irq_workq: workqueue for processing the irq work items.
692 * @serving_job_irq: function to execute work items queued when model mimics
693 * the raising of JS irq, mimics the interrupt handler
694 * processing JS interrupts.
695 * @serving_gpu_irq: function to execute work items queued when model mimics
696 * the raising of GPU irq, mimics the interrupt handler
697 * processing GPU interrupts.
698 * @serving_mmu_irq: function to execute work items queued when model mimics
699 * the raising of MMU irq, mimics the interrupt handler
700 * processing MMU interrupts.
701 * @reg_op_lock: lock used by model to serialize the handling of register
702 * accesses made by the driver.
703 * @pm: Per device object for storing data for power management
704 * framework.
705 * @fw_load_lock: Mutex to protect firmware loading in @ref kbase_open.
706 * @csf: CSF object for the GPU device.
707 * @js_data: Per device object encapsulating the current context of
708 * Job Scheduler, which is global to the device and is not
709 * tied to any particular struct kbase_context running on
710 * the device
711 * @mem_pools: Global pools of free physical memory pages which can
712 * be used by all the contexts.
713 * @memdev: keeps track of the in use physical pages allocated by
714 * the Driver.
715 * @mmu_mode: Pointer to the object containing methods for programming
716 * the MMU, depending on the type of MMU supported by Hw.
717 * @mgm_dev: Pointer to the memory group manager device attached
718 * to the GPU device. This points to an internal memory
719 * group manager if no platform-specific memory group
720 * manager was retrieved through device tree.
721 * @as: Array of objects representing address spaces of GPU.
722 * @as_free: Bitpattern of free/available GPU address spaces.
723 * @as_to_kctx: Array of pointers to struct kbase_context, having
 * GPU address spaces assigned to them.
725 * @mmu_mask_change: Lock to serialize the access to MMU interrupt mask
726 * register used in the handling of Bus & Page faults.
727 * @gpu_props: Object containing complete information about the
728 * configuration/properties of GPU HW device in use.
729 * @hw_issues_mask: List of SW workarounds for HW issues
730 * @hw_features_mask: List of available HW features.
731 * @disjoint_event: struct for keeping track of the disjoint information,
732 * that whether the GPU is in a disjoint state and the
733 * number of disjoint events that have occurred on GPU.
734 * @disjoint_event.count: disjoint event count
735 * @disjoint_event.state: disjoint event state
736 * @nr_hw_address_spaces: Number of address spaces actually available in the
737 * GPU, remains constant after driver initialisation.
738 * @nr_user_address_spaces: Number of address spaces available to user contexts
739 * @hwcnt_backend_csf_if_fw: Firmware interface to access CSF GPU performance
740 * counters.
741 * @hwcnt: Structure used for instrumentation and HW counters
742 * dumping
743 * @hwcnt.lock: The lock should be used when accessing any of the
744 * following members
745 * @hwcnt.kctx: kbase context
746 * @hwcnt.addr: HW counter address
747 * @hwcnt.addr_bytes: HW counter size in bytes
748 * @hwcnt.backend: Kbase instrumentation backend
749 * @hwcnt_watchdog_timer: Hardware counter watchdog interface.
750 * @hwcnt_gpu_iface: Backend interface for GPU hardware counter access.
751 * @hwcnt_gpu_ctx: Context for GPU hardware counter access.
752 * @hwaccess_lock must be held when calling
753 * kbase_hwcnt_context_enable() with @hwcnt_gpu_ctx.
754 * @hwcnt_gpu_virt: Virtualizer for GPU hardware counters.
755 * @vinstr_ctx: vinstr context created per device.
756 * @kinstr_prfcnt_ctx: kinstr_prfcnt context created per device.
757 * @timeline_flags: Bitmask defining which sets of timeline tracepoints
758 * are enabled. If zero, there is no timeline client and
759 * therefore timeline is disabled.
760 * @timeline: Timeline context created per device.
761 * @ktrace: kbase device's ktrace
762 * @trace_lock: Lock to serialize the access to trace buffer.
763 * @trace_first_out: Index/offset in the trace buffer at which the first
764 * unread message is present.
765 * @trace_next_in: Index/offset in the trace buffer at which the new
766 * message will be written.
767 * @trace_rbuf: Pointer to the buffer storing debug messages/prints
768 * tracing the various events in Driver.
769 * The buffer is filled in circular fashion.
770 * @reset_timeout_ms: Number of milliseconds to wait for the soft stop to
771 * complete for the GPU jobs before proceeding with the
772 * GPU reset.
773 * @lowest_gpu_freq_khz: Lowest frequency in KHz that the GPU can run at. Used
774 * to calculate suitable timeouts for wait operations.
775 * @cache_clean_in_progress: Set when a cache clean has been started, and
776 * cleared when it has finished. This prevents multiple
777 * cache cleans being done simultaneously.
 * @cache_clean_queued: Cache clean operations that were requested while another was
779 * in progress. If this is not 0, another cache clean needs
780 * to be triggered immediately after completion of the
781 * current one.
782 * @cache_clean_wait: Signalled when a cache clean has finished.
783 * @platform_context: Platform specific private data to be accessed by
784 * platform specific config files only.
785 * @kctx_list: List of kbase_contexts created for the device,
786 * including any contexts that might be created for
787 * hardware counters.
788 * @kctx_list_lock: Lock protecting concurrent accesses to @kctx_list.
789 * @devfreq_profile: Describes devfreq profile for the Mali GPU device, passed
790 * to devfreq_add_device() to add devfreq feature to Mali
791 * GPU device.
792 * @devfreq: Pointer to devfreq structure for Mali GPU device,
793 * returned on the call to devfreq_add_device().
794 * @current_freqs: The real frequencies, corresponding to
795 * @current_nominal_freq, at which the Mali GPU device
796 * is currently operating, as retrieved from
797 * @devfreq_table in the target callback of
798 * @devfreq_profile.
799 * @current_nominal_freq: The nominal frequency currently used for the Mali GPU
800 * device as retrieved through devfreq_recommended_opp()
801 * using the freq value passed as an argument to target
802 * callback of @devfreq_profile
803 * @current_voltages: The voltages corresponding to @current_nominal_freq,
804 * as retrieved from @devfreq_table in the target
805 * callback of @devfreq_profile.
806 * @current_core_mask: bitmask of shader cores that are currently desired &
807 * enabled, corresponding to @current_nominal_freq as
808 * retrieved from @devfreq_table in the target callback
809 * of @devfreq_profile.
810 * @devfreq_table: Pointer to the lookup table for converting between
811 * nominal OPP (operating performance point) frequency,
812 * and real frequency and core mask. This table is
813 * constructed according to operating-points-v2-mali
814 * table in devicetree.
815 * @num_opps: Number of operating performance points available for the Mali
816 * GPU device.
817 * @last_devfreq_metrics: last PM metrics
818 * @devfreq_queue: Per device object for storing data that manages devfreq
819 * suspend & resume request queue and the related items.
820 * @devfreq_cooling: Pointer returned on registering devfreq cooling device
821 * corresponding to @devfreq.
822 * @ipa_protection_mode_switched: is set to TRUE when GPU is put into protected
823 * mode. It is a sticky flag which is cleared by IPA
824 * once it has made use of information that GPU had
825 * previously entered protected mode.
826 * @ipa: Top level structure for IPA, containing pointers to both
827 * configured & fallback models.
828 * @ipa.lock: Access to this struct must be with ipa.lock held
829 * @ipa.configured_model: ipa model to use
830 * @ipa.fallback_model: ipa fallback model
831 * @ipa.last_metrics: Values of the PM utilization metrics from last time
832 * the power model was invoked. The utilization is
833 * calculated as the difference between last_metrics
834 * and the current values.
835 * @ipa.force_fallback_model: true if use of fallback model has been forced by
836 * the User
837 * @ipa.last_sample_time: Records the time when counters, used for dynamic
838 * energy estimation, were last sampled.
 * @previous_frequency: Previous frequency of the GPU clock, used for the
 *                      BASE_HW_ISSUE_GPU2017_1336 workaround. This clock is
 *                      restored when the L2 is powered on.
842 * @job_fault_debug: Flag to control the dumping of debug data for job faults,
843 * set when the 'job_fault' debugfs file is opened.
844 * @mali_debugfs_directory: Root directory for the debugfs files created by the driver
845 * @debugfs_ctx_directory: Directory inside the @mali_debugfs_directory containing
846 * a sub-directory for every context.
847 * @debugfs_instr_directory: Instrumentation debugfs directory
848 * @debugfs_as_read_bitmap: bitmap of address spaces for which the bus or page fault
849 * has occurred.
850 * @job_fault_wq: Waitqueue to block the job fault dumping daemon till the
851 * occurrence of a job fault.
 * @job_fault_resume_wq: Waitqueue on which every context with a faulty job waits
853 * for the job fault dumping to complete before they can
854 * do bottom half of job done for the atoms which followed
855 * the faulty atom.
856 * @job_fault_resume_workq: workqueue to process the work items queued for the faulty
857 * atoms, whereby the work item function waits for the dumping
858 * to get completed.
859 * @job_fault_event_list: List of atoms, each belonging to a different context, which
860 * generated a job fault.
861 * @job_fault_event_lock: Lock to protect concurrent accesses to @job_fault_event_list
862 * @regs_dump_debugfs_data: Contains the offset of register to be read through debugfs
863 * file "read_register".
864 * @regs_dump_debugfs_data.reg_offset: Contains the offset of register to be
865 * read through debugfs file "read_register".
866 * @ctx_num: Total number of contexts created for the device.
867 * @io_history: Pointer to an object keeping a track of all recent
868 * register accesses. The history of register accesses
869 * can be read through "regs_history" debugfs file.
870 * @hwaccess: Contains a pointer to active kbase context and GPU
871 * backend specific data for HW access layer.
872 * @faults_pending: Count of page/bus faults waiting for bottom half processing
873 * via workqueues.
874 * @poweroff_pending: Set when power off operation for GPU is started, reset when
875 * power on for GPU is started.
876 * @infinite_cache_active_default: Set to enable using infinite cache for all the
877 * allocations of a new context.
878 * @mem_pool_defaults: Default configuration for the group of memory pools
879 * created for a new context.
880 * @current_gpu_coherency_mode: coherency mode in use, which can be different
881 * from @system_coherency, when using protected mode.
882 * @system_coherency: coherency mode as retrieved from the device tree.
883 * @cci_snoop_enabled: Flag to track when CCI snoops have been enabled.
884 * @snoop_enable_smc: SMC function ID to call into Trusted firmware to
885 * enable cache snooping. Value of 0 indicates that it
886 * is not used.
 * @snoop_disable_smc: SMC function ID to call to disable cache snooping.
888 * @protected_ops: Pointer to the methods for switching in or out of the
889 * protected mode, as per the @protected_dev being used.
 * @protected_dev: Pointer to the protected mode switcher device attached
 *                 to the GPU device, retrieved through the device tree if
 *                 the GPU does not support protected mode switching natively.
893 * @protected_mode: set to TRUE when GPU is put into protected mode
894 * @protected_mode_transition: set to TRUE when GPU is transitioning into or
895 * out of protected mode.
896 * @protected_mode_hwcnt_desired: True if we want GPU hardware counters to be
897 * enabled. Counters must be disabled before transition
898 * into protected mode.
899 * @protected_mode_hwcnt_disabled: True if GPU hardware counters are not
900 * enabled.
901 * @protected_mode_hwcnt_disable_work: Work item to disable GPU hardware
902 * counters, used if atomic disable is not possible.
903 * @buslogger: Pointer to the structure required for interfacing
904 * with the bus logger module to set the size of buffer
905 * used by the module for capturing bus logs.
906 * @irq_reset_flush: Flag to indicate that GPU reset is in-flight and flush of
907 * IRQ + bottom half is being done, to prevent the writes
908 * to MMU_IRQ_CLEAR & MMU_IRQ_MASK registers.
909 * @inited_subsys: Bitmap of inited sub systems at the time of device probe.
910 * Used during device remove or for handling error in probe.
911 * @hwaccess_lock: Lock, which can be taken from IRQ context, to serialize
912 * the updates made to Job dispatcher + scheduler states.
913 * @mmu_hw_mutex: Protects access to MMU operations and address space
914 * related state.
 * @serialize_jobs: Currently used mode for serialization of jobs; both
 *                  intra- & inter-slot serialization are supported.
917 * @backup_serialize_jobs: Copy of the original value of @serialize_jobs taken
918 * when GWT is enabled. Used to restore the original value
919 * on disabling of GWT.
920 * @js_ctx_scheduling_mode: Context scheduling mode currently being used by
921 * Job Scheduler
922 * @l2_size_override: Used to set L2 cache size via device tree blob
923 * @l2_hash_override: Used to set L2 cache hash via device tree blob
924 * @l2_hash_values_override: true if @l2_hash_values is valid.
925 * @l2_hash_values: Used to set L2 asn_hash via device tree blob
926 * @sysc_alloc: Array containing values to be programmed into
927 * SYSC_ALLOC[0..7] GPU registers on L2 cache
928 * power down. These come from either DTB or
929 * via DebugFS (if it is available in kernel).
930 * @process_root: rb_tree root node for maintaining a rb_tree of
931 * kbase_process based on key tgid(thread group ID).
932 * @dma_buf_root: rb_tree root node for maintaining a rb_tree of
933 * &struct kbase_dma_buf based on key dma_buf.
934 * We maintain a rb_tree of dma_buf mappings under
935 * kbase_device and kbase_process, one indicates a
936 * mapping and gpu memory usage at device level and
937 * other one at process level.
938 * @total_gpu_pages: Total GPU pages used for the complete GPU device.
939 * @dma_buf_lock: This mutex should be held while accounting for
940 * @total_gpu_pages from imported dma buffers.
941 * @gpu_mem_usage_lock: This spinlock should be held while accounting
942 * @total_gpu_pages for both native and dma-buf imported
943 * allocations.
944 * @dummy_job_wa: struct for dummy job execution workaround for the
945 * GPU hang issue
946 * @dummy_job_wa.ctx: dummy job workaround context
947 * @dummy_job_wa.jc: dummy job workaround job
948 * @dummy_job_wa.slot: dummy job workaround slot
949 * @dummy_job_wa.flags: dummy job workaround flags
950 * @dummy_job_wa_loaded: Flag for indicating that the workaround blob has
951 * been loaded. Protected by @fw_load_lock.
952 * @arb: Pointer to the arbiter device
953 * @pcm_dev: The priority control manager device.
954 * @oom_notifier_block: notifier_block containing kernel-registered out-of-
955 * memory handler.
956 */
957 struct kbase_device {
958 u32 hw_quirks_sc;
959 u32 hw_quirks_tiler;
960 u32 hw_quirks_mmu;
961 u32 hw_quirks_gpu;
962
963 struct list_head entry;
964 struct device *dev;
965 struct miscdevice mdev;
966 u64 reg_start;
967 size_t reg_size;
968 void __iomem *reg;
969
970 struct {
971 int irq;
972 int flags;
973 } irqs[3];
974
975 struct clk *clocks[BASE_MAX_NR_CLOCKS_REGULATORS];
976 struct clk *scmi_clk;
977 unsigned int nr_clocks;
978 #if IS_ENABLED(CONFIG_REGULATOR)
979 struct regulator *regulators[BASE_MAX_NR_CLOCKS_REGULATORS];
980 unsigned int nr_regulators;
981 #if (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE)
982 struct opp_table *opp_table;
983 #endif /* (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE */
984 #endif /* CONFIG_REGULATOR */
985 char devname[DEVNAME_SIZE];
986 u32 id;
987
988 #if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
989 void *model;
990 struct kmem_cache *irq_slab;
991 struct workqueue_struct *irq_workq;
992 atomic_t serving_job_irq;
993 atomic_t serving_gpu_irq;
994 atomic_t serving_mmu_irq;
995 spinlock_t reg_op_lock;
996 #endif /* CONFIG_MALI_BIFROST_NO_MALI */
997 struct kbase_pm_device_data pm;
998
999 struct kbase_mem_pool_group mem_pools;
1000 struct kbasep_mem_device memdev;
1001 struct kbase_mmu_mode const *mmu_mode;
1002
1003 struct memory_group_manager_device *mgm_dev;
1004
1005 struct kbase_as as[BASE_MAX_NR_AS];
1006 u16 as_free; /* Bitpattern of free Address Spaces */
1007 struct kbase_context *as_to_kctx[BASE_MAX_NR_AS];
1008
1009 spinlock_t mmu_mask_change;
1010
1011 struct kbase_gpu_props gpu_props;
1012
1013 unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
1014 unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
1015
1016 struct {
1017 atomic_t count;
1018 atomic_t state;
1019 } disjoint_event;
1020
1021 s8 nr_hw_address_spaces;
1022 s8 nr_user_address_spaces;
1023
1024 #if MALI_USE_CSF
1025 struct kbase_hwcnt_backend_csf_if hwcnt_backend_csf_if_fw;
1026 struct kbase_hwcnt_watchdog_interface hwcnt_watchdog_timer;
1027 #else
1028 struct kbase_hwcnt {
1029 spinlock_t lock;
1030
1031 struct kbase_context *kctx;
1032 u64 addr;
1033 u64 addr_bytes;
1034
1035 struct kbase_instr_backend backend;
1036 } hwcnt;
1037 #endif
1038
1039 struct kbase_hwcnt_backend_interface hwcnt_gpu_iface;
1040 struct kbase_hwcnt_context *hwcnt_gpu_ctx;
1041 struct kbase_hwcnt_virtualizer *hwcnt_gpu_virt;
1042 struct kbase_vinstr_context *vinstr_ctx;
1043 struct kbase_kinstr_prfcnt_context *kinstr_prfcnt_ctx;
1044
1045 atomic_t timeline_flags;
1046 struct kbase_timeline *timeline;
1047
1048 #if KBASE_KTRACE_TARGET_RBUF
1049 struct kbase_ktrace ktrace;
1050 #endif
1051 u32 reset_timeout_ms;
1052
1053 u64 lowest_gpu_freq_khz;
1054
1055 bool cache_clean_in_progress;
1056 u32 cache_clean_queued;
1057 wait_queue_head_t cache_clean_wait;
1058
1059 void *platform_context;
1060
1061 struct list_head kctx_list;
1062 struct mutex kctx_list_lock;
1063
1064 struct rockchip_opp_info opp_info;
1065 #ifdef CONFIG_MALI_BIFROST_DEVFREQ
1066 struct devfreq_dev_profile devfreq_profile;
1067 struct devfreq *devfreq;
1068 unsigned long current_freqs[BASE_MAX_NR_CLOCKS_REGULATORS];
1069 unsigned long current_nominal_freq;
1070 unsigned long current_voltages[BASE_MAX_NR_CLOCKS_REGULATORS];
1071 u64 current_core_mask;
1072 struct kbase_devfreq_opp *devfreq_table;
1073 int num_opps;
1074 struct kbasep_pm_metrics last_devfreq_metrics;
1075 struct monitor_dev_info *mdev_info;
1076 struct ipa_power_model_data *model_data;
1077 struct kbase_devfreq_queue_info devfreq_queue;
1078
1079 #if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
1080 struct thermal_cooling_device *devfreq_cooling;
1081 bool ipa_protection_mode_switched;
1082 struct {
1083 /* Access to this struct must be with ipa.lock held */
1084 struct mutex lock;
1085 struct kbase_ipa_model *configured_model;
1086 struct kbase_ipa_model *fallback_model;
1087
1088 /* Values of the PM utilization metrics from last time the
1089 * power model was invoked. The utilization is calculated as
1090 * the difference between last_metrics and the current values.
1091 */
1092 struct kbasep_pm_metrics last_metrics;
1093
1094 /* true if use of fallback model has been forced by the User */
1095 bool force_fallback_model;
1096 /* Records the time when counters, used for dynamic energy
1097 * estimation, were last sampled.
1098 */
1099 ktime_t last_sample_time;
1100 } ipa;
1101 #endif /* CONFIG_DEVFREQ_THERMAL */
1102 #endif /* CONFIG_MALI_BIFROST_DEVFREQ */
1103 unsigned long previous_frequency;
1104
1105 atomic_t job_fault_debug;
1106
1107 #if IS_ENABLED(CONFIG_DEBUG_FS)
1108 struct dentry *mali_debugfs_directory;
1109 struct dentry *debugfs_ctx_directory;
1110 struct dentry *debugfs_instr_directory;
1111
1112 #ifdef CONFIG_MALI_BIFROST_DEBUG
1113 u64 debugfs_as_read_bitmap;
1114 #endif /* CONFIG_MALI_BIFROST_DEBUG */
1115
1116 wait_queue_head_t job_fault_wq;
1117 wait_queue_head_t job_fault_resume_wq;
1118 struct workqueue_struct *job_fault_resume_workq;
1119 struct list_head job_fault_event_list;
1120 spinlock_t job_fault_event_lock;
1121
1122 #if !MALI_CUSTOMER_RELEASE
1123 struct {
1124 u32 reg_offset;
1125 } regs_dump_debugfs_data;
1126 #endif /* !MALI_CUSTOMER_RELEASE */
1127 #endif /* CONFIG_DEBUG_FS */
1128
1129 atomic_t ctx_num;
1130
1131 #if IS_ENABLED(CONFIG_DEBUG_FS)
1132 struct kbase_io_history io_history;
1133 #endif /* CONFIG_DEBUG_FS */
1134
1135 struct kbase_hwaccess_data hwaccess;
1136
1137 atomic_t faults_pending;
1138
1139 bool poweroff_pending;
1140
1141 #if (KERNEL_VERSION(4, 4, 0) <= LINUX_VERSION_CODE)
1142 bool infinite_cache_active_default;
1143 #else
1144 u32 infinite_cache_active_default;
1145 #endif
1146 struct kbase_mem_pool_group_config mem_pool_defaults;
1147
1148 u32 current_gpu_coherency_mode;
1149 u32 system_coherency;
1150
1151 bool cci_snoop_enabled;
1152
1153 u32 snoop_enable_smc;
1154 u32 snoop_disable_smc;
1155
1156 const struct protected_mode_ops *protected_ops;
1157
1158 struct protected_mode_device *protected_dev;
1159
1160 bool protected_mode;
1161
1162 bool protected_mode_transition;
1163
1164 bool protected_mode_hwcnt_desired;
1165
1166 bool protected_mode_hwcnt_disabled;
1167
1168 struct work_struct protected_mode_hwcnt_disable_work;
1169
1170
1171 bool irq_reset_flush;
1172
1173 u32 inited_subsys;
1174
1175 spinlock_t hwaccess_lock;
1176
1177 struct mutex mmu_hw_mutex;
1178
1179 u8 l2_size_override;
1180 u8 l2_hash_override;
1181 bool l2_hash_values_override;
1182 u32 l2_hash_values[ASN_HASH_COUNT];
1183
1184 u32 sysc_alloc[SYSC_ALLOC_COUNT];
1185
1186 struct mutex fw_load_lock;
1187 #if MALI_USE_CSF
1188 /* CSF object for the GPU device. */
1189 struct kbase_csf_device csf;
1190 #else
1191 struct kbasep_js_device_data js_data;
1192
1193 /* See KBASE_JS_*_PRIORITY_MODE for details. */
1194 u32 js_ctx_scheduling_mode;
1195
1196 /* See KBASE_SERIALIZE_* for details */
1197 u8 serialize_jobs;
1198
1199 #ifdef CONFIG_MALI_CINSTR_GWT
1200 u8 backup_serialize_jobs;
1201 #endif /* CONFIG_MALI_CINSTR_GWT */
1202
1203 #endif /* MALI_USE_CSF */
1204
1205 struct rb_root process_root;
1206 struct rb_root dma_buf_root;
1207
1208 size_t total_gpu_pages;
1209 struct mutex dma_buf_lock;
1210 spinlock_t gpu_mem_usage_lock;
1211
1212 struct {
1213 struct kbase_context *ctx;
1214 u64 jc;
1215 int slot;
1216 u64 flags;
1217 } dummy_job_wa;
1218 bool dummy_job_wa_loaded;
1219
1220 #ifdef CONFIG_MALI_ARBITER_SUPPORT
1221 struct kbase_arbiter_device arb;
1222 #endif
1223 /* Priority Control Manager device */
1224 struct priority_control_manager_device *pcm_dev;
1225
1226 struct notifier_block oom_notifier_block;
1227
1228 };
1229
1230 /**
1231 * enum kbase_file_state - Initialization state of a file opened by @kbase_open
1232 *
1233 * @KBASE_FILE_NEED_VSN: Initial state, awaiting API version.
1234 * @KBASE_FILE_VSN_IN_PROGRESS: Indicates if setting an API version is in
1235 * progress and other setup calls shall be
1236 * rejected.
1237 * @KBASE_FILE_NEED_CTX: Indicates if the API version handshake has
1238 * completed, awaiting context creation flags.
1239 * @KBASE_FILE_CTX_IN_PROGRESS: Indicates if the context's setup is in progress
1240 * and other setup calls shall be rejected.
1241 * @KBASE_FILE_COMPLETE: Indicates if the setup for context has
1242 * completed, i.e. flags have been set for the
1243 * context.
1244 *
1245 * The driver allows only limited interaction with user-space until setup
1246 * is complete.
1247 */
1248 enum kbase_file_state {
1249 KBASE_FILE_NEED_VSN,
1250 KBASE_FILE_VSN_IN_PROGRESS,
1251 KBASE_FILE_NEED_CTX,
1252 KBASE_FILE_CTX_IN_PROGRESS,
1253 KBASE_FILE_COMPLETE
1254 };
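
/*
 * Example (illustrative only; the error code is an arbitrary choice): setup
 * ioctls are only accepted in the matching state, e.g. a context-creation
 * call is rejected until the version handshake has completed:
 *
 *	if (atomic_read(&kfile->setup_state) != KBASE_FILE_NEED_CTX)
 *		return -EPERM;
 */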
1255
1256 /**
1257 * struct kbase_file - Object representing a file opened by @kbase_open
1258 *
1259 * @kbdev: Object representing an instance of GPU platform device,
1260 * allocated from the probe method of the Mali driver.
1261 * @filp: Pointer to the struct file corresponding to device file
1262 * /dev/malixx instance, passed to the file's open method.
1263 * @kctx: Object representing an entity, among which GPU is
1264 * scheduled and which gets its own GPU address space.
1265 * Invalid until @setup_state is KBASE_FILE_COMPLETE.
1266 * @api_version: Contains the version number for User/kernel interface,
1267 * used for compatibility check. Invalid until
1268 * @setup_state is KBASE_FILE_NEED_CTX.
1269 * @setup_state: Initialization state of the file. Values come from
1270 * the kbase_file_state enumeration.
1271 */
1272 struct kbase_file {
1273 struct kbase_device *kbdev;
1274 struct file *filp;
1275 struct kbase_context *kctx;
1276 unsigned long api_version;
1277 atomic_t setup_state;
1278 };
1279 #if MALI_JIT_PRESSURE_LIMIT_BASE
1280 /**
1281 * enum kbase_context_flags - Flags for kbase contexts
1282 *
1283 * @KCTX_COMPAT: Set when the context process is a compat process, 32-bit
1284 * process on a 64-bit kernel.
1285 *
1286 * @KCTX_RUNNABLE_REF: Set when context is counted in
1287 * kbdev->js_data.nr_contexts_runnable. Must hold queue_mutex when accessing.
1288 *
1289 * @KCTX_ACTIVE: Set when the context is active.
1290 *
1291 * @KCTX_PULLED: Set when last kick() caused atoms to be pulled from this
1292 * context.
1293 *
1294 * @KCTX_MEM_PROFILE_INITIALIZED: Set when the context's memory profile has been
1295 * initialized.
1296 *
1297 * @KCTX_INFINITE_CACHE: Set when infinite cache is to be enabled for new
1298 * allocations. Existing allocations will not change.
1299 *
1300 * @KCTX_SUBMIT_DISABLED: Set to prevent context from submitting any jobs.
1301 *
 * @KCTX_PRIVILEGED: Set if the context uses an address space and should be kept
1303 * scheduled in.
1304 *
1305 * @KCTX_SCHEDULED: Set when the context is scheduled on the Run Pool.
1306 * This is only ever updated whilst the jsctx_mutex is held.
1307 *
1308 * @KCTX_DYING: Set when the context process is in the process of being evicted.
1309 *
1310 * @KCTX_NO_IMPLICIT_SYNC: Set when explicit Android fences are in use on this
1311 * context, to disable use of implicit dma-buf fences. This is used to avoid
1312 * potential synchronization deadlocks.
1313 *
1314 * @KCTX_FORCE_SAME_VA: Set when BASE_MEM_SAME_VA should be forced on memory
1315 * allocations. For 64-bit clients it is enabled by default, and disabled by
1316 * default on 32-bit clients. Being able to clear this flag is only used for
1317 * testing purposes of the custom zone allocation on 64-bit user-space builds,
1318 * where we also require more control than is available through e.g. the JIT
1319 * allocation mechanism. However, the 64-bit user-space client must still
1320 * reserve a JIT region using KBASE_IOCTL_MEM_JIT_INIT
1321 *
1322 * @KCTX_PULLED_SINCE_ACTIVE_JS0: Set when the context has had an atom pulled
1323 * from it for job slot 0. This is reset when the context first goes active or
1324 * is re-activated on that slot.
1325 *
1326 * @KCTX_PULLED_SINCE_ACTIVE_JS1: Set when the context has had an atom pulled
1327 * from it for job slot 1. This is reset when the context first goes active or
1328 * is re-activated on that slot.
1329 *
1330 * @KCTX_PULLED_SINCE_ACTIVE_JS2: Set when the context has had an atom pulled
1331 * from it for job slot 2. This is reset when the context first goes active or
1332 * is re-activated on that slot.
1333 *
1334 * @KCTX_AS_DISABLED_ON_FAULT: Set when the GPU address space is disabled for
 * the context due to an unhandled page (or bus) fault. It is cleared when the
 * refcount for the context drops to 0 or when the address spaces are
1337 * re-enabled on GPU reset or power cycle.
1338 *
1339 * @KCTX_JPL_ENABLED: Set when JIT physical page limit is less than JIT virtual
1340 * address page limit, so we must take care to not exceed the physical limit
1341 *
1342 * All members need to be separate bits. This enum is intended for use in a
1343 * bitmask where multiple values get OR-ed together.
1344 */
1345 enum kbase_context_flags {
1346 KCTX_COMPAT = 1U << 0,
1347 KCTX_RUNNABLE_REF = 1U << 1,
1348 KCTX_ACTIVE = 1U << 2,
1349 KCTX_PULLED = 1U << 3,
1350 KCTX_MEM_PROFILE_INITIALIZED = 1U << 4,
1351 KCTX_INFINITE_CACHE = 1U << 5,
1352 KCTX_SUBMIT_DISABLED = 1U << 6,
1353 KCTX_PRIVILEGED = 1U << 7,
1354 KCTX_SCHEDULED = 1U << 8,
1355 KCTX_DYING = 1U << 9,
1356 KCTX_NO_IMPLICIT_SYNC = 1U << 10,
1357 KCTX_FORCE_SAME_VA = 1U << 11,
1358 KCTX_PULLED_SINCE_ACTIVE_JS0 = 1U << 12,
1359 KCTX_PULLED_SINCE_ACTIVE_JS1 = 1U << 13,
1360 KCTX_PULLED_SINCE_ACTIVE_JS2 = 1U << 14,
1361 KCTX_AS_DISABLED_ON_FAULT = 1U << 15,
1362 KCTX_JPL_ENABLED = 1U << 16,
1363 };
1364 #else
1365 /**
1366 * enum kbase_context_flags - Flags for kbase contexts
1367 *
1368 * @KCTX_COMPAT: Set when the context process is a compat process, 32-bit
1369 * process on a 64-bit kernel.
1370 *
1371 * @KCTX_RUNNABLE_REF: Set when context is counted in
1372 * kbdev->js_data.nr_contexts_runnable. Must hold queue_mutex when accessing.
1373 *
1374 * @KCTX_ACTIVE: Set when the context is active.
1375 *
1376 * @KCTX_PULLED: Set when last kick() caused atoms to be pulled from this
1377 * context.
1378 *
1379 * @KCTX_MEM_PROFILE_INITIALIZED: Set when the context's memory profile has been
1380 * initialized.
1381 *
1382 * @KCTX_INFINITE_CACHE: Set when infinite cache is to be enabled for new
1383 * allocations. Existing allocations will not change.
1384 *
1385 * @KCTX_SUBMIT_DISABLED: Set to prevent context from submitting any jobs.
1386 *
 * @KCTX_PRIVILEGED: Set if the context uses an address space and should be kept
1388 * scheduled in.
1389 *
1390 * @KCTX_SCHEDULED: Set when the context is scheduled on the Run Pool.
1391 * This is only ever updated whilst the jsctx_mutex is held.
1392 *
1393 * @KCTX_DYING: Set when the context process is in the process of being evicted.
1394 *
1395 * @KCTX_NO_IMPLICIT_SYNC: Set when explicit Android fences are in use on this
1396 * context, to disable use of implicit dma-buf fences. This is used to avoid
1397 * potential synchronization deadlocks.
1398 *
1399 * @KCTX_FORCE_SAME_VA: Set when BASE_MEM_SAME_VA should be forced on memory
1400 * allocations. For 64-bit clients it is enabled by default, and disabled by
1401 * default on 32-bit clients. Being able to clear this flag is only used for
1402 * testing purposes of the custom zone allocation on 64-bit user-space builds,
1403 * where we also require more control than is available through e.g. the JIT
1404 * allocation mechanism. However, the 64-bit user-space client must still
1405 * reserve a JIT region using KBASE_IOCTL_MEM_JIT_INIT
1406 *
1407 * @KCTX_PULLED_SINCE_ACTIVE_JS0: Set when the context has had an atom pulled
1408 * from it for job slot 0. This is reset when the context first goes active or
1409 * is re-activated on that slot.
1410 *
1411 * @KCTX_PULLED_SINCE_ACTIVE_JS1: Set when the context has had an atom pulled
1412 * from it for job slot 1. This is reset when the context first goes active or
1413 * is re-activated on that slot.
1414 *
1415 * @KCTX_PULLED_SINCE_ACTIVE_JS2: Set when the context has had an atom pulled
1416 * from it for job slot 2. This is reset when the context first goes active or
1417 * is re-activated on that slot.
1418 *
1419 * @KCTX_AS_DISABLED_ON_FAULT: Set when the GPU address space is disabled for
1420 * the context due to an unhandled page (or bus) fault. It is cleared when the
1421 * refcount for the context drops to 0 or when the address spaces are
1422 * re-enabled on GPU reset or power cycle.
1423 *
1424 * All members need to be separate bits. This enum is intended for use in a
1425 * bitmask where multiple values get OR-ed together.
1426 */
1427 enum kbase_context_flags {
1428 KCTX_COMPAT = 1U << 0,
1429 KCTX_RUNNABLE_REF = 1U << 1,
1430 KCTX_ACTIVE = 1U << 2,
1431 KCTX_PULLED = 1U << 3,
1432 KCTX_MEM_PROFILE_INITIALIZED = 1U << 4,
1433 KCTX_INFINITE_CACHE = 1U << 5,
1434 KCTX_SUBMIT_DISABLED = 1U << 6,
1435 KCTX_PRIVILEGED = 1U << 7,
1436 KCTX_SCHEDULED = 1U << 8,
1437 KCTX_DYING = 1U << 9,
1438 KCTX_NO_IMPLICIT_SYNC = 1U << 10,
1439 KCTX_FORCE_SAME_VA = 1U << 11,
1440 KCTX_PULLED_SINCE_ACTIVE_JS0 = 1U << 12,
1441 KCTX_PULLED_SINCE_ACTIVE_JS1 = 1U << 13,
1442 KCTX_PULLED_SINCE_ACTIVE_JS2 = 1U << 14,
1443 KCTX_AS_DISABLED_ON_FAULT = 1U << 15,
1444 };
1445 #endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
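/*
 * Example (illustrative sketch only, not part of this header): the flags above
 * are distinct bits so they can be OR-ed into a single bitmask, e.g. the
 * atomic @flags field of struct kbase_context defined below, and tested
 * individually. The helper here is hypothetical and only shows that pattern;
 * it is not a declaration of the driver's actual accessors.
 *
 *	static inline bool kctx_flags_any_set(struct kbase_context *kctx,
 *					      u32 mask)
 *	{
 *		return (atomic_read(&kctx->flags) & mask) != 0;
 *	}
 *
 *	// e.g. refuse submission for a dying or disabled context:
 *	// if (kctx_flags_any_set(kctx, KCTX_DYING | KCTX_SUBMIT_DISABLED)) ...
 */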
1446
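/**
 * struct kbase_sub_alloc - Tracks the partial use of a 2MB large page that has
 *                          been split into 4KB sub-pages (description inferred
 *                          from the @mem_partials documentation below).
 * @link:      Node in the kbase_context::mem_partials list.
 * @page:      Pointer to the 2MB large page being sub-allocated from.
 * @sub_pages: Bitmap with one bit per 4KB sub-page of the large page.
 */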
1447 struct kbase_sub_alloc {
1448 struct list_head link;
1449 struct page *page;
1450 DECLARE_BITMAP(sub_pages, SZ_2M / SZ_4K);
1451 };
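/*
 * Illustrative sketch (assumption: a set bit in @sub_pages marks a 4KB
 * sub-page that is in use): a free sub-page within a partially used 2MB page
 * can be located with the standard bitmap helpers, e.g.:
 *
 *	unsigned long idx = find_first_zero_bit(sa->sub_pages, SZ_2M / SZ_4K);
 *
 *	if (idx < SZ_2M / SZ_4K)
 *		bitmap_set(sa->sub_pages, idx, 1);
 */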
1452
1453 /**
1454 * struct kbase_context - Kernel base context
1455 *
1456 * @filp: Pointer to the struct file corresponding to device file
1457 * /dev/malixx instance, passed to the file's open method.
1458 * @kbdev: Pointer to the Kbase device for which the context is created.
1459 * @kctx_list_link: Node into Kbase device list of contexts.
1460 * @mmu: Structure holding details of the MMU tables for this
1461 * context
1462 * @id: Unique identifier for the context, indicates the number of
1463 * contexts which have been created for the device so far.
1464 * @api_version: Contains the version number of the user/kernel interface,
1465 * used for compatibility checks.
1466 * @event_list: list of posted events about completed atoms, to be sent to
1467 * the event handling thread of Userspace.
1468 * @event_coalesce_list: list containing events corresponding to successive atoms
1469 * which have requested deferred delivery of the completion
1470 * events to Userspace.
1471 * @event_mutex: Lock to protect concurrent access to @event_list &
1472 * @event_coalesce_list.
1473 * @event_closed: Flag set through POST_TERM ioctl, indicates that Driver
1474 * should stop posting events and also inform event handling
1475 * thread that context termination is in progress.
1476 * @event_workq: Workqueue for processing work items corresponding to atoms
1477 * that do not return an event to userspace.
1478 * @event_count: Count of the posted events to be consumed by Userspace.
1479 * @event_coalesce_count: Count of the events present in @event_coalesce_list.
1480 * @flags: bitmap of enums from kbase_context_flags, indicating the
1481 * state & attributes for the context.
1482 * @aliasing_sink_page: Special page used for KBASE_MEM_TYPE_ALIAS allocations,
1483 * which can alias a number of memory regions. The page
1484 * represents a region where it is mapped with a write-alloc
1485 * cache setup, typically used when the write result of the
1486 * GPU isn't needed, but the GPU must write anyway.
1487 * @mem_partials_lock: Lock for protecting the operations done on the elements
1488 * added to @mem_partials list.
1489 * @mem_partials: List head for the list of large pages, 2MB in size, which
1490 * have been split into 4 KB pages and are used
1491 * partially for the allocations >= 2 MB in size.
1492 * @reg_lock: Lock used for GPU virtual address space management operations,
1493 * like adding/freeing a memory region in the address space.
1494 * It could perhaps be converted to an rwlock.
1495 * @reg_rbtree_same: RB tree of the memory regions allocated from the SAME_VA
1496 * zone of the GPU virtual address space. Used for allocations
1497 * having the same value for GPU & CPU virtual address.
1498 * @reg_rbtree_custom: RB tree of the memory regions allocated from the CUSTOM_VA
1499 * zone of the GPU virtual address space.
1500 * @reg_rbtree_exec: RB tree of the memory regions allocated from the EXEC_VA
1501 * zone of the GPU virtual address space. Used for GPU-executable
1502 * allocations which don't need the SAME_VA property.
1503 * @reg_zone: Zone information for the reg_rbtree_<...> members.
1504 * @cookies: Bitmask of BITS_PER_LONG bits, used mainly for
1505 * SAME_VA allocations to defer the reservation of memory region
1506 * (from the GPU virtual address space) from base_mem_alloc
1507 * ioctl to mmap system call. This helps returning unique
1508 * handles, disguised as GPU VA, to Userspace from base_mem_alloc
1509 * and later retrieving the pointer to memory region structure
1510 * in the mmap handler.
1511 * @pending_regions: Array containing pointers to memory region structures,
1512 * used in conjunction with @cookies bitmask mainly for
1513 * providing a mechanism to have the same value for CPU &
1514 * GPU virtual address.
1515 * @event_queue: Wait queue used for blocking the thread, which consumes
1516 * the base_jd_event corresponding to an atom, when there
1517 * are no more posted events.
1518 * @tgid: Thread group ID of the process whose thread created
1519 * the context (by calling KBASE_IOCTL_VERSION_CHECK or
1520 * KBASE_IOCTL_SET_FLAGS, depending on the @api_version).
1521 * This is usually, but not necessarily, the same as the
1522 * process whose thread opened the device file
1523 * /dev/malixx instance.
1524 * @pid: ID of the thread, corresponding to process @tgid,
1525 * which actually created the context. This is usually,
1526 * but not necessarily, the same as the thread which
1527 * opened the device file /dev/malixx instance.
1528 * @csf: kbase csf context
1529 * @jctx: object encapsulating all the Job dispatcher related state,
1530 * including the array of atoms.
1531 * @used_pages: Keeps track of the number of 4KB physical pages in use
1532 * for the context.
1533 * @nonmapped_pages: Updated in the same way as @used_pages, except for the case
1534 * when the special tracking page is freed by userspace, in
1535 * which case it is reset to 0.
1536 * @permanent_mapped_pages: Usage count of permanently mapped memory
1537 * @mem_pools: Context-specific pools of free physical memory pages.
1538 * @reclaim: Shrinker object registered with the kernel containing
1539 * the pointer to callback function which is invoked under
1540 * low memory conditions. In the callback function Driver
1541 * frees up the memory for allocations marked as
1542 * evictable/reclaimable.
1543 * @evict_list: List head for the list containing the allocations which
1544 * can be evicted or freed up in the shrinker callback.
1545 * @evict_nents: Total number of pages allocated by the allocations within
1546 * @evict_list (atomic).
1547 * @waiting_soft_jobs: List head for the list containing softjob atoms, which
1548 * are either waiting for the event set operation, or waiting
1549 * for the signaling of an input fence, or waiting for the GPU
1550 * device to be powered on so as to dump the CPU/GPU timestamps.
1551 * @waiting_soft_jobs_lock: Lock to protect @waiting_soft_jobs list from concurrent
1552 * accesses.
1553 * @dma_fence: Object containing list head for the list of dma-buf fence
1554 * waiting atoms and the waitqueue to process the work item
1555 * queued for the atoms blocked on the signaling of dma-buf
1556 * fences.
1557 * @dma_fence.waiting_resource: list head for the list of dma-buf fence
1558 * @dma_fence.wq: waitqueue to process the work item queued
1559 * @as_nr: id of the address space being used for the scheduled in
1560 * context. This is effectively part of the Run Pool, because
1561 * it only has a valid setting (!=KBASEP_AS_NR_INVALID) whilst
1562 * the context is scheduled in. The hwaccess_lock must be held
1563 * whilst accessing this.
1564 * If the context relating to this value of as_nr is required,
1565 * then the context must be retained to ensure that it doesn't
1566 * disappear whilst it is being used. Alternatively, hwaccess_lock
1567 * can be held to ensure the context doesn't disappear (but this
1568 * has restrictions on what other locks can be taken simultaneously).
1569 * @refcount: Keeps track of the number of users of this context. A user
1570 * can be a job that is available for execution, instrumentation
1571 * needing to 'pin' a context for counter collection, etc.
1572 * If the refcount reaches 0 then this context is considered
1573 * inactive and the previously programmed AS might be cleared
1574 * at any point.
1575 * Generally the reference count is incremented when the context
1576 * is scheduled in and an atom is pulled from the context's per
1577 * slot runnable tree (JM GPUs), or when a GPU command queue
1578 * group is programmed on a CSG slot (CSF GPUs).
1579 * @mm_update_lock: lock used for handling of special tracking page.
1580 * @process_mm: Pointer to the memory descriptor of the process which
1581 * created the context. Used for accounting the physical
1582 * pages used for GPU allocations, done for the context,
1583 * to the memory consumed by the process.
1584 * @gpu_va_end: End address of the GPU va space (in 4KB page units)
1585 * @running_total_tiler_heap_nr_chunks: Running total of the number of chunks in all
1586 * tiler heaps of the kbase context.
1587 * @running_total_tiler_heap_memory: Running total of the tiler heap memory in the
1588 * kbase context.
1589 * @peak_total_tiler_heap_memory: Peak value of the total tiler heap memory in the
1590 * kbase context.
1591 * @jit_va: Indicates if a JIT_VA zone has been created.
1592 * @mem_profile_data: Buffer containing the profiling information provided by
1593 * Userspace, can be read through the mem_profile debugfs file.
1594 * @mem_profile_size: Size of the @mem_profile_data.
1595 * @mem_profile_lock: Lock to serialize the operations related to mem_profile
1596 * debugfs file.
1597 * @kctx_dentry: Pointer to the debugfs directory created for every context,
1598 * inside kbase_device::debugfs_ctx_directory, containing
1599 * context specific files.
1600 * @reg_dump: Buffer containing a register offset & value pair, used
1601 * for dumping job fault debug info.
1602 * @job_fault_count: Indicates that a job fault occurred for the context and
1603 * dumping of its debug info is in progress.
1604 * @job_fault_resume_event_list: List containing atoms completed after the faulty
1605 * atom but before the debug data for the faulty atom was dumped.
1606 * @jsctx_queue: Per slot & priority arrays of object containing the root
1607 * of RB-tree holding currently runnable atoms on the job slot
1608 * and the head item of the linked list of atoms blocked on
1609 * cross-slot dependencies.
1610 * @slot_tracking: Tracking and control of this context's use of all job
1611 * slots
1612 * @atoms_pulled_all_slots: Total number of atoms currently pulled from the
1613 * context, across all slots.
1614 * @slots_pullable: Bitmask of slots, indicating the slots for which the
1615 * context has pullable atoms in the runnable tree.
1616 * @work: Work structure used for deferred ASID assignment.
1617 * @completed_jobs: List containing completed atoms for which base_jd_event is
1618 * to be posted.
1619 * @work_count: Number of work items, corresponding to atoms, currently
1620 * pending on job_done workqueue of @jctx.
1621 * @soft_job_timeout: Timer object used for failing/cancelling the waiting
1622 * soft-jobs which have been blocked for more than the
1623 * timeout value used for the soft-jobs.
1624 * @jit_alloc: Array of 256 pointers to GPU memory regions, used for
1625 * just-in-time memory allocations.
1626 * @jit_max_allocations: Maximum allowed number of in-flight
1627 * just-in-time memory allocations.
1628 * @jit_current_allocations: Current number of in-flight just-in-time
1629 * memory allocations.
1630 * @jit_current_allocations_per_bin: Current number of in-flight just-in-time
1631 * memory allocations per bin.
1632 * @jit_version: Version number indicating whether userspace is using
1633 * old or new version of interface for just-in-time
1634 * memory allocations.
1635 * 1 -> client used KBASE_IOCTL_MEM_JIT_INIT_10_2
1636 * 2 -> client used KBASE_IOCTL_MEM_JIT_INIT_11_5
1637 * 3 -> client used KBASE_IOCTL_MEM_JIT_INIT
1638 * @jit_group_id: A memory group ID to be passed to a platform-specific
1639 * memory group manager.
1640 * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
1641 * @jit_phys_pages_limit: Limit of physical pages to apply across all
1642 * just-in-time memory allocations, applied to
1643 * @jit_current_phys_pressure.
1644 * @jit_current_phys_pressure: Current 'pressure' on physical pages, which is
1645 * the sum of the worst case estimate of pages that
1646 * could be used (i.e. the
1647 * &struct kbase_va_region.nr_pages for all in-use
1648 * just-in-time memory regions that have not yet had
1649 * a usage report) and the actual number of pages
1650 * that were used (i.e. the
1651 * &struct kbase_va_region.used_pages for regions
1652 * that have had a usage report).
1653 * @jit_phys_pages_to_be_allocated: Count of the physical pages that are being
1654 * now allocated for just-in-time memory
1655 * allocations of a context (across all the
1656 * threads). This is supposed to be updated
1657 * with @reg_lock held before allocating
1658 * the backing pages. This helps ensure that
1659 * total physical memory usage for just in
1660 * time memory allocation remains within the
1661 * @jit_phys_pages_limit in multi-threaded
1662 * scenarios.
1663 * @jit_active_head: List containing the just-in-time memory allocations
1664 * which are in use.
1665 * @jit_pool_head: List containing the just-in-time memory allocations
1666 * which have been freed up by userspace and so are no longer
1667 * in use by it.
1668 * Driver caches them to quickly fulfill requests for new
1669 * JIT allocations. They are released in case of memory
1670 * pressure as they are put on the @evict_list when they
1671 * are freed up by userspace.
1672 * @jit_destroy_head: List containing the just-in-time memory allocations
1673 * which were moved to it from @jit_pool_head, in the
1674 * shrinker callback, after freeing their backing
1675 * physical pages.
1676 * @jit_evict_lock: Lock used for operations done on just-in-time memory
1677 * allocations and also for accessing @evict_list.
1678 * @jit_work: Work item queued to defer the freeing of a memory
1679 * region when a just-in-time memory allocation is moved
1680 * to @jit_destroy_head.
1681 * @ext_res_meta_head: A list of sticky external resources which were requested to
1682 * be mapped on GPU side, through a softjob atom of type
1683 * EXT_RES_MAP or STICKY_RESOURCE_MAP ioctl.
1684 * @age_count: Counter incremented on every call to jd_submit_atom,
1685 * the atom is assigned the snapshot of this counter, which
1686 * is used to determine the atom's age when it is added to
1687 * the runnable RB-tree.
1688 * @trim_level: Level of JIT allocation trimming to perform on free (0-100%)
1689 * @kprcs: Reference to &struct kbase_process that the current
1690 * kbase_context belongs to.
1691 * @kprcs_link: List link for the list of kbase context maintained
1692 * under kbase_process.
1693 * @gwt_enabled: Indicates if tracking of GPU writes is enabled, protected by
1694 * kbase_context.reg_lock.
1695 * @gwt_was_enabled: Simple sticky bit flag to know if GWT was ever enabled.
1696 * @gwt_current_list: A list of addresses for which the GPU has generated write faults,
1697 * after the last snapshot of it was sent to userspace.
1698 * @gwt_snapshot_list: Snapshot of the @gwt_current_list for sending to user space.
1699 * @priority: Indicates the context priority. Used along with @atoms_count
1700 * for context scheduling, protected by hwaccess_lock.
1701 * @atoms_count: Number of GPU atoms currently in use, per priority
1702 * @create_flags: Flags used in context creation.
1703 * @kinstr_jm: Kernel job manager instrumentation context handle
1704 * @tl_kctx_list_node: List item into the device timeline's list of
1705 * contexts, for timeline summarization.
1706 * @limited_core_mask: The mask that is applied to the affinity in case of atoms
1707 * marked with BASE_JD_REQ_LIMITED_CORE_MASK.
1708 * @platform_data: Pointer to platform specific per-context data.
1709 *
1710 * A kernel base context is an entity among which the GPU is scheduled.
1711 * Each context has its own GPU address space.
1712 * Up to one context can be created for each client that opens the device file
1713 * /dev/malixx. Context creation is deferred until a special ioctl() system call
1714 * is made on the device file.
1715 */
1716 struct kbase_context {
1717 struct file *filp;
1718 struct kbase_device *kbdev;
1719 struct list_head kctx_list_link;
1720 struct kbase_mmu_table mmu;
1721
1722 u32 id;
1723 unsigned long api_version;
1724 struct list_head event_list;
1725 struct list_head event_coalesce_list;
1726 struct mutex event_mutex;
1727 #if !MALI_USE_CSF
1728 atomic_t event_closed;
1729 #endif
1730 struct workqueue_struct *event_workq;
1731 atomic_t event_count;
1732 int event_coalesce_count;
1733
1734 atomic_t flags;
1735
1736 struct tagged_addr aliasing_sink_page;
1737
1738 spinlock_t mem_partials_lock;
1739 struct list_head mem_partials;
1740
1741 struct mutex reg_lock;
1742
1743 struct rb_root reg_rbtree_same;
1744 struct rb_root reg_rbtree_custom;
1745 struct rb_root reg_rbtree_exec;
1746 struct kbase_reg_zone reg_zone[KBASE_REG_ZONE_MAX];
1747
1748 #if MALI_USE_CSF
1749 struct kbase_csf_context csf;
1750 #else
1751 struct kbase_jd_context jctx;
1752 struct jsctx_queue jsctx_queue
1753 [KBASE_JS_ATOM_SCHED_PRIO_COUNT][BASE_JM_MAX_NR_SLOTS];
1754 struct kbase_jsctx_slot_tracking slot_tracking[BASE_JM_MAX_NR_SLOTS];
1755 atomic_t atoms_pulled_all_slots;
1756
1757 struct list_head completed_jobs;
1758 atomic_t work_count;
1759 struct timer_list soft_job_timeout;
1760
1761 int priority;
1762 s16 atoms_count[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
1763 u32 slots_pullable;
1764 u32 age_count;
1765 #endif /* MALI_USE_CSF */
1766
1767 DECLARE_BITMAP(cookies, BITS_PER_LONG);
1768 struct kbase_va_region *pending_regions[BITS_PER_LONG];
1769
1770 wait_queue_head_t event_queue;
1771 pid_t tgid;
1772 pid_t pid;
1773 atomic_t used_pages;
1774 atomic_t nonmapped_pages;
1775 atomic_t permanent_mapped_pages;
1776
1777 struct kbase_mem_pool_group mem_pools;
1778
1779 struct shrinker reclaim;
1780 struct list_head evict_list;
1781 atomic_t evict_nents;
1782
1783 struct list_head waiting_soft_jobs;
1784 spinlock_t waiting_soft_jobs_lock;
1785 #ifdef CONFIG_MALI_BIFROST_DMA_FENCE
1786 struct {
1787 struct list_head waiting_resource;
1788 struct workqueue_struct *wq;
1789 } dma_fence;
1790 #endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
1791
1792 int as_nr;
1793
1794 atomic_t refcount;
1795
1796 spinlock_t mm_update_lock;
1797 struct mm_struct __rcu *process_mm;
1798 u64 gpu_va_end;
1799 #if MALI_USE_CSF
1800 u32 running_total_tiler_heap_nr_chunks;
1801 u64 running_total_tiler_heap_memory;
1802 u64 peak_total_tiler_heap_memory;
1803 #endif
1804 bool jit_va;
1805
1806 #if IS_ENABLED(CONFIG_DEBUG_FS)
1807 char *mem_profile_data;
1808 size_t mem_profile_size;
1809 struct mutex mem_profile_lock;
1810 struct dentry *kctx_dentry;
1811
1812 unsigned int *reg_dump;
1813 atomic_t job_fault_count;
1814 struct list_head job_fault_resume_event_list;
1815
1816 #endif /* CONFIG_DEBUG_FS */
1817 struct kbase_va_region *jit_alloc[1 + BASE_JIT_ALLOC_COUNT];
1818 u8 jit_max_allocations;
1819 u8 jit_current_allocations;
1820 u8 jit_current_allocations_per_bin[256];
1821 u8 jit_version;
1822 u8 jit_group_id;
1823 #if MALI_JIT_PRESSURE_LIMIT_BASE
1824 u64 jit_phys_pages_limit;
1825 u64 jit_current_phys_pressure;
1826 u64 jit_phys_pages_to_be_allocated;
1827 #endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
1828 struct list_head jit_active_head;
1829 struct list_head jit_pool_head;
1830 struct list_head jit_destroy_head;
1831 struct mutex jit_evict_lock;
1832 struct work_struct jit_work;
1833
1834 struct list_head ext_res_meta_head;
1835
1836 u8 trim_level;
1837
1838 struct kbase_process *kprcs;
1839 struct list_head kprcs_link;
1840
1841 #ifdef CONFIG_MALI_CINSTR_GWT
1842 bool gwt_enabled;
1843 bool gwt_was_enabled;
1844 struct list_head gwt_current_list;
1845 struct list_head gwt_snapshot_list;
1846 #endif
1847
1848 base_context_create_flags create_flags;
1849
1850 #if !MALI_USE_CSF
1851 struct kbase_kinstr_jm *kinstr_jm;
1852 #endif
1853 struct list_head tl_kctx_list_node;
1854
1855 u64 limited_core_mask;
1856
1857 #if !MALI_USE_CSF
1858 void *platform_data;
1859 #endif
1860 };
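/*
 * Illustrative sketch (not a driver function): per the @as_nr documentation
 * above, the hwaccess_lock must be held while reading the field, and
 * KBASEP_AS_NR_INVALID means the context is not currently scheduled in. The
 * snippet assumes kbdev->hwaccess_lock is the lock referred to above.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	if (kctx->as_nr != KBASEP_AS_NR_INVALID)
 *		; // the context currently owns GPU address space kctx->as_nr
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */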
1861
1862 #ifdef CONFIG_MALI_CINSTR_GWT
1863 /**
1864 * struct kbasep_gwt_list_element - Structure used to collect GPU
1865 * write faults.
1866 * @link: List head for adding write faults.
1867 * @region: Details of the region where we have the
1868 * faulting page address.
1869 * @page_addr: Page address where GPU write fault occurred.
1870 * @num_pages: The number of pages modified.
1871 *
1872 * Using this structure all GPU write faults are stored in a list.
1873 */
1874 struct kbasep_gwt_list_element {
1875 struct list_head link;
1876 struct kbase_va_region *region;
1877 u64 page_addr;
1878 u64 num_pages;
1879 };
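/*
 * Illustrative sketch (assumed usage, not a driver function): when a GPU write
 * fault is to be tracked, an element is filled in and queued on
 * kbase_context::gwt_current_list. @region and @fault_page_addr below are
 * hypothetical placeholders.
 *
 *	struct kbasep_gwt_list_element *elem;
 *
 *	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
 *	if (elem) {
 *		elem->region = region;
 *		elem->page_addr = fault_page_addr;
 *		elem->num_pages = 1;
 *		list_add_tail(&elem->link, &kctx->gwt_current_list);
 *	}
 */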
1880
1881 #endif
1882
1883 /**
1884 * struct kbase_ctx_ext_res_meta - Structure which binds an external resource
1885 * to a @kbase_context.
1886 * @ext_res_node: List head for adding the metadata to a
1887 * @kbase_context.
1888 * @alloc: The physical memory allocation structure
1889 * which is mapped.
1890 * @gpu_addr: The GPU virtual address the resource is
1891 * mapped to.
1892 * @ref: Reference count.
1893 *
1894 * External resources can be mapped into multiple contexts as well as the same
1895 * context multiple times.
1896 * As kbase_va_region itself isn't refcounted we can't attach our extra
1897 * information to it as it could be removed under our feet leaving external
1898 * resources pinned.
1899 * This metadata structure binds a single external resource to a single
1900 * context, ensuring that per context mapping is tracked separately so it can
1901 * be overridden when needed, and so that abuse by the application (freeing the
1902 * resource multiple times) doesn't affect the refcount of the physical allocation.
1903 */
1904 struct kbase_ctx_ext_res_meta {
1905 struct list_head ext_res_node;
1906 struct kbase_mem_phy_alloc *alloc;
1907 u64 gpu_addr;
1908 u32 ref;
1909 };
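/*
 * Illustrative sketch (hypothetical lookup, not a driver function): re-using a
 * previously created metadata entry keeps the per-context @ref count separate
 * from the physical allocation's own refcount, as described above.
 *
 *	struct kbase_ctx_ext_res_meta *meta;
 *
 *	list_for_each_entry(meta, &kctx->ext_res_meta_head, ext_res_node) {
 *		if (meta->gpu_addr == gpu_addr) {
 *			meta->ref++;
 *			return meta;
 *		}
 *	}
 *	return NULL; // caller would then allocate and map a new entry
 */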
1910
1911 enum kbase_reg_access_type {
1912 REG_READ,
1913 REG_WRITE
1914 };
1915
1916 enum kbase_share_attr_bits {
1917 /* (1ULL << 8) bit is reserved */
1918 SHARE_BOTH_BITS = (2ULL << 8), /* inner and outer shareable coherency */
1919 SHARE_INNER_BITS = (3ULL << 8) /* inner shareable coherency */
1920 };
1921
1922 /**
1923 * enum kbase_timeout_selector - Selects which timeout is to be scaled
1924 * using the current GPU frequency.
1925 * @CSF_FIRMWARE_TIMEOUT: Response timeout from CSF firmware.
1926 */
1927 enum kbase_timeout_selector { CSF_FIRMWARE_TIMEOUT };
1928
1929 /**
1930 * kbase_device_is_cpu_coherent - Returns whether the device is CPU coherent.
1931 * @kbdev: kbase device
1932 *
1933 * Return: true if device accesses are coherent, false if not.
1934 */
1935 static inline bool kbase_device_is_cpu_coherent(struct kbase_device *kbdev)
1936 {
1937 if ((kbdev->system_coherency == COHERENCY_ACE_LITE) ||
1938 (kbdev->system_coherency == COHERENCY_ACE))
1939 return true;
1940
1941 return false;
1942 }
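/*
 * Illustrative usage (sketch): callers can branch on coherency, e.g. to skip
 * explicit CPU cache maintenance when the CPU/GPU interconnect is coherent.
 *
 *	if (!kbase_device_is_cpu_coherent(kbdev))
 *		; // perform manual cache sync for the shared buffer here
 */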
1943
1944 /* Conversion helpers for setting up high resolution timers */
1945 #define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime(((u64)(x))*1000000U))
1946 #define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
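/*
 * Illustrative usage (sketch): these helpers produce a ktime_t suitable for
 * the hrtimer API, e.g. arming a timer RESET_TIMEOUT milliseconds from now:
 *
 *	hrtimer_start(&timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
 *		      HRTIMER_MODE_REL);
 */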
1947
1948 /* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
1949 #define KBASE_CLEAN_CACHE_MAX_LOOPS 100000
1950 /* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */
1951 #define KBASE_AS_INACTIVE_MAX_LOOPS 100000000
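/*
 * Illustrative sketch (not a driver function): these bounds keep register
 * polling loops from hanging the kernel if the GPU never responds. The
 * predicate below is a hypothetical placeholder.
 *
 *	int loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
 *
 *	while (--loops && gpu_cache_clean_is_busy())
 *		;
 *	if (!loops)
 *		; // give up and report that the cache flush never completed
 */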
1952
1953 #endif /* _KBASE_DEFS_H_ */
1954