/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2011-2016, 2018-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM mali

#if !defined(_TRACE_MALI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MALI_H

#include <linux/tracepoint.h>

#if defined(CONFIG_MALI_BIFROST_GATOR_SUPPORT)
#define MALI_JOB_SLOTS_EVENT_CHANGED

/**
 * mali_job_slots_event - Reports a change of job slot status.
 * @gpu_id:   Kbase device id
 * @event_id: ORed-together bitfields representing the type of event,
 *            made with the GATOR_MAKE_EVENT() macro
 * @tgid:     Thread group ID
 * @pid:      Process ID
 * @job_id:   Job identifier
 */
TRACE_EVENT(mali_job_slots_event,
	TP_PROTO(u32 gpu_id, u32 event_id, u32 tgid, u32 pid,
		u8 job_id),
	TP_ARGS(gpu_id, event_id, tgid, pid, job_id),
	TP_STRUCT__entry(
		__field(u32, gpu_id)
		__field(u32, event_id)
		__field(u32, tgid)
		__field(u32, pid)
		__field(u8,  job_id)
	),
	TP_fast_assign(
		__entry->gpu_id   = gpu_id;
		__entry->event_id = event_id;
		__entry->tgid     = tgid;
		__entry->pid      = pid;
		__entry->job_id   = job_id;
	),
	TP_printk("gpu=%u event=%u tgid=%u pid=%u job_id=%u",
		__entry->gpu_id, __entry->event_id,
		__entry->tgid, __entry->pid, __entry->job_id)
);
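
/*
 * Illustrative call site (a sketch, not copied from the driver): the
 * TRACE_EVENT() above generates a trace_mali_job_slots_event() helper,
 * which the gator instrumentation invokes along the lines of:
 *
 *   trace_mali_job_slots_event(kbdev->id,
 *                              GATOR_MAKE_EVENT(event_type, slot),
 *                              tgid, pid, job_id);
 *
 * The GATOR_MAKE_EVENT() argument names here are assumptions made for the
 * sake of the example.
 */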

/**
 * mali_pm_status - Reports a change of power management status.
 * @gpu_id:   Kbase device id
 * @event_id: Core type (shader, tiler, L2 cache)
 * @value:    64-bit bitmask reporting the power status of the cores
 *            (1 = ON, 0 = OFF)
 */
TRACE_EVENT(mali_pm_status,
	TP_PROTO(u32 gpu_id, u32 event_id, u64 value),
	TP_ARGS(gpu_id, event_id, value),
	TP_STRUCT__entry(
		__field(u32, gpu_id)
		__field(u32, event_id)
		__field(u64, value)
	),
	TP_fast_assign(
		__entry->gpu_id   = gpu_id;
		__entry->event_id = event_id;
		__entry->value    = value;
	),
	TP_printk("gpu=%u event %u = %llu",
		__entry->gpu_id, __entry->event_id, __entry->value)
);

/**
 * mali_page_fault_insert_pages - Reports an MMU page fault
 * resulting in new pages being mapped.
 * @gpu_id:   Kbase device id
 * @event_id: MMU address space number
 * @value:    Number of newly allocated pages
 */
TRACE_EVENT(mali_page_fault_insert_pages,
	TP_PROTO(u32 gpu_id, s32 event_id, u64 value),
	TP_ARGS(gpu_id, event_id, value),
	TP_STRUCT__entry(
		__field(u32, gpu_id)
		__field(s32, event_id)
		__field(u64, value)
	),
	TP_fast_assign(
		__entry->gpu_id   = gpu_id;
		__entry->event_id = event_id;
		__entry->value    = value;
	),
	TP_printk("gpu=%u event %d = %llu",
		__entry->gpu_id, __entry->event_id, __entry->value)
);

/**
 * mali_total_alloc_pages_change - Reports that the total number of
 * allocated pages has changed.
 * @gpu_id:   Kbase device id
 * @event_id: Total number of pages allocated
 */
TRACE_EVENT(mali_total_alloc_pages_change,
	TP_PROTO(u32 gpu_id, s64 event_id),
	TP_ARGS(gpu_id, event_id),
	TP_STRUCT__entry(
		__field(u32, gpu_id)
		__field(s64, event_id)
	),
	TP_fast_assign(
		__entry->gpu_id   = gpu_id;
		__entry->event_id = event_id;
	),
	TP_printk("gpu=%u event=%lld", __entry->gpu_id, __entry->event_id)
);
#endif /* CONFIG_MALI_BIFROST_GATOR_SUPPORT */

/*
 * MMU subsystem tracepoints
 */

/* Fault status and exception code helpers
 *
 * These must be macros so that they can also be used by user-side tracepoint
 * tools.
 *
 * Bits 0:1 are masked off the exception code and used as the level.
 *
 * Tracepoint files get included more than once - protect against multiple
 * definition.
 */
#ifndef __TRACE_MALI_MMU_HELPERS
#define __TRACE_MALI_MMU_HELPERS
/* Complex macros should normally be enclosed in parentheses.
 *
 * Here the parentheses must be left out so that the arrays of symbolic
 * look-ups can be passed to __print_symbolic() whilst still being usable
 * outside trace code.
 */
#define _ENSURE_PARENTHESIS(args...) args
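
/*
 * For example, KBASE_MMU_FAULT_ACCESS_SYMBOLIC_STRINGS below expands to the
 * bare brace list
 *   {AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC, "ATOMIC" }, {...}, ...
 * which is exactly the form __print_symbolic() expects for its table
 * argument; wrapping the macro body in an extra pair of parentheses would
 * break that expansion.
 */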

#define KBASE_MMU_FAULT_CODE_EXCEPTION_NAME_PRINT(code) \
		(!KBASE_MMU_FAULT_CODE_VALID(code) ? "UNKNOWN,level=" : \
				__print_symbolic(((code) & ~3u), \
				KBASE_MMU_FAULT_CODE_SYMBOLIC_STRINGS))
#define KBASE_MMU_FAULT_CODE_LEVEL(code) \
	(((((code) & ~0x3u) == 0xC4) ? 4 : 0) + ((code) & 0x3u))
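
/*
 * Worked example (values follow from the macro above): code 0xC2 is
 * TRANSLATION_FAULT_ at level 2 (0xC0 group, low bits 2), whereas 0xC4
 * starts the level-4 group, so KBASE_MMU_FAULT_CODE_LEVEL(0xC4) is 4. The
 * TP_printk() users below paste this level after the exception name.
 */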

#define KBASE_MMU_FAULT_STATUS_CODE(status) \
		((status) & 0xFFu)
#define KBASE_MMU_FAULT_STATUS_DECODED_STRING(status) \
		(((status) & (1u << 10)) ? "DECODER_FAULT" : "SLAVE_FAULT")

#define KBASE_MMU_FAULT_STATUS_EXCEPTION_NAME_PRINT(status) \
		KBASE_MMU_FAULT_CODE_EXCEPTION_NAME_PRINT( \
				KBASE_MMU_FAULT_STATUS_CODE(status))

#define KBASE_MMU_FAULT_STATUS_LEVEL(status) \
		KBASE_MMU_FAULT_CODE_LEVEL(KBASE_MMU_FAULT_STATUS_CODE(status))

#define KBASE_MMU_FAULT_STATUS_ACCESS(status) \
		((status) & AS_FAULTSTATUS_ACCESS_TYPE_MASK)
#define KBASE_MMU_FAULT_ACCESS_SYMBOLIC_STRINGS _ENSURE_PARENTHESIS(\
	{AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC, "ATOMIC" }, \
	{AS_FAULTSTATUS_ACCESS_TYPE_EX,     "EXECUTE"}, \
	{AS_FAULTSTATUS_ACCESS_TYPE_READ,   "READ"   }, \
	{AS_FAULTSTATUS_ACCESS_TYPE_WRITE,  "WRITE"  })
#define KBASE_MMU_FAULT_STATUS_ACCESS_PRINT(status) \
		__print_symbolic(KBASE_MMU_FAULT_STATUS_ACCESS(status), \
				KBASE_MMU_FAULT_ACCESS_SYMBOLIC_STRINGS)
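
/*
 * Worked example of how the TP_printk() in mali_mmu_page_fault_grow below
 * decomposes a raw fault status (value chosen for illustration): 0x000102C3
 * reports source_id=0x1 (bits 16 and up), access type field 0x2 taken from
 * bits 9:8 (READ, assuming the usual AS_FAULTSTATUS_ACCESS_TYPE_* encoding),
 * SLAVE_FAULT (bit 10 clear) and exception code 0xC3, printed as
 * TRANSLATION_FAULT_ at level 3.
 */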

#if MALI_USE_CSF
#define KBASE_MMU_FAULT_CODE_VALID(code) \
		((code >= 0xC0 && code <= 0xEB) && \
		(!(code >= 0xC5 && code <= 0xC7)) && \
		(!(code >= 0xCC && code <= 0xD8)) && \
		(!(code >= 0xDC && code <= 0xDF)) && \
		(!(code >= 0xE1 && code <= 0xE3)))
#define KBASE_MMU_FAULT_CODE_SYMBOLIC_STRINGS _ENSURE_PARENTHESIS(\
		{0xC0, "TRANSLATION_FAULT_" }, \
		{0xC4, "TRANSLATION_FAULT_" }, \
		{0xC8, "PERMISSION_FAULT_" }, \
		{0xD0, "TRANSTAB_BUS_FAULT_" }, \
		{0xD8, "ACCESS_FLAG_" }, \
		{0xE0, "ADDRESS_SIZE_FAULT_IN" }, \
		{0xE4, "ADDRESS_SIZE_FAULT_OUT" }, \
		{0xE8, "MEMORY_ATTRIBUTES_FAULT_" })
#else /* MALI_USE_CSF */
#define KBASE_MMU_FAULT_CODE_VALID(code) \
	((code >= 0xC0 && code <= 0xEF) && \
		(!(code >= 0xC5 && code <= 0xC6)) && \
		(!(code >= 0xCC && code <= 0xCF)) && \
		(!(code >= 0xD4 && code <= 0xD7)) && \
		(!(code >= 0xDC && code <= 0xDF)))
#define KBASE_MMU_FAULT_CODE_SYMBOLIC_STRINGS _ENSURE_PARENTHESIS(\
		{0xC0, "TRANSLATION_FAULT_" }, \
		{0xC4, "TRANSLATION_FAULT(_7==_IDENTITY)_" }, \
		{0xC8, "PERMISSION_FAULT_" }, \
		{0xD0, "TRANSTAB_BUS_FAULT_" }, \
		{0xD8, "ACCESS_FLAG_" }, \
		{0xE0, "ADDRESS_SIZE_FAULT_IN" }, \
		{0xE4, "ADDRESS_SIZE_FAULT_OUT" }, \
		{0xE8, "MEMORY_ATTRIBUTES_FAULT_" }, \
		{0xEC, "MEMORY_ATTRIBUTES_NONCACHEABLE_" })
#endif /* MALI_USE_CSF */
#endif /* __TRACE_MALI_MMU_HELPERS */
/* trace_mali_mmu_page_fault_grow
 *
 * Tracepoint about a successful growth of a region due to a GPU page fault
 */
TRACE_EVENT(mali_mmu_page_fault_grow,
	TP_PROTO(struct kbase_va_region *reg, struct kbase_fault *fault,
		size_t new_pages),
	TP_ARGS(reg, fault, new_pages),
	TP_STRUCT__entry(
		__field(u64, start_addr)
		__field(u64, fault_addr)
		__field(u64, fault_extra_addr)
		__field(size_t, new_pages)
		__field(u32, status)
	),
	TP_fast_assign(
		__entry->start_addr       = ((u64)reg->start_pfn) << PAGE_SHIFT;
		__entry->fault_addr       = fault->addr;
		__entry->fault_extra_addr = fault->extra_addr;
		__entry->new_pages        = new_pages;
		__entry->status           = fault->status;
	),
	TP_printk("start=0x%llx fault_addr=0x%llx fault_extra_addr=0x%llx new_pages=%zu raw_fault_status=0x%x decoded_faultstatus=%s exception_type=0x%x,%s%u access_type=0x%x,%s source_id=0x%x",
		__entry->start_addr, __entry->fault_addr,
		__entry->fault_extra_addr, __entry->new_pages,
		__entry->status,
		KBASE_MMU_FAULT_STATUS_DECODED_STRING(__entry->status),
		KBASE_MMU_FAULT_STATUS_CODE(__entry->status),
		KBASE_MMU_FAULT_STATUS_EXCEPTION_NAME_PRINT(__entry->status),
		KBASE_MMU_FAULT_STATUS_LEVEL(__entry->status),
		KBASE_MMU_FAULT_STATUS_ACCESS(__entry->status) >> 8,
		KBASE_MMU_FAULT_STATUS_ACCESS_PRINT(__entry->status),
		__entry->status >> 16)
);

/*
 * Just-in-time memory allocation subsystem tracepoints
 */

/* Just-in-time memory allocation soft-job template. Override the TP_printk
 * further if need be. jit_id can be 0.
 */
DECLARE_EVENT_CLASS(mali_jit_softjob_template,
	TP_PROTO(struct kbase_va_region *reg, u8 jit_id),
	TP_ARGS(reg, jit_id),
	TP_STRUCT__entry(
		__field(u64, start_addr)
		__field(size_t, nr_pages)
		__field(size_t, backed_pages)
		__field(u8, jit_id)
	),
	TP_fast_assign(
		__entry->start_addr   = ((u64)reg->start_pfn) << PAGE_SHIFT;
		__entry->nr_pages     = reg->nr_pages;
		__entry->backed_pages = kbase_reg_current_backed_size(reg);
		__entry->jit_id       = jit_id;
	),
	TP_printk("jit_id=%u start=0x%llx va_pages=0x%zx backed_size=0x%zx",
		__entry->jit_id, __entry->start_addr, __entry->nr_pages,
		__entry->backed_pages)
);

/* trace_mali_jit_alloc()
 *
 * Tracepoint about a just-in-time memory allocation soft-job successfully
 * allocating memory
 */
DEFINE_EVENT(mali_jit_softjob_template, mali_jit_alloc,
	TP_PROTO(struct kbase_va_region *reg, u8 jit_id),
	TP_ARGS(reg, jit_id));

/* trace_mali_jit_free()
 *
 * Tracepoint about memory that was allocated just-in-time being freed
 * (which may happen either on a free soft-job, or during the rollback error
 * paths of an allocation soft-job, etc.)
 *
 * The free path doesn't immediately have the just-in-time memory allocation
 * ID available, so it's currently suppressed from the output - callers set
 * jit_id to 0.
 */
DEFINE_EVENT_PRINT(mali_jit_softjob_template, mali_jit_free,
	TP_PROTO(struct kbase_va_region *reg, u8 jit_id),
	TP_ARGS(reg, jit_id),
	TP_printk("start=0x%llx va_pages=0x%zx backed_size=0x%zx",
		__entry->start_addr, __entry->nr_pages, __entry->backed_pages));

#if !MALI_USE_CSF
#if MALI_JIT_PRESSURE_LIMIT_BASE
/* trace_mali_jit_report
 *
 * Tracepoint about the GPU data structure read to form a just-in-time memory
 * allocation report, and its calculated physical page usage
 */
TRACE_EVENT(mali_jit_report,
	TP_PROTO(struct kbase_jd_atom *katom, struct kbase_va_region *reg,
		unsigned int id_idx, u64 read_val, u64 used_pages),
	TP_ARGS(katom, reg, id_idx, read_val, used_pages),
	TP_STRUCT__entry(
		__field(u64, start_addr)
		__field(u64, read_val)
		__field(u64, used_pages)
		__field(unsigned long, flags)
		__field(u8, id_idx)
		__field(u8, jit_id)
	),
	TP_fast_assign(
		__entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT;
		__entry->read_val   = read_val;
		__entry->used_pages = used_pages;
		__entry->flags      = reg->flags;
		__entry->id_idx     = id_idx;
		__entry->jit_id     = katom->jit_ids[id_idx];
	),
	TP_printk("start=0x%llx jit_ids[%u]=%u read_type='%s' read_val=0x%llx used_pages=%llu",
		__entry->start_addr, __entry->id_idx, __entry->jit_id,
		__print_symbolic(__entry->flags,
			{ 0, "address"},
			{ KBASE_REG_TILER_ALIGN_TOP, "address with align" },
			{ KBASE_REG_HEAP_INFO_IS_SIZE, "size" },
			{ KBASE_REG_HEAP_INFO_IS_SIZE |
				KBASE_REG_TILER_ALIGN_TOP,
				"size with align (invalid)" }
		),
		__entry->read_val, __entry->used_pages)
);
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
#endif /* !MALI_USE_CSF */

TRACE_DEFINE_ENUM(KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
#if MALI_JIT_PRESSURE_LIMIT_BASE
/* trace_mali_jit_report_pressure
 *
 * Tracepoint about change in physical memory pressure, due to the information
 * about a region changing. Examples include:
 * - a report on a region that was allocated just-in-time
 * - just-in-time allocation of a region
 * - free of a region that was allocated just-in-time
 */
TRACE_EVENT(mali_jit_report_pressure,
	TP_PROTO(struct kbase_va_region *reg, u64 new_used_pages,
		u64 new_pressure, unsigned int flags),
	TP_ARGS(reg, new_used_pages, new_pressure, flags),
	TP_STRUCT__entry(
		__field(u64, start_addr)
		__field(u64, used_pages)
		__field(u64, new_used_pages)
		__field(u64, new_pressure)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->start_addr     = ((u64)reg->start_pfn) << PAGE_SHIFT;
		__entry->used_pages     = reg->used_pages;
		__entry->new_used_pages = new_used_pages;
		__entry->new_pressure   = new_pressure;
		__entry->flags          = flags;
	),
	TP_printk("start=0x%llx old_used_pages=%llu new_used_pages=%llu new_pressure=%llu report_flags=%s",
		__entry->start_addr, __entry->used_pages,
		__entry->new_used_pages, __entry->new_pressure,
		__print_flags(__entry->flags, "|",
			{ KBASE_JIT_REPORT_ON_ALLOC_OR_FREE,
				"HAPPENED_ON_ALLOC_OR_FREE" }))
);
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */

#ifndef __TRACE_SYSGRAPH_ENUM
#define __TRACE_SYSGRAPH_ENUM
/* Enum of sysgraph message IDs */
enum sysgraph_msg {
	SGR_ARRIVE,
	SGR_SUBMIT,
	SGR_COMPLETE,
	SGR_POST,
	SGR_ACTIVE,
	SGR_INACTIVE
};
#endif /* __TRACE_SYSGRAPH_ENUM */

/* A template for SYSGRAPH events
 *
 * Most of the sysgraph events contain only one input argument, the atom_id,
 * so they use this common template.
 */
TRACE_EVENT(sysgraph,
	TP_PROTO(enum sysgraph_msg message, unsigned int proc_id,
		unsigned int atom_id),
	TP_ARGS(message, proc_id, atom_id),
	TP_STRUCT__entry(
		__field(unsigned int, proc_id)
		__field(enum sysgraph_msg, message)
		__field(unsigned int, atom_id)
	),
	TP_fast_assign(
		__entry->proc_id    = proc_id;
		__entry->message    = message;
		__entry->atom_id    = atom_id;
	),
	TP_printk("msg=%u proc_id=%u, param1=%u", __entry->message,
		 __entry->proc_id,  __entry->atom_id)
);

/* A template for SYSGRAPH GPU events
 *
 * Sysgraph events that record start/complete events on the GPU also record
 * a job slot (js) value in addition to the atom id.
 */
TRACE_EVENT(sysgraph_gpu,
	TP_PROTO(enum sysgraph_msg message, unsigned int proc_id,
		unsigned int atom_id, unsigned int js),
	TP_ARGS(message, proc_id, atom_id, js),
	TP_STRUCT__entry(
		__field(unsigned int, proc_id)
		__field(enum sysgraph_msg, message)
		__field(unsigned int, atom_id)
		__field(unsigned int, js)
	),
	TP_fast_assign(
		__entry->proc_id    = proc_id;
		__entry->message    = message;
		__entry->atom_id    = atom_id;
		__entry->js         = js;
	),
	TP_printk("msg=%u proc_id=%u, param1=%u, param2=%u",
		  __entry->message,  __entry->proc_id,
		  __entry->atom_id, __entry->js)
);
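
/*
 * Illustrative call sites (a sketch, not copied from the driver): the two
 * TRACE_EVENT() definitions above generate trace_sysgraph() and
 * trace_sysgraph_gpu() helpers, used roughly as:
 *
 *   trace_sysgraph(SGR_SUBMIT, proc_id, atom_id);
 *   trace_sysgraph_gpu(SGR_COMPLETE, proc_id, atom_id, js);
 *
 * where proc_id is whatever process/context identifier the caller chooses
 * to record.
 */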

/* Tracepoint files get included more than once - protect against multiple
 * definition
 */
#undef KBASE_JIT_REPORT_GPU_MEM_SIZE

/* Size in bytes of the memory surrounding the location used for a just-in-time
 * memory allocation report
 */
#define KBASE_JIT_REPORT_GPU_MEM_SIZE (4 * sizeof(u64))

/* trace_mali_jit_report_gpu_mem
 *
 * Tracepoint about the GPU memory nearby the location used for a just-in-time
 * memory allocation report
 */
TRACE_EVENT(mali_jit_report_gpu_mem,
	TP_PROTO(u64 base_addr, u64 reg_addr, u64 *gpu_mem, unsigned int flags),
	TP_ARGS(base_addr, reg_addr, gpu_mem, flags),
	TP_STRUCT__entry(
		__field(u64, base_addr)
		__field(u64, reg_addr)
		__array(u64, mem_values,
			KBASE_JIT_REPORT_GPU_MEM_SIZE / sizeof(u64))
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->base_addr = base_addr;
		__entry->reg_addr  = reg_addr;
		memcpy(__entry->mem_values, gpu_mem,
				sizeof(__entry->mem_values));
		__entry->flags     = flags;
	),
	TP_printk("start=0x%llx read GPU memory base=0x%llx values=%s report_flags=%s",
		__entry->reg_addr, __entry->base_addr,
		__print_array(__entry->mem_values,
				ARRAY_SIZE(__entry->mem_values), sizeof(u64)),
		__print_flags(__entry->flags, "|",
			{ KBASE_JIT_REPORT_ON_ALLOC_OR_FREE,
				"HAPPENED_ON_ALLOC_OR_FREE" }))
);

/* trace_mali_jit_trim_from_region
 *
 * Tracepoint about trimming physical pages from a region
 */
TRACE_EVENT(mali_jit_trim_from_region,
	TP_PROTO(struct kbase_va_region *reg, size_t freed_pages,
		size_t old_pages, size_t available_pages, size_t new_pages),
	TP_ARGS(reg, freed_pages, old_pages, available_pages, new_pages),
	TP_STRUCT__entry(
		__field(u64, start_addr)
		__field(size_t, freed_pages)
		__field(size_t, old_pages)
		__field(size_t, available_pages)
		__field(size_t, new_pages)
	),
	TP_fast_assign(
		__entry->start_addr      = ((u64)reg->start_pfn) << PAGE_SHIFT;
		__entry->freed_pages     = freed_pages;
		__entry->old_pages       = old_pages;
		__entry->available_pages = available_pages;
		__entry->new_pages       = new_pages;
	),
	TP_printk("start=0x%llx freed_pages=%zu old_pages=%zu available_pages=%zu new_pages=%zu",
		__entry->start_addr, __entry->freed_pages, __entry->old_pages,
		__entry->available_pages, __entry->new_pages)
);

/* trace_mali_jit_trim
 *
 * Tracepoint about total trimmed physical pages
 */
TRACE_EVENT(mali_jit_trim,
	TP_PROTO(size_t freed_pages),
	TP_ARGS(freed_pages),
	TP_STRUCT__entry(
		__field(size_t, freed_pages)
	),
	TP_fast_assign(
		__entry->freed_pages  = freed_pages;
	),
	TP_printk("freed_pages=%zu", __entry->freed_pages)
);

#include "debug/mali_kbase_debug_linux_ktrace.h"

#endif /* _TRACE_MALI_H */

#undef TRACE_INCLUDE_PATH
/* lwn.net/Articles/383362 suggests this should remain as '.', and instead
 * extend CFLAGS
 */
#define TRACE_INCLUDE_PATH .
#undef  TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mali_linux_trace

/* This part must be outside protection */
#include <trace/define_trace.h>
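
/*
 * Usage note (standard Linux tracepoint pattern): exactly one .c file in the
 * driver should define CREATE_TRACE_POINTS before including this header so
 * that the tracepoint bodies are emitted once, e.g.:
 *
 *   #define CREATE_TRACE_POINTS
 *   #include "mali_linux_trace.h"
 *
 * All other users include the header normally and call the generated
 * trace_*() helpers.
 */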