/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched/numa_balancing.h>
#include <linux/sched/clock.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

#ifdef CONFIG_SCHED_RT_CAS
#include "eas_sched.h"
#endif
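
/*
 * The TRACE_EVENT()s below are exported through tracefs under
 * events/sched/.  Illustrative usage from userspace (the mount point is
 * assumed to be /sys/kernel/tracing and may differ on some systems):
 *
 *	echo 1 > /sys/kernel/tracing/events/sched/sched_switch/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */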

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->success	= 1; /* rudiment, kill when possible */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0) before
	 * using it for the left shift operation to get the correct task->state
	 * mapping.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */
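
/*
 * Illustrative example of the mapping above, using the TASK_* state bits
 * from <linux/sched.h>: for a task sleeping in TASK_UNINTERRUPTIBLE,
 * task_state_index() returns 2, so __trace_sched_switch_state() reports
 * 1 << (2 - 1) == TASK_UNINTERRUPTIBLE, which sched_switch below prints as
 * prev_state=D.  A preempted task reports TASK_REPORT_MAX and is printed
 * as "R+".
 */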

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
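
/*
 * A representative (illustrative, not captured) trace line produced by the
 * format string above:
 *
 *	sched_switch: prev_comm=bash prev_pid=1234 prev_prio=120 prev_state=S
 *	              ==> next_comm=swapper/1 next_pid=0 next_prio=120
 */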

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);
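
/*
 * The __perf_count(delay) annotation above makes the measured delay the
 * sample count when these events are consumed through perf.  Illustrative
 * usage (assumes CONFIG_SCHEDSTATS and schedstats enabled at run time,
 * e.g. kernel.sched_schedstats=1):
 *
 *	perf record -e sched:sched_stat_sleep -a -- sleep 1
 */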

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);
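
/*
 * Illustrative example of the newprio computation above: a SCHED_NORMAL
 * task with normal_prio=120 boosted by an rt pi_task with prio=98 records
 * newprio=min(120, 98)=98; once the boost is dropped (pi_task == NULL) a
 * later event records newprio=normal_prio=120.
 */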

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);

DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid	= dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid	= dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

#ifdef CONFIG_SCHED_CORE_CTRL
TRACE_EVENT(core_ctl_eval_need,

	TP_PROTO(unsigned int cpu, unsigned int old_need,
		 unsigned int new_need, unsigned int updated),
	TP_ARGS(cpu, old_need, new_need, updated),
	TP_STRUCT__entry(
		__field(u32, cpu)
		__field(u32, old_need)
		__field(u32, new_need)
		__field(u32, updated)
	),
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->old_need = old_need;
		__entry->new_need = new_need;
		__entry->updated = updated;
	),
	TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu,
		  __entry->old_need, __entry->new_need, __entry->updated)
);

TRACE_EVENT(core_ctl_set_busy,

	TP_PROTO(unsigned int cpu, unsigned int busy,
		 unsigned int old_is_busy, unsigned int is_busy, int high_irqload),
	TP_ARGS(cpu, busy, old_is_busy, is_busy, high_irqload),
	TP_STRUCT__entry(
		__field(u32, cpu)
		__field(u32, busy)
		__field(u32, old_is_busy)
		__field(u32, is_busy)
		__field(bool, high_irqload)
	),
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->busy = busy;
		__entry->old_is_busy = old_is_busy;
		__entry->is_busy = is_busy;
		__entry->high_irqload = high_irqload;
	),
	TP_printk("cpu=%u, busy=%u, old_is_busy=%u, new_is_busy=%u high_irqload=%d",
		  __entry->cpu, __entry->busy, __entry->old_is_busy,
		  __entry->is_busy, __entry->high_irqload)
);

TRACE_EVENT(core_ctl_set_boost,

	TP_PROTO(u32 refcount, s32 ret),
	TP_ARGS(refcount, ret),
	TP_STRUCT__entry(
		__field(u32, refcount)
		__field(s32, ret)
	),
	TP_fast_assign(
		__entry->refcount = refcount;
		__entry->ret = ret;
	),
	TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret)
);

TRACE_EVENT(core_ctl_update_nr_need,

	TP_PROTO(int cpu, int nr_need, int prev_misfit_need,
		int nrrun, int max_nr, int nr_prev_assist),

	TP_ARGS(cpu, nr_need, prev_misfit_need, nrrun, max_nr, nr_prev_assist),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(int, nr_need)
		__field(int, prev_misfit_need)
		__field(int, nrrun)
		__field(int, max_nr)
		__field(int, nr_prev_assist)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->nr_need = nr_need;
		__entry->prev_misfit_need = prev_misfit_need;
		__entry->nrrun = nrrun;
		__entry->max_nr = max_nr;
		__entry->nr_prev_assist = nr_prev_assist;
	),

	TP_printk("cpu=%d nr_need=%d prev_misfit_need=%d nrrun=%d max_nr=%d nr_prev_assist=%d",
		__entry->cpu, __entry->nr_need, __entry->prev_misfit_need,
		__entry->nrrun, __entry->max_nr, __entry->nr_prev_assist)
);
#endif /* CONFIG_SCHED_CORE_CTRL */

#ifdef CONFIG_SCHED_RUNNING_AVG
/*
 * Tracepoint for sched_get_nr_running_avg
 */
TRACE_EVENT(sched_get_nr_running_avg,

	TP_PROTO(int cpu, int nr, int nr_misfit, int nr_max),

	TP_ARGS(cpu, nr, nr_misfit, nr_max),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(int, nr)
		__field(int, nr_misfit)
		__field(int, nr_max)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->nr = nr;
		__entry->nr_misfit = nr_misfit;
		__entry->nr_max = nr_max;
	),

	TP_printk("cpu=%d nr=%d nr_misfit=%d nr_max=%d",
		__entry->cpu, __entry->nr, __entry->nr_misfit, __entry->nr_max)
);
#endif /* CONFIG_SCHED_RUNNING_AVG */

#ifdef CONFIG_CPU_ISOLATION_OPT
/*
 * sched_isolate - called when cores are isolated/unisolated
 *
 * @requested_cpu: CPU requested to be isolated/unisolated
 * @isolated_cpus: mask of cores currently isolated
 * @start_time: sched_clock() timestamp of when the isolate/unisolate
 *              operation started; the elapsed time is reported in us
 * @isolate: 1 if isolating, 0 if unisolating
 *
 */
TRACE_EVENT(sched_isolate,

	TP_PROTO(unsigned int requested_cpu, unsigned int isolated_cpus,
		u64 start_time, unsigned char isolate),

	TP_ARGS(requested_cpu, isolated_cpus, start_time, isolate),

	TP_STRUCT__entry(
		__field(u32, requested_cpu)
		__field(u32, isolated_cpus)
		__field(u32, time)
		__field(unsigned char, isolate)
	),

	TP_fast_assign(
		__entry->requested_cpu = requested_cpu;
		__entry->isolated_cpus = isolated_cpus;
		__entry->time = div64_u64(sched_clock() - start_time, 1000);
		__entry->isolate = isolate;
	),

	TP_printk("iso cpu=%u cpus=0x%x time=%u us isolated=%d",
		__entry->requested_cpu, __entry->isolated_cpus,
		__entry->time, __entry->isolate)
);
#endif /* CONFIG_CPU_ISOLATION_OPT */

/*
 * The following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * Postfixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));
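
/*
 * A minimal sketch of hooking one of the bare tracepoints above from a
 * module (illustrative only; the probe function and its module are
 * hypothetical, while the register/unregister helpers are the ones
 * generated by DECLARE_TRACE()):
 *
 *	static void my_overutilized_probe(void *data, struct root_domain *rd,
 *					  bool overutilized)
 *	{
 *		trace_printk("overutilized=%d\n", overutilized);
 *	}
 *
 *	// in module init/exit:
 *	register_trace_sched_overutilized_tp(my_overutilized_probe, NULL);
 *	unregister_trace_sched_overutilized_tp(my_overutilized_probe, NULL);
 */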

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>