/kernel/linux/linux-5.10/kernel/trace/

fgraph.c
     68  if (!current->ret_stack)                                         in ftrace_push_return_trace()
     72  * We must make sure the ret_stack is tested before we read       in ftrace_push_return_trace()
     87  current->ret_stack[index].ret = ret;                             in ftrace_push_return_trace()
     88  current->ret_stack[index].func = func;                           in ftrace_push_return_trace()
     89  current->ret_stack[index].calltime = calltime;                   in ftrace_push_return_trace()
     91  current->ret_stack[index].fp = frame_pointer;                    in ftrace_push_return_trace()
     94  current->ret_stack[index].retp = retp;                           in ftrace_push_return_trace()
    177  if (unlikely(current->ret_stack[index].fp != frame_pointer)) {   in ftrace_pop_return_trace()
    181  current->ret_stack[index].fp,                                    in ftrace_pop_return_trace()
    183  (void *)current->ret_stack[index]...                             in ftrace_pop_return_trace()
    490  graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)   graph_init_task() argument
    515  struct ftrace_ret_stack *ret_stack;                              ftrace_graph_init_idle_task() local
    540  struct ftrace_ret_stack *ret_stack;                              ftrace_graph_init_task() local
    553  struct ftrace_ret_stack *ret_stack = t->ret_stack;               ftrace_graph_exit_task() local
    [all...]
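The ftrace_push_return_trace() assignments above show what one ret_stack slot holds. As a hedged sketch only (field layout inferred from those assignments, not copied from include/linux/ftrace.h, where the real definition wraps some fields in config guards):

    /*
     * Approximate shape of one ret_stack entry, inferred from the
     * ftrace_push_return_trace() assignments above. The real struct in
     * include/linux/ftrace.h keeps fp/retp (and a profiler-only subtime
     * field) behind #ifdefs, so treat this as an illustration only.
     */
    struct ftrace_ret_stack_sketch {
        unsigned long       ret;        /* original return address, restored on pop */
        unsigned long       func;       /* traced function's entry address */
        unsigned long long  calltime;   /* timestamp taken at function entry */
        unsigned long       fp;         /* frame pointer, sanity-checked on pop */
        unsigned long      *retp;       /* where on the stack the return address was patched */
    };

The fp comparison at line 177 is what catches a mismatched or corrupted frame before the saved ret is handed back to the trampoline.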
ftrace.c
    794  struct ftrace_ret_stack *ret_stack;                              in profile_graph_entry() local
    798  /* If function graph is shutting down, ret_stack can be NULL */  in profile_graph_entry()
    799  if (!current->ret_stack)                                         in profile_graph_entry()
    802  ret_stack = ftrace_graph_get_ret_stack(current, 0);              in profile_graph_entry()
    803  if (ret_stack)                                                   in profile_graph_entry()
    804  ret_stack->subtime = 0;                                          in profile_graph_entry()
    811  struct ftrace_ret_stack *ret_stack;                              in profile_graph_return() local
    831  ret_stack = ftrace_graph_get_ret_stack(current, 1);              in profile_graph_return()
    832  if (ret_stack)                                                   in profile_graph_return()
    833  ret_stack ...                                                    in profile_graph_return()
    [all...]
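The profiler matches above follow one pattern: on entry, the just-pushed entry (index 0) gets its subtime cleared; on return, the parent entry (index 1) is looked up so the time spent in this call can be charged against its caller. A condensed sketch of the entry side, with a simplified signature and the time arithmetic elided:

    #include <linux/ftrace.h>
    #include <linux/sched.h>

    /* Sketch of the entry-side pattern from profile_graph_entry() above;
     * the real function takes the graph-entry descriptor and does more. */
    static int profile_entry_sketch(void)
    {
        struct ftrace_ret_stack *ret_stack;

        /* If function graph is shutting down, ret_stack can be NULL */
        if (!current->ret_stack)
            return 0;

        /* Index 0 is the entry pushed for the function we just entered */
        ret_stack = ftrace_graph_get_ret_stack(current, 0);
        if (ret_stack)
            ret_stack->subtime = 0;    /* profiler-only field */

        return 1;
    }

The return-side match at line 831 passes index 1 instead, i.e. the caller's entry, which is where child time is folded back in.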
/kernel/linux/linux-6.6/kernel/trace/

fgraph.c
     78  if (!current->ret_stack)                                         in ftrace_push_return_trace()
     82  * We must make sure the ret_stack is tested before we read       in ftrace_push_return_trace()
     97  current->ret_stack[index].ret = ret;                             in ftrace_push_return_trace()
     98  current->ret_stack[index].func = func;                           in ftrace_push_return_trace()
     99  current->ret_stack[index].calltime = calltime;                   in ftrace_push_return_trace()
    101  current->ret_stack[index].fp = frame_pointer;                    in ftrace_push_return_trace()
    104  current->ret_stack[index].retp = retp;                           in ftrace_push_return_trace()
    189  if (unlikely(current->ret_stack[index].fp != frame_pointer)) {   in ftrace_pop_return_trace()
    193  current->ret_stack[index].fp,                                    in ftrace_pop_return_trace()
    195  (void *)current->ret_stack[index]...                             in ftrace_pop_return_trace()
    527  graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)   graph_init_task() argument
    552  struct ftrace_ret_stack *ret_stack;                              ftrace_graph_init_idle_task() local
    577  struct ftrace_ret_stack *ret_stack;                              ftrace_graph_init_task() local
    590  struct ftrace_ret_stack *ret_stack = t->ret_stack;               ftrace_graph_exit_task() local
    [all...]

ftrace.c
    822  struct ftrace_ret_stack *ret_stack;                              in profile_graph_entry() local
    826  /* If function graph is shutting down, ret_stack can be NULL */  in profile_graph_entry()
    827  if (!current->ret_stack)                                         in profile_graph_entry()
    830  ret_stack = ftrace_graph_get_ret_stack(current, 0);              in profile_graph_entry()
    831  if (ret_stack)                                                   in profile_graph_entry()
    832  ret_stack->subtime = 0;                                          in profile_graph_entry()
    839  struct ftrace_ret_stack *ret_stack;                              in profile_graph_return() local
    859  ret_stack = ftrace_graph_get_ret_stack(current, 1);              in profile_graph_return()
    860  if (ret_stack)                                                   in profile_graph_return()
    861  ret_stack ...                                                    in profile_graph_return()
    [all...]
/kernel/linux/linux-5.10/arch/arm64/kernel/

stacktrace.c
     89  if (tsk->ret_stack &&                                            in unwind_frame()
     91  struct ftrace_ret_stack *ret_stack;                              in unwind_frame() local
     98  ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);     in unwind_frame()
     99  if (WARN_ON_ONCE(!ret_stack))                                    in unwind_frame()
    101  frame->pc = ret_stack->ret;                                      in unwind_frame()
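The arch unwinders above and below all repeat the same idiom: when the unwound return address turns out to be the graph tracer's trampoline (return_to_handler), the real caller is fetched from the task's ret_stack instead. A minimal, hedged sketch of that idiom; tsk, pc and graph_idx stand in for whatever per-unwinder state the real code keeps (frame->graph on arm64 5.10, a local counter elsewhere):

    #include <linux/ftrace.h>
    #include <linux/sched.h>
    #include <linux/bug.h>
    #include <linux/errno.h>

    /* Sketch only: substitute the saved return address for a trampoline hit. */
    static int graph_recover_ret_addr(struct task_struct *tsk,
                                      unsigned long *pc, int *graph_idx)
    {
        struct ftrace_ret_stack *ret_stack;

        if (!tsk->ret_stack)            /* graph tracing never enabled for tsk */
            return -ENOENT;

        /* Each trampoline hit consumes the next-outermost ret_stack entry */
        ret_stack = ftrace_graph_get_ret_stack(tsk, (*graph_idx)++);
        if (WARN_ON_ONCE(!ret_stack))
            return -EINVAL;

        *pc = ret_stack->ret;           /* the return address the tracer hijacked */
        return 0;
    }

The WARN_ON_ONCE mirrors the arm64 code at line 99: running out of ret_stack entries while still seeing trampoline addresses means the shadow stack and the real stack have gone out of sync.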
/kernel/linux/linux-5.10/arch/sh/kernel/

dumpstack.c
     60  struct ftrace_ret_stack *ret_stack;                              in print_ftrace_graph_addr() local
     66  if (!task->ret_stack)                                            in print_ftrace_graph_addr()
     69  ret_stack = ftrace_graph_get_ret_stack(task, *graph);            in print_ftrace_graph_addr()
     70  if (!ret_stack)                                                  in print_ftrace_graph_addr()
     73  ret_addr = ret_stack->ret;                                       in print_ftrace_graph_addr()

dwarf.c
    608  struct ftrace_ret_stack *ret_stack;                              in dwarf_unwind_stack() local
    610  ret_stack = ftrace_graph_get_ret_stack(current, 0);              in dwarf_unwind_stack()
    611  if (ret_stack)                                                   in dwarf_unwind_stack()
    612  pc = ret_stack->ret;                                             in dwarf_unwind_stack()
/kernel/linux/linux-6.6/arch/sh/kernel/

dumpstack.c
     60  struct ftrace_ret_stack *ret_stack;                              in print_ftrace_graph_addr() local
     66  if (!task->ret_stack)                                            in print_ftrace_graph_addr()
     69  ret_stack = ftrace_graph_get_ret_stack(task, *graph);            in print_ftrace_graph_addr()
     70  if (!ret_stack)                                                  in print_ftrace_graph_addr()
     73  ret_addr = ret_stack->ret;                                       in print_ftrace_graph_addr()

dwarf.c
    608  struct ftrace_ret_stack *ret_stack;                              in dwarf_unwind_stack() local
    610  ret_stack = ftrace_graph_get_ret_stack(current, 0);              in dwarf_unwind_stack()
    611  if (ret_stack)                                                   in dwarf_unwind_stack()
    612  pc = ret_stack->ret;                                             in dwarf_unwind_stack()
/kernel/linux/linux-5.10/arch/sparc/kernel/

stacktrace.c
     61  struct ftrace_ret_stack *ret_stack;                              in __save_stack_trace() local
     62  ret_stack = ftrace_graph_get_ret_stack(t,                        in __save_stack_trace()
     64  if (ret_stack) {                                                 in __save_stack_trace()
     65  pc = ret_stack->ret;                                             in __save_stack_trace()

perf_event.c
   1774  struct ftrace_ret_stack *ret_stack;                              in perf_callchain_kernel() local
   1775  ret_stack = ftrace_graph_get_ret_stack(current,                  in perf_callchain_kernel()
   1777  if (ret_stack) {                                                 in perf_callchain_kernel()
   1778  pc = ret_stack->ret;                                             in perf_callchain_kernel()

traps_64.c
   2502  struct ftrace_ret_stack *ret_stack;                              in show_stack() local
   2503  ret_stack = ftrace_graph_get_ret_stack(tsk, graph);              in show_stack()
   2504  if (ret_stack) {                                                 in show_stack()
   2505  pc = ret_stack->ret;                                             in show_stack()
/kernel/linux/linux-6.6/arch/sparc/kernel/

stacktrace.c
     61  struct ftrace_ret_stack *ret_stack;                              in __save_stack_trace() local
     62  ret_stack = ftrace_graph_get_ret_stack(t,                        in __save_stack_trace()
     64  if (ret_stack) {                                                 in __save_stack_trace()
     65  pc = ret_stack->ret;                                             in __save_stack_trace()

perf_event.c
   1774  struct ftrace_ret_stack *ret_stack;                              in perf_callchain_kernel() local
   1775  ret_stack = ftrace_graph_get_ret_stack(current,                  in perf_callchain_kernel()
   1777  if (ret_stack) {                                                 in perf_callchain_kernel()
   1778  pc = ret_stack->ret;                                             in perf_callchain_kernel()

traps_64.c
   2497  struct ftrace_ret_stack *ret_stack;                              in show_stack() local
   2498  ret_stack = ftrace_graph_get_ret_stack(tsk, graph);              in show_stack()
   2499  if (ret_stack) {                                                 in show_stack()
   2500  pc = ret_stack->ret;                                             in show_stack()
/kernel/linux/linux-6.6/arch/arm64/kernel/

stacktrace.c
     79  if (state->task->ret_stack &&                                    in unwind_recover_return_address()
/kernel/linux/linux-5.10/init/

init_task.c
    204  .ret_stack = NULL,
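init_task is built with .ret_stack = NULL, so a task that has never had graph tracing enabled simply has no shadow stack; that is the case the `if (!task->ret_stack)` guards in the entries above defend against. A trivial, hypothetical helper just to show the guard shape (the helper name is made up for illustration):

    #include <linux/ftrace.h>
    #include <linux/sched.h>

    /* Hypothetical helper, sketch only: return a saved graph return
     * address, or 0 for tasks (like init_task) with no ret_stack. */
    static unsigned long graph_ret_addr_or_zero(struct task_struct *tsk, int idx)
    {
        struct ftrace_ret_stack *rs;

        if (!tsk->ret_stack)        /* e.g. init_task: .ret_stack = NULL */
            return 0;

        rs = ftrace_graph_get_ret_stack(tsk, idx);
        return rs ? rs->ret : 0;
    }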
/kernel/linux/linux-6.6/init/

init_task.c
    201  .ret_stack = NULL,
/kernel/linux/linux-5.10/include/linux/

sched.h
   1388  /* Index of current stored address in ret_stack: */
   1393  struct ftrace_ret_stack *ret_stack;                              member
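Both trees hang this state off task_struct in include/linux/sched.h: a commented index of the current slot plus the ret_stack pointer itself. Roughly, and only as a sketch (the index member's exact name and the surrounding config guard are assumptions, not shown in the match above):

    /* Sketch of the relevant task_struct members, as suggested by the
     * sched.h matches; guarded by the function-graph-tracer config
     * option in the real header. */
    #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* Index of current stored address in ret_stack: */
        int                             curr_ret_stack;     /* name assumed */
        /* Stack of saved return addresses for return function tracing: */
        struct ftrace_ret_stack        *ret_stack;
    #endif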
/kernel/linux/linux-6.6/include/linux/

sched.h
   1502  /* Index of current stored address in ret_stack: */
   1507  struct ftrace_ret_stack *ret_stack;                              member