/kernel/linux/linux-6.6/lib/ref_tracker.c
     28  } stacks[];                                           [member]
     37  stats = kmalloc(struct_size(stats, stacks, limit),    [in ref_tracker_get_stats()]
     50  if (stats->stacks[i].stack_handle == stack)           [in ref_tracker_get_stats()]
     55  stats->stacks[i].stack_handle = stack;                [in ref_tracker_get_stats()]
     56  stats->stacks[i].count = 0;                           [in ref_tracker_get_stats()]
     59  ++stats->stacks[i].count;                             [in ref_tracker_get_stats()]
    107  stack = stats->stacks[i].stack_handle;                [in __ref_tracker_dir_pr_ostream()]
    111  stats->stacks[i].count, stats->total, sbuf);          [in __ref_tracker_dir_pr_ostream()]
    112  skipped -= stats->stacks[i].count;                    [in __ref_tracker_dir_pr_ostream()]
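The ref_tracker matches show the kernel's flexible-array idiom: `stacks[]` closes the struct (line 28) and the allocation on line 37 sizes the header plus `limit` trailing elements with struct_size(). A minimal userspace sketch of the same layout and allocation, with field names borrowed from the snippet; plain sizeof arithmetic stands in for struct_size(), which additionally saturates on overflow:

```c
#include <stdlib.h>

/* Shape implied by the ref_tracker.c matches; the handle type is a
 * stand-in for the kernel's depot_stack_handle_t. */
struct ref_stats {
	unsigned int total;
	struct {
		unsigned long stack_handle;
		unsigned int count;
	} stacks[];	/* flexible array member, as on line 28 */
};

static struct ref_stats *stats_alloc(unsigned int limit)
{
	struct ref_stats *stats;

	/* kernel: kmalloc(struct_size(stats, stacks, limit), gfp) */
	stats = calloc(1, sizeof(*stats) + limit * sizeof(stats->stacks[0]));
	return stats;	/* NULL on allocation failure */
}
```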
/kernel/linux/linux-6.6/arch/arm64/kvm/hyp/nvhe/stacktrace.c
    123  struct stack_info stacks[] = {       [local, in pkvm_save_backtrace()]
    128  .stacks = stacks,                    [in pkvm_save_backtrace()]
    129  .nr_stacks = ARRAY_SIZE(stacks),     [in pkvm_save_backtrace()]
/kernel/linux/linux-6.6/arch/arm64/kvm/stacktrace.c
    181  struct stack_info stacks[] = {       [local, in hyp_dump_backtrace()]
    186  .stacks = stacks,                    [in hyp_dump_backtrace()]
    187  .nr_stacks = ARRAY_SIZE(stacks),     [in hyp_dump_backtrace()]
/kernel/linux/linux-6.6/arch/arm64/kernel/stacktrace.c
    147  * Per-cpu stacks are only accessible when unwinding the current task in a
    158  * SDEI stacks are only accessible when unwinding the current task in an NMI
    179  struct stack_info stacks[] = {       [local, in arch_stack_walk()]
    194  .stacks = stacks,                    [in arch_stack_walk()]
    195  .nr_stacks = ARRAY_SIZE(stacks),     [in arch_stack_walk()]
/kernel/linux/linux-6.6/samples/fprobe/fprobe_example.c
     44  unsigned long stacks[BACKTRACE_DEPTH];                [local, in show_backtrace()]
     47  len = stack_trace_save(stacks, BACKTRACE_DEPTH, 2);   [in show_backtrace()]
     48  stack_trace_print(stacks, len, 24);                   [in show_backtrace()]
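The fprobe sample captures and prints a backtrace with the generic stacktrace API visible on lines 47-48. A hedged sketch of the same two-call idiom in module context; the depth, skip count, and indent below are illustrative choices, not the sample's values:

```c
#include <linux/stacktrace.h>

#define DEPTH 16	/* illustrative; the sample uses BACKTRACE_DEPTH */

/* Capture the current kernel stack and print it to the kernel log,
 * patterned on show_backtrace() in fprobe_example.c. */
static void dump_backtrace_here(void)
{
	unsigned long entries[DEPTH];
	unsigned int nr;

	/* skipnr = 2 drops this function and its immediate caller */
	nr = stack_trace_save(entries, DEPTH, 2);
	/* the last argument is the indent prepended to each line */
	stack_trace_print(entries, nr, 4);
}
```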
/kernel/linux/linux-6.6/arch/arm64/include/asm/stacktrace/common.h
     33  * @stacks: An array of stacks which can be unwound.
     34  * @nr_stacks: The number of stacks in @stacks.
     45  struct stack_info *stacks;                                             [member]
     85  struct stack_info *info = &state->stacks[i];                           [in unwind_find_next_stack()]
    123  * Remove the current stack from the list of stacks so that it cannot   [in unwind_consume_stack()]
    126  * Note that stacks can nest in several valid orders, e.g.              [in unwind_consume_stack()]
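common.h defines the `stacks`/`nr_stacks` pair consumed by the three arm64 callers above: the unwinder scans the array for the stack range containing the current frame record. A minimal userspace sketch of that containment scan, modeled on unwind_find_next_stack(); struct stack_info mirrors the two-field arm64 layout, and find_stack() is a hypothetical name:

```c
#include <stdbool.h>
#include <stddef.h>

struct stack_info {
	unsigned long low;	/* inclusive lower bound of the stack */
	unsigned long high;	/* exclusive upper bound of the stack */
};

/* True if [sp, sp + size) lies wholly on this stack; a zero low bound
 * marks an unusable entry, and sp + size < sp catches wraparound. */
static bool stackinfo_on_stack(const struct stack_info *info,
			       unsigned long sp, unsigned long size)
{
	if (!info->low)
		return false;
	if (sp < info->low || sp + size < sp || sp + size > info->high)
		return false;
	return true;
}

/* Hypothetical helper: return the stack holding sp, if any. */
static struct stack_info *find_stack(struct stack_info *stacks,
				     size_t nr_stacks,
				     unsigned long sp, unsigned long size)
{
	for (size_t i = 0; i < nr_stacks; i++)
		if (stackinfo_on_stack(&stacks[i], sp, size))
			return &stacks[i];
	return NULL;
}
```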
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/intel_runtime_pm.c
     86  depot_stack_handle_t stack, *stacks;         [local, in track_intel_runtime_pm_wakeref()]
    101  stacks = krealloc(rpm->debug.owners,         [in track_intel_runtime_pm_wakeref()]
    102  (rpm->debug.count + 1) * sizeof(*stacks),    [in track_intel_runtime_pm_wakeref()]
    104  if (stacks) {                                [in track_intel_runtime_pm_wakeref()]
    105  stacks[rpm->debug.count++] = stack;          [in track_intel_runtime_pm_wakeref()]
    106  rpm->debug.owners = stacks;                  [in track_intel_runtime_pm_wakeref()]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/intel_runtime_pm.c
     77  depot_stack_handle_t stack, *stacks;         [local, in track_intel_runtime_pm_wakeref()]
     92  stacks = krealloc(rpm->debug.owners,         [in track_intel_runtime_pm_wakeref()]
     93  (rpm->debug.count + 1) * sizeof(*stacks),    [in track_intel_runtime_pm_wakeref()]
     95  if (stacks) {                                [in track_intel_runtime_pm_wakeref()]
     96  stacks[rpm->debug.count++] = stack;          [in track_intel_runtime_pm_wakeref()]
     97  rpm->debug.owners = stacks;                  [in track_intel_runtime_pm_wakeref()]
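Both i915 copies grow the owners array by one element per tracked wakeref and, on allocation failure, keep the old array and silently drop the new handle. A userspace sketch of that grow-or-drop pattern, with realloc standing in for krealloc; the struct just models the rpm->debug fields from the snippet:

```c
#include <stdlib.h>

struct wakeref_debug {
	unsigned long *owners;	/* stand-in for depot_stack_handle_t[] */
	unsigned long count;
};

/* Append one stack handle, as track_intel_runtime_pm_wakeref() does. */
static void track_wakeref(struct wakeref_debug *dbg, unsigned long handle)
{
	unsigned long *grown;

	/* kernel: krealloc(dbg->owners, (count + 1) * sizeof(*grown), gfp);
	 * on failure the original allocation stays valid, so nothing is
	 * lost except this one handle. */
	grown = realloc(dbg->owners, (dbg->count + 1) * sizeof(*grown));
	if (grown) {
		grown[dbg->count++] = handle;
		dbg->owners = grown;
	}
}
```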
/kernel/linux/linux-6.6/tools/perf/util/bpf_lock_contention.c
     33  bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));   [in lock_contention_prepare()]
     43  bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);           [in lock_contention_prepare()]
     45  bpf_map__set_max_entries(skel->maps.stacks, 1);                             [in lock_contention_prepare()]
    260  stack = bpf_map__fd(skel->maps.stacks);                                     [in lock_contention_read()]
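perf resizes the `stacks` map before the skeleton is loaded, since a BPF map's value size and entry count are fixed at load time. A sketch of that setup step, assuming a generated skeleton named `my_skel` with the same `maps.stacks` member; the bpf_map__* calls themselves are real libbpf API:

```c
#include <bpf/libbpf.h>
#include "my_skel.skel.h"	/* hypothetical generated skeleton */

static int prepare_stacks_map(struct my_skel *skel,
			      unsigned int max_stack,
			      unsigned int nr_entries)
{
	/* each value holds up to max_stack 64-bit return addresses */
	bpf_map__set_value_size(skel->maps.stacks,
				max_stack * sizeof(__u64));
	bpf_map__set_max_entries(skel->maps.stacks, nr_entries);

	/* sizes are locked in once the object is loaded */
	return my_skel__load(skel);
}
```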
/kernel/linux/linux-6.6/tools/perf/util/bpf_off_cpu.c
    330  stack = bpf_map__fd(skel->maps.stacks);    [in off_cpu_write()]
/kernel/linux/linux-6.6/tools/perf/util/bpf_skel/off_cpu.bpf.c
     40  } stacks SEC(".maps");
    197  stack_id = bpf_get_stackid(ctx, &stacks,    [in off_cpu_stat()]
/kernel/linux/linux-6.6/tools/perf/util/bpf_skel/lock_contention.bpf.c
     35  } stacks SEC(".maps");
    301  pelem->stack_id = bpf_get_stackid(ctx, &stacks,    [in contention_begin()]
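On the BPF side, `stacks` is a stack-trace map keyed by the id that bpf_get_stackid() returns, which is how both .bpf.c files above deduplicate call stacks. A sketch of the pairing; the map dimensions, attach point, and flag choice are illustrative, not copied from either file:

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_STACK_DEPTH 32	/* illustrative depth */

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, MAX_STACK_DEPTH * sizeof(__u64));
	__uint(max_entries, 1024);	/* perf overrides this at load time */
} stacks SEC(".maps");

SEC("tracepoint/sched/sched_switch")
int on_sched_switch(void *ctx)
{
	/* id of the current kernel stack, deduplicated by the map;
	 * negative on error, e.g. when the map is full */
	long stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);

	if (stack_id < 0)
		return 0;
	/* ... record stack_id alongside the event being counted ... */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```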
/kernel/linux/linux-6.6/arch/arm/lib/backtrace.S
    103  @ Kernel stacks may be discontiguous in memory. If the next
/kernel/linux/linux-5.10/arch/arm/kernel/setup.c
    151  static struct stack stacks[NR_CPUS];                 [variable]
    522  * cpu_init sets up the per-CPU stacks.
    528  struct stack *stk = &stacks[cpu];                    [in cpu_init()]
    556  * setup stacks for re-entrant exception handlers     [in cpu_init()]
/kernel/linux/linux-6.6/arch/arm/kernel/setup.c
    144  static struct stack stacks[NR_CPUS];                 [variable]
    529  * cpu_init sets up the per-CPU stacks.
    535  struct stack *stk = &stacks[cpu];                    [in cpu_init()]
    563  * setup stacks for re-entrant exception handlers     [in cpu_init()]
/kernel/linux/linux-6.6/arch/arm/kernel/entry-header.S
    435  @ Test whether the SP has overflowed. Task and IRQ stacks are aligned
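The comment on line 435 describes a one-bit overflow check: because task and IRQ stacks are allocated at bases aligned to twice THREAD_SIZE, any SP still inside its stack has the THREAD_SIZE bit clear, so overflow detection is a single test instruction. A C sketch of the same test under that alignment assumption; the 8 KiB size is illustrative:

```c
#include <stdbool.h>

#define THREAD_SIZE (1UL << 13)	/* illustrative: 8 KiB stacks */

/* With the stack base aligned to 2 * THREAD_SIZE, addresses within
 * [base, base + THREAD_SIZE) have this bit clear; an SP that has run
 * below the base sets it. The assembly equivalent is one tst. */
static bool sp_overflowed(unsigned long sp)
{
	return sp & THREAD_SIZE;
}
```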
/kernel/linux/linux-5.10/kernel/trace/trace.c
   2956  struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];        [member]
   2999  fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;   [in __ftrace_trace_stack()]
/kernel/linux/linux-6.6/kernel/trace/trace.c
   3091  struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];        [member]
   3134  fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;   [in __ftrace_trace_stack()]
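Both trace.c versions keep FTRACE_KSTACK_NESTING scratch buffers per CPU so a stack trace taken from an interrupt cannot clobber one already being filled at task level; stackidx selects the buffer for the current nesting depth. A single-CPU userspace sketch of that indexing; the constants and the explicit counter are illustrative (the kernel derives stackidx from a per-CPU recursion counter with preemption disabled):

```c
#include <stddef.h>

#define KSTACK_NESTING 4	/* illustrative; kernel: FTRACE_KSTACK_NESTING */
#define KSTACK_ENTRIES 64

struct ftrace_stack {
	unsigned long calls[KSTACK_ENTRIES];
};

struct ftrace_stacks {
	struct ftrace_stack stacks[KSTACK_NESTING];
};

/* One instance per CPU in the kernel (hence this_cpu_ptr); one here. */
static struct ftrace_stacks scratch;
static int nesting;

/* Reserve the buffer for the current nesting level, or NULL when the
 * trace is nested too deeply and must be dropped. */
static struct ftrace_stack *reserve_stack_buffer(void)
{
	int stackidx = nesting++;

	if (stackidx >= KSTACK_NESTING) {
		nesting--;
		return NULL;
	}
	return &scratch.stacks[stackidx];
}

static void release_stack_buffer(void)
{
	nesting--;
}
```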