/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UNWIND_H
#define _ASM_X86_UNWIND_H

#include <linux/sched.h>
#include <linux/ftrace.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

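/*
 * The "iret frame" is the tail of struct pt_regs that the CPU pushes on an
 * interrupt or exception: ip, cs, flags, sp and ss.
 */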
#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip))
#define IRET_FRAME_SIZE   (sizeof(struct pt_regs) - IRET_FRAME_OFFSET)

struct unwind_state {
	struct stack_info stack_info;	/* stack the current frame lives on */
	unsigned long stack_mask;	/* stack types already visited (loop detection) */
	struct task_struct *task;	/* task whose stack is being unwound */
	int graph_idx;			/* ftrace function graph return stack index */
	bool error;			/* the unwinder hit an inconsistency */
#if defined(CONFIG_UNWINDER_ORC)
	bool signal, full_regs;
	unsigned long sp, bp, ip;
	struct pt_regs *regs, *prev_regs;
#elif defined(CONFIG_UNWINDER_FRAME_POINTER)
	bool got_irq;
	unsigned long *bp, *orig_sp, ip;
	/*
	 * If non-NULL: The current frame is incomplete and doesn't contain a
	 * valid BP. When looking for the next frame, use this instead of the
	 * non-existent saved BP.
	 */
	unsigned long *next_bp;
	struct pt_regs *regs;
#else
	unsigned long *sp;
#endif
};

void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame);
bool unwind_next_frame(struct unwind_state *state);
unsigned long unwind_get_return_address(struct unwind_state *state);
unsigned long *unwind_get_return_address_ptr(struct unwind_state *state);

static inline bool unwind_done(struct unwind_state *state)
{
	return state->stack_info.type == STACK_TYPE_UNKNOWN;
}

static inline bool unwind_error(struct unwind_state *state)
{
	return state->error;
}

static inline
void unwind_start(struct unwind_state *state, struct task_struct *task,
		  struct pt_regs *regs, unsigned long *first_frame)
{
	first_frame = first_frame ? : get_stack_pointer(task, regs);

	__unwind_start(state, task, regs, first_frame);
}
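
/*
 * A typical unwind loop looks roughly like the sketch below (a NULL
 * first_frame starts at the current stack pointer); consume() stands in for
 * whatever the caller does with each return address:
 *
 *	struct unwind_state state;
 *	unsigned long addr;
 *
 *	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
 *	     unwind_next_frame(&state)) {
 *		addr = unwind_get_return_address(&state);
 *		if (!addr)
 *			break;
 *		consume(addr);
 *	}
 */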

#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
/*
 * If '*partial' is set to true, only the iret frame registers of the returned
 * pt_regs are valid.
 */
static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
						    bool *partial)
{
	if (unwind_done(state))
		return NULL;

	if (partial) {
#ifdef CONFIG_UNWINDER_ORC
		*partial = !state->full_regs;
#else
		*partial = false;
#endif
	}

	return state->regs;
}
#else
static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
						    bool *partial)
{
	return NULL;
}
#endif
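
/*
 * Rough usage sketch: while walking a stack, a dumper can ask each state for
 * saved entry registers and show them (print_entry_regs() is a hypothetical
 * consumer, not a real helper):
 *
 *	regs = unwind_get_entry_regs(&state, &partial);
 *	if (regs)
 *		print_entry_regs(regs, partial);
 */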

#ifdef CONFIG_UNWINDER_ORC
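/*
 * unwind_init() sets up the ORC lookup tables at boot and unwind_module_init()
 * registers a module's ORC sections at module load; the other unwinders need
 * no such setup, hence the empty stubs below.
 */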
void unwind_init(void);
void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size,
			void *orc, size_t orc_size);
#else
static inline void unwind_init(void) {}
static inline
void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size,
			void *orc, size_t orc_size) {}
#endif

/*
 * This disables KASAN checking when reading a value from another task's stack,
 * since the other task could be running on another CPU and could have poisoned
 * the stack in the meantime.
 */
#define READ_ONCE_TASK_STACK(task, x)			\
({							\
	unsigned long val;				\
	if (task == current)				\
		val = READ_ONCE(x);			\
	else						\
		val = READ_ONCE_NOCHECK(x);		\
	val;						\
})
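
/*
 * For example, a frame-pointer unwinder reading a saved frame pointer off a
 * possibly-running task's stack might do something like:
 *
 *	next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
 */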

/*
 * Returns true if the task may currently be running on another CPU, in which
 * case its stack could be changing under the unwinder.
 */
static inline bool task_on_another_cpu(struct task_struct *task)
{
#ifdef CONFIG_SMP
	return task != current && task->on_cpu;
#else
	return false;
#endif
}

#endif /* _ASM_X86_UNWIND_H */