1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_POWERPC_FTRACE
3#define _ASM_POWERPC_FTRACE
4
5#include <asm/types.h>
6
7#ifdef CONFIG_FUNCTION_TRACER
8#define MCOUNT_ADDR		((unsigned long)(_mcount))
9#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
10
11#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
12
13#ifdef __ASSEMBLY__
14
/* Based on objdump output from glibc */
16
/*
 * Create a 48-byte stack frame and save the state a traced function
 * needs preserved around the tracer call: the volatile argument
 * registers r3-r10, the LR value at entry, and CR.
 * Frame layout (offsets from the new r1):
 *   8: CR, 12..40: r3-r10, 44: LR at entry.
 */
#define MCOUNT_SAVE_FRAME			\
	stwu	r1,-48(r1);			/* push frame, store back chain */ \
	stw	r3, 12(r1);			/* save argument regs r3-r6 */ \
	stw	r4, 16(r1);			\
	stw	r5, 20(r1);			\
	stw	r6, 24(r1);			\
	mflr	r3;				/* r3 = LR at entry */ \
	lwz	r4, 52(r1);			/* r4 = parent frame's LR save slot (48+4) -- NOTE(review): presumably the traced function's own return address; confirm against _mcount */ \
	mfcr	r5;				/* r5 = CR */ \
	stw	r7, 28(r1);			/* save remaining arg regs r7-r10 */ \
	stw	r8, 32(r1);			\
	stw	r9, 36(r1);			\
	stw	r10,40(r1);			\
	stw	r3, 44(r1);			/* stash entry LR value */ \
	stw	r5, 8(r1)			/* stash CR value */
32
/*
 * Undo MCOUNT_SAVE_FRAME: restore r3-r10 and CR, move the stashed
 * entry-LR value (44(r1)) into CTR, reload LR from the parent frame's
 * save slot (52(r1)), then pop the 48-byte frame.
 * NOTE(review): CTR presumably serves as the target of a subsequent
 * bctr back to the traced function -- confirm at the macro's use site.
 */
#define MCOUNT_RESTORE_FRAME			\
	lwz	r6, 8(r1);			/* r6 = saved CR value */ \
	lwz	r0, 44(r1);			/* r0 = stashed entry LR */ \
	lwz	r3, 12(r1);			\
	mtctr	r0;				/* CTR = entry LR */ \
	lwz	r4, 16(r1);			\
	mtcr	r6;				/* restore CR */ \
	lwz	r5, 20(r1);			\
	lwz	r6, 24(r1);			\
	lwz	r0, 52(r1);			/* r0 = parent frame's LR save slot */ \
	lwz	r7, 28(r1);			\
	lwz	r8, 32(r1);			\
	mtlr	r0;				/* restore LR */ \
	lwz	r9, 36(r1);			\
	lwz	r10,40(r1);			\
	addi	r1, r1, 48			/* pop the frame */
49
50#else /* !__ASSEMBLY__ */
51extern void _mcount(void);
52
/*
 * Map a recorded mcount call-site address to the address ftrace should
 * patch.  On this architecture no adjustment is needed: the relocation
 * of the mcount call site is the same as the address itself.
 */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* relocation of mcount call site is the same as the address */
	return addr;
}
58
/* Per-call-site architecture data kept by dynamic ftrace. */
struct dyn_arch_ftrace {
	/* NOTE(review): presumably the module containing the call site
	 * (needed to reach its trampoline) -- confirm against users. */
	struct module *mod;
};
62#endif /* __ASSEMBLY__ */
63
64#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
65#define ARCH_SUPPORTS_FTRACE_OPS 1
66#endif
67#endif /* CONFIG_FUNCTION_TRACER */
68
69#ifndef __ASSEMBLY__
70#ifdef CONFIG_FTRACE_SYSCALLS
71/*
72 * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
73 * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
74 * those.
75 */
76#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
77#ifdef PPC64_ELF_ABI_v1
/*
 * Decide whether symbol @sym implements the syscall named @name
 * ("sys_..."), for the ELFv1 ABI where text symbols carry a leading
 * dot.  Accepts the dotted sys_ name itself, the ".__se_sys" alias,
 * and the ".ppc_"/".ppc32_"/".ppc64_" variants of the sys_ name.
 */
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/* Plain match once the leading dot is skipped. */
	if (!strcmp(sym + 1, name))
		return true;

	/* ".__se_sys..." alias: compare from its embedded "sys". */
	if (!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name))
		return true;

	/* ppc-prefixed variants match @name with its "sys_" stripped. */
	if (!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4))
		return true;
	if (!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4))
		return true;

	return !strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4);
}
87#else
/*
 * Decide whether symbol @sym implements the syscall named @name
 * ("sys_...").  Accepts the sys_ name itself, the "__se_sys" alias,
 * and the "ppc_"/"ppc32_"/"ppc64_" variants of the sys_ name.
 */
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/* Exact match against the sys_ name. */
	if (!strcmp(sym, name))
		return true;

	/* "__se_sys..." alias: compare from its embedded "sys". */
	if (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name))
		return true;

	/* ppc-prefixed variants match @name with its "sys_" stripped. */
	if (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4))
		return true;
	if (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4))
		return true;

	return !strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4);
}
96#endif /* PPC64_ELF_ABI_v1 */
97#endif /* CONFIG_FTRACE_SYSCALLS */
98
99#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER)
100#include <asm/paca.h>
101
/* Clear this CPU's ftrace_enabled flag in its paca. */
static inline void this_cpu_disable_ftrace(void)
{
	get_paca()->ftrace_enabled = 0;
}
106
/* Set this CPU's ftrace_enabled flag in its paca. */
static inline void this_cpu_enable_ftrace(void)
{
	get_paca()->ftrace_enabled = 1;
}
111
/* Store an arbitrary ftrace_enabled value in this CPU's paca
 * (e.g. to restore a value saved by this_cpu_get_ftrace_enabled()). */
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled)
{
	get_paca()->ftrace_enabled = ftrace_enabled;
}
117
/* Read this CPU's ftrace_enabled flag from its paca. */
static inline u8 this_cpu_get_ftrace_enabled(void)
{
	return get_paca()->ftrace_enabled;
}
122
123void ftrace_free_init_tramp(void);
124#else /* CONFIG_PPC64 */
/* Without CONFIG_PPC64 (+ function tracer) there is no paca, hence no
 * per-CPU ftrace flag: the setters are no-ops and the flag reads as
 * always enabled (1). */
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
static inline void ftrace_free_init_tramp(void) { }
130#endif /* CONFIG_PPC64 */
131#endif /* !__ASSEMBLY__ */
132
133#endif /* _ASM_POWERPC_FTRACE */
134