/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>

#ifdef CONFIG_ARM64_MODULE_PLTS
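/*
 * Per-section PLT book-keeping: the ELF section index of the PLT, the
 * number of entries emitted so far, and the maximum number of entries
 * the section can hold, sized at module load time from the relocations
 * that may need one.
 */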
struct mod_plt_sec {
	int			plt_shndx;
	int			plt_num_entries;
	int			plt_max_entries;
};

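/*
 * Separate PLTs are kept for the module core and init sections, since
 * the init PLT is discarded along with the init text and data once the
 * module has finished loading. The ftrace trampolines are PLT entries
 * used to reach the ftrace entry code when a patched call site is out
 * of direct branch range.
 */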
struct mod_arch_specific {
	struct mod_plt_sec	core;
	struct mod_plt_sec	init;

	/* for CONFIG_DYNAMIC_FTRACE */
	struct plt_entry	*ftrace_trampolines;
};
#endif

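/*
 * Emit (or reuse) a PLT entry for the branch at @loc described by
 * @rela and @sym, and return the address of the entry, so that an
 * out-of-range branch can be redirected through it.
 */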
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym);

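/*
 * Emit a veneer for an ADRP instruction at @loc that is susceptible to
 * erratum 843419, and return the address of the veneer. @val is the
 * relocation value from which the target page address is derived.
 */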
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val);

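/*
 * Base of the module VA range. With KASLR this is randomized at boot;
 * otherwise the range is anchored just below _etext so that modules
 * remain within direct branch range of the kernel text.
 */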
#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;
#else
#define module_alloc_base	((u64)_etext - MODULES_VSIZE)
#endif

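/*
 * A single PLT veneer: adrp/add materialize the branch target in x16
 * (adrp covers a +/-4 GiB PC-relative range), and br jumps to it.
 */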
struct plt_entry {
	/*
	 * A program that conforms to the AArch64 Procedure Call Standard
	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
	 * IP1 (x17) may be inserted at any branch instruction that is
	 * exposed to a relocation that supports long branches. Since that
	 * is exactly what we are dealing with here, we are free to use x16
	 * as a scratch register in the PLT veneers.
	 */
	__le32	adrp;	/* adrp	x16, ....			*/
	__le32	add;	/* add	x16, x16, #0x....		*/
	__le32	br;	/* br	x16				*/
};

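/*
 * Cortex-A53 erratum 843419 may corrupt the result of an ADRP located
 * in one of the last two instruction slots of a 4 KiB page (offsets
 * 0xff8 and 0xffc). When the workaround is in effect, ADRPs must not
 * be placed at those offsets, and affected relocations are routed
 * through a veneer instead.
 */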
static inline bool is_forbidden_offset_for_adrp(void *place)
{
	return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	       cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
	       ((u64)place & 0xfff) >= 0xff8;
}

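/*
 * get_plt_entry() builds a PLT entry that branches to @dst, assuming
 * the entry will be placed at @pc (the adrp is PC-relative, so the
 * encoding depends on the entry's own address). plt_entries_equal()
 * compares two entries with that placement dependency taken into
 * account.
 */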
struct plt_entry get_plt_entry(u64 dst, void *pc);
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);

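/*
 * PLT sections start out zero-filled, and none of the instructions in
 * a populated entry encodes to zero, so any non-zero word means the
 * entry is in use.
 */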
static inline bool plt_entry_is_initialized(const struct plt_entry *e)
{
	return e->adrp || e->add || e->br;
}

#endif /* __ASM_MODULE_H */