// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
#include <asm/sections.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

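/*
 * Upper bound on the number of instructions one alternative entry can patch:
 * the length fields are byte counts that fit in a u8, hence at most
 * 255 / LOONGARCH_INSN_SIZE instructions.
 */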
#define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE)

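/* Set via the "debug-alternative" boot parameter; enables DPRINTK()/DUMP_WORDS() output below. */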
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_WORDS(buf, count, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int _j;							\
		union loongarch_instruction *_buf = buf;		\
									\
		if (!(count))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (_j = 0; _j < count - 1; _j++)			\
			printk(KERN_CONT "<%08x> ", _buf[_j].word);	\
		printk(KERN_CONT "<%08x>\n", _buf[_j].word);		\
	}								\
} while (0)

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
{
	while (count--) {
		insn->word = INSN_NOP;
		insn++;
	}
}

/* Is the jump target inside the local alternative replacement instructions? */
static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
{
	return jump >= (unsigned long)start && jump < (unsigned long)end;
}

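/*
 * Fix up a PC-relative branch copied from the replacement area: recompute its
 * offset so that it still reaches the same absolute target when it executes
 * from the patched location @dest instead of @src. Branches whose target lies
 * inside the replacement block itself keep their encoding, because relative
 * distances within the copied block do not change.
 */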
static void __init_or_module recompute_jump(union loongarch_instruction *buf,
		union loongarch_instruction *dest, union loongarch_instruction *src,
		void *start, void *end)
{
	unsigned int si, si_l, si_h;
	unsigned long cur_pc, jump_addr, pc;
	long offset;

	cur_pc = (unsigned long)src;
	pc = (unsigned long)dest;

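	/* B and BL: reg0i26 format, 26-bit signed word offset (+/-128M range) */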
	si_l = src->reg0i26_format.simmediate_l;
	si_h = src->reg0i26_format.simmediate_h;
	switch (src->reg0i26_format.opcode) {
	case b_op:
	case bl_op:
		jump_addr = bs_dest_26(cur_pc, si_h, si_l);
		if (in_alt_jump(jump_addr, start, end))
			return;
		offset = jump_addr - pc;
		BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
		offset >>= 2;
		buf->reg0i26_format.simmediate_h = offset >> 16;
		buf->reg0i26_format.simmediate_l = offset;
		return;
	}

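	/* BEQZ, BNEZ, BCEQZ: reg1i21 format, 21-bit signed word offset (+/-4M range) */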
	si_l = src->reg1i21_format.simmediate_l;
	si_h = src->reg1i21_format.simmediate_h;
	switch (src->reg1i21_format.opcode) {
	case beqz_op:
	case bnez_op:
	case bceqz_op:
		jump_addr = bs_dest_21(cur_pc, si_h, si_l);
		if (in_alt_jump(jump_addr, start, end))
			return;
		offset = jump_addr - pc;
		BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
		offset >>= 2;
		buf->reg1i21_format.simmediate_h = offset >> 16;
		buf->reg1i21_format.simmediate_l = offset;
		return;
	}

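	/* BEQ, BNE, BLT[U], BGE[U]: reg2i16 format, 16-bit signed word offset (+/-128K range) */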
	si = src->reg2i16_format.simmediate;
	switch (src->reg2i16_format.opcode) {
	case beq_op:
	case bne_op:
	case blt_op:
	case bge_op:
	case bltu_op:
	case bgeu_op:
		jump_addr = bs_dest_16(cur_pc, si);
		if (in_alt_jump(jump_addr, start, end))
			return;
		offset = jump_addr - pc;
		BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
		offset >>= 2;
		buf->reg2i16_format.simmediate = offset;
		return;
	}
}

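/*
 * Copy @nr replacement instructions from @src into @buf, which will later be
 * written to @dest. PC-relative non-branch instructions cannot be relocated
 * yet and are rejected; branch instructions get their offsets recomputed for
 * the new location, except JIRL, whose target is register-relative and needs
 * no fixup.
 */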
static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
	union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		buf[i].word = src[i].word;

		if (is_pc_insn(src[i])) {
			pr_err("PC-relative instructions are not supported at present!\n");
			return -EINVAL;
		}

		if (is_branch_insn(src[i]) &&
		    src[i].reg2i16_format.opcode != jirl_op) {
			recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
		}
	}

	return 0;
}

/*
 * text_poke_early - Update instructions on a live kernel at boot time
 *
 * When you use this code to patch more than one instruction, you need to make
 * sure that other CPUs cannot execute the code being patched in parallel.
 * Also, no thread must currently be preempted in the middle of these
 * instructions, and on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
static void *__init_or_module text_poke_early(union loongarch_instruction *insn,
			      union loongarch_instruction *buf, unsigned int nr)
{
	int i;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < nr; i++)
		insn[i].word = buf[i].word;

	local_irq_restore(flags);

	wbflush();
	flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr));

	return insn;
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self-modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	unsigned int nr_instr, nr_repl, nr_insnbuf;
	union loongarch_instruction *instr, *replacement;
	union loongarch_instruction insnbuf[MAX_PATCH_SIZE];

	DPRINTK("alt table %px -> %px", start, end);
	/*
	 * The scan order should be from start to end. Alternatives scanned
	 * later may overwrite code patched by alternatives scanned earlier.
	 * Some kernel functions (e.g. memcpy, memset, etc.) rely on this
	 * order to patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		nr_insnbuf = 0;

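		/* instr_offset and replace_offset are self-relative offsets */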
		instr = (void *)&a->instr_offset + a->instr_offset;
		replacement = (void *)&a->replace_offset + a->replace_offset;

		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->instrlen & 0x3);
		BUG_ON(a->replacementlen & 0x3);

		nr_instr = a->instrlen / LOONGARCH_INSN_SIZE;
		nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE;

		if (!cpu_has(a->feature)) {
			DPRINTK("feat not present: %d, old: (%px, len: %d), repl: (%px, len: %d)",
				a->feature, instr, a->instrlen,
				replacement, a->replacementlen);

			continue;
		}

		DPRINTK("feat: %d, old: (%px, len: %d), repl: (%px, len: %d)",
			a->feature, instr, a->instrlen,
			replacement, a->replacementlen);

		DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr);
		DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement);

		copy_alt_insns(insnbuf, instr, replacement, nr_repl);
		nr_insnbuf = nr_repl;

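		/* Pad with NOPs when the replacement is shorter than the original */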
		if (nr_instr > nr_repl) {
			add_nops(insnbuf + nr_repl, nr_instr - nr_repl);
			nr_insnbuf += nr_instr - nr_repl;
		}
		DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr);

		text_poke_early(instr, insnbuf, nr_insnbuf);
	}
}

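/* Patch all alternatives in the core kernel image; called once at boot. */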
void __init alternative_instructions(void)
{
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	alternatives_patched = 1;
}