/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/extable.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull          lsr
#define lspush          lsl
#define get_byte_0      lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0      lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull          lsl
#define lspush          lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3      lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3      lsl #0
#endif
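/*
 * Example (illustrative only): isolate byte 1 of the word in r3 in an
 * endian-independent way; the register choices here are arbitrary.
 *
 *	mov	r2, r3, get_byte_1	@ shift byte 1 down into bits 7..0
 *	and	r2, r2, #255		@ mask off the remaining bytes
 */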

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

#define IMM12_MASK 0xfff

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

#if __LINUX_ARM_ARCH__ < 7
	.macro	dsb, args
	mcr	p15, 0, r0, c7, c10, 4
	.endm

	.macro	isb, args
	mcr	p15, 0, r0, c7, c5, 4
	.endm
#endif

	.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Ideally the pushes and pops would also be conditional on \cond,
	 * but the bl certainly clobbers the flags, so the restore cannot
	 * be made conditional; save and restore unconditionally instead.
	 */
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
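/*
 * Example (illustrative only): a critical section built from the macros
 * above; r4 is an arbitrary choice of scratch register.
 *
 *	save_and_disable_irqs r4	@ r4 := old CPSR (PRIMASK on v7-M)
 *	...				@ code that must not be interrupted
 *	restore_irqs r4			@ restore the previous IRQ state
 */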

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
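/*
 * Example (illustrative only): set up a return address in lr before
 * branching, so the callee returns to the local label 1 with bit 0 set
 * correctly on Thumb-2 kernels; "some_routine" is hypothetical.
 *
 *	badr	lr, 1f
 *	b	some_routine
 * 1:
 */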

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm
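/*
 * Example (illustrative only): fetch the current thread's flag word.
 * TI_FLAGS comes from asm-offsets.h; r9 and r0 are arbitrary choices.
 *
 *	get_thread_info r9		@ r9 := current thread_info
 *	ldr	r0, [r9, #TI_FLAGS]	@ r0 := thread_info->flags
 */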

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	ex_entry	9999b,l;

#define USER(x...)	USERL(9001f, x)
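/*
 * Example (illustrative only): a user-space load with a fault fixup.
 * The surrounding function must provide the local label 9001 as its
 * fault handler; the registers here are arbitrary.
 *
 * USER(	ldrt	r2, [r0])	@ may fault; faults branch to 9001f
 *	...
 * 9001:	mov	r0, #-EFAULT	@ fixup: report the fault
 */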

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
	W(b)	. + (label - 9998b)					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
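/*
 * Example (illustrative only): emit a dmb on SMP kernels, and let the
 * boot-time fixup patch in a nop when running on a uniprocessor system
 * (this is exactly the pattern smp_dmb below uses):
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */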

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
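/*
 * Example (illustrative only): order a store against a later load on
 * SMP; \mode selects the encoding for the section being assembled, so
 * use "arm" in .arm sections and omit special handling otherwise.
 *
 *	str	r1, [r2]		@ publish data
 *	smp_dmb	arm			@ barrier in an ARM-encoded section
 *	ldr	r3, [r4]		@ then observe other shared data
 */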

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during
	 * boot. For v7-M this is done in __v7m_setup, so setmode can be
	 * empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg, cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg, \reg, #MODE_MASK
	orr	\reg, \reg, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg, \reg, #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
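/*
 * Example (illustrative only): early boot code typically invokes this
 * once, before touching any mode-specific state; r9 is an arbitrary
 * scratch register.
 *
 *	safe_svcmode_maskall r9		@ now in SVC mode, IRQs/FIQs masked
 */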

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	ex_entry	9999b, \abort
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	ex_entry	9999b, \abort
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
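/*
 * Example (illustrative only): move one word from one user buffer to
 * another; both macros post-increment the pointer and fault to the
 * local label 9001, which the caller must provide.
 *
 *	ldrusr	r3, r0, 4		@ r3 := *r0++, user access
 *	strusr	r3, r1, 4		@ *r1++ := r3, user access
 *	...
 * 9001:				@ fault fixup lives here
 */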

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name, #object
\name:
	.asciz "\string"
	.size \name, . - \name
	.endm
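/*
 * Example (illustrative only): declare a NUL-terminated string object
 * with proper symbol type and size; the name and contents are arbitrary.
 *
 *	string	cpu_elf_name, "v7"
 */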

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm
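/*
 * Example (illustrative only): trap with a diagnostic; the message and
 * line number here are hypothetical. With CONFIG_DEBUG_BUGVERBOSE this
 * also emits the string and a __bug_table entry for the BUG handler.
 *
 *	bug	"unhandled case", 42
 */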

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
 THUMB(	ittt		\c			)
	.endif
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8			// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

	/*
	 * mov_l - move a constant value or [relocated] address into a register
	 */
	.macro		mov_l, dst:req, imm:req
	.if		__LINUX_ARM_ARCH__ < 7
	ldr		\dst, =\imm
	.else
	movw		\dst, #:lower16:\imm
	movt		\dst, #:upper16:\imm
	.endif
	.endm
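	/*
	 * Example (illustrative only): load a 32-bit constant that no
	 * single mov could encode; the value here is arbitrary.
	 *
	 *	mov_l	r0, 0x12345678
	 */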

	/*
	 * adr_l - adr pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

	/*
	 * ldr_l - ldr <literal> pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

	/*
	 * str_l - str <literal> pseudo-op with unlimited range
	 *
	 * @src: source register
	 * @sym: name of the symbol
	 * @tmp: mandatory scratch register
	 * @cond: conditional opcode suffix
	 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm
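	/*
	 * Example (illustrative only): position-independent accesses to
	 * a far-away symbol; "some_var" is a hypothetical name.
	 *
	 *	adr_l	r0, some_var		@ r0 := address of some_var
	 *	ldr_l	r1, some_var		@ r1 := some_var
	 *	str_l	r2, some_var, r3	@ some_var := r2, r3 clobbered
	 */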

#endif /* __ASM_ASSEMBLER_H__ */