xref: /kernel/linux/linux-5.10/arch/m68k/68000/entry.S (revision 8c2ecf20)
1/*
2 *  entry.S -- non-mmu 68000 interrupt and exception entry points
3 *
4 *  Copyright (C) 1991, 1992  Linus Torvalds
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License.  See the file README.legal in the main directory of this archive
8 * for more details.
9 *
10 * Linux/m68k support by Hamish Macdonald
11 */
12
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.text

/*
 * Entry points exported to the rest of the kernel:
 *  - system_call / resume / ret_from_* are referenced by generic kernel code;
 *  - inthandler1..7 and bad_interrupt are installed in the exception vector
 *    table by platform setup code (not visible in this file);
 *  - sys_call_table is defined elsewhere and indexed below.
 */
.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl bad_interrupt
.globl inthandler1
.globl inthandler2
.globl inthandler3
.globl inthandler4
.globl inthandler5
.globl inthandler6
.globl inthandler7
/*
 * Invalid system call number: report -ENOSYS to user space by storing it
 * in the saved %d0 slot of the exception frame, then take the common
 * exception-return path.
 */
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_exception
42
/*
 * Syscall path taken when TIF_SYSCALL_TRACE is set (e.g. under strace).
 * Notifies the tracer on entry, dispatches the syscall (re-reading the
 * number from ORIG_D0, since the tracer may have changed it), then
 * notifies the tracer on exit.  Falls through into ret_from_signal.
 */
do_trace:
	movel	#-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
	subql	#4,%sp			/* dummy return address for switch stack */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	addql	#1,%d0			/* tracer result: -1 (now 0) means skip the syscall */
	jeq	ret_from_exception
	movel	%sp@(PT_OFF_ORIG_D0),%d1 /* reload (possibly modified) syscall number */
	movel	#-ENOSYS,%d0		/* default return if number is out of range */
	cmpl	#NR_syscalls,%d1
	jcc	1f			/* unsigned >= NR_syscalls -> invalid */
	lsl	#2,%d1			/* scale to table offset (4-byte entries) */
	lea	sys_call_table, %a0
	jbsr	%a0@(%d1)		/* call sys_call_table[d1] */

1:	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	/* falls through into ret_from_signal */
/*
 * Common tail after signal delivery or a traced syscall: discard the
 * switch-stack frame plus the dummy return address pushed by the caller,
 * then take the normal exception-return path.
 */
ret_from_signal:
	RESTORE_SWITCH_STACK
	addql	#4,%sp			/* drop the dummy return address */
	jra	ret_from_exception
/*
 * System call entry point (trap vector).
 * Saves the full register frame, records the frame pointer via set_esp0(),
 * then either branches to the ptrace path (do_trace), rejects an
 * out-of-range syscall number (badsys), or dispatches through
 * sys_call_table.  Falls through into ret_from_exception.
 */
ENTRY(system_call)
	SAVE_ALL_SYS

	/* save top of frame*/
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp

	movel	%sp@(PT_OFF_ORIG_D0),%d0	/* syscall number */

	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* thread_info sits at the stack base */
	movel	%d1,%a2
	/* test the TIF_SYSCALL_TRACE bit inside the byte-wide flags access */
	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	jne	do_trace
	cmpl	#NR_syscalls,%d0
	jcc	badsys			/* unsigned >= NR_syscalls -> -ENOSYS */
	lsl	#2,%d0			/* scale to table offset (4-byte entries) */
	lea	sys_call_table,%a0
	movel	%a0@(%d0), %a0		/* a0 = sys_call_table[d0] */
	jbsr	%a0@
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value*/
	/* falls through into ret_from_exception */
92
/*
 * Common exception/interrupt/syscall return path.
 * If we are returning to kernel mode, restore immediately; if returning
 * to user mode, loop handling reschedule and signal work until
 * thread_info->flags is clear.
 */
ret_from_exception:
	/* bit 5 of the saved SR's high byte is the supervisor (S) bit */
	btst	#5,%sp@(PT_OFF_SR)	/* were we in kernel mode? */
	jeq	Luser_return		/* S clear -> returning to user: check work */

Lkernel_return:
	RESTORE_ALL

Luser_return:
	/* only allow interrupts when we are really the last one on the*/
	/* kernel stack, otherwise stack overflow can occur during*/
	/* heavy interrupt load*/
	andw	#ALLOWINT,%sr

	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* thread_info sits at the stack base */
	movel	%d1,%a2
1:
	move	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
	jne	Lwork_to_do		/* any flag set -> handle it first */
	RESTORE_ALL

Lwork_to_do:
	movel	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule		/* resched takes priority over signals */

Lsignal_return:
	/* anything left must be signal/notify work */
	subql	#4,%sp			/* dummy return address*/
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	/* arg: struct pt_regs * */
	bsrw	do_notify_resume
	addql	#4,%sp			/* drop the pt_regs argument */
	RESTORE_SWITCH_STACK
	addql	#4,%sp			/* drop the dummy return address */
	jra	1b			/* re-check flags before returning */
128
/*
 * Autovector interrupt entry points.  inthandler1..inthandler7 handle
 * interrupt levels 1-7 and push a fixed vector number (64 + level) for
 * process_int(); the generic inthandler below extracts the vector from
 * the exception frame instead.
 */
inthandler1:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0		/* NOTE(review): %d0 is unused below (vector is the
					   immediate #65) -- looks dead, kept as-is */

	movel	%sp,%sp@-		/* arg 2: struct pt_regs * */
	movel	#65,%sp@- 		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:     	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception
142
/* Autovector interrupt level 2: vector 66 (see inthandler1 for details). */
inthandler2:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0		/* NOTE(review): %d0 unused below -- looks dead */

	movel	%sp,%sp@-		/* arg 2: struct pt_regs * */
	movel	#66,%sp@- 		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:     	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception
153
/* Autovector interrupt level 3: vector 67 (see inthandler1 for details). */
inthandler3:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0		/* NOTE(review): %d0 unused below -- looks dead */

	movel	%sp,%sp@-		/* arg 2: struct pt_regs * */
	movel	#67,%sp@- 		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:     	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception
164
/* Autovector interrupt level 4: vector 68 (see inthandler1 for details). */
inthandler4:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0		/* NOTE(review): %d0 unused below -- looks dead */

	movel	%sp,%sp@-		/* arg 2: struct pt_regs * */
	movel	#68,%sp@- 		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:     	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception
175
/* Autovector interrupt level 5: vector 69 (see inthandler1 for details). */
inthandler5:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0		/* NOTE(review): %d0 unused below -- looks dead */

	movel	%sp,%sp@-		/* arg 2: struct pt_regs * */
	movel	#69,%sp@- 		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:     	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception
186
/* Autovector interrupt level 6: vector 70 (see inthandler1 for details). */
inthandler6:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0		/* NOTE(review): %d0 unused below -- looks dead */

	movel	%sp,%sp@-		/* arg 2: struct pt_regs * */
	movel	#70,%sp@- 		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:     	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception
197
/* Autovector interrupt level 7 (NMI): vector 71 (see inthandler1). */
inthandler7:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0		/* NOTE(review): %d0 unused below -- looks dead */

	movel	%sp,%sp@-		/* arg 2: struct pt_regs * */
	movel	#71,%sp@- 		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:     	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception
208
/*
 * Generic interrupt entry: extracts the vector number from the saved
 * format/vector word of the exception frame (low 10 bits) and passes it
 * to process_int() along with the pt_regs pointer.
 */
inthandler:
	SAVE_ALL_INT
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0		/* mask off the frame-format bits, keep the vector */

	movel	%sp,%sp@-		/* arg 2: struct pt_regs * */
	movel	%d0,%sp@- 		/*  put vector # on stack*/
	jbsr	process_int		/*  process the IRQ*/
3:     	addql	#8,%sp			/*  pop parameters off stack*/
	bra	ret_from_exception
219
/*
 * Handler for uninitialized and spurious interrupts.
 * Nothing to service: just bump the global error counter and return
 * straight from the exception (no frame is built).
 */
ENTRY(bad_interrupt)
	addql	#1,irq_err_count
	rte
226
/*
 * Context switch: resume(prev, next).
 *
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 *
 * Saves prev's SR, switch-stack registers, kernel SP and user SP into
 * prev->thread, then loads the same state from next->thread.  Returns
 * on next's kernel stack.
 */
ENTRY(resume)
	movel	%a0,%d1				/* save prev thread in d1 */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)	/* save sr */
	SAVE_SWITCH_STACK			/* push callee-context onto prev's stack */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
	movel	%usp,%a3			/* save usp */
	movel	%a3,%a0@(TASK_THREAD+THREAD_USP)

	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
	movel	%a3,%usp
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK			/* pop next's callee-context */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr	/* restore thread status reg */
	rts					/* return into next's saved PC */
246
247