/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ORDER	3	/* PMD entry size is 2^PMD_ORDER bytes */
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ORDER	2
#endif

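/*
 * Helpers for the physical randomization offset chosen by the KASLR
 * entry code.  With CONFIG_RANDOMIZE_BASE disabled they reduce to
 * "the offset is zero" and cost nothing.
 */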
	.macro	get_kaslr_offset, reg
#ifdef CONFIG_RANDOMIZE_BASE
	ldr_l			\reg, __kaslr_offset
#else
	mov			\reg, #0
#endif
	.endm

	.macro	add_kaslr_offset, reg, tmp
#ifdef CONFIG_RANDOMIZE_BASE
	get_kaslr_offset	\tmp
	add			\reg, \reg, \tmp
#endif
	.endm

	.macro	sub_kaslr_offset, reg, tmp
#ifdef CONFIG_RANDOMIZE_BASE
	get_kaslr_offset	\tmp
	sub			\reg, \reg, \tmp
#endif
	.endm

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, . - PG_DIR_SIZE

ENTRY(stext)
	mov	r3, #0			@ normal entry point - clear r3
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_RANDOMIZE_BASE
	str_l	r3, __kaslr_offset, r9	@ offset in r3 if entered via kaslr ep

	.pushsection .data		@ data in bss will be cleared
	.align	2
ENTRY(__kaslr_offset)
	.long	0			@ will be wiped before entering C code
	.popsection
#endif

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB( it	lo )				@ force fixup-able long branch encoding
	blo	__error_lpae			@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	adr_l	r8, _text			@ __pa(_text)
	sub	r8, r8, #TEXT_OFFSET		@ PHYS_OFFSET
	sub_kaslr_offset r8, r12
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif
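	/*
	 * r8 now holds PHYS_OFFSET, the physical start of RAM.  In the
	 * non-XIP case it is derived from __pa(_text): when KASLR has
	 * displaced the image, _text sits kaslr_offset bytes above
	 * PHYS_OFFSET + TEXT_OFFSET, so the offset is subtracted out.
	 */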

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.
	 *
	 * The processor init function will be called with:
	 *  r1 - machine type
	 *  r2 - boot data (atags/dt) pointer
	 *  r4 - translation table base (low word)
	 *  r5 - translation table base (high word, if LPAE)
	 *  r8 - translation table base 1 (pfn if LPAE)
	 *  r9 - cpuid
	 *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
	 *
	 * On return, the CPU will be ready for the MMU to be turned on,
	 * r0 will hold the CPU control register value, r1, r2, r4, and
	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	adr_l	lr, __primary_switch		@ address to jump to after
	mov	r13, lr				@ mmu has been enabled
	badr	lr, 1f				@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5, #0				@ high TTBR0
	mov	r8, r4, lsr #12			@ TTBR1 is swapper_pg_dir pfn
#else
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
#endif
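	/*
	 * PROCINFO_INITFUNC is stored as an offset relative to the
	 * procinfo record rather than as an absolute address, so that
	 * it works before relocation; hence the add of r10 below.
	 */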
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	get_kaslr_offset r12			@ get before turning MMU on
	b	__enable_mmu
ENDPROC(stext)
	.ltorg

/*
 * Set up the initial page tables.  We only set up the bare minimum
 * required to get the kernel running, which generally means mapping
 * in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	adr_l	r4, swapper_pg_dir		@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b
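	/*
	 * The loop above clears four words per iteration; PG_DIR_SIZE
	 * is a multiple of 16 bytes, so the teq exit test is exact.
	 */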

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD table descriptor type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr_l	r5, __turn_mmu_on		@ _pa(__turn_mmu_on)
	adr_l	r6, __turn_mmu_on_end		@ _pa(__turn_mmu_on_end)
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b
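	/*
	 * r5/r6 above are the section indices covering __turn_mmu_on;
	 * each entry written maps its section at virtual == physical
	 * (entry offset = section index << PMD_ORDER), so the code
	 * enabling the MMU stays executable across the switch.
	 */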

	/*
	 * Map our RAM from the start to the end of the kernel .bss section.
	 */
	get_kaslr_offset r3
	add	r0, r3, #PAGE_OFFSET
	add	r0, r4, r0, lsr #(SECTION_SHIFT - PMD_ORDER)
	adr_l	r6, _end - 1
	sub	r6, r6, r8
	add	r6, r6, #PAGE_OFFSET
	add	r3, r3, r8
	orr	r3, r3, r7
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	str	r3, [r0], #1 << PMD_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b
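	/*
	 * In the loop above, r0 starts at the PMD entry covering the
	 * (randomized) PAGE_OFFSET and r6 bounds it at the entry for
	 * the last byte of _end, while r3 carries the matching
	 * physical section address with the mm_mmuflags attributes
	 * OR'ed in.
	 */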

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	cmp	r2, #0
	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
	addne	r3, r3, r4
	orrne	r6, r7, r0, lsl #SECTION_SHIFT
	strne	r6, [r3], #1 << PMD_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]
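	/*
	 * The DTB is thus visible at the fixed virtual address
	 * FDT_FIXED_BASE, which is where the early FDT code looks
	 * for it.
	 */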

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0
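	@ addruart returns the UART's physical address in r7 and its
	@ virtual address in r3 (r0 is a scratch register); the virtual
	@ address picks the PMD entry, the physical address forms the
	@ section descriptor.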

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg

#if defined(CONFIG_SMP)
	.text
	.arm
ENTRY(secondary_startup_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)				@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied by __cpu_up.
	 */
	adr_l	r3, secondary_data
	mov_l	r12, __secondary_switch
	ldrd	r4, r5, [r3, #0]		@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
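	@ ldrd above loaded the 64-bit pgdir value as two consecutive
	@ words.  In BE8 the more significant word sits at the lower
	@ address, so r4/r5 arrive swapped relative to little-endian
	@ and are exchanged with the classic three-XOR swap.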
	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10			@ initialise processor
						@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

ENTRY(__secondary_switched)
	ldr_l	r7, secondary_data + 12		@ get secondary_data.stack
	mov	sp, r7
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

#endif /* defined(CONFIG_SMP) */

/*
 * Set up common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0).
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r12 = KASLR offset
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
#else
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r12 = KASLR offset
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0				@ nop
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	instr_sync
	mov	r3, r3				@ consume the CP15 read-back
	mov	r3, r13				@ then fetch the virtual target
	ret	r3
ENDPROC(__turn_mmu_on)

__primary_switch:
#ifdef CONFIG_RELOCATABLE
	adr_l	r7, _text			@ r7 := __pa(_text)
	sub	r7, r7, #TEXT_OFFSET		@ r7 := PHYS_OFFSET

	adr_l	r5, __rel_begin
	adr_l	r6, __rel_end
	sub	r5, r5, r7
	sub	r6, r6, r7

	add	r5, r5, #PAGE_OFFSET
	add	r6, r6, #PAGE_OFFSET
	add	r5, r5, r12
	add	r6, r6, r12

	adr_l	r3, __stubs_start		@ __pa(__stubs_start)
	sub	r3, r3, r7			@ offset of __stubs_start
	add	r3, r3, #PAGE_OFFSET		@ __va(__stubs_start)
	sub	r3, r3, #0xffff1000		@ subtract VA of stubs section

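	/*
	 * Each .rel.dyn entry is a (place, type) pair.  Only
	 * R_ARM_RELATIVE entries are expected: the word at each place
	 * gets the KASLR offset added.  Places above 0xff000000 belong
	 * to the vectors/stubs area and are first rebased (via r3) to
	 * the copy of the stubs inside the kernel image.
	 */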
0:	cmp	r5, r6
	bge	1f
	ldm	r5!, {r7, r8}			@ load next relocation entry
	cmp	r8, #23				@ R_ARM_RELATIVE
	bne	0b
	cmp	r7, #0xff000000			@ vector page?
	addgt	r7, r7, r3			@ fix up VA offset
	ldr	r8, [r7, r12]
	add	r8, r8, r12
	str	r8, [r7, r12]
	b	0b
1:
#endif
	ldr	pc, =__mmap_switched
ENDPROC(__primary_switch)

#ifdef CONFIG_SMP
__secondary_switch:
	ldr	pc, =__secondary_switched
ENDPROC(__secondary_switch)
#endif
	.ltorg
__turn_mmu_on_end:
	.popsection

#ifdef CONFIG_SMP_ON_UP
	__HEAD
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP
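	@ MPIDR bit 31 reads as one when the multiprocessing extensions
	@ are implemented; bit 30 (the U bit) flags a uniprocessor-only
	@ design, so 0x80000000 identifies a genuinely SMP-capable core.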

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ so it's an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr
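	@ Bits [1:0] of the SCU configuration register hold the number
	@ of CPUs minus one, so zero here means a single core and the
	@ UP fixups are applied.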

__fixup_smp_on_up:
	adr_l	r4, __smpalt_begin
	adr_l	r5, __smpalt_end
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.pushsection .data
	.align	2
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif

	.text
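/*
 * Each fixup entry emitted by ALT_SMP()/ALT_UP() is two words: the
 * offset of the SMP instruction relative to the entry itself, and
 * the UP replacement instruction that gets written over it.
 */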
__do_fixup_smp_on_up:
	cmp	r4, r5
	reths	lr
	ldmia	r4, {r0, r6}
 ARM(	str	r6, [r0, r4]	)
 THUMB(	add	r0, r0, r4	)
	add	r4, r4, #8
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r0.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0
	add	r5, r0, r1
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#include "head-common.S"