/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 */

#include <linux/pgtable.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r6, r8, r10, ctr, lr.
 */
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
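	/*
	 * A rough C sketch of the lock acquisition above (CONFIG_SMP
	 * only; illustrative, not real code -- the asm spins on the
	 * physical address of the lock, with 0x0fff0000 as the
	 * "locked" value):
	 *
	 *	while (cmpxchg(&mmu_hash_lock, 0, 0x0fff0000) != 0)
	 *		while (READ_ONCE(mmu_hash_lock) != 0)
	 *			cpu_relax();
	 */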
	/* Get PTE (linux-style) and check access */
	lis	r0, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r4,r0
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r5, r5, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#else
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_SMP
	beq-	.Lhash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
	 *
	 * If PTE_64BIT is set, the low word is the flags word; use that
	 * word for locking since it contains all the interesting bits.
	 */
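	/*
	 * Roughly, in C (a sketch only; lwarx()/stwcx() below stand in
	 * for the load-reserved/store-conditional instructions):
	 *
	 *	do {
	 *		old = lwarx(ptep);
	 *		if (access & ~old)
	 *			return;		(access not permitted)
	 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
	 *		      | (write ? _PAGE_DIRTY : 0);
	 *	} while (!stwcx(ptep, new));
	 */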
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
.Lretry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	.Lhash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	.Lretry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
#ifndef CONFIG_VMAP_STACK
	mfctr	r0
	stw	r0,_CTR(r11)
#endif
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif

#ifdef CONFIG_VMAP_STACK
	b	fast_hash_page_return
#else
	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return
#endif

#ifdef CONFIG_SMP
.Lhash_page_out:
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
	blr
#endif /* CONFIG_SMP */
_ASM_NOKPROBE_SYMBOL(hash_page)

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
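	/*
	 * In C, the three instructions above amount to (sketch only):
	 *
	 *	vsid = context * (897 * 16) + ((va >> 28) & 0xf) * 0x111;
	 *
	 * create_hpte uses only the low 24 bits of the result.
	 */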

#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r9
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	isync
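	/* i.e. the sequence above: mtmsr(mfmsr() & ~(MSR_EE | MSR_DR)) */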

#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r6
	beq+	12f
11:	lwz	r0,0(r6)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
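	/*
	 * A sketch of the loop below, in the same illustrative C as
	 * the hash_page sketch above:
	 *
	 *	do {
	 *		old = lwarx(ptep);
	 *		if (old & _PAGE_HASHPTE)
	 *			goto out;	(HPTE already exists)
	 *	} while (!stwcx(ptep, old | _PAGE_HASHPTE));
	 */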
	mr	r8,r5
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
#else
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
	eieio
	li	r0,0
	stw	r0,0(r6)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r9
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr
_ASM_NOKPROBE_SYMBOL(add_hash_page)

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE). r10 contains the
 * upper half of the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are for the early hash table.
 */
Hash_base = early_hash
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
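
/*
 * Net effect of the patched hash instructions in create_hpte below
 * (a sketch only; Hash_base and Hash_msk are live-patched once the
 * real hash table has been allocated):
 *
 *	pteg = Hash_base + (((vsid ^ (va >> 12)) * PTEG_SIZE) & Hash_msk);
 *
 * The secondary PTEG searched later is pteg ^ Hash_msk, i.e. the
 * architected complement hash.
 */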

__REF
_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	patch_site	0f, patch__hash_page_A0
	patch_site	1f, patch__hash_page_A1
	patch_site	2f, patch__hash_page_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha
	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	.Lfound_slot

	patch_site	0f, patch__hash_page_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	.Lfound_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	.Lfound_empty

	/* update counter of times that the primary PTEG is full */
	lis	r4, (primary_pteg_full - PAGE_OFFSET)@ha
	lwz	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1
	stw	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)

	patch_site	0f, patch__hash_page_C
	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	.Lfound_empty
	xori	r5,r5,PTE_H		/* clear H bit again */
	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text in
	 * order to avoid a deadlock when not using BAT mappings if
	 * trying to hash in the kernel hash code itself after it has
	 * already taken the hash table lock. This works in conjunction
	 * with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lock up here, but that shouldn't happen.
	 */
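	/*
	 * The eviction slot advances round-robin through the PTEG;
	 * in C, roughly:
	 *
	 *	next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
	 *	slot = pteg + next_slot;
	 *
	 * and we retry while the chosen slot maps kernel text.
	 */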

1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
	addi	r6,r6,HPTE_SIZE			/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
	LDPTE	r0,HPTE_SIZE/2(r4)		/* get PTE second word */
	clrrwi	r0,r0,12
	lis	r6,etext@h
	ori	r6,r6,etext@l			/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6			/* compare and try again */
	blt	1b

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
.Lfound_empty:
	STPTE	r5,0(r4)
.Lfound_slot:
	STPTE	r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
.Lfound_empty:
.Lfound_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,HPTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync		/* make sure pte updates get to memory */
	blr
	.previous
_ASM_NOKPROBE_SYMBOL(create_hpte)

	.section .bss
	.align	2
next_slot:
	.space	4
primary_pteg_full:
	.space	4
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
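/*
 * A sketch of the overall control flow below (illustrative C only):
 *
 *	for (; count; count--, va += PAGE_SIZE, ptep++) {
 *		if (!(*ptep & _PAGE_HASHPTE))
 *			continue;
 *		clear _PAGE_HASHPTE atomically;
 *		find the matching HPTE (primary, then secondary PTEG);
 *		zap it, then sync; tlbie(va); sync;
 *	}
 */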
__REF
_GLOBAL(flush_hash_pages)
	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29
#else
	rlwimi	r5,r4,23,20,28
#endif
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,PTE_SIZE
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	lis	r9, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
	tophys	(r8, r2)
	lwz	r8, TASK_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
#endif
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	patch_site	0f, patch__flush_hash_A0
	patch_site	1f, patch__flush_hash_A1
	patch_site	2f, patch__flush_hash_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r8, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	patch_site	0f, patch__flush_hash_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
0:	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,HPTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,PTE_SIZE
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	isync
	blr
	.previous
EXPORT_SYMBOL(flush_hash_pages)
_ASM_NOKPROBE_SYMBOL(flush_hash_pages)

/*
 * Flush an entry from the TLB
 */
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,11
	mfmsr	r10
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
	blr
_ASM_NOKPROBE_SYMBOL(_tlbie)

/*
 * Flush the entire TLB.  603/603e only.
 * The 603's TLBs are two-way set-associative with 32 sets, indexed by
 * effective-address bits, and tlbie invalidates the whole set it hits,
 * so 32 tlbies at consecutive page addresses flush every entry.
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,10
	mfmsr	r10
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
#endif /* CONFIG_SMP */
	li	r5, 32
	lis	r4, KERNELBASE@h
	mtctr	r5
	sync
0:	tlbie	r4
	addi	r4, r4, 0x1000
	bdnz	0b
	sync
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	isync
#endif /* CONFIG_SMP */
	blr
_ASM_NOKPROBE_SYMBOL(_tlbia)