/*
 * Copyright IBM Corporation, 2015
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/mm.h>
#include <asm/machdep.h>
#include <asm/mmu.h>

/*
 * Return true if the entry has a slot value that the
 * software considers invalid.
 */
static inline bool hpte_soft_invalid(unsigned long hidx)
{
	return ((hidx & 0xfUL) == 0xfUL);
}

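/*
 * Note on the encoding (as used throughout this file): each 4K subpage
 * of a 64K Linux page records its hash slot in a 4-bit hidx value, a
 * 3-bit group index plus the secondary-hash bit, so software reserves
 * 0xf to mean "no valid HPTE". A hardware insert can legitimately
 * return 0xf (slot 7 of a secondary group: 0x8 | 0x7), which is why
 * the insert path below has to detect and reject that slot.
 */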
/*
 * The index ranges from 0 to 15.
 */
bool __rpte_sub_valid(real_pte_t rpte, unsigned long index)
{
	return !(hpte_soft_invalid(__rpte_to_hidx(rpte, index)));
}

int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
		   pte_t *ptep, unsigned long trap, unsigned long flags,
		   int ssize, int subpg_prot)
{
	real_pte_t rpte;
	unsigned long hpte_group;
	unsigned int subpg_index;
	unsigned long rflags, pa;
	unsigned long old_pte, new_pte, subpg_pte;
	unsigned long vpn, hash, slot, gslot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;

	/*
	 * Atomically mark the Linux large-page PTE busy and dirty.
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Try to lock the PTE, and add ACCESSED and DIRTY if it
		 * was a write access. Since this is a 4K insert into a
		 * 64K page-size PTE, also add H_PAGE_COMBO.
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED | H_PAGE_COMBO;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
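	/*
	 * pte_xchg() is a cmpxchg: it fails, and we retry, if another
	 * thread changed the PTE between our read and the update. From
	 * here on H_PAGE_BUSY acts as a per-PTE software lock; it is
	 * dropped again when the final PTE value is written back below.
	 */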

	/*
	 * Handle the subpage protection bits
	 */
	subpg_pte = new_pte & ~subpg_prot;
	rflags = htab_convert_pte_flags(subpg_pte);

	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

		/*
		 * No CPU has hugepages but lacks no-execute, so we
		 * don't need to worry about that case.
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
	}

	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
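	/*
	 * Worked example: with 64K base pages and shift == 12, an ea
	 * whose offset within the page is 0x5000 yields subpg_index
	 * == (0x5000 >> 12) == 5, the sixth of the sixteen 4K subpages.
	 */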
	vpn  = hpt_vpn(ea, vsid, ssize);
	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
	/*
	 * None of the sub-4K pages is hashed yet.
	 */
	if (!(old_pte & H_PAGE_HASHPTE))
		goto htab_insert_hpte;
	/*
	 * Check if the pte was already inserted into the hash table
	 * as a 64k HW page, and invalidate the 64k HPTE if so.
	 */
	if (!(old_pte & H_PAGE_COMBO)) {
		flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
		/*
		 * Clear the old slot details from both the old and new
		 * PTE. On hash insert failure we use the old PTE value,
		 * and we don't want stale slot information there if the
		 * insert fails.
		 */
		old_pte &= ~H_PAGE_HASHPTE;
		new_pte &= ~H_PAGE_HASHPTE;
		goto htab_insert_hpte;
	}
	/*
	 * Check whether the subpage already has a valid HPTE and,
	 * if so, update it.
	 */
	if (__rpte_sub_valid(rpte, subpg_index)) {
		int ret;

		gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte,
					   subpg_index);
		ret = mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn,
						 MMU_PAGE_4K, MMU_PAGE_4K,
						 ssize, flags);

		/*
		 * If the update failed (typically because the HPTE wasn't
		 * really there), try an insertion instead.
		 */
		if (ret == -1)
			goto htab_insert_hpte;

		*ptep = __pte(new_pte & ~H_PAGE_BUSY);
		return 0;
	}

htab_insert_hpte:

	/*
	 * Initialize all hidx entries to the invalid value the first
	 * time the PTE is about to allocate a 4K HPTE.
	 */
	if (!(old_pte & H_PAGE_COMBO))
		rpte.hidx = INVALID_RPTE_HIDX;
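	/*
	 * After this, __rpte_to_hidx() reports the soft-invalid value
	 * (0xf) for all sixteen subpages until a real slot is recorded
	 * via pte_set_hidx() below.
	 */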

	/*
	 * Handle the H_PAGE_4K_PFN case.
	 */
	if (old_pte & H_PAGE_4K_PFN) {
		/*
		 * All the sub-4K pages have the same
		 * physical address.
		 */
		pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT;
	} else {
		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		pa += (subpg_index << shift);
	}
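	/*
	 * In the non-H_PAGE_4K_PFN case above, e.g. subpg_index == 5
	 * gives pa == (pfn << PAGE_SHIFT) + (5 << 12), the real address
	 * of that 4K subpage.
	 */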
	hash = hpt_hash(vpn, shift, ssize);
repeat:
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
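	/*
	 * The hash picks an 8-entry HPTE group; multiplying by
	 * HPTES_PER_GROUP converts it into the index of the group's
	 * first slot. The secondary group below is derived from ~hash.
	 */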

	/* Insert into the hash table, primary slot */
	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					MMU_PAGE_4K, MMU_PAGE_4K, ssize);
	/*
	 * Primary is full, try the secondary
	 */
	if (unlikely(slot == -1)) {
		bool soft_invalid;

		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
						rflags, HPTE_V_SECONDARY,
						MMU_PAGE_4K, MMU_PAGE_4K,
						ssize);

		soft_invalid = hpte_soft_invalid(slot);
		if (unlikely(soft_invalid)) {
			/*
			 * The slot is valid from the hardware's point of
			 * view, but we cannot use it: this value is
			 * reserved by software (see hpte_soft_invalid())
			 * to track invalid slots. So invalidate the HPTE.
			 */
			gslot = slot & _PTEIDX_GROUP_IX;
			mmu_hash_ops.hpte_invalidate(hpte_group + gslot, vpn,
						     MMU_PAGE_4K, MMU_PAGE_4K,
						     ssize, 0);
		}

		if (unlikely(slot == -1 || soft_invalid)) {
			/*
			 * For a soft-invalid slot, make sure we release a
			 * slot from the primary group, in the hope that we
			 * acquire it on the next try; that way we do not
			 * get handed the same soft-invalid slot again.
			 */
			if (soft_invalid || (mftb() & 0x1))
				hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;

			mmu_hash_ops.hpte_remove(hpte_group);
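			/*
			 * hpte_remove() is expected to evict a
			 * (pseudo-randomly chosen) non-bolted entry from
			 * the full group to make room for the retry.
			 */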
			/*
			 * FIXME!! Should we retry the group from which we
			 * just removed an entry?
			 */
			goto repeat;
		}
	}
	/*
	 * Hypervisor failure. Restore old pte and return -1
	 * similar to __hash_page_*
	 */
	if (unlikely(slot == -2)) {
		*ptep = __pte(old_pte);
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
		return -1;
	}

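	/*
	 * pte_set_hidx() records the 4-bit slot (group index plus the
	 * secondary-hash bit) for this subpage; the hidx bits are kept
	 * PTRS_PER_PTE entries past the PTE itself, hence the offset
	 * argument.
	 */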
	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
	new_pte |= H_PAGE_HASHPTE;

	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}

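/*
 * Same return convention as __hash_page_4K() above: 0 means success or
 * "retry the access", 1 requests a normal page fault, and -1 signals a
 * hypervisor-level insert failure.
 */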
int __hash_page_64K(unsigned long ea, unsigned long access,
		    unsigned long vsid, pte_t *ptep, unsigned long trap,
		    unsigned long flags, int ssize)
{
	real_pte_t rpte;
	unsigned long hpte_group;
	unsigned long rflags, pa;
	unsigned long old_pte, new_pte;
	unsigned long vpn, hash, slot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_64K].shift;

	/*
	 * Atomically mark the Linux large-page PTE busy and dirty.
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Check if the PTE has the cache-inhibited bit set.
		 * If so, bail out and refault as a 4K page.
		 */
		if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) &&
		    unlikely(pte_ci(pte)))
			return 0;
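		/*
		 * Without MMU_FTR_CI_LARGE_PAGE the hardware cannot map
		 * cache-inhibited memory with 64K pages, so the caller
		 * is expected to redo the access with 4K pages.
		 */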
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access.
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	rflags = htab_convert_pte_flags(new_pte);
	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);

	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	vpn  = hpt_vpn(ea, vsid, ssize);
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		unsigned long gslot;

		/*
		 * There MIGHT be an HPTE for this pte
		 */
		gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0);
		if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, MMU_PAGE_64K,
					       MMU_PAGE_64K, ssize,
					       flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}
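	/*
	 * If updatepp failed, the HPTE was gone (e.g. evicted);
	 * _PAGE_HPTEFLAGS was cleared above so the insert path below
	 * sets up a fresh entry.
	 */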

	if (likely(!(old_pte & H_PAGE_HASHPTE))) {

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		hash = hpt_hash(vpn, shift, ssize);

repeat:
		hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;

		/* Insert into the hash table, primary slot */
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
						MMU_PAGE_64K, MMU_PAGE_64K,
						ssize);
		/*
		 * Primary is full, try the secondary
		 */
		if (unlikely(slot == -1)) {
			hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
							rflags,
							HPTE_V_SECONDARY,
							MMU_PAGE_64K,
							MMU_PAGE_64K, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = (hash & htab_hash_mask) *
							HPTES_PER_GROUP;
				mmu_hash_ops.hpte_remove(hpte_group);
				/*
				 * FIXME!! Should we retry the group from
				 * which we just removed an entry?
				 */
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   MMU_PAGE_64K, MMU_PAGE_64K, old_pte);
			return -1;
		}

		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
	}
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}