// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include <mm/mmu_decl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system,
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid or not. This achieves a
 * constant relationship between the address of a struct page and its PFN.
 *
 * During boot, or on memory hotplug when a new memory section is added,
 * physical memory allocation (including hash table bolting) is performed
 * only for the set of struct pages which are part of the memory section.
 * This saves memory by not allocating struct pages for PFNs which are
 * not valid.
 *		----------------------------------------------
 *		| PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 *		----------------------------------------------
 *
 *	   f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+         |        +--------------+
 *  |      |  page struct |         |   +--> |  page struct |
 *  |      +--------------+         |   |    +--------------+
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct | +-------+   |
 *  |      +--------------+             |
 *  |      |  page struct | +-----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 *		-----------------------------------------
 *		| RELATION BETWEEN STRUCT PAGES AND PFNS|
 *		-----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
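
/*
 * For illustration (mirroring the generic CONFIG_SPARSEMEM_VMEMMAP
 * definitions in include/asm-generic/memory_model.h, not something
 * defined in this file): the constant relationship above is what makes
 * the pfn <-> struct page conversions pure pointer arithmetic:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * No sparse section lookup is needed; invalid PFNs simply point into
 * unmapped parts of the vmemmap array.
 */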
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
				       unsigned long page_size,
				       unsigned long phys)
{
	int rc;

	if ((start + page_size) >= H_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, start + page_size, phys,
			       pgprot_val(PAGE_KERNEL),
			       mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}
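
/*
 * Illustrative sketch (an assumption about the caller, not code in this
 * file): a vmemmap_populate()-style caller would walk the range in
 * page_size steps, allocating backing memory and bolting each chunk:
 *
 *	for (; start < end; start += page_size) {
 *		void *p = vmemmap_alloc_block(page_size, node);
 *		if (!p)
 *			return -ENOMEM;
 *		rc = vmemmap_create_mapping(start, page_size, __pa(p));
 *	}
 */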

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
			      unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page is currently only called by __ioremap.
 * It adds an entry to the ioremap page table and adds an entry to the
 * HPT, possibly bolting it.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		pudp = pud_alloc(&init_mm, p4dp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
				      mmu_io_psize, mmu_kernel_ssize)) {
			pr_err("Failed to create bolted mapping for I/O memory at %016lx\n",
			       pa);
			return -ENOMEM;
		}
	}

	smp_wmb();
	return 0;
}
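
/*
 * Illustrative usage sketch (an assumption, not actual __ioremap code):
 * an ioremap-style caller maps an I/O region one page at a time:
 *
 *	for (i = 0; i < size; i += PAGE_SIZE) {
 *		if (hash__map_kernel_page(ea + i, pa + i, prot))
 *			return NULL;
 *	}
 */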

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp, unsigned long clr,
				    unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}
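
/*
 * For reference, an illustrative C-level sketch (exposition only, not
 * compiled; load_reserve() is a stand-in for the ldarx reservation) of
 * what the ldarx/stdcx. sequence above implements: spin while
 * H_PAGE_BUSY is set, then atomically clear the 'clr' bits and set the
 * 'set' bits, retrying if the reservation is lost:
 *
 *	do {
 *		old = load_reserve(pmdp);		// ldarx
 *	} while (old & cpu_to_be64(H_PAGE_BUSY));	// and. ; bne- 1b
 *	new = (old & ~cpu_to_be64(clr)) | cpu_to_be64(set);
 *	// stdcx. stores 'new' and loops back to the ldarx on failure
 */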

pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to a hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_lock. But we could very well be in a
	 * hash_page with a local ptep pointer value. Such a hash_page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_lock and
	 * hence waits for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}
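
/*
 * Illustrative sketch (an assumption about the helper's shape, not its
 * definition in this file): serialize_against_pte_lookup() amounts to
 * broadcasting an IPI that runs an empty function on every cpu that may
 * be doing a lockless page table walk for this mm:
 *
 *	static void do_nothing(void *unused) { }
 *
 *	void serialize_against_pte_lookup(struct mm_struct *mm)
 *	{
 *		smp_mb();
 *		smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
 *	}
 */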

/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes.
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				  pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));
	/*
	 * We store the pgtable in the second half of the PMD page.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set
	 * the hugepage PTE at the pmd level. The hash fault code looks
	 * at the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}
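
/*
 * Illustrative pairing (a hedged sketch of how generic THP code uses the
 * two helpers above, not code in this file): a pgtable is deposited when
 * a hugepage PMD is established and withdrawn when it is split or zapped:
 *
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	set_pmd_at(mm, addr, pmdp, huge_pmd);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *	pte_free(mm, pgtable);
 */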

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details so that a
	 * parallel hash fault does not look at stale values.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return old_pmd;
}

int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 *
	 * If we have 64K HPTE, we will be using that by default.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok, we only have 4K HPTE.
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX
static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned long idx;
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step); // aligns up

	if (start >= end)
		return false;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
		/* Not sure if we can do much with the return value */
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
							mmu_kernel_ssize);

	return true;
}

void hash__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}

void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif