// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which could not be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	WARN_ON(order <= MAX_ORDER);
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

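/*
 * Check @size against the sizes in the support matrix above. PUD_SIZE
 * is only a valid hugepage size when the granule provides section-sized
 * PUD mappings (pud_sect_supported()).
 */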
static bool __hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return pud_sect_supported();
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (!__hugetlb_valid_size(pagesize)) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
		return false;
	}
	return true;
}
#endif

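/*
 * A huge (block) entry is a non-empty entry with the table bit clear.
 * Note this is also true for non-present entries at this level, such
 * as swap/migration entries.
 */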
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}

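/*
 * Walk the page tables to find out whether @ptep points at a PMD or a
 * PTE level entry, and report the matching contiguous-range geometry:
 * the number of entries in the set and, via @pgsize, the size each one
 * maps. Only meaningful for entries already known to be contiguous.
 */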
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}

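/*
 * Translate a hugepage size into the number of page table entries that
 * map it and, via @pgsize, the size each entry covers. For example,
 * with a 4K granule a CONT_PTE_SIZE (64K) hugepage is mapped by 16
 * contiguous PTEs of PAGE_SIZE each. A return value of 0 means @size
 * is not a valid hugepage size.
 */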
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}

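/*
 * Return the first pte of a contiguous range with the dirty and young
 * bits folded in from every entry in the set: with HW_AFDBM, hardware
 * may have set those bits on any member pte, so the software view must
 * be the union of all of them. Non-contiguous ptes are returned as-is.
 */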
pte_t huge_ptep_get(pte_t *ptep)
{
	int ncontig, i;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_cont(orig_pte))
		return orig_pte;

	ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
	for (i = 0; i < ncontig; i++, ptep++) {
		pte_t pte = ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

static pte_t get_clear_contig_flush(struct mm_struct *mm,
				    unsigned long addr,
				    pte_t *ptep,
				    unsigned long pgsize,
				    unsigned long ncontig)
{
	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

	flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		ptep_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}

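/*
 * Install a hugepage mapping of size @sz at @addr. Non-present and
 * non-contiguous entries are written directly; contiguous ranges first
 * go through the break step (clear_flush()) that Break-Before-Make
 * requires, then every pte in the set is written with a stepped pfn.
 */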
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, unsigned long sz)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	ncontig = num_contig_ptes(sz, &pgsize);

	if (!pte_present(pte)) {
		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
			set_pte_at(mm, addr, ptep, pte);
		return;
	}

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

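/*
 * Allocate the page table levels needed to map a hugepage of size @sz
 * at @addr and return a pointer to the (first) entry:
 *
 *   PUD_SIZE      -> the pud entry itself
 *   CONT_PMD_SIZE -> the first of CONT_PMDS pmd entries
 *   PMD_SIZE      -> a pmd entry, possibly shared via huge_pmd_share()
 *   CONT_PTE_SIZE -> the first of CONT_PTES pte entries
 *
 * Returns NULL if an intermediate table could not be allocated.
 */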
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		ptep = pte_alloc_huge(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}

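/*
 * Look up, without allocating, the page table entry mapping a hugepage
 * of size @sz at @addr. Non-present entries (e.g. swap or migration
 * entries) are still returned at the level where they live so callers
 * can inspect them; NULL means there is no entry at a level consistent
 * with @sz.
 */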
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}

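/*
 * Return a mask that can be used to advance an address to the last
 * hugepage covered by the page table page mapping it. The generic
 * hugetlb code uses this to skip over non-present entries when
 * linearly scanning an address range.
 */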
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return PGDIR_SIZE - PUD_SIZE;
#endif
	case CONT_PMD_SIZE:
		return PUD_SIZE - CONT_PMD_SIZE;
	case PMD_SIZE:
		return PUD_SIZE - PMD_SIZE;
	case CONT_PTE_SIZE:
		return PMD_SIZE - CONT_PTE_SIZE;
	default:
		break;
	}

	return 0UL;
}

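/*
 * Apply the hugepage encoding for the given shift: a plain block
 * mapping for PMD/PUD sized pages, with the contiguous bit added on
 * top for the CONT_PTE/CONT_PMD sizes. The pte_pmd()/pmd_pte()
 * round-trip exists because the contiguous-bit helpers are typed per
 * page table level.
 */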
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	size_t pagesize = 1UL << shift;

	entry = pte_mkhuge(entry);
	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}

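/*
 * Clear a hugepage entry and return its old value. For contiguous
 * ranges this is the break step of Break-Before-Make, with dirty and
 * young aggregated across the whole set; no TLB flush is done here,
 * that is left to the caller.
 */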
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission only needs to be
 * checked against the first pte in the set. The dirty and young state,
 * however, must be compared against every contiguous pte, since the
 * hardware may have updated any one of them.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}

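/*
 * Write-protect a hugepage entry. Contiguous ranges are broken and
 * flushed first (Break-Before-Make), then rewritten with write
 * permission removed but the accumulated dirty/young state preserved.
 */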
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

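/*
 * Clear a hugepage entry, flush the corresponding TLB range and return
 * the old entry (with dirty/young aggregated for contiguous ranges).
 */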
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep)))
		return ptep_clear_flush(vma, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}

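/*
 * Register an hstate for every hugepage size the running kernel
 * supports (see the support matrix at the top of this file); the PUD
 * size is only registered where PUD section mappings are available.
 */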
static int __init hugetlbpage_init(void)
{
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}

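/*
 * Start/commit pair used by the generic change_protection() path.
 * huge_ptep_modify_prot_start() clears the old entry; on CPUs affected
 * by erratum #2645198 a full Break-Before-Make cycle (clear plus TLB
 * flush) is used whenever an executable user mapping might become
 * non-executable.
 */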
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space
		 * mappings when the permission changes from executable to
		 * non-executable on CPUs affected by erratum #2645198.
		 */
		if (pte_user_exec(READ_ONCE(*ptep)))
			return huge_ptep_clear_flush(vma, addr, ptep);
	}
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
				  pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}