1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H
3#define _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H
4
5#ifndef __ASSEMBLY__
6#ifdef CONFIG_HUGETLB_PAGE
7/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
9 * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
10 *
11 * Defined in such a way that we can optimize away code block at build time
12 * if CONFIG_HUGETLB_PAGE=n.
13 *
14 * returns true for pmd migration entries, THP, devmap, hugetlb
15 * But compile time dependent on CONFIG_HUGETLB_PAGE
16 */
17static inline int pmd_huge(pmd_t pmd)
18{
19	/*
20	 * leaf pte for huge page
21	 */
22	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
23}
24
25static inline int pud_huge(pud_t pud)
26{
27	/*
28	 * leaf pte for huge page
29	 */
30	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
31}
32
33/*
34 * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
35 * need to setup hugepage directory for them. Our pte and page directory format
36 * enable us to have this enabled.
37 */
static inline int hugepd_ok(hugepd_t hpd)
{
	/*
	 * Hugepage directories are never used with 64K pages on book3s;
	 * hugepages live directly in pgd/pmd entries, so this is always false.
	 */
	return 0;
}
42
/* No hugepd format exists on this config, so no entry is ever a hugepd. */
#define is_hugepd(pdep)			0
44
45/*
46 * This should never get called
47 */
static inline int get_hugepd_cache_index(int index)
{
	/*
	 * There are no hugepd tables on this config (see hugepd_ok above
	 * returning 0), so reaching this function is a kernel bug.
	 */
	BUG();
}
52
53#endif /* CONFIG_HUGETLB_PAGE */
54
55static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
56			       unsigned long pfn, pgprot_t prot)
57{
58	if (radix_enabled())
59		BUG();
60	return hash__remap_4k_pfn(vma, addr, pfn, prot);
61}
62#endif	/* __ASSEMBLY__ */
63#endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H */
64