/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
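/*
 * Worked example (illustrative, assuming the usual PTE_SHIFT values):
 * with 4k pages and 32-bit PTEs, PTE_INDEX_SIZE is 10, so PGDIR_SHIFT is
 * 12 + 10 = 22, PGDIR_SIZE is 4MB and PTRS_PER_PGD is 1 << (32 - 22) =
 * 1024 -- the 1-page, 1024-entry pgdir described above.  With 64-bit PTEs,
 * PTE_INDEX_SIZE drops to 9 (512 eight-byte PTEs per page), giving
 * PGDIR_SHIFT = 21 and the 2048-entry, 8KB pgdir of the second layout.
 */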

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* !__ASSEMBLY__ */


/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a hole of up to 16MB
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (growing down
 * from IOREMAP_TOP) and the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot, so we can
 * check when the VM allocations actually run into the mappings we set
 * up during early boot.  This really does become a problem for machines
 * with good amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif
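
/*
 * Rough picture of the resulting layout (illustrative only; the exact
 * addresses depend on platform and config):
 *
 *   high_memory   -> guard hole of up to VMALLOC_OFFSET
 *   VMALLOC_START -> vmalloc/ioremap allocations grow upwards from here
 *   ioremap_bot   -> becomes VMALLOC_END after mem_init(); early ioremaps
 *                    sit between here and IOREMAP_TOP, having grown down
 *   IOREMAP_TOP   -> PKMAP_BASE with HIGHMEM, FIXADDR_START otherwise
 */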

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-e500.h>
#elif defined(CONFIG_PPC_85xx)
#include <asm/nohash/32/pte-85xx.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE.  Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that differ simply pre-define the value, so we don't
 * override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
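
/*
 * Illustration: with 4k pages PTE_RPN_SHIFT is 12, so PTE_RPN_MASK keeps
 * bits 12 and up of the PTE value.  A 64-bit PTE therefore has room for
 * the full 36-bit physical addresses advertised above (a 24-bit PFN),
 * while a 32-bit PTE tops out at 32-bit physical addresses.
 */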

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)

#ifndef pte_mkwrite_novma
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}
#endif

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 *
 * On the 8xx, the page tables are a bit special.  For 16k pages, we have
 * 4 identical entries.  For 512k pages, we have 128 entries as if it was
 * 4k pages, but they are flagged as 512k pages for the hardware.
 * For other page sizes, we have a single entry in the table.
 */
#ifdef CONFIG_PPC_8xx
static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
static int hugepd_ok(hugepd_t hpd);

static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
{
	if (!huge)
		return PAGE_SIZE / SZ_4K;
	else if (hugepd_ok(*((hugepd_t *)pmd)))
		return 1;
	else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
		return SZ_16K / SZ_4K;
	else
		return SZ_512K / SZ_4K;
}

static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t *entry = (pte_basic_t *)p;
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	int num, i;
	pmd_t *pmd = pmd_off(mm, addr);

	num = number_of_cells_per_pte(pmd, new, huge);

	for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) {
		*entry++ = new;
		if (IS_ENABLED(CONFIG_PPC_16K_PAGES) && num != 1) {
			*entry++ = new;
			*entry++ = new;
			*entry++ = new;
		}
	}

	return old;
}
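
/*
 * For example (per the 8xx layouts described above pte_update()): with
 * 16k pages, number_of_cells_per_pte() returns 4 for a normal mapping and
 * the loop body runs once, writing the same value into all four 4k cells
 * of the PTE.  With 4k pages and a 512k huge mapping it returns 128, and
 * each of the 128 consecutive cells receives the value with its RPN
 * advanced by 4k.
 */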

#ifdef CONFIG_PPC_16K_PAGES
#define ptep_get ptep_get
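/*
 * With 16k pages a pte_t is made up of four identical 4k cells (kept in
 * sync by pte_update() above), so reading the first cell once and
 * replicating it is enough to get a consistent snapshot of the PTE.
 */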
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_basic_t val = READ_ONCE(ptep->pte);
	pte_t pte = {val, val, val, val};

	return pte;
}
#endif /* CONFIG_PPC_16K_PAGES */

#else
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

	*p = __pte(new);

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
#endif

#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, 0, set, huge);

	flush_tlb_page(vma, address);
}
#endif

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((const void *)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_pfn(pmd)		(__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif

#define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32bit PTEs):
 *
 *                         1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <------------------ offset -------------------> < type -> E 0 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 *
 * For 64bit PTEs, the offset is extended by 32bit.
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

/* We borrow LSB 2 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x000004
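
/*
 * Worked example (illustrative): __swp_entry(1, 2) gives a swp_entry_t
 * value of 0x41 (type in bits 0-4, offset above), and __swp_entry_to_pte()
 * shifts it to 0x208, leaving the low three PTE bits clear as in the
 * diagram above; bit 2 is then available for _PAGE_SWP_EXCLUSIVE.
 * __pte_to_swp_entry() reverses the shift, and __swp_type()/__swp_offset()
 * recover 1 and 2.
 */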

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */