/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>

#define _PAGE_SHIFT	12
#define _PAGE_SIZE	(_AC(1, UL) << _PAGE_SHIFT)
#define _PAGE_MASK	(~(_PAGE_SIZE - 1))

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	_PAGE_SHIFT
#define PAGE_SIZE	_PAGE_SIZE
#define PAGE_MASK	_PAGE_MASK
#define PAGE_DEFAULT_ACC	0
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)

#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		2
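
/*
 * With HPAGE_SHIFT = 20 the macros above describe 1 MB huge pages:
 * HPAGE_SIZE is 0x100000 and HUGETLB_PAGE_ORDER is 8, i.e. one huge
 * page covers 256 base pages. A hypothetical compile-time sanity
 * check (a sketch, not part of this header) would be:
 *
 *	BUILD_BUG_ON(HPAGE_SIZE != (1UL << HUGETLB_PAGE_ORDER) * PAGE_SIZE);
 */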

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#include <asm/setup.h>
#ifndef __ASSEMBLY__

void __storage_key_init_range(unsigned long start, unsigned long end);

/*
 * Initialize the storage keys of a memory range; a no-op if the
 * default storage key is zero, since zero is the reset state anyway.
 */
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	if (PAGE_DEFAULT_KEY != 0)
		__storage_key_init_range(start, end);
}
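
/*
 * A hypothetical usage sketch (start_pfn/end_pfn are assumptions, not
 * part of this header): key the pages backing a pfn range during
 * memory initialization.
 *
 *	storage_key_init_range(start_pfn << PAGE_SHIFT,
 *			       end_pfn << PAGE_SHIFT);
 */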

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with the 0xb0 padding byte in
 * order to bypass caches when copying a page. Especially when copying
 * huge pages this keeps the L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
	register void *reg2 asm ("2") = to;
	register unsigned long reg3 asm ("3") = 0x1000;
	register void *reg4 asm ("4") = from;
	register unsigned long reg5 asm ("5") = 0xb0001000;
	asm volatile(
		"	mvcl	2,4"
		: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
		: : "memory", "cc");
}
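
/*
 * A minimal usage sketch (dst_page/src_page are assumptions, not part
 * of this header): copy one 4 KB page while bypassing the data caches.
 *
 *	void *dst = page_to_virt(dst_page);
 *	void *src = page_to_virt(src_page);
 *
 *	copy_page(dst, src);
 */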

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking.
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;

#define pgprot_val(x)	((x).pgprot)
#define pgste_val(x)	((x).pgste)
#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pud_val(x)	((x).pud)
#define p4d_val(x)	((x).p4d)
#define pgd_val(x)	((x).pgd)

#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __p4d(x)	((p4d_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		/* sske with the nonquiescing (NQ) bit set in the m3 field */
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

/*
 * Reset the referenced bit with rrbe; the previous state of the bit
 * is reported in the condition code, which is extracted via ipm/srl.
 */
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "a" (addr) : "cc");
	return cc;
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/
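
/*
 * A hypothetical sketch (addr is an assumption, not part of this
 * header) combining the accessors and key bits above: enable fetch
 * protection on top of the current key, then test and reset the
 * hardware referenced bit.
 *
 *	unsigned char skey = page_get_storage_key(addr);
 *
 *	page_set_storage_key(addr, skey | _PAGE_FP_BIT, 1);
 *	if (page_get_storage_key(addr) & _PAGE_REFERENCED)
 *		page_reset_referenced(addr);
 */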

struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_dat(struct page *page, int order);
void arch_set_page_nodat(struct page *page, int order);
int arch_test_page_nodat(struct page *page);
void arch_set_page_states(int make_stable);

/* /dev/mem access to physical memory is never allowed on s390 */
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

#if IS_ENABLED(CONFIG_PGSTE)
int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif

#endif /* !__ASSEMBLY__ */

/* The kernel runs with an identity mapping that starts at address zero */
#define __PAGE_OFFSET		0x0UL
#define PAGE_OFFSET		0x0UL

#define __pa(x)			((unsigned long)(x))
#define __va(x)			((void *)(unsigned long)(x))

#define phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)

#define phys_to_page(phys)	pfn_to_page(phys_to_pfn(phys))
#define page_to_phys(page)	pfn_to_phys(page_to_pfn(page))

#define pfn_to_virt(pfn)	__va(pfn_to_phys(pfn))
#define virt_to_pfn(kaddr)	(phys_to_pfn(__pa(kaddr)))
#define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))

#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
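
/*
 * A minimal round-trip sketch (kaddr is an assumption, not part of
 * this header): since PAGE_OFFSET is zero, converting a page aligned
 * kernel address to a pfn, then to a struct page, and back yields
 * the original address.
 *
 *	unsigned long pfn = virt_to_pfn(kaddr);
 *	struct page *page = pfn_to_page(pfn);
 *	void *same = page_to_virt(page);
 */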

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _S390_PAGE_H */