/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/device.h>

#include <asm/extable.h>
#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#ifdef CONFIG_X86_64
#define XEN_PHYSICAL_MASK	__sme_clr((1UL << 52) - 1)
#else
#define XEN_PHYSICAL_MASK	__PHYSICAL_MASK
#endif

#define XEN_PTE_MFN_MASK	((pteval_t)(((signed long)PAGE_MASK) & \
					    XEN_PHYSICAL_MASK))

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)

#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
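
/*
 * Illustrative sketch (not part of the kernel API): a p2m entry for a
 * foreign or identity frame keeps the frame number in the low bits and
 * sets one of the indicator bits defined above; consumers that want the
 * plain frame number mask both bits off again:
 *
 *	unsigned long entry = FOREIGN_FRAME(0x1234UL);
 *	bool foreign = entry & FOREIGN_FRAME_BIT;		// true
 *	unsigned long frame = entry & ~(FOREIGN_FRAME_BIT |
 *					IDENTITY_FRAME_BIT);	// 0x1234
 */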

extern unsigned long *machine_to_phys_mapping;
extern unsigned long  machine_to_phys_nr;
extern unsigned long *xen_p2m_addr;
extern unsigned long  xen_p2m_size;
extern unsigned long  xen_max_p2m_pfn;

extern int xen_alloc_p2m_entry(unsigned long pfn);

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
						    unsigned long pfn_e);

#ifdef CONFIG_XEN_PV
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_unmap_grant_ref *kunmap_ops,
				     struct page **pages, unsigned int count);
#else
static inline int
set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			struct gnttab_map_grant_ref *kmap_ops,
			struct page **pages, unsigned int count)
{
	return 0;
}

static inline int
clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			  struct gnttab_unmap_grant_ref *kunmap_ops,
			  struct page **pages, unsigned int count)
{
	return 0;
}
#endif

/*
 * Helper functions to write or read unsigned long values to/from
 * memory, when the access may fault.  They return 0 on success and a
 * negative value if the access faulted (the read helper then stores
 * ~0UL in *val).
 */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
	int ret = 0;

	asm volatile("1: mov %[val], %[ptr]\n"
		     "2:\n"
		     ".section .fixup, \"ax\"\n"
		     "3: sub $1, %[ret]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [ret] "+r" (ret), [ptr] "=m" (*addr)
		     : [val] "r" (val));

	return ret;
}

static inline int xen_safe_read_ulong(const unsigned long *addr,
				      unsigned long *val)
{
	int ret = 0;
	unsigned long rval = ~0ul;

	asm volatile("1: mov %[ptr], %[rval]\n"
		     "2:\n"
		     ".section .fixup, \"ax\"\n"
		     "3: sub $1, %[ret]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [ret] "+r" (ret), [rval] "+r" (rval)
		     : [ptr] "m" (*addr));
	*val = rval;

	return ret;
}
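
/*
 * Illustrative sketch (hypothetical use, mirroring how the p2m code calls
 * these helpers): updating a p2m page that might currently be mapped
 * read-only, without risking an oops:
 *
 *	if (xen_safe_write_ulong(xen_p2m_addr + pfn, mfn) < 0)
 *		return false;	// access faulted, entry not updated
 */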

#ifdef CONFIG_XEN_PV
/*
 * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
 * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
 *   bits (identity or foreign) are set.
 * - __pfn_to_mfn() returns the found entry of the p2m table. A possibly set
 *   identity or foreign indicator will still be set. __pfn_to_mfn()
 *   encapsulates get_phys_to_machine(), which is called in special cases only.
 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
 *   cases needing extended handling.
 */
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn < xen_p2m_size)
		mfn = xen_p2m_addr[pfn];
	else if (unlikely(pfn < xen_max_p2m_pfn))
		return get_phys_to_machine(pfn);
	else
		return IDENTITY_FRAME(pfn);

	if (unlikely(mfn == INVALID_P2M_ENTRY))
		return get_phys_to_machine(pfn);

	return mfn;
}
#else
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	return pfn;
}
#endif

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	/*
	 * Some x86 code is still using pfn_to_mfn instead of
	 * pfn_to_gfn. This will have to be removed when we figure
	 * out which call.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	mfn = __pfn_to_mfn(pfn);

	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

	return mfn;
}
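
/*
 * Illustrative sketch (hypothetical caller) of the difference described
 * above: __pfn_to_mfn() keeps the indicator bits, pfn_to_mfn() strips
 * them and only reports the plain mfn (or INVALID_P2M_ENTRY):
 *
 *	unsigned long raw = __pfn_to_mfn(pfn);
 *
 *	if (raw != INVALID_P2M_ENTRY && (raw & FOREIGN_FRAME_BIT))
 *		;	// pfn is backed by a frame granted by another domain
 *
 *	unsigned long mfn = pfn_to_mfn(pfn);	// indicator bits masked off
 */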

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
	unsigned long pfn;
	int ret;

	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
	if (ret < 0)
		return ~0;

	return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	/*
	 * Some x86 code is still using mfn_to_pfn instead of
	 * gfn_to_pfn. This will have to be removed when we figure
	 * out which call.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		pfn = ~0;

	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or the
	 * entry doesn't map back to the mfn.
	 */
	if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
		pfn = mfn;

	return pfn;
}

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
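
/*
 * Example (illustrative, not from the kernel sources): converting an
 * address keeps the in-page offset; only the frame number goes through
 * the p2m/m2p tables.  With 4K pages:
 *
 *	xmaddr_t ma = phys_to_machine(XPADDR(0x12345678));
 *	// ma.maddr == PFN_PHYS(pfn_to_mfn(0x12345)) | 0x678
 */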

/* Pseudo-physical <-> Guest conversion */
static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;
	else
		return pfn_to_mfn(pfn);
}

static inline unsigned long gfn_to_pfn(unsigned long gfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return gfn;
	else
		return mfn_to_pfn(gfn);
}

/* Pseudo-physical <-> Bus conversion */
#define pfn_to_bfn(pfn)		pfn_to_gfn(pfn)
#define bfn_to_pfn(bfn)		gfn_to_pfn(bfn)

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		return -1; /* force !pfn_valid() */
	return pfn;
}
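
/*
 * Illustrative sketch (hypothetical caller): a driver holding a machine
 * frame of unknown origin can use bfn_to_local_pfn() to find out whether
 * the frame belongs to this domain:
 *
 *	unsigned long pfn = bfn_to_local_pfn(mfn);
 *
 *	if (!pfn_valid(pfn))
 *		;	// foreign or I/O frame, no struct page behind it
 */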

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

/* VIRT <-> GUEST conversion */
#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_pfn(v)))
#define gfn_to_virt(g)		(__va(gfn_to_pfn(g) << PAGE_SHIFT))
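
/*
 * Example (illustrative only; "ring" is a hypothetical buffer): code that
 * hands a kernel page to the hypervisor or to a backend passes the guest
 * frame number, not the pseudo-physical one:
 *
 *	void *ring = (void *)get_zeroed_page(GFP_KERNEL);
 *	unsigned long gfn = virt_to_gfn(ring);	// frame number Xen expects
 */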

static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}

static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}

#define pmd_val_ma(v) ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v) ((v).p4d.pgd.pgd)
#else
#define pud_val_ma(v) ((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#ifdef __PAGETABLE_P4D_FOLDED
#define p4d_val_ma(x)	((x).pgd.pgd)
#else
#define p4d_val_ma(x)	((x).p4d)
#endif

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#define xen_remap(cookie, size) ioremap((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

static inline bool xen_arch_need_swiotlb(struct device *dev,
					 phys_addr_t phys,
					 dma_addr_t dev_addr)
{
	return false;
}

static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	return __get_free_pages(__GFP_NOWARN, order);
}

#endif /* _ASM_X86_XEN_PAGE_H */