xref: /kernel/linux/linux-5.10/arch/arm/xen/p2m.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

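/*
 * One contiguous pfn -> mfn range, covering nr_pages pages and keyed
 * in the phys_to_mach rbtree by its first pfn.
 */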
struct xen_p2m_entry {
	unsigned long pfn;
	unsigned long mfn;
	unsigned long nr_pages;
	struct rb_node rbnode_phys;
};

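/*
 * Protects the phys_to_mach tree. Every lock site uses the irqsave
 * variants, since translations may be required with interrupts off.
 */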
static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);

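/*
 * Insert @new into the phys_to_mach tree; fails with -EINVAL if an
 * entry starting at the same pfn already exists. The caller must hold
 * p2m_lock for writing.
 */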
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &phys_to_mach.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);

		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->pfn < entry->pfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_phys, parent, link);
	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
	return rc;
}

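/*
 * Look up the mfn backing @pfn. Returns INVALID_P2M_ENTRY when @pfn is
 * not covered by any tracked range, i.e. the page is not a foreign
 * mapping.
 */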
unsigned long __pfn_to_mfn(unsigned long pfn)
{
	struct rb_node *n;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	n = phys_to_mach.rb_node;
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
		if (entry->pfn <= pfn &&
				entry->pfn + entry->nr_pages > pfn) {
			unsigned long mfn = entry->mfn + (pfn - entry->pfn);
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return mfn;
		}
		if (pfn < entry->pfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);

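/*
 * Illustrative only, not code from this file: ARM Xen guests are
 * auto-translated, so a miss here means the page is backed 1:1 and
 * callers typically fall back to the pfn itself:
 *
 *	unsigned long bfn = __pfn_to_mfn(pfn);
 *	if (bfn == INVALID_P2M_ENTRY)
 *		bfn = pfn;
 */

/*
 * Record p2m entries for a batch of freshly mapped grant pages. A slot
 * whose entry cannot be recorded is flagged with GNTST_general_error
 * and unmapped again immediately.
 */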
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct gnttab_unmap_grant_ref unmap;
		int rc;

		if (map_ops[i].status)
			continue;
		if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
			continue;

		/*
		 * Signal an error for this slot. This in turn requires
		 * immediate unmapping.
		 */
		map_ops[i].status = GNTST_general_error;
		unmap.host_addr = map_ops[i].host_addr;
		unmap.handle = map_ops[i].handle;
		map_ops[i].handle = ~0;
		if (map_ops[i].flags & GNTMAP_device_map)
			unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
		else
			unmap.dev_bus_addr = 0;

		/*
		 * Pre-populate the status field, to be recognizable in
		 * the log message below.
		 */
		unmap.status = 1;

		rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					       &unmap, 1);
		if (rc || unmap.status != GNTST_okay)
			pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
				    rc, unmap.status);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

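/*
 * Drop the p2m entries for a batch of grant pages that have just been
 * unmapped.
 */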
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
			      struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    INVALID_P2M_ENTRY);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

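/*
 * Add a pfn -> mfn range covering nr_pages pages or, when mfn is
 * INVALID_P2M_ENTRY, remove the tracked range that contains pfn.
 * Returns false only if a new entry cannot be allocated or inserted.
 */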
bool __set_phys_to_machine_multi(unsigned long pfn,
		unsigned long mfn, unsigned long nr_pages)
{
	int rc;
	unsigned long irqflags;
	struct xen_p2m_entry *p2m_entry;
	struct rb_node *n;

	if (mfn == INVALID_P2M_ENTRY) {
		write_lock_irqsave(&p2m_lock, irqflags);
		n = phys_to_mach.rb_node;
		while (n) {
			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
			if (p2m_entry->pfn <= pfn &&
					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
				write_unlock_irqrestore(&p2m_lock, irqflags);
				kfree(p2m_entry);
				return true;
			}
			if (pfn < p2m_entry->pfn)
				n = n->rb_left;
			else
				n = n->rb_right;
		}
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return true;
	}

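	/* GFP_NOWAIT: callers may be in atomic context and must not sleep. */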
	p2m_entry = kzalloc(sizeof(*p2m_entry), GFP_NOWAIT);
	if (!p2m_entry)
		return false;

	p2m_entry->pfn = pfn;
	p2m_entry->nr_pages = nr_pages;
	p2m_entry->mfn = mfn;

	write_lock_irqsave(&p2m_lock, irqflags);
	rc = xen_add_phys_to_mach_entry(p2m_entry);
	if (rc < 0) {
		write_unlock_irqrestore(&p2m_lock, irqflags);
		kfree(p2m_entry);
		return false;
	}
	write_unlock_irqrestore(&p2m_lock, irqflags);
	return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
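
/*
 * Illustrative only, not code from this file: publish a contiguous
 * foreign range and tear it down again later:
 *
 *	if (!__set_phys_to_machine_multi(pfn, mfn, nr))
 *		return -ENOMEM;
 *	...
 *	__set_phys_to_machine_multi(pfn, INVALID_P2M_ENTRY, nr);
 */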

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);

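/* Set up the lock before any p2m updates can happen. */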
static int p2m_init(void)
{
	rwlock_init(&p2m_lock);
	return 0;
}
arch_initcall(p2m_init);