/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;

/*
 * Handling of free grants:
 *
 * Free grants are kept in a simple list anchored in gnttab_free_head. They
 * are linked by grant ref; the last element contains GNTTAB_LIST_END. The
 * number of free entries is stored in gnttab_free_count.
 * Additionally there is a bitmap of free entries anchored in
 * gnttab_free_bitmap. It is used to simplify the allocation of multiple
 * consecutive grants, which is needed e.g. for supporting virtio.
 * gnttab_last_free is used to add free entries of new frames at the end of
 * the free list.
 * gnttab_free_tail_ptr specifies the variable which references the start
 * of consecutive free grants ending with gnttab_last_free. This pointer is
 * updated in a rather defensive way, in order to avoid performance hits in
 * hot paths.
 * All those variables are protected by gnttab_list_lock.
 */
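
/*
 * Illustrative sketch (editorial addition, not part of the interface): with
 * free entries e1..eN, the bookkeeping above links up as
 *
 *   gnttab_free_head -> e1 -> e2 -> ... -> eN == gnttab_last_free
 *                                           |
 *                                           `-> GNTTAB_LIST_END
 *
 * while gnttab_free_bitmap has the bits for e1..eN set and
 * gnttab_free_tail_ptr points at whichever link references the first entry
 * of the trailing run of consecutive free grants.
 */
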
static int gnttab_free_count;
static unsigned int gnttab_size;
static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
static grant_ref_t *gnttab_free_tail_ptr;
static unsigned long *gnttab_free_bitmap;
static DEFINE_SPINLOCK(gnttab_list_lock);

struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter holds the addresses of the grant table frames at setup
	 * time, and nr_gframes is the number of frames to map. Returns
	 * GNTST_okay on success; a negative value indicates failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Unmap the list of frames that was mapped by map_frames().
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame
	 * of this grant entry to a domain for access. The ref parameter is
	 * the reference of the entry being introduced, domid is the id of
	 * the domain being granted access, frame is the page frame to be
	 * granted, and flags is the status to set on the grant entry.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a domain access via a grant entry. The ref parameter
	 * is the reference of the grant entry whose access is to be revoked.
	 * If the grant entry is currently mapped for reading or writing,
	 * return failure (== 0) directly without tearing down the grant
	 * access. Otherwise, revoke access for this entry and return
	 * success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref);
	/*
	 * Read the frame number related to a given grant reference.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count--) {
		bitmap_clear(gnttab_free_bitmap, head, 1);
		if (gnttab_free_tail_ptr == __gnttab_entry(head))
			gnttab_free_tail_ptr = &gnttab_free_head;
		if (count)
			head = gnttab_entry(head);
	}
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	if (!gnttab_free_count) {
		gnttab_last_free = GNTTAB_LIST_END;
		gnttab_free_tail_ptr = NULL;
	}

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static int get_seq_entry_count(void)
{
	if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
	    *gnttab_free_tail_ptr == GNTTAB_LIST_END)
		return 0;

	return gnttab_last_free - *gnttab_free_tail_ptr + 1;
}

/* Rebuilds the free grant list and tries to find count consecutive entries. */
static int get_free_seq(unsigned int count)
{
	int ret = -ENOSPC;
	unsigned int from, to;
	grant_ref_t *last;

	gnttab_free_tail_ptr = &gnttab_free_head;
	last = &gnttab_free_head;

	for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
	     from < gnttab_size;
	     from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
		to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
					from + 1);
		if (ret < 0 && to - from >= count) {
			ret = from;
			bitmap_clear(gnttab_free_bitmap, ret, count);
			from += count;
			gnttab_free_count -= count;
			if (from == to)
				continue;
		}

		/*
		 * Recreate the free list in order to have it properly sorted.
		 * This is needed to make sure that the free tail has the
		 * maximum possible size.
		 */
		while (from < to) {
			*last = from;
			last = __gnttab_entry(from);
			gnttab_last_free = from;
			from++;
		}
		if (to < gnttab_size)
			gnttab_free_tail_ptr = __gnttab_entry(to - 1);
	}

	*last = GNTTAB_LIST_END;
	if (gnttab_last_free != gnttab_size - 1)
		gnttab_free_tail_ptr = NULL;

	return ret;
}

static int get_free_entries_seq(unsigned int count)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if (gnttab_free_count < count) {
		ret = gnttab_expand(count - gnttab_free_count);
		if (ret < 0)
			goto out;
	}

	if (get_seq_entry_count() < count) {
		ret = get_free_seq(count);
		if (ret >= 0)
			goto out;
		ret = gnttab_expand(count - get_seq_entry_count());
		if (ret < 0)
			goto out;
	}

	ret = *gnttab_free_tail_ptr;
	*gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
	gnttab_free_count -= count;
	if (!gnttab_free_count)
		gnttab_free_tail_ptr = NULL;
	bitmap_clear(gnttab_free_bitmap, ret, count);

 out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ret;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry_locked(grant_ref_t ref)
{
	if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
		return;

	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	if (!gnttab_free_count)
		gnttab_last_free = ref;
	if (gnttab_free_tail_ptr == &gnttab_free_head)
		gnttab_free_tail_ptr = __gnttab_entry(ref);
	gnttab_free_count++;
	bitmap_set(gnttab_free_bitmap, ref, 1);
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	put_free_entry_locked(ref);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_set_free(unsigned int start, unsigned int n)
{
	unsigned int i;

	for (i = start; i < start + n - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = GNTTAB_LIST_END;
	if (!gnttab_free_count) {
		gnttab_free_head = start;
		gnttab_free_tail_ptr = &gnttab_free_head;
	} else {
		gnttab_entry(gnttab_last_free) = start;
	}
	gnttab_free_count += n;
	gnttab_last_free = i;

	bitmap_set(gnttab_free_bitmap, start, n);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame: Frame to which access is permitted.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
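
/*
 * Example (editorial sketch, not part of the original file): a frontend
 * granting one of its pages read-write to its backend. "otherend_id" and
 * "page" are hypothetical names standing in for xenbus-provided state.
 *
 *	int ref = gnttab_grant_foreign_access(otherend_id,
 *					      xen_page_to_gfn(page), 0);
 *	if (ref < 0)
 *		return ref;
 *
 * On success, ref is then advertised to the backend, typically via xenstore.
 */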

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_access_ref(ref);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	if (_gnttab_end_foreign_access_ref(ref))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
	return gnttab_shared.v2[ref].full_page.frame;
}

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static atomic64_t deferred_count;
static atomic64_t leaked_count;
static unsigned int free_per_iteration = 10;
module_param(free_per_iteration, uint, 0600);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = READ_ONCE(free_per_iteration);
	const bool ignore_limit = nr == 0;
	struct deferred_entry *first = NULL;
	unsigned long flags;
	size_t freed = 0;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while ((ignore_limit || nr--) && !list_empty(&deferred_list)) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref)) {
			uint64_t ret = atomic64_dec_return(&deferred_count);

			put_free_entry(entry->ref);
			pr_debug("freeing g.e. %#x (pfn %#lx), %llu remaining\n",
				 entry->ref, page_to_pfn(entry->page),
				 (unsigned long long)ret);
			put_page(entry->page);
			freed++;
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
	}
	if (list_empty(&deferred_list))
		WARN_ON(atomic64_read(&deferred_count));
	else if (!timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
	pr_debug("Freed %zu references", freed);
}

static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
{
	struct deferred_entry *entry;
	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	uint64_t leaked, deferred;

	entry = kmalloc(sizeof(*entry), gfp);
	if (!page) {
		unsigned long gfn = gnttab_interface->read_frame(ref);

		page = pfn_to_page(gfn_to_pfn(gfn));
		get_page(page);
	}

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		deferred = atomic64_inc_return(&deferred_count);
		leaked = atomic64_read(&leaked_count);
		pr_debug("deferring g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
			 ref, page ? page_to_pfn(page) : -1, deferred, leaked);
	} else {
		deferred = atomic64_read(&deferred_count);
		leaked = atomic64_inc_return(&leaked_count);
		pr_warn("leaking g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
			ref, page ? page_to_pfn(page) : -1, deferred, leaked);
	}
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
	int ret = _gnttab_end_foreign_access_ref(ref);

	if (ret)
		put_free_entry(ref);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		if (page)
			put_page(page);
	} else
		gnttab_add_deferred(ref, page);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
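
/*
 * Example (editorial sketch): revoking the grant from the sketch above once
 * the backend is done with it. Passing the page lets the deferred-free logic
 * above take ownership if the remote domain still has the grant mapped.
 *
 *	gnttab_end_foreign_access(ref, page);
 */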

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (head != GNTTAB_LIST_END) {
		ref = gnttab_entry(head);
		put_free_entry_locked(head);
		head = ref;
	}
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (i = count; i > 0; i--)
		put_free_entry_locked(head + i - 1);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
{
	int h;

	if (count == 1)
		h = get_free_entries(1);
	else
		h = get_free_entries_seq(count);

	if (h < 0)
		return -ENOSPC;

	*first = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
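
/*
 * Example (editorial sketch): the pre-allocation pattern built on the
 * functions above. A driver reserves a private pool of references up front,
 * then claims and releases them without taking gnttab_list_lock on every
 * ring operation. "RING_SIZE", "otherend_id" and "gfn" are hypothetical.
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(RING_SIZE, &head) < 0)
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&head);
 *	if (ref >= 0)
 *		gnttab_grant_foreign_access_ref(ref, otherend_id, gfn, 0);
 *	...
 *	gnttab_release_grant_reference(&head, ref);
 *	gnttab_free_grant_references(head);
 */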

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
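
/*
 * Example (editorial sketch): retrying an allocation once enough references
 * are free again. "my_restart_fn", "my_dev", "n" and "head" are
 * hypothetical; the callback fires under gnttab_list_lock as soon as at
 * least n grant references are available.
 *
 *	static struct gnttab_free_callback cb;
 *
 *	if (gnttab_alloc_grant_references(n, &head) == -ENOSPC)
 *		gnttab_request_free_callback(&cb, my_restart_fn, my_dev, n);
 */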

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	gnttab_set_free(gnttab_size, extra_entries);

	if (!gnttab_free_tail_ptr)
		gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);

	nr_grant_frames = new_nr_grant_frames;
	gnttab_size += extra_entries;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
	if (vaddr == NULL) {
		pr_warn("Failed to memremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		memunmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	memunmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for mapping foreign grants into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = cache->pages;
	cache->pages = page->zone_device_data;

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = list_first_entry(&cache->pages, struct page, lru);
	list_del(&page->lru);

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (cache_empty(cache)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	page[0] = cache_deq(cache);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cache->lock, flags);

	for (i = 0; i < num; i++)
		cache_enq(cache, page[i]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = cache_deq(cache);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
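
/*
 * Example (editorial sketch): a backend keeping a small cache of
 * grant-mappable pages so it does not hit the unpopulated-page allocator on
 * every request. "max_cached" is a hypothetical driver policy value.
 *
 *	static struct gnttab_page_cache cache;
 *	struct page *page;
 *
 *	gnttab_page_cache_init(&cache);
 *	if (gnttab_page_cache_get(&cache, &page))
 *		return -ENOMEM;
 *	... map a foreign grant into page, use it, unmap it ...
 *	gnttab_page_cache_put(&cache, &page, 1);
 *	gnttab_page_cache_shrink(&cache, max_cached);
 */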

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for mapping foreign grants into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
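
/*
 * Example (editorial sketch): allocating a DMA-able, grantable buffer with
 * the helper above. "dev", "nr", "pages" and "frames" are hypothetical; the
 * pages and frames arrays must hold nr entries.
 *
 *	struct gnttab_dma_alloc_args args = {
 *		.dev		= dev,
 *		.coherent	= true,
 *		.nr_pages	= nr,
 *		.pages		= pages,
 *		.frames		= frames,
 *	};
 *
 *	if (gnttab_dma_alloc_pages(&args))
 *		return -ENOMEM;
 *	... grant the pages to the peer, DMA into args.vaddr ...
 *	gnttab_dma_free_pages(&args);
 */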

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
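
/*
 * Example (editorial sketch): a single hypervisor-mediated copy from a
 * foreign grant into a local page using the retry-aware helper above.
 * "ref", "otherend_id" and "page" are hypothetical.
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref	= ref,
 *		.source.domid	= otherend_id,
 *		.dest.u.gmfn	= xen_page_to_gfn(page),
 *		.dest.domid	= DOMID_SELF,
 *		.len		= XEN_PAGE_SIZE,
 *		.flags		= GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		return -EIO;
 */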

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
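
/*
 * Example (editorial sketch): a callback counting the grant-sized segments
 * gnttab_foreach_grant_in_range() splits a region into. On a 64K-page
 * kernel each 4K Xen grant gets its own invocation.
 *
 *	static void count_seg(unsigned long gfn, unsigned int offset,
 *			      unsigned int len, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int segs = 0;
 *
 *	gnttab_foreach_grant_in_range(page, 0, PAGE_SIZE, count_seg, &segs);
 */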

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
		struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
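
/*
 * Example (editorial sketch): synchronously unmapping a batch once the pages
 * are no longer referenced elsewhere. "unmap_ops", "pages" and "count" are
 * hypothetical, with unmap_ops prepared via gnttab_set_unmap_op().
 *
 *	struct gntab_unmap_queue_data unmap_data = {
 *		.unmap_ops	= unmap_ops,
 *		.kunmap_ops	= NULL,
 *		.pages		= pages,
 *		.count		= count,
 *	};
 *
 *	return gnttab_unmap_refs_sync(&unmap_data);
 */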

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as the array is initialized by the following
	 * GNTTABOP_get_status_frames hypercall.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom        = DOMID_SELF;
	getframes.nr_frames  = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as the array is initialized by the following
	 * GNTTABOP_setup_table hypercall.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.read_frame			= gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.read_frame			= gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab shared frames are not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached cur=%u extra=%u limit=%u gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames, max_nr_grefs;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	max_nr_grefs = max_nr_grant_frames *
		       gnttab_interface->grefs_per_grant_frame;
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = max_nr_grefs / RPP;

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
	if (!gnttab_free_bitmap) {
		ret = -ENOMEM;
		goto ini_nomem;
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;

	gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
			gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);

	pr_info("Grant table initialized\n");
	return 0;

 ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	bitmap_free(gnttab_free_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames.
 */
core_initcall_sync(__gnttab_init);