/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* This structure holds the per-version grant table operations. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter holds the grant table frame addresses established at
	 * setup time, and nr_gframes is the number of frames to map.
	 * Returns GNTST_okay on success, a negative value on failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Unmap the list of frames (including any grant entry status
	 * frames) that were mapped by map_frames.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame
	 * of this entry to a domain for access or transfer. ref is the
	 * reference of the entry being introduced, domid is the id of the
	 * granted domain, frame is the page frame being granted, and flags
	 * is the status to store in the entry.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a domain access to an entry. ref is the reference
	 * of the entry whose access is to be revoked; readonly is unused
	 * here. If the entry is currently mapped for reading or writing,
	 * return failure (== 0) without tearing down the grant. Otherwise
	 * revoke access and return success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a domain an entry for transfer. ref is the
	 * reference of the entry whose transfer is to be stopped. If the
	 * transfer has not started, reclaim the entry and return failure
	 * (== 0). Otherwise, wait for the transfer to complete and then
	 * return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Read the frame number related to a given grant reference.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
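
/*
 * Illustrative note (assumes a common config, not a requirement): with
 * 4 KiB pages and a 4-byte grant_ref_t, RPP is 1024, so free-list entry
 * 2500 lives at gnttab_list[2][452].
 */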

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * The following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, including a valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
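
/*
 * Usage sketch (illustrative only; "otherend_id" is a hypothetical
 * frontend's peer domain id): grant the other end read-only access to a
 * page, then revoke it again when done.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int ref;
 *
 *	ref = gnttab_grant_foreign_access(otherend_id,
 *					  xen_page_to_gfn(page), 1);
 *	if (ref < 0)
 *		goto err;
 *	...
 *	gnttab_end_foreign_access(ref, 1, (unsigned long)page_address(page));
 */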

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
	return gnttab_shared.v2[ref].full_page.frame;
}

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
				 entry->ref, page_to_pfn(entry->page));
			put_page(entry->page);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry;
	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	const char *what = KERN_WARNING "leaking";

	entry = kmalloc(sizeof(*entry), gfp);
	if (!page) {
		unsigned long gfn = gnttab_interface->read_frame(ref);

		page = pfn_to_page(gfn_to_pfn(gfn));
		get_page(page);
	}

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
	int ret = _gnttab_end_foreign_access_ref(ref, 0);

	if (ret)
		put_free_entry(ref);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If the transfer has not yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If the transfer has not yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
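
/*
 * Usage sketch (illustrative only; the batch size is made up): pre-allocate
 * a private batch of references, claim them one at a time without taking
 * the global lock per grant, and release the batch when done.
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(16, &head) < 0)
 *		goto err;
 *	ref = gnttab_claim_grant_reference(&head);
 *	if (ref >= 0)
 *		gnttab_grant_foreign_access_ref(ref, otherend_id, gfn, 0);
 *	...
 *	gnttab_free_grant_references(head);
 */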

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
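
/*
 * Usage sketch (illustrative only; "info" and restart_queue() are
 * hypothetical driver state): when grant references run out, ask to be
 * called back once enough entries have been freed, then retry.
 *
 *	if (gnttab_alloc_grant_references(nr_needed, &head) < 0) {
 *		gnttab_request_free_callback(&info->callback,
 *					     restart_queue, info, nr_needed);
 *		return -EBUSY;
 *	}
 */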

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for mapping foreign grants into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = cache->pages;
	cache->pages = page->zone_device_data;

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = list_first_entry(&cache->pages, struct page, lru);
	list_del(&page->lru);

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (cache_empty(cache)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	page[0] = cache_deq(cache);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cache->lock, flags);

	for (i = 0; i < num; i++)
		cache_enq(cache, page[i]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
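
/*
 * Usage sketch (illustrative only): a backend keeps a per-device cache so
 * unmap/free cycles do not hit the unpopulated-page allocator every time.
 *
 *	struct gnttab_page_cache cache;
 *	struct page *page;
 *
 *	gnttab_page_cache_init(&cache);
 *	if (gnttab_page_cache_get(&cache, &page) < 0)
 *		goto err;
 *	...
 *	gnttab_page_cache_put(&cache, &page, 1);
 *	gnttab_page_cache_shrink(&cache, 0);
 */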

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = cache_deq(cache);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for mapping foreign grants into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
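
/*
 * Usage sketch (illustrative only; the device and buffer sizes are made
 * up): callers fill in a struct gnttab_dma_alloc_args and get back both
 * the CPU mapping and the underlying frames.
 *
 *	struct gnttab_dma_alloc_args args = {
 *		.dev		= dev,
 *		.coherent	= true,
 *		.nr_pages	= nr_pages,
 *		.pages		= pages,
 *		.frames		= frames,
 *	};
 *
 *	if (gnttab_dma_alloc_pages(&args) < 0)
 *		goto err;
 *	...
 *	gnttab_dma_free_pages(&args);
 */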

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
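
/*
 * Usage sketch (illustrative only; ref, otherend_id, gfn and len are made
 * up): copy bytes out of a foreign grant into a local frame, with the
 * GNTST_eagain retry handled by gnttab_batch_copy().
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref	= ref,
 *		.source.domid	= otherend_id,
 *		.dest.u.gmfn	= gfn,
 *		.dest.domid	= DOMID_SELF,
 *		.len		= len,
 *		.flags		= GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		goto err;
 */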

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
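
/*
 * Usage sketch (illustrative only): when PAGE_SIZE > XEN_PAGE_SIZE a single
 * page spans several grants, so callers walk it with a callback. Counting
 * the grants needed for a region might look like this:
 *
 *	static void count_grant(unsigned long gfn, unsigned int offset,
 *				unsigned int len, void *data)
 *	{
 *		unsigned int *nr_grants = data;
 *
 *		(*nr_grants)++;
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, offset, len, count_grant, &nr);
 */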

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
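
/*
 * Usage sketch (illustrative only; one page from a hypothetical frontend,
 * identified by ref and otherend_id): build the op with gnttab_set_map_op()
 * and map the grant into a page obtained from gnttab_alloc_pages().
 *
 *	struct gnttab_map_grant_ref op;
 *	struct page *page;
 *
 *	if (gnttab_alloc_pages(1, &page) < 0)
 *		goto err;
 *	gnttab_set_map_op(&op, (unsigned long)page_address(page),
 *			  GNTMAP_host_map, ref, otherend_id);
 *	if (gnttab_map_refs(&op, NULL, &page, 1) || op.status != GNTST_okay)
 *		goto err;
 */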

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
		struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
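
/*
 * Usage sketch (illustrative only): tear down a previously mapped batch,
 * waiting until no other references to the pages remain. unmap_ops is
 * assumed to have been filled with gnttab_set_unmap_op().
 *
 *	struct gntab_unmap_queue_data unmap_data = {
 *		.unmap_ops	= unmap_ops,
 *		.kunmap_ops	= NULL,
 *		.pages		= pages,
 *		.count		= count,
 *	};
 *
 *	if (gnttab_unmap_refs_sync(&unmap_data))
 *		goto err;
 */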

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in the following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom        = DOMID_SELF;
	getframes.nr_frames  = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in the following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.read_frame			= gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.read_frame			= gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames is not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached cur=%u extra=%u limit=%u gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = (max_nr_grant_frames *
			       gnttab_interface->grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames *
			gnttab_interface->grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head  = NR_RESERVED_ENTRIES;

	pr_info("Grant table initialized\n");
	return 0;

 ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/*
 * Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames.
 */
core_initcall_sync(__gnttab_init);