/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */
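
/*
 * Most of the wrappers below expand to PVOP_CALLn()/PVOP_VCALLn()
 * invocations (see <asm/paravirt_types.h>): the numeric suffix is the
 * argument count, the "V" variants return void, and the "CALLEE"
 * variants use the callee-saved thunk convention described further
 * down.  A hypervisor backend installs its hooks by filling in pv_ops
 * at boot; as an illustrative sketch (the exact callback name is up to
 * the backend):
 *
 *	pv_ops.time.sched_clock = xen_sched_clock;
 */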

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, time.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, time.steal_clock, cpu);
}
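
/*
 * Steal time is the time a vCPU spent runnable but not actually
 * running.  A backend that can report it installs time.steal_clock and
 * enables the static keys above (KVM guests do this via
 * kvm_steal_clock, as an illustrative example).
 */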

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
#endif
}
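
/*
 * slow_down_io() backs the *_p() port-I/O helpers.  The native
 * cpu.io_delay implementation is a dummy write to port 0x80; a
 * hypervisor may turn it into a no-op, since emulated ports need no
 * settle time (illustrative; the exact behaviour is up to the backend).
 */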

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_others(const struct cpumask *cpumask,
				      const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}
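
/*
 * The mmu.flush_tlb_* hooks mirror the native_flush_tlb_* prototypes
 * above.  A guest can implement flush_tlb_others without sending IPIs,
 * e.g. by asking the hypervisor to flush remote vCPUs directly (Xen
 * and KVM both offer such mechanisms; illustrative, see their pv_ops
 * setup code).
 */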

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(cpu.wbinvd);
}

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)
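
/*
 * Usage sketch (illustrative MSR): splitting a 64-bit MSR read into
 * its 32-bit halves:
 *
 *	unsigned int lo, hi;
 *	rdmsr(MSR_IA32_APICBASE, lo, hi);
 */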

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})
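
/*
 * The _safe variants catch the #GP raised by a non-existent MSR and
 * report it instead of oopsing, so callers can probe.  An illustrative
 * sketch:
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(msr, &lo, &hi))
 *		return -EIO;	// MSR not present
 */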

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}
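
/*
 * A hypervisor that write-protects the guest's descriptor tables (Xen
 * PV is the classic example) intercepts these entry writes so it can
 * validate or forward them, rather than letting the guest modify the
 * tables directly.
 */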

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)
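
/*
 * With a 5-level-capable kernel running on 4-level hardware the top
 * level is folded: pgtable_l5_enabled() is false, the pgd entry really
 * lives at the p4d level, and set_pgd() above degrades to set_p4d()
 * accordingly (likewise pgd_clear() below becomes a no-op).
 */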

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)

#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}
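
/*
 * These are filled in by the hypervisor backend at boot when
 * paravirtualized spinlocks are in use; as an illustrative example,
 * KVM guests install __pv_queued_spin_lock_slowpath together with
 * kvm_wait()/kvm_kick_cpu() (see arch/x86/kernel/kvm.c).
 */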

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    ASM_RET							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
	    ".popsection")
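
/*
 * Usage sketch: wrapping a C function in the callee-save convention,
 * e.g. the native spin-unlock helper declared near the top of this
 * file:
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 *
 * which emits __raw_callee_save___native_queued_spin_unlock.
 */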

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
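
/*
 * Illustrative use of the two helpers above: wrap a thunked function
 * with PV_CALLEE_SAVE(func), or assert that a bare function is already
 * safe with __PV_IS_CALLEE_SAVE(func), e.g.
 *
 *	pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl);
 */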

#ifdef CONFIG_PARAVIRT_XXL
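/*
 * The irq-flag accessors below use the PVOP_(V)CALLEE variants: they
 * are inlined at every local_irq_*() site, so the target must honour
 * the callee-save thunk convention above to keep register clobbering
 * at those sites to a minimum.
 */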
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
#endif /* CONFIG_PARAVIRT_XXL */

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	.popsection
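
/*
 * Each _PVSITE use emits the original instruction sequence plus a
 * .parainstructions record describing it: the site address (word 771b),
 * the patch type (.byte ptype) and the sequence length in bytes
 * (.byte 772b-771b), which the patching code uses to rewrite the site
 * at boot.
 */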

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif
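
/*
 * COND_PUSH/COND_POP above push or pop %reg only when its mask bit is
 * clear in "set": "set" names the registers the caller allows to be
 * clobbered, and everything else is preserved.
 */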

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)		((off) / 4)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(PV_CPU_iret),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)
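
/*
 * The two macros below bracket the indirect call with
 * PV_SAVE_REGS/PV_RESTORE_REGS: any register that is neither listed in
 * "clobbers" nor protected by the callee-save convention is pushed
 * around the call, so a patch site only ever clobbers what it asked
 * for.
 */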
#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),			\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)                                        \
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),			    \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
		  ANNOTATE_RETPOLINE_SAFE;			    \
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);	    \
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif	/* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL

#define GET_CR2_INTO_AX							\
	PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);		\
		 )

#endif /* CONFIG_PARAVIRT_XXL */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */