1// SPDX-License-Identifier: GPL-2.0-or-later 2/* Paravirtualization interfaces 3 Copyright (C) 2006 Rusty Russell IBM Corporation 4 5 6 2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc 7*/ 8 9#include <linux/errno.h> 10#include <linux/init.h> 11#include <linux/export.h> 12#include <linux/efi.h> 13#include <linux/bcd.h> 14#include <linux/highmem.h> 15#include <linux/kprobes.h> 16#include <linux/pgtable.h> 17 18#include <asm/bug.h> 19#include <asm/paravirt.h> 20#include <asm/debugreg.h> 21#include <asm/desc.h> 22#include <asm/setup.h> 23#include <asm/time.h> 24#include <asm/pgalloc.h> 25#include <asm/irq.h> 26#include <asm/delay.h> 27#include <asm/fixmap.h> 28#include <asm/apic.h> 29#include <asm/tlbflush.h> 30#include <asm/timer.h> 31#include <asm/special_insns.h> 32#include <asm/tlb.h> 33#include <asm/io_bitmap.h> 34 35/* 36 * nop stub, which must not clobber anything *including the stack* to 37 * avoid confusing the entry prologues. 38 */ 39extern void _paravirt_nop(void); 40asm (".pushsection .entry.text, \"ax\"\n" 41 ".global _paravirt_nop\n" 42 "_paravirt_nop:\n\t" 43 ASM_RET 44 ".size _paravirt_nop, . - _paravirt_nop\n\t" 45 ".type _paravirt_nop, @function\n\t" 46 ".popsection"); 47 48void __init default_banner(void) 49{ 50 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 51 pv_info.name); 52} 53 54/* Undefined instruction for dealing with missing ops pointers. */ 55static const unsigned char ud2a[] = { 0x0f, 0x0b }; 56 57struct branch { 58 unsigned char opcode; 59 u32 delta; 60} __attribute__((packed)); 61 62static unsigned paravirt_patch_call(void *insn_buff, const void *target, 63 unsigned long addr, unsigned len) 64{ 65 const int call_len = 5; 66 struct branch *b = insn_buff; 67 unsigned long delta = (unsigned long)target - (addr+call_len); 68 69 if (len < call_len) { 70 pr_warn("paravirt: Failed to patch indirect CALL at %ps\n", (void *)addr); 71 /* Kernel might not be viable if patching fails, bail out: */ 72 BUG_ON(1); 73 } 74 75 b->opcode = 0xe8; /* call */ 76 b->delta = delta; 77 BUILD_BUG_ON(sizeof(*b) != call_len); 78 79 return call_len; 80} 81 82#ifdef CONFIG_PARAVIRT_XXL 83/* identity function, which can be inlined */ 84u64 notrace _paravirt_ident_64(u64 x) 85{ 86 return x; 87} 88 89static unsigned paravirt_patch_jmp(void *insn_buff, const void *target, 90 unsigned long addr, unsigned len) 91{ 92 struct branch *b = insn_buff; 93 unsigned long delta = (unsigned long)target - (addr+5); 94 95 if (len < 5) { 96#ifdef CONFIG_RETPOLINE 97 WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr); 98#endif 99 return len; /* call too long for patch site */ 100 } 101 102 b->opcode = 0xe9; /* jmp */ 103 b->delta = delta; 104 105 return 5; 106} 107#endif 108 109DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); 110 111void __init native_pv_lock_init(void) 112{ 113 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 114 static_branch_disable(&virt_spin_lock_key); 115} 116 117unsigned paravirt_patch_default(u8 type, void *insn_buff, 118 unsigned long addr, unsigned len) 119{ 120 /* 121 * Neat trick to map patch type back to the call within the 122 * corresponding structure. 123 */ 124 void *opfunc = *((void **)&pv_ops + type); 125 unsigned ret; 126 127 if (opfunc == NULL) 128 /* If there's no function, patch it with a ud2a (BUG) */ 129 ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a+sizeof(ud2a)); 130 else if (opfunc == _paravirt_nop) 131 ret = 0; 132 133#ifdef CONFIG_PARAVIRT_XXL 134 /* identity functions just return their single argument */ 135 else if (opfunc == _paravirt_ident_64) 136 ret = paravirt_patch_ident_64(insn_buff, len); 137 138 else if (type == PARAVIRT_PATCH(cpu.iret) || 139 type == PARAVIRT_PATCH(cpu.usergs_sysret64)) 140 /* If operation requires a jmp, then jmp */ 141 ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len); 142#endif 143 else 144 /* Otherwise call the function. */ 145 ret = paravirt_patch_call(insn_buff, opfunc, addr, len); 146 147 return ret; 148} 149 150unsigned paravirt_patch_insns(void *insn_buff, unsigned len, 151 const char *start, const char *end) 152{ 153 unsigned insn_len = end - start; 154 155 /* Alternative instruction is too large for the patch site and we cannot continue: */ 156 BUG_ON(insn_len > len || start == NULL); 157 158 memcpy(insn_buff, start, insn_len); 159 160 return insn_len; 161} 162 163struct static_key paravirt_steal_enabled; 164struct static_key paravirt_steal_rq_enabled; 165 166static u64 native_steal_clock(int cpu) 167{ 168 return 0; 169} 170 171/* These are in entry.S */ 172extern void native_iret(void); 173extern void native_usergs_sysret64(void); 174 175static struct resource reserve_ioports = { 176 .start = 0, 177 .end = IO_SPACE_LIMIT, 178 .name = "paravirt-ioport", 179 .flags = IORESOURCE_IO | IORESOURCE_BUSY, 180}; 181 182/* 183 * Reserve the whole legacy IO space to prevent any legacy drivers 184 * from wasting time probing for their hardware. This is a fairly 185 * brute-force approach to disabling all non-virtual drivers. 186 * 187 * Note that this must be called very early to have any effect. 188 */ 189int paravirt_disable_iospace(void) 190{ 191 return request_resource(&ioport_resource, &reserve_ioports); 192} 193 194static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE; 195 196static inline void enter_lazy(enum paravirt_lazy_mode mode) 197{ 198 BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE); 199 200 this_cpu_write(paravirt_lazy_mode, mode); 201} 202 203static void leave_lazy(enum paravirt_lazy_mode mode) 204{ 205 BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode); 206 207 this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE); 208} 209 210void paravirt_enter_lazy_mmu(void) 211{ 212 enter_lazy(PARAVIRT_LAZY_MMU); 213} 214 215void paravirt_leave_lazy_mmu(void) 216{ 217 leave_lazy(PARAVIRT_LAZY_MMU); 218} 219 220void paravirt_flush_lazy_mmu(void) 221{ 222 preempt_disable(); 223 224 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { 225 arch_leave_lazy_mmu_mode(); 226 arch_enter_lazy_mmu_mode(); 227 } 228 229 preempt_enable(); 230} 231 232#ifdef CONFIG_PARAVIRT_XXL 233void paravirt_start_context_switch(struct task_struct *prev) 234{ 235 BUG_ON(preemptible()); 236 237 if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) { 238 arch_leave_lazy_mmu_mode(); 239 set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES); 240 } 241 enter_lazy(PARAVIRT_LAZY_CPU); 242} 243 244void paravirt_end_context_switch(struct task_struct *next) 245{ 246 BUG_ON(preemptible()); 247 248 leave_lazy(PARAVIRT_LAZY_CPU); 249 250 if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES)) 251 arch_enter_lazy_mmu_mode(); 252} 253#endif 254 255enum paravirt_lazy_mode paravirt_get_lazy_mode(void) 256{ 257 if (in_interrupt()) 258 return PARAVIRT_LAZY_NONE; 259 260 return this_cpu_read(paravirt_lazy_mode); 261} 262 263struct pv_info pv_info = { 264 .name = "bare hardware", 265#ifdef CONFIG_PARAVIRT_XXL 266 .extra_user_64bit_cs = __USER_CS, 267#endif 268}; 269 270/* 64-bit pagetable entries */ 271#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) 272 273struct paravirt_patch_template pv_ops = { 274 /* Init ops. */ 275 .init.patch = native_patch, 276 277 /* Time ops. */ 278 .time.sched_clock = native_sched_clock, 279 .time.steal_clock = native_steal_clock, 280 281 /* Cpu ops. */ 282 .cpu.io_delay = native_io_delay, 283 284#ifdef CONFIG_PARAVIRT_XXL 285 .cpu.cpuid = native_cpuid, 286 .cpu.get_debugreg = native_get_debugreg, 287 .cpu.set_debugreg = native_set_debugreg, 288 .cpu.read_cr0 = native_read_cr0, 289 .cpu.write_cr0 = native_write_cr0, 290 .cpu.write_cr4 = native_write_cr4, 291 .cpu.wbinvd = native_wbinvd, 292 .cpu.read_msr = native_read_msr, 293 .cpu.write_msr = native_write_msr, 294 .cpu.read_msr_safe = native_read_msr_safe, 295 .cpu.write_msr_safe = native_write_msr_safe, 296 .cpu.read_pmc = native_read_pmc, 297 .cpu.load_tr_desc = native_load_tr_desc, 298 .cpu.set_ldt = native_set_ldt, 299 .cpu.load_gdt = native_load_gdt, 300 .cpu.load_idt = native_load_idt, 301 .cpu.store_tr = native_store_tr, 302 .cpu.load_tls = native_load_tls, 303 .cpu.load_gs_index = native_load_gs_index, 304 .cpu.write_ldt_entry = native_write_ldt_entry, 305 .cpu.write_gdt_entry = native_write_gdt_entry, 306 .cpu.write_idt_entry = native_write_idt_entry, 307 308 .cpu.alloc_ldt = paravirt_nop, 309 .cpu.free_ldt = paravirt_nop, 310 311 .cpu.load_sp0 = native_load_sp0, 312 313 .cpu.usergs_sysret64 = native_usergs_sysret64, 314 .cpu.iret = native_iret, 315 316#ifdef CONFIG_X86_IOPL_IOPERM 317 .cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap, 318 .cpu.update_io_bitmap = native_tss_update_io_bitmap, 319#endif 320 321 .cpu.start_context_switch = paravirt_nop, 322 .cpu.end_context_switch = paravirt_nop, 323 324 /* Irq ops. */ 325 .irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), 326 .irq.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), 327 .irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), 328 .irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable), 329 .irq.safe_halt = native_safe_halt, 330 .irq.halt = native_halt, 331#endif /* CONFIG_PARAVIRT_XXL */ 332 333 /* Mmu ops. */ 334 .mmu.flush_tlb_user = native_flush_tlb_local, 335 .mmu.flush_tlb_kernel = native_flush_tlb_global, 336 .mmu.flush_tlb_one_user = native_flush_tlb_one_user, 337 .mmu.flush_tlb_others = native_flush_tlb_others, 338 .mmu.tlb_remove_table = 339 (void (*)(struct mmu_gather *, void *))tlb_remove_page, 340 341 .mmu.exit_mmap = paravirt_nop, 342 343#ifdef CONFIG_PARAVIRT_XXL 344 .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2), 345 .mmu.write_cr2 = native_write_cr2, 346 .mmu.read_cr3 = __native_read_cr3, 347 .mmu.write_cr3 = native_write_cr3, 348 349 .mmu.pgd_alloc = __paravirt_pgd_alloc, 350 .mmu.pgd_free = paravirt_nop, 351 352 .mmu.alloc_pte = paravirt_nop, 353 .mmu.alloc_pmd = paravirt_nop, 354 .mmu.alloc_pud = paravirt_nop, 355 .mmu.alloc_p4d = paravirt_nop, 356 .mmu.release_pte = paravirt_nop, 357 .mmu.release_pmd = paravirt_nop, 358 .mmu.release_pud = paravirt_nop, 359 .mmu.release_p4d = paravirt_nop, 360 361 .mmu.set_pte = native_set_pte, 362 .mmu.set_pmd = native_set_pmd, 363 364 .mmu.ptep_modify_prot_start = __ptep_modify_prot_start, 365 .mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit, 366 367 .mmu.set_pud = native_set_pud, 368 369 .mmu.pmd_val = PTE_IDENT, 370 .mmu.make_pmd = PTE_IDENT, 371 372 .mmu.pud_val = PTE_IDENT, 373 .mmu.make_pud = PTE_IDENT, 374 375 .mmu.set_p4d = native_set_p4d, 376 377#if CONFIG_PGTABLE_LEVELS >= 5 378 .mmu.p4d_val = PTE_IDENT, 379 .mmu.make_p4d = PTE_IDENT, 380 381 .mmu.set_pgd = native_set_pgd, 382#endif /* CONFIG_PGTABLE_LEVELS >= 5 */ 383 384 .mmu.pte_val = PTE_IDENT, 385 .mmu.pgd_val = PTE_IDENT, 386 387 .mmu.make_pte = PTE_IDENT, 388 .mmu.make_pgd = PTE_IDENT, 389 390 .mmu.dup_mmap = paravirt_nop, 391 .mmu.activate_mm = paravirt_nop, 392 393 .mmu.lazy_mode = { 394 .enter = paravirt_nop, 395 .leave = paravirt_nop, 396 .flush = paravirt_nop, 397 }, 398 399 .mmu.set_fixmap = native_set_fixmap, 400#endif /* CONFIG_PARAVIRT_XXL */ 401 402#if defined(CONFIG_PARAVIRT_SPINLOCKS) 403 /* Lock ops. */ 404#ifdef CONFIG_SMP 405 .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath, 406 .lock.queued_spin_unlock = 407 PV_CALLEE_SAVE(__native_queued_spin_unlock), 408 .lock.wait = paravirt_nop, 409 .lock.kick = paravirt_nop, 410 .lock.vcpu_is_preempted = 411 PV_CALLEE_SAVE(__native_vcpu_is_preempted), 412#endif /* SMP */ 413#endif 414}; 415 416#ifdef CONFIG_PARAVIRT_XXL 417/* At this point, native_get/set_debugreg has real function entries */ 418NOKPROBE_SYMBOL(native_get_debugreg); 419NOKPROBE_SYMBOL(native_set_debugreg); 420NOKPROBE_SYMBOL(native_load_idt); 421#endif 422 423EXPORT_SYMBOL(pv_ops); 424EXPORT_SYMBOL_GPL(pv_info); 425