/****************************************************************************
 ****************************************************************************
 ***
 ***   This header was automatically generated from a Linux kernel header
 ***   of the same name, to make information necessary for userspace to
 ***   call into the kernel available to libc. It contains only constants,
 ***   structures, and macros generated from the original header, and thus,
 ***   contains no copyrightable information.
 ***
 ***   To edit the content of this header, modify the corresponding
 ***   source file (e.g. under external/kernel-headers/original/) then
 ***   run bionic/libc/kernel/tools/update_all.py
 ***
 ***   Any manual change here will be lost the next time this script will
 ***   be run. You've been warned!
 ***
 ****************************************************************************
 ****************************************************************************/
#ifndef _ASM_X86_KVM_H
#define _ASM_X86_KVM_H
#include <linux/types.h>
#include <linux/ioctl.h>
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS
#define __KVM_HAVE_READONLY_MEM
#define KVM_NR_INTERRUPTS 256
struct kvm_memory_alias {
  __u32 slot;
  __u32 flags;
  __u64 guest_phys_addr;
  __u64 memory_size;
  __u64 target_phys_addr;
};
struct kvm_pic_state {
  __u8 last_irr;
  __u8 irr;
  __u8 imr;
  __u8 isr;
  __u8 priority_add;
  __u8 irq_base;
  __u8 read_reg_select;
  __u8 poll;
  __u8 special_mask;
  __u8 init_state;
  __u8 auto_eoi;
  __u8 rotate_on_auto_eoi;
  __u8 special_fully_nested_mode;
  __u8 init4;
  __u8 elcr;
  __u8 elcr_mask;
};
#define KVM_IOAPIC_NUM_PINS 24
struct kvm_ioapic_state {
  __u64 base_address;
  __u32 ioregsel;
  __u32 id;
  __u32 irr;
  __u32 pad;
  union {
    __u64 bits;
    struct {
      __u8 vector;
      __u8 delivery_mode : 3;
      __u8 dest_mode : 1;
      __u8 delivery_status : 1;
      __u8 polarity : 1;
      __u8 remote_irr : 1;
      __u8 trig_mode : 1;
      __u8 mask : 1;
      __u8 reserve : 7;
      __u8 reserved[4];
      __u8 dest_id;
    } fields;
  } redirtbl[KVM_IOAPIC_NUM_PINS];
};
#define KVM_IRQCHIP_PIC_MASTER 0
#define KVM_IRQCHIP_PIC_SLAVE 1
#define KVM_IRQCHIP_IOAPIC 2
#define KVM_NR_IRQCHIPS 3
#define KVM_RUN_X86_SMM (1 << 0)
struct kvm_regs {
  __u64 rax, rbx, rcx, rdx;
  __u64 rsi, rdi, rsp, rbp;
  __u64 r8, r9, r10, r11;
  __u64 r12, r13, r14, r15;
  __u64 rip, rflags;
};
#define KVM_APIC_REG_SIZE 0x400
struct kvm_lapic_state {
  char regs[KVM_APIC_REG_SIZE];
};
struct kvm_segment {
  __u64 base;
  __u32 limit;
  __u16 selector;
  __u8 type;
  __u8 present, dpl, db, s, l, g, avl;
  __u8 unusable;
  __u8 padding;
};
struct kvm_dtable {
  __u64 base;
  __u16 limit;
  __u16 padding[3];
};
struct kvm_sregs {
  struct kvm_segment cs, ds, es, fs, gs, ss;
  struct kvm_segment tr, ldt;
  struct kvm_dtable gdt, idt;
  __u64 cr0, cr2, cr3, cr4, cr8;
  __u64 efer;
  __u64 apic_base;
  __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
struct kvm_fpu {
  __u8 fpr[8][16];
  __u16 fcw;
  __u16 fsw;
  __u8 ftwx;
  __u8 pad1;
  __u16 last_opcode;
  __u64 last_ip;
  __u64 last_dp;
  __u8 xmm[16][16];
  __u32 mxcsr;
  __u32 pad2;
};
struct kvm_msr_entry {
  __u32 index;
  __u32 reserved;
  __u64 data;
};
struct kvm_msrs {
  __u32 nmsrs;
  __u32 pad;
  struct kvm_msr_entry entries[0];
};
struct kvm_msr_list {
  __u32 nmsrs;
  __u32 indices[0];
};
#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600
struct kvm_msr_filter_range {
#define KVM_MSR_FILTER_READ (1 << 0)
#define KVM_MSR_FILTER_WRITE (1 << 1)
  __u32 flags;
  __u32 nmsrs;
  __u32 base;
  __u8 * bitmap;
};
#define KVM_MSR_FILTER_MAX_RANGES 16
struct kvm_msr_filter {
#define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
#define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0)
  __u32 flags;
  struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
};
struct kvm_cpuid_entry {
  __u32 function;
  __u32 eax;
  __u32 ebx;
  __u32 ecx;
  __u32 edx;
  __u32 padding;
};
struct kvm_cpuid {
  __u32 nent;
  __u32 padding;
  struct kvm_cpuid_entry entries[0];
};
struct kvm_cpuid_entry2 {
  __u32 function;
  __u32 index;
  __u32 flags;
  __u32 eax;
  __u32 ebx;
  __u32 ecx;
  __u32 edx;
  __u32 padding[3];
};
#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX (1 << 0)
#define KVM_CPUID_FLAG_STATEFUL_FUNC (1 << 1)
#define KVM_CPUID_FLAG_STATE_READ_NEXT (1 << 2)
struct kvm_cpuid2 {
  __u32 nent;
  __u32 padding;
  struct kvm_cpuid_entry2 entries[0];
};
struct kvm_pit_channel_state {
  __u32 count;
  __u16 latched_count;
  __u8 count_latched;
  __u8 status_latched;
  __u8 status;
  __u8 read_state;
  __u8 write_state;
  __u8 write_latch;
  __u8 rw_mode;
  __u8 mode;
  __u8 bcd;
  __u8 gate;
  __s64 count_load_time;
};
struct kvm_debug_exit_arch {
  __u32 exception;
  __u32 pad;
  __u64 pc;
  __u64 dr6;
  __u64 dr7;
};
#define KVM_GUESTDBG_USE_SW_BP 0x00010000
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_GUESTDBG_INJECT_DB 0x00040000
#define KVM_GUESTDBG_INJECT_BP 0x00080000
struct kvm_guest_debug_arch {
  __u64 debugreg[8];
};
struct kvm_pit_state {
  struct kvm_pit_channel_state channels[3];
};
#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
struct kvm_pit_state2 {
  struct kvm_pit_channel_state channels[3];
  __u32 flags;
  __u32 reserved[9];
};
struct kvm_reinject_control {
  __u8 pit_reinject;
  __u8 reserved[31];
};
#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
#define KVM_X86_SHADOW_INT_STI 0x02
struct kvm_vcpu_events {
  struct {
    __u8 injected;
    __u8 nr;
    __u8 has_error_code;
    __u8 pending;
    __u32 error_code;
  } exception;
  struct {
    __u8 injected;
    __u8 nr;
    __u8 soft;
    __u8 shadow;
  } interrupt;
  struct {
    __u8 injected;
    __u8 pending;
    __u8 masked;
    __u8 pad;
  } nmi;
  __u32 sipi_vector;
  __u32 flags;
  struct {
    __u8 smm;
    __u8 pending;
    __u8 smm_inside_nmi;
    __u8 latched_init;
  } smi;
  __u8 reserved[27];
  __u8 exception_has_payload;
  __u64 exception_payload;
};
struct kvm_debugregs {
  __u64 db[4];
  __u64 dr6;
  __u64 dr7;
  __u64 flags;
  __u64 reserved[9];
};
struct kvm_xsave {
  __u32 region[1024];
};
#define KVM_MAX_XCRS 16
struct kvm_xcr {
  __u32 xcr;
  __u32 reserved;
  __u64 value;
};
struct kvm_xcrs {
  __u32 nr_xcrs;
  __u32 flags;
  struct kvm_xcr xcrs[KVM_MAX_XCRS];
  __u64 padding[16];
};
#define KVM_SYNC_X86_REGS (1UL << 0)
#define KVM_SYNC_X86_SREGS (1UL << 1)
#define KVM_SYNC_X86_EVENTS (1UL << 2)
#define KVM_SYNC_X86_VALID_FIELDS (KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS | KVM_SYNC_X86_EVENTS)
struct kvm_sync_regs {
  struct kvm_regs regs;
  struct kvm_sregs sregs;
  struct kvm_vcpu_events events;
};
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
#define KVM_STATE_NESTED_GIF_SET 0x00000100
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
#define KVM_STATE_NESTED_SVM_VMCB_SIZE 0x1000
#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
struct kvm_vmx_nested_state_data {
  __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
  __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
};
struct kvm_vmx_nested_state_hdr {
  __u64 vmxon_pa;
  __u64 vmcs12_pa;
  struct {
    __u16 flags;
  } smm;
  __u32 flags;
  __u64 preemption_timer_deadline;
};
struct kvm_svm_nested_state_data {
  __u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
};
struct kvm_svm_nested_state_hdr {
  __u64 vmcb_pa;
};
struct kvm_nested_state {
  __u16 flags;
  __u16 format;
  __u32 size;
  union {
    struct kvm_vmx_nested_state_hdr vmx;
    struct kvm_svm_nested_state_hdr svm;
    __u8 pad[120];
  } hdr;
  union {
    struct kvm_vmx_nested_state_data vmx[0];
    struct kvm_svm_nested_state_data svm[0];
  } data;
};
struct kvm_pmu_event_filter {
  __u32 action;
  __u32 nevents;
  __u32 fixed_counter_bitmap;
  __u32 flags;
  __u32 pad[4];
  __u64 events[0];
};
#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1
#endif