Lines Matching refs:vmcs12

59 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
66 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
336 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
351 vmcs12->guest_physical_address = fault->address;
377 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
382 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
384 (error_code & vmcs12->page_fault_error_code_mask) !=
385 vmcs12->page_fault_error_code_match;
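
Note: matches 377-385 are the heart of nested_vmx_is_page_fault_vmexit(): a #PF is reflected to L1 when the exception-bitmap bit and the error-code mask/match test disagree, per the SDM's XOR rule. A minimal standalone model of that predicate (PF_VECTOR is 14; parameter names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define PF_VECTOR 14

/* Simplified model: the #PF causes a VM-exit to L1 iff the
 * exception-bitmap bit and the error-code mask/match test disagree. */
static bool pf_causes_vmexit(uint32_t exception_bitmap,
			     uint32_t ec_mask, uint32_t ec_match,
			     uint16_t error_code)
{
	bool bit = (exception_bitmap & (1u << PF_VECTOR)) != 0;
	bool inequality = (error_code & ec_mask) != ec_match;

	return inequality ^ bit;
}
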
396 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
406 if (nested_vmx_is_page_fault_vmexit(vmcs12,
411 } else if (vmcs12->exception_bitmap & (1u << nr)) {
431 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
435 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
437 vmcs12->vm_exit_intr_error_code = fault->error_code;
448 struct vmcs12 *vmcs12)
450 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
453 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
454 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
461 struct vmcs12 *vmcs12)
463 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
466 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
473 struct vmcs12 *vmcs12)
475 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
478 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
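
Note: matches 448-478 repeat one validation shape three times (I/O bitmaps, MSR bitmap, virtual-APIC page): if the controlling execution bit is clear the field is ignored, otherwise its guest-physical address must be page-aligned and within the guest's physical-address width. A hedged sketch of that shared shape, with page_address_valid() re-modeled under the assumption of 4 KiB pages:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the kernel's page_address_valid(): 4 KiB-aligned and
 * within the guest's physical-address width. */
static bool page_address_valid(uint64_t gpa, unsigned int maxphyaddr)
{
	return (gpa & 0xfffull) == 0 && !(gpa >> maxphyaddr);
}

/* Shared shape of the bitmap/TPR-shadow checks: skip when the control
 * is off, else validate the GPA.  Returns 0 on success. */
static int check_gpa_control(uint32_t exec_controls, uint32_t required_bit,
			     uint64_t gpa, unsigned int maxphyaddr)
{
	if (!(exec_controls & required_bit))
		return 0;
	return page_address_valid(gpa, maxphyaddr) ? 0 : -1;
}
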
565 struct vmcs12 *vmcs12)
574 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
577 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
589 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
590 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
609 if (nested_cpu_has_vid(vmcs12)) {
664 struct vmcs12 *vmcs12)
667 struct vmcs12 *shadow;
669 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
670 vmcs12->vmcs_link_pointer == -1ull)
675 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
683 struct vmcs12 *vmcs12)
687 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
688 vmcs12->vmcs_link_pointer == -1ull)
691 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
706 struct vmcs12 *vmcs12)
708 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
709 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
716 struct vmcs12 *vmcs12)
718 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
719 !nested_cpu_has_apic_reg_virt(vmcs12) &&
720 !nested_cpu_has_vid(vmcs12) &&
721 !nested_cpu_has_posted_intr(vmcs12))
728 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
729 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
736 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
746 if (nested_cpu_has_posted_intr(vmcs12) &&
747 (CC(!nested_cpu_has_vid(vmcs12)) ||
749 CC((vmcs12->posted_intr_nv & 0xff00)) ||
750 CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
751 CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
755 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
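
Note: the posted-interrupt branch (matches 746-751) packs the architectural requirements into one condition: virtual-interrupt delivery enabled, an 8-bit notification vector, and a 64-byte-aligned descriptor within the guest's physical-address width. A simplified standalone model:

#include <stdbool.h>
#include <stdint.h>

/* Model of the posted-interrupt consistency checks in
 * nested_vmx_check_apicv_controls(); all four must hold. */
static bool posted_intr_config_valid(bool has_vid,
				     uint16_t posted_intr_nv,
				     uint64_t posted_intr_desc_addr,
				     unsigned int maxphyaddr)
{
	return has_vid &&
	       !(posted_intr_nv & 0xff00) &&           /* 8-bit vector    */
	       !(posted_intr_desc_addr & 0x3f) &&      /* 64-byte aligned */
	       !(posted_intr_desc_addr >> maxphyaddr); /* in range        */
}
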
777 struct vmcs12 *vmcs12)
780 vmcs12->vm_exit_msr_load_count,
781 vmcs12->vm_exit_msr_load_addr)) ||
783 vmcs12->vm_exit_msr_store_count,
784 vmcs12->vm_exit_msr_store_addr)))
791 struct vmcs12 *vmcs12)
794 vmcs12->vm_entry_msr_load_count,
795 vmcs12->vm_entry_msr_load_addr)))
802 struct vmcs12 *vmcs12)
804 if (!nested_cpu_has_pml(vmcs12))
807 if (CC(!nested_cpu_has_ept(vmcs12)) ||
808 CC(!page_address_valid(vcpu, vmcs12->pml_address)))
815 struct vmcs12 *vmcs12)
817 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
818 !nested_cpu_has_ept(vmcs12)))
824 struct vmcs12 *vmcs12)
826 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
827 !nested_cpu_has_ept(vmcs12)))
833 struct vmcs12 *vmcs12)
835 if (!nested_cpu_has_shadow_vmcs(vmcs12))
838 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
839 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
1016 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1017 u32 count = vmcs12->vm_exit_msr_store_count;
1018 u64 gpa = vmcs12->vm_exit_msr_store_addr;
1183 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1186 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1190 struct vmcs12 *vmcs12,
1204 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
1222 if (!nested_cpu_has_vpid(vmcs12)) {
1227 vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1228 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
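
Note: matches 1222-1228 show the VPID bookkeeping in nested_vmx_transition_tlb_flush(): L0 runs L2 under its own vpid02 tag, so a change of vmcs12->virtual_processor_id by L1 invalidates whatever L0 has cached under that tag. A simplified model (flush_pending stands in for the real TLB-flush request):

#include <stdbool.h>
#include <stdint.h>

struct nested_state {
	uint16_t last_vpid;
	bool	 flush_pending;
};

/* When vmcs12 uses VPID and L1 installs a new virtual_processor_id,
 * the mappings L0 tagged with vpid02 are stale and must be flushed. */
static void tlb_flush_on_vpid_change(struct nested_state *n,
				     bool vmcs12_has_vpid,
				     uint16_t virtual_processor_id)
{
	if (!vmcs12_has_vpid)
		return;
	if (virtual_processor_id != n->last_vpid) {
		n->last_vpid = virtual_processor_id;
		n->flush_pending = true;	/* stand-in for a real flush */
	}
}
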
1558 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1573 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
1593 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1606 val = vmcs12_read_any(vmcs12, field.encoding,
1618 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1622 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1623 vmcs12->guest_rip = evmcs->guest_rip;
1627 vmcs12->guest_rsp = evmcs->guest_rsp;
1628 vmcs12->guest_rflags = evmcs->guest_rflags;
1629 vmcs12->guest_interruptibility_info =
1635 vmcs12->cpu_based_vm_exec_control =
1641 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1646 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1651 vmcs12->vm_entry_intr_info_field =
1653 vmcs12->vm_entry_exception_error_code =
1655 vmcs12->vm_entry_instruction_len =
1661 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1662 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1663 vmcs12->host_cr0 = evmcs->host_cr0;
1664 vmcs12->host_cr3 = evmcs->host_cr3;
1665 vmcs12->host_cr4 = evmcs->host_cr4;
1666 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1667 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1668 vmcs12->host_rip = evmcs->host_rip;
1669 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1670 vmcs12->host_es_selector = evmcs->host_es_selector;
1671 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1672 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1673 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1674 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1675 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1676 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1681 vmcs12->pin_based_vm_exec_control =
1683 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1684 vmcs12->secondary_vm_exec_control =
1690 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1691 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1696 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1701 vmcs12->guest_es_base = evmcs->guest_es_base;
1702 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1703 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1704 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1705 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1706 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1707 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1708 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1709 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1710 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1711 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1712 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1713 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1714 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1715 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1716 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1717 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1718 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1719 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1720 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1721 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1722 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1723 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1724 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1725 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1726 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1727 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1728 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1729 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1730 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1731 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1732 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1733 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1734 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1735 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1736 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1741 vmcs12->tsc_offset = evmcs->tsc_offset;
1742 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1743 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1748 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1749 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1750 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1751 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1752 vmcs12->guest_cr0 = evmcs->guest_cr0;
1753 vmcs12->guest_cr3 = evmcs->guest_cr3;
1754 vmcs12->guest_cr4 = evmcs->guest_cr4;
1755 vmcs12->guest_dr7 = evmcs->guest_dr7;
1760 vmcs12->host_fs_base = evmcs->host_fs_base;
1761 vmcs12->host_gs_base = evmcs->host_gs_base;
1762 vmcs12->host_tr_base = evmcs->host_tr_base;
1763 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1764 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1765 vmcs12->host_rsp = evmcs->host_rsp;
1770 vmcs12->ept_pointer = evmcs->ept_pointer;
1771 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1776 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1777 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1778 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1779 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1780 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1781 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1782 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1783 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1784 vmcs12->guest_pending_dbg_exceptions =
1786 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1787 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1788 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1789 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1790 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1795 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1796 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1797 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1798 * vmcs12->page_fault_error_code_mask =
1800 * vmcs12->page_fault_error_code_match =
1802 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1803 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1804 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1805 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1810 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1811 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1812 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1813 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1814 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1815 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1816 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1817 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1818 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1819 * vmcs12->exit_qualification = evmcs->exit_qualification;
1820 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1822 * Not present in struct vmcs12:
1823 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1824 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1825 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1826 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
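
Note: the numbering gaps inside copy_enlightened_to_vmcs12() (e.g. between matches 1623 and 1627, or 1629 and 1635) appear to be the Hyper-V clean-field guards: each copy group runs only when L1 dirtied it since the last entry. A sketch of that gating pattern, with an illustrative bit value rather than the real HV_VMX_ENLIGHTENED_CLEAN_FIELD_* constant:

#include <stdint.h>

/* Illustrative clean-field bit; the real values come from the
 * HV_VMX_ENLIGHTENED_CLEAN_FIELD_* definitions. */
#define CLEAN_FIELD_GUEST_BASIC (1u << 0)

struct evmcs_min  { uint32_t hv_clean_fields; uint64_t guest_rsp, guest_rflags; };
struct vmcs12_min { uint64_t guest_rsp, guest_rflags; };

/* A field group is copied only when its clean bit is clear, i.e. L1
 * touched the group since the last VM-entry. */
static void copy_guest_basic(struct vmcs12_min *vmcs12,
			     const struct evmcs_min *evmcs)
{
	if (!(evmcs->hv_clean_fields & CLEAN_FIELD_GUEST_BASIC)) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
	}
}
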
1834 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1840 * evmcs->host_es_selector = vmcs12->host_es_selector;
1841 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1842 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1843 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1844 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1845 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1846 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1847 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1848 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1849 * evmcs->host_cr0 = vmcs12->host_cr0;
1850 * evmcs->host_cr3 = vmcs12->host_cr3;
1851 * evmcs->host_cr4 = vmcs12->host_cr4;
1852 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1853 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1854 * evmcs->host_rip = vmcs12->host_rip;
1855 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1856 * evmcs->host_fs_base = vmcs12->host_fs_base;
1857 * evmcs->host_gs_base = vmcs12->host_gs_base;
1858 * evmcs->host_tr_base = vmcs12->host_tr_base;
1859 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1860 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1861 * evmcs->host_rsp = vmcs12->host_rsp;
1863 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1864 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1865 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1866 * evmcs->ept_pointer = vmcs12->ept_pointer;
1867 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1868 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1869 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1870 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1871 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1872 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1873 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1874 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1875 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1876 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1877 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1879 * vmcs12->page_fault_error_code_mask;
1881 * vmcs12->page_fault_error_code_match;
1882 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1883 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1884 * evmcs->tsc_offset = vmcs12->tsc_offset;
1885 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1886 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1887 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1888 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1889 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1890 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1891 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1892 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1894 * Not present in struct vmcs12:
1895 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1896 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1897 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1898 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1901 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1902 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1903 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1904 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1905 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1906 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1907 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1908 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1910 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1911 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1912 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1913 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1914 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1915 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1916 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1917 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1918 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1919 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1921 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1922 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1923 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1924 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1925 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1926 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1927 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1928 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1930 evmcs->guest_es_base = vmcs12->guest_es_base;
1931 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1932 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1933 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1934 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1935 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1936 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1937 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1938 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1939 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1941 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1942 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1944 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1945 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1946 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1947 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1950 vmcs12->guest_pending_dbg_exceptions;
1951 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1952 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1954 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1955 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1957 evmcs->guest_cr0 = vmcs12->guest_cr0;
1958 evmcs->guest_cr3 = vmcs12->guest_cr3;
1959 evmcs->guest_cr4 = vmcs12->guest_cr4;
1960 evmcs->guest_dr7 = vmcs12->guest_dr7;
1962 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1964 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1965 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1966 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1967 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1968 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1969 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1970 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1971 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1973 evmcs->exit_qualification = vmcs12->exit_qualification;
1975 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1976 evmcs->guest_rsp = vmcs12->guest_rsp;
1977 evmcs->guest_rflags = vmcs12->guest_rflags;
1980 vmcs12->guest_interruptibility_info;
1981 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1982 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1983 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1985 vmcs12->vm_entry_exception_error_code;
1986 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1988 evmcs->guest_rip = vmcs12->guest_rip;
1990 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
2058 * Unlike normal vmcs12, enlightened vmcs12 is not fully
2064 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2065 memset(vmcs12, 0, sizeof(*vmcs12));
2066 vmcs12->hdr.revision_id = VMCS12_REVISION;
2113 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2120 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
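
Note: match 2120 computes the absolute preemption-timer deadline: the timer ticks at the TSC rate shifted down by the VMX_MISC rate field (5 in KVM's emulation), so the deadline is the programmed value plus the current scaled L1 TSC. A minimal sketch under that assumption:

#include <stdint.h>

/* Assumed VMX_MISC preemption-timer rate (timer ticks = TSC >> 5). */
#define PREEMPTION_TIMER_RATE 5

/* Absolute deadline = programmed timer value + current scaled L1 TSC. */
static uint64_t preemption_deadline(uint64_t timer_value, uint64_t l1_tsc)
{
	return timer_value + (l1_tsc >> PREEMPTION_TIMER_RATE);
}
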
2151 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2154 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2155 return vmcs12->guest_ia32_efer;
2156 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
2166 * according to L0's settings (vmcs12 is irrelevant here). Host
2221 struct vmcs12 *vmcs12)
2228 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2236 struct vmcs12 *vmcs12)
2239 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2242 prepare_vmcs02_early_rare(vmx, vmcs12);
2248 exec_control |= (vmcs12->pin_based_vm_exec_control &
2251 /* Posted interrupts setting is only taken from vmcs12. */
2253 if (nested_cpu_has_posted_intr(vmcs12))
2254 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2266 exec_control |= vmcs12->cpu_based_vm_exec_control;
2270 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2301 /* Take the following fields only from vmcs12 */
2313 if (nested_cpu_has(vmcs12,
2315 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2328 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2333 vmcs12->guest_intr_status);
2335 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
2344 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2354 exec_control |= (vmcs12->vm_entry_controls &
2384 vmcs12->vm_entry_intr_info_field);
2386 vmcs12->vm_entry_exception_error_code);
2388 vmcs12->vm_entry_instruction_len);
2390 vmcs12->guest_interruptibility_info);
2392 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2398 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2404 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2405 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2406 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2407 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2408 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2409 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2410 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2411 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2412 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2413 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2414 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2415 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2416 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2417 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2418 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2419 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2420 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2421 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2422 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2423 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2424 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2425 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2426 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2427 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2428 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2429 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2430 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2431 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2432 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2433 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2434 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2435 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2436 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2437 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2438 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2439 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2446 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2448 vmcs12->guest_pending_dbg_exceptions);
2449 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2450 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2454 * vmcs12
2457 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2458 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2459 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2460 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2464 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2465 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2468 if (nested_cpu_has_xsaves(vmcs12))
2469 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2480 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2481 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2492 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
2493 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
2497 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2498 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2499 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2500 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2518 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2527 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2535 prepare_vmcs02_rare(vmx, vmcs12);
2544 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2545 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2546 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2552 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2554 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2561 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2565 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2566 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2567 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2577 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2579 if (nested_cpu_has_ept(vmcs12))
2583 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2587 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we
2590 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2591 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2593 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2594 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2596 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2611 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2616 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2619 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2623 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2626 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2628 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2629 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2630 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2631 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2637 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2639 vmcs12->guest_ia32_perf_global_ctrl))) {
2644 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2645 kvm_rip_write(vcpu, vmcs12->guest_rip);
2649 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2651 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
2652 nested_cpu_has_virtual_nmis(vmcs12)))
2655 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
2656 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
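
Note: matches 2649-2656 encode two SDM consistency rules for the NMI controls: "virtual NMIs" requires "NMI exiting", and "NMI-window exiting" requires "virtual NMIs". A minimal model:

#include <stdbool.h>

static bool nmi_controls_valid(bool nmi_exiting, bool virtual_nmis,
			       bool nmi_window_exiting)
{
	if (virtual_nmis && !nmi_exiting)
		return false;		/* virtual NMIs need NMI exiting  */
	if (nmi_window_exiting && !virtual_nmis)
		return false;		/* NMI window needs virtual NMIs  */
	return true;
}
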
2712 struct vmcs12 *vmcs12)
2716 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2719 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2724 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2725 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2730 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
2731 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2732 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2733 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2734 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2735 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2736 nested_vmx_check_nmi_controls(vmcs12) ||
2737 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2738 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2739 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2740 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2741 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2744 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2745 nested_cpu_has_save_preemption_timer(vmcs12))
2748 if (nested_cpu_has_ept(vmcs12) &&
2749 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
2752 if (nested_cpu_has_vmfunc(vmcs12)) {
2753 if (CC(vmcs12->vm_function_control &
2757 if (nested_cpu_has_eptp_switching(vmcs12)) {
2758 if (CC(!nested_cpu_has_ept(vmcs12)) ||
2759 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
2771 struct vmcs12 *vmcs12)
2775 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2778 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
2788 struct vmcs12 *vmcs12)
2792 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2803 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2804 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2809 bool urg = nested_cpu_has2(vmcs12,
2811 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2834 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
2846 if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2847 CC(vmcs12->vm_entry_instruction_len == 0 &&
2853 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2860 struct vmcs12 *vmcs12)
2862 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2863 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2864 nested_check_vm_entry_controls(vcpu, vmcs12))
2868 return nested_evmcs_check_controls(vmcs12);
2874 struct vmcs12 *vmcs12)
2877 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
2885 struct vmcs12 *vmcs12)
2889 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2890 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
2891 CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
2894 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
2895 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
2898 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2899 CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
2902 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2904 vmcs12->host_ia32_perf_global_ctrl)))
2908 ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
2914 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
2917 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
2918 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2919 CC((vmcs12->host_rip) >> 32))
2923 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2924 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2925 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2926 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2927 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2928 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2929 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2930 CC(vmcs12->host_cs_selector == 0) ||
2931 CC(vmcs12->host_tr_selector == 0) ||
2932 CC(vmcs12->host_ss_selector == 0 && !ia32e))
2935 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
2936 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
2937 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
2938 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
2939 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
2940 CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
2949 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2950 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
2951 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
2952 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
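
Note: matches 2949-2952 enforce the host-EFER consistency rule: when the "load IA32_EFER" exit control is set, EFER.LMA and EFER.LME must both match the "host address-space size" exit control. Modeled standalone (EFER bit positions per the SDM):

#include <stdbool.h>
#include <stdint.h>

#define EFER_LME (1ull << 8)
#define EFER_LMA (1ull << 10)

/* When the exit loads IA32_EFER, both LMA and LME must agree with the
 * host address-space size control (ia32e). */
static bool host_efer_consistent(bool load_efer, bool ia32e,
				 uint64_t host_efer)
{
	if (!load_efer)
		return true;
	return ia32e == !!(host_efer & EFER_LMA) &&
	       ia32e == !!(host_efer & EFER_LME);
}
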
2960 struct vmcs12 *vmcs12)
2963 struct vmcs12 *shadow;
2966 if (vmcs12->vmcs_link_pointer == -1ull)
2969 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
2972 if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
2978 CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
2988 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2990 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2991 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT))
2998 struct vmcs12 *vmcs12,
3001 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
3005 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
3006 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
3009 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
3010 CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
3013 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
3014 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
3017 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
3022 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
3024 vmcs12->guest_ia32_perf_global_ctrl)))
3027 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
3030 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
3031 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
3044 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
3045 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
3046 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
3047 CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
3048 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
3052 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
3053 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
3054 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
3057 if (nested_check_guest_non_reg_state(vmcs12))
3147 * L2 was running), map it here to make sure vmcs12 changes are
3164 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3170 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3181 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
3187 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
3197 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3200 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
3202 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
3203 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
3204 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3223 if (nested_cpu_has_posted_intr(vmcs12)) {
3226 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
3229 offset_in_page(vmcs12->posted_intr_desc_addr));
3231 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
3234 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
3263 struct vmcs12 *vmcs12;
3277 vmcs12 = get_vmcs12(vcpu);
3278 if (!nested_cpu_has_pml(vmcs12))
3281 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
3287 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
3293 vmcs12->guest_pml_index--;
3329 struct vmcs12 *vmcs12);
3345 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3363 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3367 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
3391 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3404 if (nested_vmx_check_guest_state(vcpu, vmcs12,
3407 vmcs12->exit_qualification = entry_failure_code;
3413 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3414 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
3416 if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
3418 vmcs12->exit_qualification = entry_failure_code;
3424 vmcs12->vm_entry_msr_load_addr,
3425 vmcs12->vm_entry_msr_load_count);
3428 vmcs12->exit_qualification = failed_index;
3465 if (nested_cpu_has_preemption_timer(vmcs12)) {
3484 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3485 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3494 load_vmcs12_host_state(vcpu, vmcs12);
3495 vmcs12->vm_exit_reason = exit_reason.full;
3507 struct vmcs12 *vmcs12;
3527 vmcs12 = get_vmcs12(vcpu);
3535 if (CC(vmcs12->hdr.shadow_vmcs))
3541 vmcs12->launch_state = !launch;
3548 * on vmcs12 as required by the Intel SDM, and act appropriately when
3559 if (CC(vmcs12->launch_state == launch))
3564 if (nested_vmx_check_controls(vcpu, vmcs12))
3567 if (nested_vmx_check_address_space_size(vcpu, vmcs12))
3570 if (nested_vmx_check_host_state(vcpu, vmcs12))
3584 if (nested_cpu_has_posted_intr(vmcs12) &&
3599 * In this flow, it is assumed that vmcs12 cache was
3604 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3611 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
3612 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3613 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_NMI_WINDOW_EXITING) &&
3614 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_INTR_WINDOW_EXITING) &&
3615 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3632 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3634 * This function returns the new value we should put in vmcs12.guest_cr0.
3641 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3649 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3653 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3654 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3659 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3663 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3664 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
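
Note: matches 3649-3664 implement the three-way merge described in the comment around 3632-3641: each CR0/CR4 bit is taken from the live vmcs02 value if L2 owned it, from vmcs12 if L1 trapped it, and from the read shadow otherwise. The CR0 case as a standalone function (vmcs12_guest_cr4() is identical in shape):

#include <stdint.h>

/* Per-bit source selection:
 *   1. guest-owned bits -> live vmcs02 GUEST_CR0 (L2 changed them freely)
 *   2. L1-trapped bits  -> vmcs12->guest_cr0 (unchanged while L2 ran)
 *   3. everything else  -> the CR0 read shadow
 */
static uint64_t merge_guest_cr0(uint64_t vmcs02_cr0,
				uint64_t cr0_read_shadow,
				uint64_t guest_owned_bits,
				uint64_t vmcs12_cr0,
				uint64_t cr0_guest_host_mask)
{
	return (vmcs02_cr0 & guest_owned_bits) |
	       (vmcs12_cr0 & cr0_guest_host_mask) |
	       (cr0_read_shadow & ~(cr0_guest_host_mask | guest_owned_bits));
}
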
3669 struct vmcs12 *vmcs12,
3695 vmcs12->idt_vectoring_info_field = 0;
3701 vmcs12->vm_exit_instruction_len =
3709 vmcs12->idt_vectoring_error_code =
3713 vmcs12->idt_vectoring_info_field = idt_vectoring;
3715 vmcs12->idt_vectoring_info_field =
3723 vmcs12->vm_entry_instruction_len =
3728 vmcs12->idt_vectoring_info_field = idt_vectoring;
3730 vmcs12->idt_vectoring_info_field = 0;
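
Note: matches 3695-3730 rebuild vmcs12->idt_vectoring_info_field from any event that was mid-injection when the exit happened. The encoding (vector in bits 7:0, type in bits 10:8, error-code flag in bit 11, valid in bit 31) modeled for the hardware-exception case, soft events omitted:

#include <stdbool.h>
#include <stdint.h>

#define VECTORING_INFO_VALID_MASK	 (1u << 31)
#define VECTORING_INFO_DELIVER_CODE_MASK (1u << 11)
#define INTR_TYPE_HARD_EXCEPTION	 (3u << 8)

static uint32_t encode_idt_vectoring(uint8_t vector, bool has_error_code)
{
	uint32_t info = vector | INTR_TYPE_HARD_EXCEPTION |
			VECTORING_INFO_VALID_MASK;

	if (has_error_code)
		info |= VECTORING_INFO_DELIVER_CODE_MASK;
	return info;
}
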
3737 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3745 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3746 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3750 if (nested_cpu_has_posted_intr(vmcs12)) {
3751 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3792 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3806 vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code;
3815 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
4017 struct vmcs12 *vmcs12)
4021 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
4022 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
4023 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
4024 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
4025 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
4026 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
4027 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
4028 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
4029 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
4030 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
4031 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
4032 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
4033 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
4034 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
4035 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
4036 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
4037 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
4038 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
4039 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
4040 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
4041 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
4042 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
4043 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
4044 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
4045 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
4046 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
4047 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
4048 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
4049 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
4050 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
4051 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
4052 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
4053 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
4054 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
4055 vmcs12->guest_pending_dbg_exceptions =
4058 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
4064 struct vmcs12 *vmcs12)
4079 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4087 * Update the guest state fields of vmcs12 to reflect changes that
4092 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
4097 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4101 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
4102 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
4104 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
4105 vmcs12->guest_rip = kvm_rip_read(vcpu);
4106 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
4108 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
4109 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
4111 vmcs12->guest_interruptibility_info =
4115 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
4117 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
4119 if (nested_cpu_has_preemption_timer(vmcs12) &&
4120 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
4122 vmcs12->vmx_preemption_timer_value =
4129 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
4131 * Additionally, restore L2's PDPTR to vmcs12.
4134 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
4135 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
4136 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
4137 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
4138 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
4139 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
4143 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
4145 if (nested_cpu_has_vid(vmcs12))
4146 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
4148 vmcs12->vm_entry_controls =
4149 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
4152 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
4153 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
4155 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
4156 vmcs12->guest_ia32_efer = vcpu->arch.efer;
4161 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
4168 * which already writes to vmcs12 directly.
4170 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
4175 vmcs12->vm_exit_reason = vm_exit_reason;
4176 vmcs12->exit_qualification = exit_qualification;
4183 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4184 vmcs12->launch_state = 1;
4188 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4194 vmcs12_save_pending_event(vcpu, vmcs12,
4197 vmcs12->vm_exit_intr_info = exit_intr_info;
4198 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4199 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4208 vmcs12->vm_exit_msr_store_addr,
4209 vmcs12->vm_exit_msr_store_count))
4218 * in vmcs12.
4225 struct vmcs12 *vmcs12)
4230 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4231 vcpu->arch.efer = vmcs12->host_ia32_efer;
4232 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4238 kvm_rsp_write(vcpu, vmcs12->host_rsp);
4239 kvm_rip_write(vcpu, vmcs12->host_rip);
4251 vmx_set_cr0(vcpu, vmcs12->host_cr0);
4255 vmx_set_cr4(vcpu, vmcs12->host_cr4);
4263 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored))
4269 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
4271 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4272 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4273 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4274 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4275 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4280 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4283 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4284 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4285 vcpu->arch.pat = vmcs12->host_ia32_pat;
4287 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
4289 vmcs12->host_ia32_perf_global_ctrl));
4296 .selector = vmcs12->host_cs_selector,
4302 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4316 seg.selector = vmcs12->host_ds_selector;
4318 seg.selector = vmcs12->host_es_selector;
4320 seg.selector = vmcs12->host_ss_selector;
4322 seg.selector = vmcs12->host_fs_selector;
4323 seg.base = vmcs12->host_fs_base;
4325 seg.selector = vmcs12->host_gs_selector;
4326 seg.base = vmcs12->host_gs_base;
4329 .base = vmcs12->host_tr_base,
4331 .selector = vmcs12->host_tr_selector,
4343 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4344 vmcs12->vm_exit_msr_load_count))
4373 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4381 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4435 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4436 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4444 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4445 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
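
Note: the MSR-area loops at matches 4435-4445 step through guest memory in 16-byte strides; each slot is a vmx_msr_entry. A sketch of the layout and the address arithmetic:

#include <stdint.h>

/* The guest supplies an array of 16-byte entries at a guest-physical
 * address; index is the MSR number, value its payload. */
struct vmx_msr_entry {
	uint32_t index;
	uint32_t reserved;
	uint64_t value;
};

/* Guest-physical address of slot i, matching "addr + i * sizeof(e)". */
static uint64_t msr_entry_gpa(uint64_t area_gpa, uint32_t i)
{
	return area_gpa + (uint64_t)i * sizeof(struct vmx_msr_entry);
}
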
4481 * and modify vmcs12 to make it see what it would expect to see there if
4488 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4517 if (nested_cpu_has_preemption_timer(vmcs12))
4520 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
4521 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
4524 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
4527 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
4532 * also be used to capture vmcs12 cache as part of
4539 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
4557 * be captured in vmcs12 (see vmcs12_save_pending_event()).
4617 vmcs12->vm_exit_intr_info = irq |
4622 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4623 vmcs12->exit_qualification,
4624 vmcs12->idt_vectoring_info_field,
4625 vmcs12->vm_exit_intr_info,
4626 vmcs12->vm_exit_intr_error_code,
4629 load_vmcs12_host_state(vcpu, vmcs12);
5080 vmptr + offsetof(struct vmcs12,
5103 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5135 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5138 value = vmcs12_read_any(vmcs12, field, offset);
5187 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5203 * bits into the vmcs12 field.
5246 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
5247 * vmcs12, else we may clobber a field or consume a stale value.
5250 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5263 vmcs12_write_any(vmcs12, field, offset, value);
5266 * Do not track vmcs12 dirty-state if in guest-mode as we actually
5267 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
5269 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
5329 struct vmcs12 *new_vmcs12;
5573 struct vmcs12 *vmcs12)
5578 if (!nested_cpu_has_eptp_switching(vmcs12) ||
5579 !nested_cpu_has_ept(vmcs12))
5585 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
5593 if (vmcs12->ept_pointer != new_eptp) {
5597 vmcs12->ept_pointer = new_eptp;
5608 struct vmcs12 *vmcs12;
5621 vmcs12 = get_vmcs12(vcpu);
5622 if (!(vmcs12->vm_function_control & BIT_ULL(function)))
5627 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5654 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5663 bitmap = vmcs12->io_bitmap_a;
5665 bitmap = vmcs12->io_bitmap_b;
5685 struct vmcs12 *vmcs12)
5691 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5692 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5709 struct vmcs12 *vmcs12,
5715 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5723 bitmap = vmcs12->msr_bitmap;
5747 struct vmcs12 *vmcs12)
5760 if (vmcs12->cr0_guest_host_mask &
5761 (val ^ vmcs12->cr0_read_shadow))
5765 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5769 if (vmcs12->cr4_guest_host_mask &
5770 (vmcs12->cr4_read_shadow ^ val))
5774 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5780 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5781 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5787 if (vmcs12->cpu_based_vm_exec_control &
5792 if (vmcs12->cpu_based_vm_exec_control &
5804 if (vmcs12->cr0_guest_host_mask & 0xe &
5805 (val ^ vmcs12->cr0_read_shadow))
5807 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5808 !(vmcs12->cr0_read_shadow & 0x1) &&
5817 struct vmcs12 *vmcs12, gpa_t bitmap)
5823 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5840 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
5842 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
5844 if (nested_cpu_has_mtf(vmcs12))
5929 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5939 return vmcs12->exception_bitmap &
5946 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
5948 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
5954 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5958 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5960 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5962 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5964 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5966 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5968 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5969 vmcs12->vmread_bitmap);
5971 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5972 vmcs12->vmwrite_bitmap);
5984 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5986 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5988 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5990 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5993 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5997 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5999 return nested_vmx_exit_handled_mtf(vmcs12);
6001 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6003 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6004 nested_cpu_has2(vmcs12,
6009 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
6016 * delivery" only come from vmcs12.
6021 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
6022 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6024 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6032 * the XSS exit bitmap in vmcs12.
6034 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
6037 return nested_cpu_has2(vmcs12,
6082 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6088 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6090 vmcs12->vm_exit_intr_error_code =
6105 struct vmcs12 *vmcs12;
6122 vmcs12 = get_vmcs12(vcpu);
6130 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
6136 nested_cpu_has_shadow_vmcs(vmcs12) &&
6137 vmcs12->vmcs_link_pointer != -1ull)
6156 if (nested_cpu_has_preemption_timer(vmcs12) &&
6176 * When running L2, the authoritative vmcs12 state is in the
6177 * vmcs02. When running L1, the authoritative vmcs12 state is
6180 * vmcs12 state is in the vmcs12 already.
6183 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
6184 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
6195 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6199 * Copy over the full allocated size of vmcs12 rather than just the size
6202 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
6205 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6206 vmcs12->vmcs_link_pointer != -1ull) {
6232 struct vmcs12 *vmcs12;
6251 * code was changed such that flag signals vmcs12 should
6308 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
6344 vmcs12 = get_vmcs12(vcpu);
6345 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
6348 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
6361 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6362 vmcs12->vmcs_link_pointer != -1ull) {
6363 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
6367 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
6389 if (nested_vmx_check_controls(vcpu, vmcs12) ||
6390 nested_vmx_check_host_state(vcpu, vmcs12) ||
6391 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
6417 * The same values should also be used to verify that vmcs12 control fields are