Lines matching refs:vmcs12
56 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
63 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
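The two macros above (lines 56 and 63) build { encoding, offset } pairs with offsetof(), so one generic helper can move any listed field by raw byte offset (see vmcs12_write_any() at line 1536 and vmcs12_read_any() at line 1569 below). A minimal user-space model of the pattern; the demo struct and helpers are hypothetical, while 0x4004 (EXCEPTION_BITMAP) and 0x681e (GUEST_RIP) are the architectural encodings:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_vmcs12 {
        uint32_t exception_bitmap;
        uint64_t guest_rip;
};

struct shadow_field {
        unsigned long encoding;         /* VMCS field encoding */
        size_t offset;                  /* byte offset inside struct demo_vmcs12 */
};

static const struct shadow_field demo_fields[] = {
        { 0x4004, offsetof(struct demo_vmcs12, exception_bitmap) },
        { 0x681e, offsetof(struct demo_vmcs12, guest_rip) },
};

/* Read a 32-bit field by offset, the way vmcs12_read_any() indexes by offset. */
static uint32_t demo_read_u32(const struct demo_vmcs12 *v, size_t offset)
{
        uint32_t val;

        memcpy(&val, (const char *)v + offset, sizeof(val));
        return val;
}

int main(void)
{
        struct demo_vmcs12 v = { .exception_bitmap = 1u << 14 };

        printf("0x%x\n", demo_read_u32(&v, demo_fields[0].offset));
        return 0;
}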
381 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
403 nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
408 vmcs12->guest_physical_address = fault->address;
441 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
446 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
448 (error_code & vmcs12->page_fault_error_code_mask) !=
449 vmcs12->page_fault_error_code_match;
456 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
466 return nested_vmx_is_page_fault_vmexit(vmcs12, (u16)error_code);
468 return (vmcs12->exception_bitmap & (1u << vector));
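Lines 441-449 implement the SDM's page-fault filtering: a #PF causes a VM-exit exactly when the error-code mask/match test disagrees with exception-bitmap bit 14 (the function returns inequality XOR bit). A standalone model with a hypothetical filter struct and driver; the example filter intercepts only write faults:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_VECTOR 14

struct pf_filter {
        uint32_t exception_bitmap;
        uint32_t error_code_mask;
        uint32_t error_code_match;
};

static bool is_page_fault_vmexit(const struct pf_filter *f, uint16_t error_code)
{
        bool bit = (f->exception_bitmap & (1u << PF_VECTOR)) != 0;
        bool inequality = (error_code & f->error_code_mask) != f->error_code_match;

        /* VM-exit iff the match test and the bitmap bit disagree. */
        return inequality != bit;
}

int main(void)
{
        /* Intercept write faults only: error-code bit 1 must be set. */
        struct pf_filter f = {
                .exception_bitmap = 1u << PF_VECTOR,
                .error_code_mask = 0x2,
                .error_code_match = 0x2,
        };

        printf("write fault exits: %d\n", is_page_fault_vmexit(&f, 0x2));
        printf("read fault exits:  %d\n", is_page_fault_vmexit(&f, 0x0));
        return 0;
}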
472 struct vmcs12 *vmcs12)
474 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
477 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
478 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
485 struct vmcs12 *vmcs12)
487 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
490 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
497 struct vmcs12 *vmcs12)
499 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
502 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
569 struct vmcs12 *vmcs12)
580 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
596 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
608 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
609 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
628 if (nested_cpu_has_vid(vmcs12)) {
671 struct vmcs12 *vmcs12)
676 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
677 vmcs12->vmcs_link_pointer == INVALID_GPA)
680 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
682 vmcs12->vmcs_link_pointer, VMCS12_SIZE))
690 struct vmcs12 *vmcs12)
695 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
696 vmcs12->vmcs_link_pointer == INVALID_GPA)
699 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
701 vmcs12->vmcs_link_pointer, VMCS12_SIZE))
719 struct vmcs12 *vmcs12)
721 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
722 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
729 struct vmcs12 *vmcs12)
731 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
732 !nested_cpu_has_apic_reg_virt(vmcs12) &&
733 !nested_cpu_has_vid(vmcs12) &&
734 !nested_cpu_has_posted_intr(vmcs12))
741 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
742 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
749 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
759 if (nested_cpu_has_posted_intr(vmcs12) &&
760 (CC(!nested_cpu_has_vid(vmcs12)) ||
762 CC((vmcs12->posted_intr_nv & 0xff00)) ||
763 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
767 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
787 struct vmcs12 *vmcs12)
790 vmcs12->vm_exit_msr_load_count,
791 vmcs12->vm_exit_msr_load_addr)) ||
793 vmcs12->vm_exit_msr_store_count,
794 vmcs12->vm_exit_msr_store_addr)))
801 struct vmcs12 *vmcs12)
804 vmcs12->vm_entry_msr_load_count,
805 vmcs12->vm_entry_msr_load_addr)))
812 struct vmcs12 *vmcs12)
814 if (!nested_cpu_has_pml(vmcs12))
817 if (CC(!nested_cpu_has_ept(vmcs12)) ||
818 CC(!page_address_valid(vcpu, vmcs12->pml_address)))
825 struct vmcs12 *vmcs12)
827 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
828 !nested_cpu_has_ept(vmcs12)))
834 struct vmcs12 *vmcs12)
836 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
837 !nested_cpu_has_ept(vmcs12)))
843 struct vmcs12 *vmcs12)
845 if (!nested_cpu_has_shadow_vmcs(vmcs12))
848 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
849 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
1026 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1027 u32 count = vmcs12->vm_exit_msr_store_count;
1028 u64 gpa = vmcs12->vm_exit_msr_store_addr;
1130 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1133 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1137 struct vmcs12 *vmcs12,
1152 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
1165 if (!nested_cpu_has_vpid(vmcs12)) {
1174 * VPID is enabled and in use by vmcs12. If vpid12 is changing, then
1180 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1181 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
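The last_vpid bookkeeping at lines 1180-1181 detects L1 switching vpid12, in which case L1 expects a clean TLB for the new tag; the flush request itself sits in lines the listing elides. A sketch of just that decision, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

static bool vpid12_needs_flush(bool is_vmenter, uint16_t vpid12,
                               uint16_t *last_vpid)
{
        if (is_vmenter && vpid12 != *last_vpid) {
                *last_vpid = vpid12;
                return true;    /* caller flushes the vpid02-tagged TLB */
        }
        return false;
}

int main(void)
{
        uint16_t last_vpid = 0;

        return !vpid12_needs_flush(true, 5, &last_vpid);  /* new vpid: flush */
}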
1521 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1536 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
1556 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1569 val = vmcs12_read_any(vmcs12, field.encoding,
1581 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1586 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1587 vmcs12->guest_rip = evmcs->guest_rip;
1598 vmcs12->guest_rsp = evmcs->guest_rsp;
1599 vmcs12->guest_rflags = evmcs->guest_rflags;
1600 vmcs12->guest_interruptibility_info =
1603 * Not present in struct vmcs12:
1604 * vmcs12->guest_ssp = evmcs->guest_ssp;
1610 vmcs12->cpu_based_vm_exec_control =
1616 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1621 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1626 vmcs12->vm_entry_intr_info_field =
1628 vmcs12->vm_entry_exception_error_code =
1630 vmcs12->vm_entry_instruction_len =
1636 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1637 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1638 vmcs12->host_cr0 = evmcs->host_cr0;
1639 vmcs12->host_cr3 = evmcs->host_cr3;
1640 vmcs12->host_cr4 = evmcs->host_cr4;
1641 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1642 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1643 vmcs12->host_rip = evmcs->host_rip;
1644 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1645 vmcs12->host_es_selector = evmcs->host_es_selector;
1646 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1647 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1648 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1649 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1650 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1651 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1652 vmcs12->host_ia32_perf_global_ctrl = evmcs->host_ia32_perf_global_ctrl;
1654 * Not present in struct vmcs12:
1655 * vmcs12->host_ia32_s_cet = evmcs->host_ia32_s_cet;
1656 * vmcs12->host_ssp = evmcs->host_ssp;
1657 * vmcs12->host_ia32_int_ssp_table_addr = evmcs->host_ia32_int_ssp_table_addr;
1663 vmcs12->pin_based_vm_exec_control =
1665 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1666 vmcs12->secondary_vm_exec_control =
1672 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1673 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1678 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1683 vmcs12->guest_es_base = evmcs->guest_es_base;
1684 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1685 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1686 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1687 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1688 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1689 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1690 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1691 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1692 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1693 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1694 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1695 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1696 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1697 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1698 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1699 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1700 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1701 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1702 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1703 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1704 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1705 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1706 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1707 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1708 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1709 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1710 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1711 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1712 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1713 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1714 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1715 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1716 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1717 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1718 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1723 vmcs12->tsc_offset = evmcs->tsc_offset;
1724 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1725 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1726 vmcs12->encls_exiting_bitmap = evmcs->encls_exiting_bitmap;
1727 vmcs12->tsc_multiplier = evmcs->tsc_multiplier;
1732 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1733 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1734 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1735 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1736 vmcs12->guest_cr0 = evmcs->guest_cr0;
1737 vmcs12->guest_cr3 = evmcs->guest_cr3;
1738 vmcs12->guest_cr4 = evmcs->guest_cr4;
1739 vmcs12->guest_dr7 = evmcs->guest_dr7;
1744 vmcs12->host_fs_base = evmcs->host_fs_base;
1745 vmcs12->host_gs_base = evmcs->host_gs_base;
1746 vmcs12->host_tr_base = evmcs->host_tr_base;
1747 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1748 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1749 vmcs12->host_rsp = evmcs->host_rsp;
1754 vmcs12->ept_pointer = evmcs->ept_pointer;
1755 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1760 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1761 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1762 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1763 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1764 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1765 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1766 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1767 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1768 vmcs12->guest_pending_dbg_exceptions =
1770 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1771 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1772 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1773 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1774 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1775 vmcs12->guest_ia32_perf_global_ctrl = evmcs->guest_ia32_perf_global_ctrl;
1777 * Not present in struct vmcs12:
1778 * vmcs12->guest_ia32_s_cet = evmcs->guest_ia32_s_cet;
1779 * vmcs12->guest_ia32_lbr_ctl = evmcs->guest_ia32_lbr_ctl;
1780 * vmcs12->guest_ia32_int_ssp_table_addr = evmcs->guest_ia32_int_ssp_table_addr;
1786 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1787 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1788 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1789 * vmcs12->page_fault_error_code_mask =
1791 * vmcs12->page_fault_error_code_match =
1793 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1794 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1795 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1796 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1801 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1802 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1803 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1804 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1805 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1806 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1807 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1808 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1809 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1810 * vmcs12->exit_qualification = evmcs->exit_qualification;
1811 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1813 * Not present in struct vmcs12:
1814 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1815 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1816 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1817 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
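Each block of copies above is guarded, in lines the listing elides, by a test of the enlightened VMCS clean-fields bitmap, so groups L1 marked clean are skipped. A minimal model of that gating; the group-bit names and demo structs here are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical clean-field group bits (the real macro names differ). */
#define CLEAN_GUEST_BASIC       (1u << 0)
#define CLEAN_CONTROL_GRP2      (1u << 1)

struct demo_evmcs  { uint64_t guest_rip; uint64_t tsc_offset; uint32_t hv_clean_fields; };
struct demo_vmcs12 { uint64_t guest_rip; uint64_t tsc_offset; };

static void copy_enlightened(struct demo_vmcs12 *v, const struct demo_evmcs *e)
{
        /* Copy a group only when L1 left its clean bit unset (i.e. dirty). */
        if (!(e->hv_clean_fields & CLEAN_GUEST_BASIC))
                v->guest_rip = e->guest_rip;
        if (!(e->hv_clean_fields & CLEAN_CONTROL_GRP2))
                v->tsc_offset = e->tsc_offset;
}

int main(void)
{
        struct demo_evmcs e = { .guest_rip = 0x1000,
                                .hv_clean_fields = CLEAN_CONTROL_GRP2 };
        struct demo_vmcs12 v = { 0 };

        copy_enlightened(&v, &e);       /* rip copied, tsc_offset skipped */
        printf("rip=%llx tsc_offset=%llx\n",
               (unsigned long long)v.guest_rip, (unsigned long long)v.tsc_offset);
        return 0;
}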
1825 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1831 * evmcs->host_es_selector = vmcs12->host_es_selector;
1832 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1833 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1834 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1835 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1836 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1837 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1838 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1839 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1840 * evmcs->host_cr0 = vmcs12->host_cr0;
1841 * evmcs->host_cr3 = vmcs12->host_cr3;
1842 * evmcs->host_cr4 = vmcs12->host_cr4;
1843 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1844 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1845 * evmcs->host_rip = vmcs12->host_rip;
1846 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1847 * evmcs->host_fs_base = vmcs12->host_fs_base;
1848 * evmcs->host_gs_base = vmcs12->host_gs_base;
1849 * evmcs->host_tr_base = vmcs12->host_tr_base;
1850 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1851 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1852 * evmcs->host_rsp = vmcs12->host_rsp;
1854 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1855 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1856 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1857 * evmcs->ept_pointer = vmcs12->ept_pointer;
1858 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1859 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1860 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1861 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1862 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1863 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1864 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1865 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1866 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1867 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1868 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1870 * vmcs12->page_fault_error_code_mask;
1872 * vmcs12->page_fault_error_code_match;
1873 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1874 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1875 * evmcs->tsc_offset = vmcs12->tsc_offset;
1876 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1877 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1878 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1879 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1880 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1881 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1882 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1883 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1884 * evmcs->guest_ia32_perf_global_ctrl = vmcs12->guest_ia32_perf_global_ctrl;
1885 * evmcs->host_ia32_perf_global_ctrl = vmcs12->host_ia32_perf_global_ctrl;
1886 * evmcs->encls_exiting_bitmap = vmcs12->encls_exiting_bitmap;
1887 * evmcs->tsc_multiplier = vmcs12->tsc_multiplier;
1889 * Not present in struct vmcs12:
1890 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1891 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1892 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1893 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1894 * evmcs->host_ia32_s_cet = vmcs12->host_ia32_s_cet;
1895 * evmcs->host_ssp = vmcs12->host_ssp;
1896 * evmcs->host_ia32_int_ssp_table_addr = vmcs12->host_ia32_int_ssp_table_addr;
1897 * evmcs->guest_ia32_s_cet = vmcs12->guest_ia32_s_cet;
1898 * evmcs->guest_ia32_lbr_ctl = vmcs12->guest_ia32_lbr_ctl;
1899 * evmcs->guest_ia32_int_ssp_table_addr = vmcs12->guest_ia32_int_ssp_table_addr;
1900 * evmcs->guest_ssp = vmcs12->guest_ssp;
1903 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1904 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1905 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1906 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1907 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1908 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1909 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1910 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1912 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1913 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1914 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1915 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1916 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1917 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1918 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1919 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1920 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1921 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1923 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1924 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1925 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1926 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1927 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1928 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1929 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1930 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1932 evmcs->guest_es_base = vmcs12->guest_es_base;
1933 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1934 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1935 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1936 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1937 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1938 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1939 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1940 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1941 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1943 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1944 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1946 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1947 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1948 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1949 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1952 vmcs12->guest_pending_dbg_exceptions;
1953 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1954 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1956 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1957 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1959 evmcs->guest_cr0 = vmcs12->guest_cr0;
1960 evmcs->guest_cr3 = vmcs12->guest_cr3;
1961 evmcs->guest_cr4 = vmcs12->guest_cr4;
1962 evmcs->guest_dr7 = vmcs12->guest_dr7;
1964 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1966 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1967 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1968 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1969 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1970 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1971 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1972 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1973 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1975 evmcs->exit_qualification = vmcs12->exit_qualification;
1977 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1978 evmcs->guest_rsp = vmcs12->guest_rsp;
1979 evmcs->guest_rflags = vmcs12->guest_rflags;
1982 vmcs12->guest_interruptibility_info;
1983 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1984 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1985 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1987 vmcs12->vm_entry_exception_error_code;
1988 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1990 evmcs->guest_rip = vmcs12->guest_rip;
1992 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
2060 * Unlike normal vmcs12, enlightened vmcs12 is not fully
2066 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2067 memset(vmcs12, 0, sizeof(*vmcs12));
2068 vmcs12->hdr.revision_id = VMCS12_REVISION;
2114 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2121 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
2152 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2155 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2156 return vmcs12->guest_ia32_efer;
2157 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
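nested_vmx_calc_efer() (lines 2152-2157) chooses the EFER value L2 starts with. Only the first two branch heads are visible here; in the sketch below, the bodies of the fallback branches (deriving LMA/LME from VM_ENTRY_IA32E_MODE on top of L1's current EFER) are assumptions:

#include <stdint.h>
#include <stdio.h>

#define EFER_LME                (1ull << 8)
#define EFER_LMA                (1ull << 10)
#define VM_ENTRY_IA32E_MODE     (1u << 9)       /* VM-entry controls bit 9 */
#define VM_ENTRY_LOAD_IA32_EFER (1u << 15)      /* VM-entry controls bit 15 */

static uint64_t calc_nested_efer(uint32_t vm_entry_controls,
                                 uint64_t vmcs12_guest_efer, uint64_t l1_efer)
{
        if (vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
                return vmcs12_guest_efer;       /* explicit value from vmcs12 */
        if (vm_entry_controls & VM_ENTRY_IA32E_MODE)
                return l1_efer | EFER_LMA | EFER_LME;   /* assumed fallback */
        return l1_efer & ~(EFER_LMA | EFER_LME);        /* assumed fallback */
}

int main(void)
{
        printf("%llx\n", (unsigned long long)
               calc_nested_efer(VM_ENTRY_IA32E_MODE, 0, 0));
        return 0;
}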
2169 * according to L0's settings (vmcs12 is irrelevant here). Host
2225 struct vmcs12 *vmcs12)
2232 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2240 struct vmcs12 *vmcs12)
2243 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2246 prepare_vmcs02_early_rare(vmx, vmcs12);
2252 exec_control |= (vmcs12->pin_based_vm_exec_control &
2255 /* Posted interrupts setting is only taken from vmcs12. */
2257 if (nested_cpu_has_posted_intr(vmcs12))
2258 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2270 exec_control |= vmcs12->cpu_based_vm_exec_control;
2274 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2305 /* Take the following fields only from vmcs12 */
2317 if (nested_cpu_has(vmcs12,
2319 exec_control |= vmcs12->secondary_vm_exec_control;
2331 if (vmx_umip_emulated() && (vmcs12->guest_cr4 & X86_CR4_UMIP))
2336 vmcs12->guest_intr_status);
2338 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
2342 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);
2350 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2360 exec_control |= (vmcs12->vm_entry_controls &
2390 vmcs12->vm_entry_intr_info_field);
2392 vmcs12->vm_entry_exception_error_code);
2394 vmcs12->vm_entry_instruction_len);
2396 vmcs12->guest_interruptibility_info);
2398 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2404 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2410 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2411 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2412 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2413 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2414 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2415 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2416 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2417 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2418 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2419 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2420 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2421 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2422 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2423 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2424 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2425 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2426 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2427 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2428 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2429 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2430 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2431 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2432 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2433 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2434 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2435 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2436 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2437 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2438 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2439 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2440 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2441 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2442 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2443 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2444 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2445 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2452 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2454 vmcs12->guest_pending_dbg_exceptions);
2455 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2456 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2460 * vmcs12
2463 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2464 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2465 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2466 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2470 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2471 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2474 if (nested_cpu_has_xsaves(vmcs12))
2475 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2486 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2487 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2498 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
2499 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
2503 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2504 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2505 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2506 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2524 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2533 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2541 prepare_vmcs02_rare(vmx, vmcs12);
2550 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2551 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2552 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2558 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2560 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2567 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2571 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2572 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2573 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2591 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2593 if (nested_cpu_has_ept(vmcs12))
2599 * account for vmcs12's cr0/4_guest_host_mask.
2601 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2602 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2604 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2605 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2607 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2627 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2632 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2635 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2639 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2642 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2644 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2645 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2646 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2647 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2650 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2653 vmcs12->guest_ia32_perf_global_ctrl))) {
2658 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2659 kvm_rip_write(vcpu, vmcs12->guest_rip);
2674 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2676 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
2677 nested_cpu_has_virtual_nmis(vmcs12)))
2680 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
2681 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
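nested_vmx_check_nmi_controls() (lines 2674-2681) enforces two SDM consistency rules: virtual NMIs require NMI exiting, and NMI-window exiting requires virtual NMIs. The same checks as a plain predicate:

#include <stdbool.h>

static bool nmi_controls_valid(bool nmi_exiting, bool virtual_nmis,
                               bool nmi_window_exiting)
{
        if (virtual_nmis && !nmi_exiting)
                return false;   /* virtual NMIs require NMI exiting */
        if (nmi_window_exiting && !virtual_nmis)
                return false;   /* NMI-window exiting requires virtual NMIs */
        return true;
}

int main(void)
{
        return !nmi_controls_valid(true, true, true);
}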
2736 struct vmcs12 *vmcs12)
2740 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2743 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2748 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2749 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2754 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
2755 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2756 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2757 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2758 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2759 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2760 nested_vmx_check_nmi_controls(vmcs12) ||
2761 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2762 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2763 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2764 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2765 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2768 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2769 nested_cpu_has_save_preemption_timer(vmcs12))
2772 if (nested_cpu_has_ept(vmcs12) &&
2773 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
2776 if (nested_cpu_has_vmfunc(vmcs12)) {
2777 if (CC(vmcs12->vm_function_control &
2781 if (nested_cpu_has_eptp_switching(vmcs12)) {
2782 if (CC(!nested_cpu_has_ept(vmcs12)) ||
2783 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
2795 struct vmcs12 *vmcs12)
2799 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2802 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
2812 struct vmcs12 *vmcs12)
2816 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2827 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2828 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2833 bool urg = nested_cpu_has2(vmcs12,
2835 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2858 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
2870 if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2871 CC(vmcs12->vm_entry_instruction_len == 0 &&
2877 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2884 struct vmcs12 *vmcs12)
2886 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2887 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2888 nested_check_vm_entry_controls(vcpu, vmcs12))
2892 return nested_evmcs_check_controls(vmcs12);
2898 struct vmcs12 *vmcs12)
2901 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
2909 struct vmcs12 *vmcs12)
2911 bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
2913 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2914 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
2915 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
2918 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
2919 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
2922 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2923 CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
2926 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2928 vmcs12->host_ia32_perf_global_ctrl)))
2932 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
2935 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
2936 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2937 CC((vmcs12->host_rip) >> 32))
2941 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2942 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2943 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2944 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2945 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2946 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2947 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2948 CC(vmcs12->host_cs_selector == 0) ||
2949 CC(vmcs12->host_tr_selector == 0) ||
2950 CC(vmcs12->host_ss_selector == 0 && !ia32e))
2953 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
2954 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
2955 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
2956 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
2957 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
2958 CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
2967 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2968 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
2969 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
2970 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
2978 struct vmcs12 *vmcs12)
2984 if (vmcs12->vmcs_link_pointer == INVALID_GPA)
2987 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
2990 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
2992 vmcs12->vmcs_link_pointer, VMCS12_SIZE)))
2996 offsetof(struct vmcs12, hdr),
3001 CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
3010 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
3012 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
3013 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT &&
3014 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI))
3021 struct vmcs12 *vmcs12,
3024 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
3028 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
3029 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
3032 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
3033 CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
3036 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
3037 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
3040 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
3045 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
3047 vmcs12->guest_ia32_perf_global_ctrl)))
3050 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
3053 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
3054 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
3067 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
3068 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
3069 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
3070 CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
3071 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
3075 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
3076 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
3077 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
3080 if (nested_check_guest_non_reg_state(vmcs12))
3170 * L2 was running), map it here to make sure vmcs12 changes are
3194 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3199 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
3210 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3213 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) {
3216 pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n",
3226 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3229 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
3231 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
3232 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
3233 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3252 if (nested_cpu_has_posted_intr(vmcs12)) {
3255 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
3258 offset_in_page(vmcs12->posted_intr_desc_addr));
3260 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
3272 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
3307 struct vmcs12 *vmcs12;
3321 vmcs12 = get_vmcs12(vcpu);
3322 if (!nested_cpu_has_pml(vmcs12))
3325 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
3331 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
3337 vmcs12->guest_pml_index--;
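The PML emulation above (lines 3307-3337) appends to L1's log: the page at vmcs12->pml_address holds PML_ENTITY_NUM u64 entries, guest_pml_index counts down, and an out-of-range index means the buffer is full. A user-space model of one log write; the 4 KiB masking of the logged GPA is assumed from the hardware format:

#include <stdbool.h>
#include <stdint.h>

#define PML_ENTITY_NUM 512      /* u64 entries in one 4 KiB PML page */

static bool pml_log_gpa(uint64_t *pml_page, uint16_t *guest_pml_index,
                        uint64_t gpa)
{
        if (*guest_pml_index >= PML_ENTITY_NUM)
                return false;           /* buffer full: PML-full exit instead */

        /* Log the 4 KiB-aligned GPA (assumed hardware entry format). */
        pml_page[*guest_pml_index] = gpa & ~0xfffull;
        (*guest_pml_index)--;           /* index counts down, as at line 3337 */
        return true;
}

int main(void)
{
        uint64_t page[PML_ENTITY_NUM] = { 0 };
        uint16_t idx = PML_ENTITY_NUM - 1;

        return !pml_log_gpa(page, &idx, 0x1234567);
}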
3373 struct vmcs12 *vmcs12);
3389 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3400 vmcs12->guest_rip,
3401 vmcs12->guest_intr_status,
3402 vmcs12->vm_entry_intr_info_field,
3403 vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT,
3404 vmcs12->ept_pointer,
3405 vmcs12->guest_cr3,
3418 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3422 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
3446 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3459 if (nested_vmx_check_guest_state(vcpu, vmcs12,
3462 vmcs12->exit_qualification = entry_failure_code;
3469 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
3471 vmcs12->exit_qualification = entry_failure_code;
3477 vmcs12->vm_entry_msr_load_addr,
3478 vmcs12->vm_entry_msr_load_count);
3481 vmcs12->exit_qualification = failed_index;
3510 if (nested_cpu_has_preemption_timer(vmcs12)) {
3529 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3530 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3539 load_vmcs12_host_state(vcpu, vmcs12);
3540 vmcs12->vm_exit_reason = exit_reason.full;
3552 struct vmcs12 *vmcs12;
3576 vmcs12 = get_vmcs12(vcpu);
3584 if (CC(vmcs12->hdr.shadow_vmcs))
3590 vmcs12->launch_state = !launch;
3597 * on vmcs12 as required by the Intel SDM, and act appropriately when
3608 if (CC(vmcs12->launch_state == launch))
3613 if (nested_vmx_check_controls(vcpu, vmcs12))
3616 if (nested_vmx_check_address_space_size(vcpu, vmcs12))
3619 if (nested_vmx_check_host_state(vcpu, vmcs12))
3633 if (nested_cpu_has_posted_intr(vmcs12) &&
3648 * In this flow, it is assumed that vmcs12 cache was
3653 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3655 switch (vmcs12->guest_activity_state) {
3662 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3663 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) &&
3664 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
3665 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3691 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3693 * This function returns the new value we should put in vmcs12.guest_cr0.
3700 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3708 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3712 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3713 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3718 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3722 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3723 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
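vmcs12_guest_cr0() (lines 3708-3714) rebuilds L2's CR0 from three sources keyed by bit ownership: bits L2 may change freely come from vmcs02's GUEST_CR0 (the /*1*/ term, elided by the listing), bits L1 trapped (cr0_guest_host_mask) keep vmcs12's value, and bits only L0 trapped come from vmcs02's CR0 read shadow. A standalone model with illustrative parameter names:

#include <stdint.h>
#include <stdio.h>

static uint64_t merge_guest_cr0(uint64_t vmcs02_guest_cr0,
                                uint64_t vmcs02_cr0_read_shadow,
                                uint64_t l0_owned_bits, /* cr0_guest_owned_bits */
                                uint64_t vmcs12_guest_cr0,
                                uint64_t l1_mask)       /* cr0_guest_host_mask */
{
        return (vmcs02_guest_cr0 & l0_owned_bits) |               /* 1: L2-owned */
               (vmcs12_guest_cr0 & l1_mask) |                     /* 2: L1-trapped */
               (vmcs02_cr0_read_shadow & ~(l1_mask | l0_owned_bits)); /* 3: L0-trapped */
}

int main(void)
{
        /* L1 traps TS (bit 3), L2 owns MP (bit 1), the rest via the shadow. */
        printf("%llx\n", (unsigned long long)
               merge_guest_cr0(0x2, 0x80000031, 0x2, 0x8, 0x8));
        return 0;
}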
3728 struct vmcs12 *vmcs12,
3754 vmcs12->idt_vectoring_info_field = 0;
3760 vmcs12->vm_exit_instruction_len =
3768 vmcs12->idt_vectoring_error_code =
3772 vmcs12->idt_vectoring_info_field = idt_vectoring;
3774 vmcs12->idt_vectoring_info_field =
3782 vmcs12->vm_entry_instruction_len =
3787 vmcs12->idt_vectoring_info_field = idt_vectoring;
3789 vmcs12->idt_vectoring_info_field = 0;
3796 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3804 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3805 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3809 if (nested_cpu_has_posted_intr(vmcs12)) {
3810 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3861 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3891 vmcs12->vm_exit_intr_error_code = (u16)ex->error_code;
3900 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
4255 struct vmcs12 *vmcs12)
4259 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
4260 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
4261 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
4262 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
4263 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
4264 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
4265 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
4266 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
4267 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
4268 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
4269 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
4270 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
4271 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
4272 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
4273 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
4274 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
4275 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
4276 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
4277 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
4278 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
4279 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
4280 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
4281 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
4282 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
4283 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
4284 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
4285 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
4286 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
4287 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
4288 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
4289 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
4290 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
4291 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
4292 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
4293 vmcs12->guest_pending_dbg_exceptions =
4300 struct vmcs12 *vmcs12)
4315 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4323 * Update the guest state fields of vmcs12 to reflect changes that
4328 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
4333 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4338 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
4339 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
4341 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
4342 vmcs12->guest_rip = kvm_rip_read(vcpu);
4343 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
4345 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
4346 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
4348 vmcs12->guest_interruptibility_info =
4352 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
4354 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI;
4356 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
4358 if (nested_cpu_has_preemption_timer(vmcs12) &&
4359 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
4361 vmcs12->vmx_preemption_timer_value =
4368 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
4370 * Additionally, restore L2's PDPTR to vmcs12.
4373 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
4374 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
4375 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
4376 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
4377 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
4378 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
4382 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
4384 if (nested_cpu_has_vid(vmcs12))
4385 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
4387 vmcs12->vm_entry_controls =
4388 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
4391 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
4392 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
4394 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
4395 vmcs12->guest_ia32_efer = vcpu->arch.efer;
4400 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
4407 * which already writes to vmcs12 directly.
4409 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
4414 vmcs12->vm_exit_reason = vm_exit_reason;
4416 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
4417 vmcs12->exit_qualification = exit_qualification;
4424 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4425 vmcs12->launch_state = 1;
4429 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4435 vmcs12_save_pending_event(vcpu, vmcs12,
4438 vmcs12->vm_exit_intr_info = exit_intr_info;
4439 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4440 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4449 vmcs12->vm_exit_msr_store_addr,
4450 vmcs12->vm_exit_msr_store_count))
4459 * in vmcs12.
4466 struct vmcs12 *vmcs12)
4471 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4472 vcpu->arch.efer = vmcs12->host_ia32_efer;
4473 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4479 kvm_rsp_write(vcpu, vmcs12->host_rsp);
4480 kvm_rip_write(vcpu, vmcs12->host_rip);
4492 vmx_set_cr0(vcpu, vmcs12->host_cr0);
4496 vmx_set_cr4(vcpu, vmcs12->host_cr4);
4504 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
4507 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
4509 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4510 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4511 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4512 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4513 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4518 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4521 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4522 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4523 vcpu->arch.pat = vmcs12->host_ia32_pat;
4525 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
4528 vmcs12->host_ia32_perf_global_ctrl));
4535 .selector = vmcs12->host_cs_selector,
4541 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4555 seg.selector = vmcs12->host_ds_selector;
4557 seg.selector = vmcs12->host_es_selector;
4559 seg.selector = vmcs12->host_ss_selector;
4561 seg.selector = vmcs12->host_fs_selector;
4562 seg.base = vmcs12->host_fs_base;
4564 seg.selector = vmcs12->host_gs_selector;
4565 seg.base = vmcs12->host_gs_base;
4568 .base = vmcs12->host_tr_base,
4570 .selector = vmcs12->host_tr_selector,
4583 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4584 vmcs12->vm_exit_msr_load_count))
4615 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4623 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4674 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4675 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4683 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4684 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
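The loops at lines 4674-4684 walk the VM-entry and VM-exit MSR autoload areas, which are guest-physical arrays of 16-byte entries. The entry layout below follows the architectural format (u32 index, u32 reserved, u64 value); the address helper mirrors the gpa arithmetic above:

#include <stdint.h>
#include <stdio.h>

struct vmx_msr_entry {          /* hardware autoload-area entry format */
        uint32_t index;
        uint32_t reserved;
        uint64_t value;
};

/* GPA of entry i in an autoload area, as computed in the loops above. */
static uint64_t msr_entry_gpa(uint64_t area_base, uint32_t i)
{
        return area_base + (uint64_t)i * sizeof(struct vmx_msr_entry);
}

int main(void)
{
        printf("entry 2 at %llx\n",
               (unsigned long long)msr_entry_gpa(0x10000, 2));
        return 0;
}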
4720 * and modify vmcs12 to make it see what it would expect to see there if
4727 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4758 if (nested_cpu_has_preemption_timer(vmcs12))
4761 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
4763 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
4768 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
4771 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
4776 * also be used to capture vmcs12 cache as part of
4783 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
4801 * be captured in vmcs12 (see vmcs12_save_pending_event()).
4868 vmcs12->vm_exit_intr_info = irq |
4873 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4874 vmcs12->exit_qualification,
4875 vmcs12->idt_vectoring_info_field,
4876 vmcs12->vm_exit_intr_info,
4877 vmcs12->vm_exit_intr_error_code,
4880 load_vmcs12_host_state(vcpu, vmcs12);
5320 vmptr + offsetof(struct vmcs12,
5345 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5378 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5381 value = vmcs12_read_any(vmcs12, field, offset);
5451 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5467 * bits into the vmcs12 field.
5510 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
5511 * vmcs12, else we may crush a field or consume a stale value.
5514 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5527 vmcs12_write_any(vmcs12, field, offset, value);
5530 * Do not track vmcs12 dirty-state if in guest-mode as we actually
5531 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
5533 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
5608 offsetof(struct vmcs12, hdr),
5838 struct vmcs12 *vmcs12)
5843 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
5848 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
5856 if (vmcs12->ept_pointer != new_eptp) {
5860 vmcs12->ept_pointer = new_eptp;
5863 if (!nested_cpu_has_vpid(vmcs12))
5873 struct vmcs12 *vmcs12;
5885 vmcs12 = get_vmcs12(vcpu);
5889 * is enabled in vmcs02 if and only if it's enabled in vmcs12.
5891 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
5896 if (!(vmcs12->vm_function_control & BIT_ULL(function)))
5901 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5928 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5937 bitmap = vmcs12->io_bitmap_a;
5939 bitmap = vmcs12->io_bitmap_b;
5959 struct vmcs12 *vmcs12)
5965 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5966 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
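The bitmap selection at lines 5937-5939 follows the hardware I/O-bitmap layout: bitmap A covers ports 0x0000-0x7FFF, bitmap B covers 0x8000-0xFFFF, one intercept bit per port. A local-memory model of the lookup (the real code reads the relevant byte from guest memory instead):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool io_port_intercepted(const uint8_t *bitmap_a,
                                const uint8_t *bitmap_b, uint16_t port)
{
        const uint8_t *bitmap = (port < 0x8000) ? bitmap_a : bitmap_b;
        uint16_t idx = port & 0x7fff;   /* bit index within the chosen page */

        return bitmap[idx / 8] & (1u << (idx % 8));
}

int main(void)
{
        uint8_t a[4096] = { 0 }, b[4096] = { 0 };

        a[0x60 / 8] |= 1u << (0x60 % 8);        /* intercept port 0x60 */
        printf("port 0x60: %d, port 0x61: %d\n",
               io_port_intercepted(a, b, 0x60),
               io_port_intercepted(a, b, 0x61));
        return 0;
}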
5983 struct vmcs12 *vmcs12,
5989 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5997 bitmap = vmcs12->msr_bitmap;
6021 struct vmcs12 *vmcs12)
6034 if (vmcs12->cr0_guest_host_mask &
6035 (val ^ vmcs12->cr0_read_shadow))
6039 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
6043 if (vmcs12->cr4_guest_host_mask &
6044 (vmcs12->cr4_read_shadow ^ val))
6048 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
6054 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
6055 (vmcs12->cr0_read_shadow & X86_CR0_TS))
6061 if (vmcs12->cpu_based_vm_exec_control &
6066 if (vmcs12->cpu_based_vm_exec_control &
6078 if (vmcs12->cr0_guest_host_mask & 0xe &
6079 (val ^ vmcs12->cr0_read_shadow))
6081 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
6082 !(vmcs12->cr0_read_shadow & 0x1) &&
6091 struct vmcs12 *vmcs12)
6096 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
6102 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
6106 struct vmcs12 *vmcs12, gpa_t bitmap)
6112 if (!nested_cpu_has_shadow_vmcs(vmcs12))
6129 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
6131 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
6133 if (nested_cpu_has_mtf(vmcs12))
6229 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6239 return vmcs12->exception_bitmap &
6246 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
6248 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
6254 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
6258 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6260 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
6262 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
6264 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
6266 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
6268 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6269 vmcs12->vmread_bitmap);
6271 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6272 vmcs12->vmwrite_bitmap);
6284 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
6286 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
6288 return nested_vmx_exit_handled_io(vcpu, vmcs12);
6290 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
6293 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
6297 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
6299 return nested_vmx_exit_handled_mtf(vmcs12);
6301 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6303 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6304 nested_cpu_has2(vmcs12,
6309 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
6316 * delivery" only come from vmcs12.
6321 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
6322 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6324 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6332 * the XSS exit bitmap in vmcs12.
6334 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES);
6337 return nested_cpu_has2(vmcs12,
6340 return nested_vmx_exit_handled_encls(vcpu, vmcs12);
6387 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6393 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6395 vmcs12->vm_exit_intr_error_code =
6410 struct vmcs12 *vmcs12;
6427 vmcs12 = get_vmcs12(vcpu);
6435 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
6442 nested_cpu_has_shadow_vmcs(vmcs12) &&
6443 vmcs12->vmcs_link_pointer != INVALID_GPA)
6462 if (nested_cpu_has_preemption_timer(vmcs12) &&
6482 * When running L2, the authoritative vmcs12 state is in the
6483 * vmcs02. When running L1, the authoritative vmcs12 state is
6486 * vmcs12 state is in the vmcs12 already.
6489 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
6490 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
6508 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6512 * Copy over the full allocated size of vmcs12 rather than just the size
6515 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
6518 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6519 vmcs12->vmcs_link_pointer != INVALID_GPA) {
6542 struct vmcs12 *vmcs12;
6561 * code was changed such that flag signals vmcs12 should
6619 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
6656 vmcs12 = get_vmcs12(vcpu);
6657 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
6660 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
6673 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6674 vmcs12->vmcs_link_pointer != INVALID_GPA) {
6675 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
6679 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
6701 if (nested_vmx_check_controls(vcpu, vmcs12) ||
6702 nested_vmx_check_host_state(vcpu, vmcs12) ||
6703 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
6731 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo
6740 * the index into vmcs12.
6747 * vmcs12, regardless of whether or not the associated feature is
6752 /* The vmcs12 table is very, very sparsely populated. */
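The comment at lines 6731-6752 describes the field-offset table being indexed by the VMCS encoding rotated left by 6, which folds the sparse 16-bit encoding space into a compact index. A sketch of that transform; the ROL16 macro below is modeled after the one the kernel defines for this purpose:

#include <stdint.h>
#include <stdio.h>

#define ROL16(v, n) ((uint16_t)(((uint16_t)(v) << (n)) | ((uint16_t)(v) >> (16 - (n)))))

int main(void)
{
        /* GUEST_RIP's encoding 0x681e becomes a much smaller table index. */
        printf("0x681e -> 0x%x\n", ROL16(0x681e, 6));
        return 0;
}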
6996 * The same values should also be used to verify that vmcs12 control fields are