Lines Matching defs:data
555 void *data)
557 struct kvm_tlb_range *range = data;
752 struct vmx_uret_msr *msr, u64 data)
756 u64 old_msr_data = msr->data;
757 msr->data = data;
760 ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask);
763 msr->data = old_msr_data;
1139 vmx->guest_uret_msrs[i].data = guest_efer;
1294 vmx->guest_uret_msrs[i].data,
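
The matches at 752-763 are the user-return MSR update path: the new value is staged in msr->data, handed to kvm_set_user_return_msr() together with the slot and mask (760), and rolled back to the old cached value if that call fails (763); 1139 and 1294 show the same data field being seeded with the guest EFER and read back out later. A minimal standalone sketch of that stage-and-rollback pattern, with illustrative names (hw_set_user_return_msr() stands in for kvm_set_user_return_msr(), cached_msr for struct vmx_uret_msr):

#include <stdint.h>

struct cached_msr {
        int slot;
        uint64_t data;      /* software copy of the MSR value */
        uint64_t mask;
};

extern int hw_set_user_return_msr(int slot, uint64_t data, uint64_t mask);

static int set_cached_msr(struct cached_msr *msr, uint64_t new_data)
{
        uint64_t old_msr_data = msr->data;
        int ret;

        msr->data = new_data;
        ret = hw_set_user_return_msr(msr->slot, msr->data, msr->mask);
        if (ret)
                msr->data = old_msr_data;   /* hardware refused it; keep the cache honest */
        return ret;
}
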
1392 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1396 wrmsrl(MSR_KERNEL_GS_BASE, data);
1398 vmx->msr_guest_kernel_gs_base = data;
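
1392-1398 is the guest KERNEL_GS_BASE write: the value always lands in the software cache (msr_guest_kernel_gs_base, 1398), while the MSR itself is written (1396) only when the guest's register state is currently loaded on the CPU, which is how the elided lines in between read. A standalone sketch of that write-through-cache shape, with the guard reduced to a boolean and wrmsr_kernel_gs_base() standing in for wrmsrl(MSR_KERNEL_GS_BASE, data):

#include <stdbool.h>
#include <stdint.h>

struct gs_base_state {
        uint64_t cached;    /* msr_guest_kernel_gs_base analogue */
        bool loaded;        /* is guest register state on the CPU right now? */
};

extern void wrmsr_kernel_gs_base(uint64_t data);    /* stands in for wrmsrl() */

static void write_guest_kernel_gs_base(struct gs_base_state *s, uint64_t data)
{
        if (s->loaded)
                wrmsr_kernel_gs_base(data);     /* update hardware only if live */
        s->cached = data;                       /* always update the cache */
}
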
1575 static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
1584 if (data & vmx->pt_desc.ctl_bitmask)
1592 ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
1600 if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
1601 !(data & RTIT_CTL_FABRIC_EN) &&
1612 !test_bit((data & RTIT_CTL_MTC_RANGE) >>
1618 !test_bit((data & RTIT_CTL_CYC_THRESH) >>
1623 !test_bit((data & RTIT_CTL_PSB_FREQ) >>
1631 value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
1634 value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
1637 value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
1640 value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
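
1575-1640 is vmx_rtit_ctl_check(), the validator for guest writes to the Intel PT control MSR: reserved bits are rejected via pt_desc.ctl_bitmask (1584), and multi-bit fields such as the MTC range, CYC threshold, PSB frequency, and the ADDRn filter encodings are masked, shifted down, and tested against bitmaps of supported encodings (1612-1640). A standalone sketch of that validate-by-bitmap pattern, using placeholder masks rather than the real RTIT_CTL layout:

#include <stdbool.h>
#include <stdint.h>

#define CTL_RESERVED_BITS   0xffffffff00000000ULL  /* placeholder */
#define CTL_FIELD_MASK      0x00000000000f0000ULL  /* placeholder */
#define CTL_FIELD_SHIFT     16

static bool ctl_write_valid(uint64_t data, uint32_t supported_encodings)
{
        uint64_t field;

        if (data & CTL_RESERVED_BITS)       /* like data & pt_desc.ctl_bitmask */
                return false;

        field = (data & CTL_FIELD_MASK) >> CTL_FIELD_SHIFT;
        /* like the test_bit() checks at 1612-1623 */
        return (supported_encodings >> field) & 1;
}
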
1876 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
1878 msr->data = vmx_get_perf_capabilities();
1899 msr_info->data = vmcs_readl(GUEST_FS_BASE);
1902 msr_info->data = vmcs_readl(GUEST_GS_BASE);
1905 msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
1919 msr_info->data = vmx->msr_ia32_umwait_control;
1926 msr_info->data = to_vmx(vcpu)->spec_ctrl;
1929 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
1932 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
1935 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
1942 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
1949 msr_info->data = vcpu->arch.mcg_ext_ctl;
1952 msr_info->data = vmx->msr_ia32_feature_control;
1958 &msr_info->data))
1970 &msr_info->data);
1975 msr_info->data = vmx->pt_desc.guest.ctl;
1980 msr_info->data = vmx->pt_desc.guest.status;
1987 msr_info->data = vmx->pt_desc.guest.cr3_match;
1996 msr_info->data = vmx->pt_desc.guest.output_base;
2005 msr_info->data = vmx->pt_desc.guest.output_mask;
2014 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2016 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2027 msr_info->data = msr->data;
2037 u64 data)
2041 return (u32)data;
2043 return (unsigned long)data;
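
2037-2043 is the SYSENTER address truncation helper used at 2084 and 2091: when the guest's CPUID does not advertise long mode, the written EIP/ESP value is cut down to its low 32 bits ((u32)data at 2041), otherwise it is kept whole. A one-function sketch, with the CPUID check folded into a boolean parameter:

#include <stdbool.h>
#include <stdint.h>

static uint64_t truncate_sysenter_addr(bool guest_has_long_mode, uint64_t data)
{
        return guest_has_long_mode ? data : (uint32_t)data;
}
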
2057 u64 data = msr_info->data;
2067 vmcs_writel(GUEST_FS_BASE, data);
2071 vmcs_writel(GUEST_GS_BASE, data);
2074 vmx_write_guest_kernel_gs_base(vmx, data);
2079 get_vmcs12(vcpu)->guest_sysenter_cs = data;
2080 vmcs_write32(GUEST_SYSENTER_CS, data);
2084 data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2085 get_vmcs12(vcpu)->guest_sysenter_eip = data;
2087 vmcs_writel(GUEST_SYSENTER_EIP, data);
2091 data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2092 get_vmcs12(vcpu)->guest_sysenter_esp = data;
2094 vmcs_writel(GUEST_SYSENTER_ESP, data);
2099 get_vmcs12(vcpu)->guest_ia32_debugctl = data;
2109 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
2110 (data & MSR_IA32_BNDCFGS_RSVD))
2112 vmcs_write64(GUEST_BNDCFGS, data);
2119 if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
2122 vmx->msr_ia32_umwait_control = data;
2129 if (kvm_spec_ctrl_test_value(data))
2132 vmx->spec_ctrl = data;
2133 if (!data)
2156 if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
2164 if (data & ~PRED_CMD_IBPB)
2168 if (!data)
2187 if (!kvm_pat_valid(data))
2192 get_vmcs12(vcpu)->guest_ia32_pat = data;
2195 vmcs_write64(GUEST_IA32_PAT, data);
2196 vcpu->arch.pat = data;
2208 (data & ~MCG_EXT_CTL_LMCE_EN))
2210 vcpu->arch.mcg_ext_ctl = data;
2213 if (!vmx_feature_control_msr_valid(vcpu, data) ||
2217 vmx->msr_ia32_feature_control = data;
2218 if (msr_info->host_initiated && data == 0)
2226 return vmx_set_vmx_msr(vcpu, msr_index, data);
2229 vmx_rtit_ctl_check(vcpu, data) ||
2232 vmcs_write64(GUEST_IA32_RTIT_CTL, data);
2233 vmx->pt_desc.guest.ctl = data;
2239 if (data & MSR_IA32_RTIT_STATUS_MASK)
2241 vmx->pt_desc.guest.status = data;
2249 vmx->pt_desc.guest.cr3_match = data;
2259 if (!pt_output_base_valid(vcpu, data))
2261 vmx->pt_desc.guest.output_base = data;
2271 vmx->pt_desc.guest.output_mask = data;
2280 if (is_noncanonical_address(data, vcpu))
2283 vmx->pt_desc.guest.addr_b[index / 2] = data;
2285 vmx->pt_desc.guest.addr_a[index / 2] = data;
2292 if ((data >> 32) != 0)
2300 ret = vmx_set_guest_uret_msr(vmx, msr, data);
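
The block of matches from 2057 through 2300 is vmx_set_msr(): msr_info->data is snapshotted into a local data variable (2057) and then validated and stored per MSR index. The same path can be driven host-initiated from userspace with the KVM_SET_MSRS vCPU ioctl, whose kvm_msr_entry.data ends up as msr_info->data; KVM_GET_MSRS is the symmetric read path behind the vmx_get_msr() matches above. A minimal sketch, assuming an existing vCPU file descriptor; write_guest_msr() is an illustrative wrapper, not a KVM API:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int write_guest_msr(int vcpu_fd, uint32_t index, uint64_t data)
{
        struct kvm_msrs *msrs;
        int ret;

        msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
        if (!msrs)
                return -1;

        msrs->nmsrs = 1;
        msrs->entries[0].index = index;
        msrs->entries[0].data = data;

        /* KVM_SET_MSRS returns the number of MSRs actually written */
        ret = ioctl(vcpu_fd, KVM_SET_MSRS, msrs);
        free(msrs);
        return ret;
}
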
2937 msr->data = efer;
2941 msr->data = efer & ~EFER_LME;
3632 u16 data = 0;
3640 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3641 r = kvm_write_guest_page(kvm, fn++, &data,
3651 data = ~0;
3652 r = kvm_write_guest_page(kvm, fn, &data,
4525 apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
4528 apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
4936 vcpu->run->internal.data[0] = vect_info;
4937 vcpu->run->internal.data[1] = intr_info;
4938 vcpu->run->internal.data[2] = error_code;
4939 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
6161 vcpu->run->internal.data[0] = vectoring_info;
6162 vcpu->run->internal.data[1] = exit_reason.full;
6163 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
6165 vcpu->run->internal.data[ndata++] =
6168 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
6227 vcpu->run->internal.data[0] = exit_reason.full;
6228 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
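
The internal.data[] matches (4936-4939, 6161-6168, 6227-6228) are where unhandleable exits are reported to userspace as KVM_EXIT_INTERNAL_ERROR: the array carries diagnostics such as the vectoring info, the raw exit reason, the exit qualification, and the CPU that performed the last VM-entry, with internal.ndata giving the number of valid slots. A userspace VMM can dump them after KVM_RUN roughly as sketched below; dump_internal_error() is an illustrative helper, and run points at the kvm_run area mmap()ed from the vCPU fd:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>

static void dump_internal_error(const struct kvm_run *run)
{
        uint32_t i;

        if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR)
                return;

        fprintf(stderr, "KVM internal error, suberror %u\n",
                run->internal.suberror);
        for (i = 0; i < run->internal.ndata; i++)
                fprintf(stderr, "  data[%u] = 0x%llx\n", i,
                        (unsigned long long)run->internal.data[i]);
}
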
7031 vmx->guest_uret_msrs[j].data = 0;
7145 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7146 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"