Lines matching defs:data — symbol-search listing (OpenGrok-style "defs:" query) over KVM's x86 core, arch/x86/kvm/x86.c; the number opening each entry is the source line it appears on.

325 static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
331 kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
332 op, msr, data);
336 kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
337 op, msr, data);
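
A sketch of the policy behind these two messages, modeled in user space: with the ignore_msrs module parameter set, KVM swallows unhandled MSR accesses (reads return 0, writes are dropped) and logs them only while report_ignored_msrs is on; otherwise the access is reported as unhandled and the caller injects #GP. The module-parameter stand-ins below are assumptions of the sketch, not the kernel implementation.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool ignore_msrs = true;          /* module param stand-in */
    static bool report_ignored_msrs = true;  /* module param stand-in */

    static bool msr_ignored_check(uint32_t msr, uint64_t data, bool write)
    {
        const char *op = write ? "wrmsr" : "rdmsr";

        if (ignore_msrs) {
            if (report_ignored_msrs)
                fprintf(stderr, "ignored %s: 0x%x data 0x%llx\n",
                        op, (unsigned)msr, (unsigned long long)data);
            return true;    /* pretend the access succeeded */
        }
        fprintf(stderr, "unhandled %s: 0x%x data 0x%llx\n",
                op, (unsigned)msr, (unsigned long long)data);
        return false;       /* caller injects #GP */
    }
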
488 enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
492 if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
501 kvm_lapic_set_base(vcpu, msr_info->data);
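
The mode check at line 488 works because the LAPIC mode is literally the pair of enable bits in IA32_APICBASE: bit 11 is the global enable, bit 10 selects x2APIC, and x2APIC without the global enable is the invalid encoding. A self-contained sketch of that derivation:

    #include <stdint.h>

    #define MSR_IA32_APICBASE_ENABLE (1ULL << 11)
    #define X2APIC_ENABLE            (1ULL << 10)

    enum lapic_mode {
        LAPIC_MODE_DISABLED = 0,
        LAPIC_MODE_INVALID  = X2APIC_ENABLE,
        LAPIC_MODE_XAPIC    = MSR_IA32_APICBASE_ENABLE,
        LAPIC_MODE_X2APIC   = MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE,
    };

    static enum lapic_mode apic_mode(uint64_t apic_base)
    {
        return (enum lapic_mode)(apic_base &
                (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE));
    }
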
1425 u64 data;
1427 if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
1432 kvm_rax_write(vcpu, (u32)data);
1433 kvm_rdx_write(vcpu, data >> 32);
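
RDPMC (like RDMSR) returns its 64-bit result split across EDX:EAX, which is what the two register writes above implement; the inverse join appears later as kvm_read_edx_eax on the WRMSR path (line 2083). A trivial sketch of both directions:

    #include <stdint.h>

    static void split_edx_eax(uint64_t data, uint32_t *eax, uint32_t *edx)
    {
        *eax = (uint32_t)data;
        *edx = (uint32_t)(data >> 32);
    }

    static uint64_t read_edx_eax(uint32_t eax, uint32_t edx)
    {
        return ((uint64_t)edx << 32) | eax;
    }
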
1628 u64 data = host_arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
1636 data |= ARCH_CAP_PSCHANGE_MC_NO;
1648 data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
1651 data |= ARCH_CAP_RDCL_NO;
1653 data |= ARCH_CAP_SSB_NO;
1655 data |= ARCH_CAP_MDS_NO;
1657 data |= ARCH_CAP_RFDS_NO;
1667 data &= ~ARCH_CAP_TAA_NO;
1669 data |= ARCH_CAP_TAA_NO;
1679 data |= ARCH_CAP_GDS_NO;
1681 return data;
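
The flow above: start from the host's IA32_ARCH_CAPABILITIES masked down to the bits KVM knows how to virtualize, OR in "not affected" bits the host has proven immune to by other means, then strip TAA_NO again if the host is actually TAA-affected. A user-space model of that flow covering a subset of the bits; the host-bug flags and the supported mask are assumptions of the sketch, and the bit positions follow the Intel SDM.

    #include <stdbool.h>
    #include <stdint.h>

    #define BIT_ULL(n) (1ULL << (n))
    #define ARCH_CAP_RDCL_NO                BIT_ULL(0)
    #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH  BIT_ULL(3)
    #define ARCH_CAP_SSB_NO                 BIT_ULL(4)
    #define ARCH_CAP_MDS_NO                 BIT_ULL(5)
    #define ARCH_CAP_PSCHANGE_MC_NO         BIT_ULL(6)
    #define ARCH_CAP_TAA_NO                 BIT_ULL(8)

    struct host_state {
        uint64_t arch_capabilities;  /* raw host MSR value */
        bool bug_meltdown, bug_ssb, bug_mds, bug_taa;
        bool l1tf_flush_needed;
    };

    static uint64_t synth_arch_capabilities(const struct host_state *h,
                                            uint64_t supported_mask)
    {
        uint64_t data = h->arch_capabilities & supported_mask;

        data |= ARCH_CAP_PSCHANGE_MC_NO;     /* set unconditionally above */
        if (!h->l1tf_flush_needed)
            data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
        if (!h->bug_meltdown)
            data |= ARCH_CAP_RDCL_NO;
        if (!h->bug_ssb)
            data |= ARCH_CAP_SSB_NO;
        if (!h->bug_mds)
            data |= ARCH_CAP_MDS_NO;
        if (h->bug_taa)
            data &= ~ARCH_CAP_TAA_NO;
        else
            data |= ARCH_CAP_TAA_NO;
        return data;
    }
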
1688 msr->data = kvm_get_arch_capabilities();
1691 msr->data = kvm_caps.supported_perf_cap;
1694 rdmsrl_safe(msr->index, &msr->data);
1702 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1712 *data = 0;
1720 *data = msr.data;
1758 u64 efer = msr_info->data;
1838 * Write @data into the MSR specified by @index. Select MSR specific fault
1843 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
1854 if (is_noncanonical_address(data, vcpu))
1871 data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
1891 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
1894 data = (u32)data;
1898 msr.data = data;
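
Lines 1854-1871 show the two canonical-address behaviors in __kvm_set_msr: MSRs whose loads would fault on a non-canonical value are rejected outright, while the SYSENTER MSRs are silently sign-extended from the vCPU's virtual-address width, matching hardware. A sketch of the two helpers (vaddr_bits would be 48 or 57 depending on LA57):

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t canonical_address(uint64_t va, unsigned int vaddr_bits)
    {
        /* sign-extend from bit (vaddr_bits - 1) */
        return (uint64_t)((int64_t)(va << (64 - vaddr_bits)) >>
                          (64 - vaddr_bits));
    }

    static bool is_noncanonical_address(uint64_t va, unsigned int vaddr_bits)
    {
        return canonical_address(va, vaddr_bits) != va;
    }
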
1906 u32 index, u64 data, bool host_initiated)
1908 int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
1911 if (kvm_msr_ignored_check(index, data, true))
1918 * Read the MSR specified by @index into @data. Select MSR specific fault
1923 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1946 *data = msr.data;
1951 u32 index, u64 *data, bool host_initiated)
1953 int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
1956 /* Unconditionally clear *data for simplicity */
1957 *data = 0;
1965 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1969 return kvm_get_msr_ignored_check(vcpu, index, data, false);
1972 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
1976 return kvm_set_msr_ignored_check(vcpu, index, data, false);
1979 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1981 return kvm_get_msr_ignored_check(vcpu, index, data, false);
1985 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
1987 return kvm_set_msr_ignored_check(vcpu, index, data, false);
1994 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
1995 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
2034 u32 exit_reason, u64 data,
2049 vcpu->run->msr.data = data;
2058 u64 data;
2061 r = kvm_get_msr_with_filter(vcpu, ecx, &data);
2064 trace_kvm_msr_read(ecx, data);
2066 kvm_rax_write(vcpu, data & -1u);
2067 kvm_rdx_write(vcpu, (data >> 32) & -1u);
2083 u64 data = kvm_read_edx_eax(vcpu);
2086 r = kvm_set_msr_with_filter(vcpu, ecx, data);
2089 trace_kvm_msr_write(ecx, data);
2092 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data,
2098 trace_kvm_msr_write_ex(ecx, data);
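
Lines 1994-1995, 2049 and 2092 are the kernel half of the user-space MSR protocol: kvm_msr_user_space stashes index/data in vcpu->run->msr, and on re-entry complete_emulated_rdmsr copies run->msr.data back into EDX:EAX. A sketch of the VMM half; emulate_rdmsr/emulate_wrmsr are hypothetical device-model helpers assumed to return 0 on success (nonzero in run->msr.error makes KVM inject #GP).

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* hypothetical VMM helpers, 0 on success */
    extern uint8_t emulate_rdmsr(uint32_t index, uint64_t *data);
    extern uint8_t emulate_wrmsr(uint32_t index, uint64_t data);

    static int enable_user_msr_exits(int vm_fd)
    {
        struct kvm_enable_cap cap = {
            .cap  = KVM_CAP_X86_USER_SPACE_MSR,
            .args = { KVM_MSR_EXIT_REASON_UNKNOWN },
        };
        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

    static void handle_msr_exit(struct kvm_run *run)
    {
        switch (run->exit_reason) {
        case KVM_EXIT_X86_RDMSR:
            run->msr.error = emulate_rdmsr(run->msr.index,
                                           (uint64_t *)&run->msr.data);
            break;
        case KVM_EXIT_X86_WRMSR:
            run->msr.error = emulate_wrmsr(run->msr.index, run->msr.data);
            break;
        }
    }
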
2160 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
2165 if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
2166 ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
2167 ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
2168 ((u32)(data >> 32) != X2APIC_BROADCAST))
2169 return kvm_x2apic_icr_write(vcpu->arch.apic, data);
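
The fastpath predicate above accepts only the common IPI shape — fixed delivery mode, physical destination, no shorthand, not broadcast — and everything else takes the full exit path. A standalone restatement with the ICR mask values written out (delivery mode bits 8-10, destination mode bit 11, shorthand bits 18-19, x2APIC destination in bits 63:32):

    #include <stdbool.h>
    #include <stdint.h>

    #define APIC_MODE_MASK     0x700u    /* delivery mode */
    #define APIC_DM_FIXED      0x000u
    #define APIC_DEST_MASK     0x800u    /* destination mode */
    #define APIC_DEST_PHYSICAL 0x000u
    #define APIC_SHORT_MASK    0xc0000u  /* destination shorthand */
    #define APIC_DEST_NOSHORT  0x00000u
    #define X2APIC_BROADCAST   0xffffffffu

    static bool icr_fastpath_ok(uint64_t icr)
    {
        return (icr & APIC_SHORT_MASK) == APIC_DEST_NOSHORT &&
               (icr & APIC_DEST_MASK)  == APIC_DEST_PHYSICAL &&
               (icr & APIC_MODE_MASK)  == APIC_DM_FIXED &&
               (uint32_t)(icr >> 32)   != X2APIC_BROADCAST;
    }
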
2174 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
2179 kvm_set_lapic_tscdeadline_msr(vcpu, data);
2186 u64 data;
2193 data = kvm_read_edx_eax(vcpu);
2194 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
2200 data = kvm_read_edx_eax(vcpu);
2201 if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
2211 trace_kvm_msr_write(msr, data);
2222 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2224 return kvm_get_msr_ignored_check(vcpu, index, data, true);
2227 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2239 if (do_get_msr(vcpu, index, &val) || *data != val)
2245 return kvm_set_msr_ignored_check(vcpu, index, *data, true);
2277 /* copy pvclock gtod data */
2720 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
2729 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2734 if (data == 0) {
2750 synchronizing = data < tsc_exp + tsc_hz &&
2751 data + tsc_hz > tsc_exp;
2767 data += delta;
2768 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2773 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
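
Lines 2750-2751 are the synchronization heuristic: a guest TSC write counts as an attempt to sync with an earlier write if the new value lands within one second's worth of ticks (tsc_hz) of where the previously written value would have advanced to by now (tsc_exp). A sketch with the kernel's bookkeeping replaced by explicit parameters; the arithmetic is overflow-naive, as befits a sketch:

    #include <stdbool.h>
    #include <stdint.h>

    static bool tsc_write_is_synchronizing(uint64_t data,
                                           uint64_t last_tsc_write,
                                           uint64_t elapsed_ns,
                                           uint64_t tsc_khz)
    {
        uint64_t tsc_hz  = tsc_khz * 1000;
        /* ticks the old value would have gained in elapsed_ns */
        uint64_t tsc_exp = last_tsc_write + (elapsed_ns * tsc_khz) / 1000000;

        return data < tsc_exp + tsc_hz && data + tsc_hz > tsc_exp;
    }
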
2806 * very likely) and there's a data dependence, so force GCC
3046 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3054 data->flags = 0;
3060 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) {
3061 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec;
3062 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC;
3065 data->host_tsc = rdtsc();
3067 data->flags |= KVM_CLOCK_TSC_STABLE;
3073 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc);
3075 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset;
3081 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3088 __get_kvmclock(kvm, data);
3094 struct kvm_clock_data data;
3096 get_kvmclock(kvm, &data);
3097 return data.clock;
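
The struct filled in by __get_kvmclock is exactly what the KVM_GET_CLOCK ioctl hands to user space, including the KVM_CLOCK_REALTIME/KVM_CLOCK_HOST_TSC pairing used for migration. A minimal VMM-side read, assuming vm_fd is an already-open KVM VM descriptor:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void dump_kvmclock(int vm_fd)
    {
        struct kvm_clock_data data = { 0 };

        if (ioctl(vm_fd, KVM_GET_CLOCK, &data) < 0) {
            perror("KVM_GET_CLOCK");
            return;
        }
        printf("clock=%llu flags=%#x\n",
               (unsigned long long)data.clock, data.flags);
        if (data.flags & KVM_CLOCK_REALTIME)
            printf("realtime=%llu\n", (unsigned long long)data.realtime);
        if (data.flags & KVM_CLOCK_HOST_TSC)
            printf("host_tsc=%llu\n", (unsigned long long)data.host_tsc);
    }
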
3123 * it says that version is odd if data is being modified, and even after
3331 u64 data = msr_info->data;
3336 vcpu->arch.mcg_status = data;
3340 (data || !msr_info->host_initiated))
3342 if (data != 0 && data != ~(u64)0)
3344 vcpu->arch.mcg_ctl = data;
3351 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated))
3354 if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK))
3358 vcpu->arch.mci_ctl2_banks[offset] = data;
3373 * single-bit ECC data errors.
3376 data != 0 && (data | (1 << 10) | 1) != ~(u64)0)
3385 data != 0 && !can_set_mci_status(vcpu))
3390 vcpu->arch.mce_banks[offset] = data;
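
The MCi_CTL check at line 3376 enforces "architecturally 0 or all ones", with two deliberate holes visible in the surrounding source comments: bit 10 may be clear (old Linux guests mask it in bank 4 to work around an AMD K8 GART erratum) and bit 0 may be clear (UnixWare masks correctable single-bit ECC data errors, per line 3373). As a standalone predicate:

    #include <stdbool.h>
    #include <stdint.h>

    static bool mci_ctl_valid(uint64_t data)
    {
        /* 0, or all ones modulo bits 10 and 0 */
        return data == 0 || (data | (1ULL << 10) | 1) == ~(uint64_t)0;
    }
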
3405 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
3407 gpa_t gpa = data & ~0x3f;
3410 if (data & 0x30)
3414 (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
3418 (data & KVM_ASYNC_PF_DELIVERY_AS_INT))
3422 return data ? 1 : 0;
3424 vcpu->arch.apf.msr_en_val = data;
3432 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3436 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
3437 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
3444 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
3447 if (data >> 8)
3453 vcpu->arch.apf.msr_int_val = data;
3455 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
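
MSR_KVM_ASYNC_PF_EN packs flags and a pointer into one register, as the masks above show: bits 0-3 are flags (names from the KVM para-virtual ABI), bits 4-5 are reserved, and the 64-byte-aligned remainder is the GPA of the shared async-PF area. A decoder sketch mirroring the checks in the listing:

    #include <stdbool.h>
    #include <stdint.h>

    #define KVM_ASYNC_PF_ENABLED               (1ULL << 0)
    #define KVM_ASYNC_PF_SEND_ALWAYS           (1ULL << 1)
    #define KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT (1ULL << 2)
    #define KVM_ASYNC_PF_DELIVERY_AS_INT       (1ULL << 3)

    static bool async_pf_en_gpa(uint64_t data, uint64_t *gpa)
    {
        if (data & 0x30)                     /* bits 4-5 reserved */
            return false;
        if (!(data & KVM_ASYNC_PF_ENABLED))  /* disable: only 0 is valid */
            return false;
        *gpa = data & ~0x3fULL;
        return true;
    }
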
3635 u64 data = msr_info->data;
3638 return kvm_xen_write_hypercall_page(vcpu, data);
3653 vcpu->arch.microcode_version = data;
3658 vcpu->arch.arch_capabilities = data;
3663 if (data & ~kvm_caps.supported_perf_cap)
3671 if (vcpu->arch.perf_capabilities == data)
3674 vcpu->arch.perf_capabilities = data;
3681 if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB))
3683 if (!data)
3693 if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH))
3695 if (!data)
3703 data &= ~(u64)0x40; /* ignore flush filter disable */
3704 data &= ~(u64)0x100; /* ignore ignne emulation enable */
3705 data &= ~(u64)0x8; /* ignore TLB cache disable */
3708 if (data == BIT_ULL(18)) {
3709 vcpu->arch.msr_hwcr = data;
3710 } else if (data != 0) {
3711 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3716 if (data != 0) {
3717 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3722 if (!kvm_pat_valid(data))
3725 vcpu->arch.pat = data;
3729 return kvm_mtrr_set_msr(vcpu, msr, data);
3733 return kvm_x2apic_msr_write(vcpu, msr, data);
3735 kvm_set_lapic_tscdeadline_msr(vcpu, data);
3740 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
3747 vcpu->arch.ia32_tsc_adjust_msr = data;
3755 if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK)
3759 data = data & ~MSR_IA32_MISC_ENABLE_EMON;
3760 data |= old_val & MSR_IA32_MISC_ENABLE_EMON;
3764 ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
3767 vcpu->arch.ia32_misc_enable_msr = data;
3770 vcpu->arch.ia32_misc_enable_msr = data;
3777 vcpu->arch.smbase = data;
3780 vcpu->arch.msr_ia32_power_ctl = data;
3784 kvm_synchronize_tsc(vcpu, data);
3786 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
3800 if (data & ~kvm_caps.supported_xss)
3802 vcpu->arch.ia32_xss = data;
3808 vcpu->arch.smi_count = data;
3814 vcpu->kvm->arch.wall_clock = data;
3815 kvm_write_wall_clock(vcpu->kvm, data, 0);
3821 vcpu->kvm->arch.wall_clock = data;
3822 kvm_write_wall_clock(vcpu->kvm, data, 0);
3828 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
3834 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated);
3840 if (kvm_pv_enable_async_pf(vcpu, data))
3847 if (kvm_pv_enable_async_pf_int(vcpu, data))
3853 if (data & 0x1) {
3865 if (data & KVM_STEAL_RESERVED_MASK)
3868 vcpu->arch.st.msr_val = data;
3870 if (!(data & KVM_MSR_ENABLED))
3880 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8)))
3889 if (data & (-1ULL << 1))
3892 vcpu->arch.msr_kvm_poll_control = data;
3908 if (data)
3909 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3931 return kvm_hv_set_msr_common(vcpu, msr, data,
3937 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3942 vcpu->arch.osvw.length = data;
3947 vcpu->arch.osvw.status = data;
3951 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
3954 vcpu->arch.msr_platform_info = data;
3957 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
3958 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3961 vcpu->arch.msr_misc_features_enables = data;
3969 if (data & ~kvm_guest_supported_xfd(vcpu))
3972 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
3979 if (data & ~kvm_guest_supported_xfd(vcpu))
3982 vcpu->arch.guest_fpu.xfd_err = data;
3993 if (msr_info->host_initiated && !data &&
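
All of the host_initiated branches in this switch are reached through the KVM_SET_MSRS ioctl rather than guest WRMSR. A minimal user-space write, assuming vcpu_fd is an open vCPU descriptor; the union sizes storage for kvm_msrs' flexible entries array, and the ioctl returns the number of MSRs actually set (1 on success here):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_one_msr(int vcpu_fd, uint32_t index, uint64_t value)
    {
        union {
            struct kvm_msrs hdr;
            char buf[sizeof(struct kvm_msrs) +
                     sizeof(struct kvm_msr_entry)];
        } req;

        memset(&req, 0, sizeof(req));
        req.hdr.nmsrs = 1;
        req.hdr.entries[0].index = index;
        req.hdr.entries[0].data = value;

        return ioctl(vcpu_fd, KVM_SET_MSRS, &req);
    }
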
4005 u64 data;
4013 data = 0;
4016 data = vcpu->arch.mcg_cap;
4021 data = vcpu->arch.mcg_ctl;
4024 data = vcpu->arch.mcg_status;
4035 data = vcpu->arch.mci_ctl2_banks[offset];
4044 data = vcpu->arch.mce_banks[offset];
4049 *pdata = data;
4077 * data here. Do not conditionalize this on CPUID, as KVM does not do
4085 msr_info->data = 0;
4093 msr_info->data = 0;
4096 msr_info->data = vcpu->arch.microcode_version;
4102 msr_info->data = vcpu->arch.arch_capabilities;
4108 msr_info->data = vcpu->arch.perf_capabilities;
4111 msr_info->data = vcpu->arch.msr_ia32_power_ctl;
4133 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
4137 msr_info->data = vcpu->arch.pat;
4142 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
4144 msr_info->data = 3;
4158 msr_info->data = 1 << 24;
4161 msr_info->data = kvm_get_apic_base(vcpu);
4164 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
4166 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
4169 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
4172 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
4177 msr_info->data = vcpu->arch.smbase;
4180 msr_info->data = vcpu->arch.smi_count;
4184 msr_info->data = 1000ULL;
4186 msr_info->data |= (((uint64_t)4ULL) << 40);
4189 msr_info->data = vcpu->arch.efer;
4195 msr_info->data = vcpu->kvm->arch.wall_clock;
4201 msr_info->data = vcpu->kvm->arch.wall_clock;
4207 msr_info->data = vcpu->arch.time;
4213 msr_info->data = vcpu->arch.time;
4219 msr_info->data = vcpu->arch.apf.msr_en_val;
4225 msr_info->data = vcpu->arch.apf.msr_int_val;
4231 msr_info->data = 0;
4237 msr_info->data = vcpu->arch.st.msr_val;
4243 msr_info->data = vcpu->arch.pv_eoi.msr_val;
4249 msr_info->data = vcpu->arch.msr_kvm_poll_control;
4258 return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
4264 msr_info->data = vcpu->arch.ia32_xss;
4276 msr_info->data = 0x20000000;
4289 msr_info->index, &msr_info->data,
4296 * read data here should the balance of the register be
4302 msr_info->data = 0xbe702111;
4307 msr_info->data = vcpu->arch.osvw.length;
4312 msr_info->data = vcpu->arch.osvw.status;
4318 msr_info->data = vcpu->arch.msr_platform_info;
4321 msr_info->data = vcpu->arch.msr_misc_features_enables;
4324 msr_info->data = vcpu->arch.msr_hwcr;
4332 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd;
4339 msr_info->data = vcpu->arch.guest_fpu.xfd_err;
4352 msr_info->data = 0;
4370 unsigned index, u64 *data))
4375 if (do_msr(vcpu, entries[i].index, &entries[i].data))
4388 unsigned index, u64 *data),
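
__msr_io (line 4375) stops at the first entry the callback rejects and returns the count processed, which becomes the ioctl's return value; a short count therefore pinpoints the failing MSR. A batch read demonstrating that contract (0x10 is IA32_TIME_STAMP_COUNTER, 0x1a0 is IA32_MISC_ENABLE):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void get_two_msrs(int vcpu_fd)
    {
        union {
            struct kvm_msrs hdr;
            char buf[sizeof(struct kvm_msrs) +
                     2 * sizeof(struct kvm_msr_entry)];
        } req;

        memset(&req, 0, sizeof(req));
        req.hdr.nmsrs = 2;
        req.hdr.entries[0].index = 0x10;   /* IA32_TIME_STAMP_COUNTER */
        req.hdr.entries[1].index = 0x1a0;  /* IA32_MISC_ENABLE */

        int done = ioctl(vcpu_fd, KVM_GET_MSRS, &req);
        for (int i = 0; i < done; i++)
            printf("msr 0x%x = 0x%llx\n", req.hdr.entries[i].index,
                   (unsigned long long)req.hdr.entries[i].data);
    }
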
6718 struct kvm_clock_data data = { 0 };
6720 get_kvmclock(kvm, &data);
6721 if (copy_to_user(argp, &data, sizeof(data)))
6730 struct kvm_clock_data data;
6733 if (copy_from_user(&data, argp, sizeof(data)))
6740 if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
6752 * 'system_time' when 'data.clock' is very small.
6754 if (data.flags & KVM_CLOCK_REALTIME) {
6760 if (now_real_ns > data.realtime)
6761 data.clock += now_real_ns - data.realtime;
6768 ka->kvmclock_offset = data.clock - now_raw_ns;
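
Lines 6754-6761 implement the migration fix-up: if the snapshot carries a KVM_CLOCK_REALTIME timestamp, KVM advances the restored clock by the wall-clock time that passed while the VM was stopped. A restore sketch; masking the flags down to KVM_CLOCK_REALTIME is a defensive assumption about what the set path accepts, not a documented requirement:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int restore_kvmclock(int vm_fd, const struct kvm_clock_data *saved)
    {
        struct kvm_clock_data data = *saved;

        data.flags &= KVM_CLOCK_REALTIME;  /* keep only the set-path flag */
        return ioctl(vm_fd, KVM_SET_CLOCK, &data);
    }
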
7350 void *data = val;
7361 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
7369 data += toread;
7442 void *data = val;
7453 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
7460 data += towrite;
7637 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
7701 frag->data = val;
7857 unsigned short port, void *data,
7866 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data);
7868 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data);
7879 memset(data, 0, size * (count - i));
7883 data += size;
7896 memcpy(vcpu->arch.pio_data, data, size * count);
8200 u32 msr_index, u64 data)
8205 r = kvm_set_msr_with_filter(vcpu, msr_index, data);
8210 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
8214 trace_kvm_msr_write_ex(msr_index, data);
8218 trace_kvm_msr_write(msr_index, data);
8471 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
8499 /* Always include the flags as a 'data' entry. */
8515 memcpy(&run->internal.data[info_start], info, sizeof(info));
8516 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data,
8517 ndata * sizeof(data[0]));
8526 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data,
8527 ctxt->fetch.end - ctxt->fetch.data);
8530 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
8533 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
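
On the other side of prepare_emulation_failure_exit, the VMM sees KVM_EXIT_INTERNAL_ERROR with suberror KVM_INTERNAL_ERROR_EMULATION; run->internal.data[] carries the flags word (line 8499), any caller-supplied data, and the packed instruction bytes. A sketch of dumping that payload:

    #include <stdio.h>
    #include <linux/kvm.h>

    static void report_emulation_failure(struct kvm_run *run)
    {
        if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR ||
            run->internal.suberror != KVM_INTERNAL_ERROR_EMULATION)
            return;
        for (__u32 i = 0; i < run->internal.ndata; i++)
            fprintf(stderr, "data[%u] = 0x%llx\n", i,
                    (unsigned long long)run->internal.data[i]);
    }
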
9180 static void tsc_khz_changed(void *data)
9182 struct cpufreq_freqs *freq = data;
9187 if (data)
9304 void *data)
9306 struct cpufreq_freqs *freq = data;
9386 * Notification about pvclock gtod data update.
11037 * copy data
11044 * copy data
11059 memcpy(frag->data, run->mmio.data, len);
11067 frag->data += len;
11085 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
11513 apic_base_msr.data = sregs->apic_base;
13053 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
13061 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13070 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13455 memcpy(frag->data, run->mmio.data, len);
13463 frag->data += len;
13481 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
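
These memcpy calls walk the MMIO fragment list: an access wider than 8 bytes is split into fragments, each fragment makes one KVM_EXIT_MMIO round trip, and on re-entry the kernel advances frag->data (lines 11067/13463) and emits the next piece. The VMM half is a single exit handler; device_read/device_write below are hypothetical device-model callbacks:

    #include <linux/kvm.h>

    /* hypothetical device-model callbacks */
    extern void device_read(__u64 gpa, void *data, __u32 len);
    extern void device_write(__u64 gpa, const void *data, __u32 len);

    static void handle_mmio_exit(struct kvm_run *run)
    {
        if (run->mmio.is_write)
            device_write(run->mmio.phys_addr, run->mmio.data,
                         run->mmio.len);
        else
            device_read(run->mmio.phys_addr, run->mmio.data,
                        run->mmio.len);
        /* returning to ioctl(KVM_RUN) resumes the fragment walk */
    }
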
13490 void *data)
13495 if (!data)
13498 handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13504 data += handled;
13511 frag->data = data;
13519 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
13529 void *data)
13534 if (!data)
13537 handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13543 data += handled;
13550 frag->data = data;
13644 unsigned int port, void *data, unsigned int count,
13647 vcpu->arch.sev_pio_data = data;