Lines matching defs:msr (occurrences of the msr parameter throughout KVM's Hyper-V synthetic MSR handlers)

245 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
251 hv_vcpu->exit.u.synic.msr = msr;
260 u32 msr, u64 data, bool host)
268 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
271 switch (msr) {
275 synic_exit(synic, msr);
294 synic_exit(synic, msr);
306 synic_exit(synic, msr);
319 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
340 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
346 static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
352 hv_vcpu->exit.u.syndbg.msr = msr;
363 static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
371 to_hv_vcpu(vcpu)->vp_index, msr, data);
372 switch (msr) {
376 syndbg_exit(vcpu, msr);
390 syndbg_exit(vcpu, msr);
402 static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
409 switch (msr) {
432 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
437 static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
446 switch (msr) {
463 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
1008 static bool kvm_hv_msr_partition_wide(u32 msr)
1012 switch (msr) {
1249 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
1254 switch (msr) {
1325 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1331 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1334 switch (msr) {
1396 msr - HV_X64_MSR_CRASH_P0,
1450 return syndbg_set_msr(vcpu, msr, data, host);
1452 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
1468 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1472 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1475 switch (msr) {
1547 return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
1552 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1561 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1573 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
1580 static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1587 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1590 switch (msr) {
1605 msr - HV_X64_MSR_CRASH_P0,
1626 return syndbg_get_msr(vcpu, msr, pdata, host);
1628 kvm_pr_unimpl_rdmsr(vcpu, msr);
1636 static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1642 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1645 switch (msr) {
1667 return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
1672 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1681 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1693 kvm_pr_unimpl_rdmsr(vcpu, msr);
1700 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1710 if (kvm_hv_msr_partition_wide(msr)) {
1714 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1718 return kvm_hv_set_msr(vcpu, msr, data, host);
1721 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1731 if (kvm_hv_msr_partition_wide(msr)) {
1735 r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
1739 return kvm_hv_get_msr(vcpu, msr, pdata, host);
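The matches at lines 1700-1739 show the shape of the common entry points: kvm_hv_set_msr_common()/kvm_hv_get_msr_common() first classify the MSR with kvm_hv_msr_partition_wide() and then route it either to the partition-wide handler or to the per-vCPU handler. Below is a minimal, self-contained sketch of that dispatch pattern; it is not the kernel code, the helper names (msr_is_partition_wide, set_msr_pw, set_msr_vcpu, set_msr_common) are stand-ins for illustration, and only the two HV_X64_MSR_* index values are intended to match the real Hyper-V definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hyper-V MSR indices as commonly defined; everything else here is a stand-in. */
    #define HV_X64_MSR_GUEST_OS_ID  0x40000000u
    #define HV_X64_MSR_SINT0        0x40000090u

    /* Partition-wide MSRs hold VM-global state; the rest are per-vCPU. */
    static bool msr_is_partition_wide(uint32_t msr)
    {
            switch (msr) {
            case HV_X64_MSR_GUEST_OS_ID:
                    return true;
            default:
                    return false;
            }
    }

    static int set_msr_pw(uint32_t msr, uint64_t data)
    {
            /* In the kernel this path is serialized for the whole VM. */
            printf("partition-wide write: msr=%#x data=%#llx\n",
                   (unsigned int)msr, (unsigned long long)data);
            return 0;
    }

    static int set_msr_vcpu(uint32_t msr, uint64_t data)
    {
            printf("per-vCPU write:       msr=%#x data=%#llx\n",
                   (unsigned int)msr, (unsigned long long)data);
            return 0;
    }

    /* Mirrors the shape of kvm_hv_set_msr_common(): classify, then dispatch. */
    static int set_msr_common(uint32_t msr, uint64_t data)
    {
            if (msr_is_partition_wide(msr))
                    return set_msr_pw(msr, data);
            return set_msr_vcpu(msr, data);
    }

    int main(void)
    {
            set_msr_common(HV_X64_MSR_GUEST_OS_ID, 0x8100);
            set_msr_common(HV_X64_MSR_SINT0 + 2, 0x1000f); /* SINT2: index = msr - HV_X64_MSR_SINT0 */
            return 0;
    }

The subtraction visible at lines 319, 463, 1552 and 1561 (msr - HV_X64_MSR_SINT0, (msr - HV_X64_MSR_STIMER0_CONFIG)/2) turns a contiguous MSR range into an array index; the synthetic timer MSRs alternate CONFIG/COUNT pairs, hence the divide by two.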