Lines Matching refs:hv_vcpu

223 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
230 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
231 stimer = &hv_vcpu->stimer[idx];
248 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
250 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
251 hv_vcpu->exit.u.synic.msr = msr;
252 hv_vcpu->exit.u.synic.control = synic->control;
253 hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
254 hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
330 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
332 return hv_vcpu->cpuid_cache.syndbg_cap_eax &
349 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
351 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
352 hv_vcpu->exit.u.syndbg.msr = msr;
353 hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
354 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
355 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
356 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
690 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
696 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
697 !(hv_vcpu->cpuid_cache.features_edx &
865 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
870 if (!hv_vcpu)
873 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
874 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
875 stimer = &hv_vcpu->stimer[i];
898 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
901 if (!hv_vcpu)
904 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
905 stimer_cleanup(&hv_vcpu->stimer[i]);
907 kfree(hv_vcpu);
913 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
915 if (!hv_vcpu)
918 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
926 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
928 if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
932 &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
962 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
965 if (hv_vcpu)
968 hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
969 if (!hv_vcpu)
972 vcpu->arch.hyperv = hv_vcpu;
973 hv_vcpu->vcpu = vcpu;
975 synic_init(&hv_vcpu->synic);
977 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
978 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
979 stimer_init(&hv_vcpu->stimer[i], i);
981 hv_vcpu->vp_index = vcpu->vcpu_idx;
984 INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
985 spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
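The hits between source lines 865 and 985 trace the lifetime of the per-vCPU Hyper-V context: to_hv_vcpu() may return NULL until the init path (kvm_hv_vcpu_init() in arch/x86/kvm/hyperv.c) allocates the context with kzalloc(GFP_KERNEL_ACCOUNT), consumers such as the stimer and uninit paths NULL-check before dereferencing, and teardown kfree()s it. The standalone userspace sketch below illustrates only that lazy-allocation pattern; every name in it (vcpu, vcpu_hv, to_hv, hv_init, hv_uninit) is an illustrative stand-in, not a kernel identifier.

/*
 * Standalone sketch (plain userspace C) of the lazy-allocation pattern the
 * hits above trace. The kernel code uses kzalloc(GFP_KERNEL_ACCOUNT),
 * vcpu->arch.hyperv, synic_init()/stimer_init() and kfree(); everything
 * below is an illustrative stand-in, not kernel API.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct vcpu_hv {
	unsigned int vp_index;           /* plays the role of hv_vcpu->vp_index */
};

struct vcpu {
	unsigned int vcpu_idx;
	struct vcpu_hv *hyperv;          /* NULL until the first Hyper-V use */
};

static struct vcpu_hv *to_hv(struct vcpu *v)    /* like to_hv_vcpu(): may be NULL */
{
	return v->hyperv;
}

static int hv_init(struct vcpu *v)              /* like kvm_hv_vcpu_init() */
{
	struct vcpu_hv *hv = to_hv(v);

	if (hv)
		return 0;                       /* already initialized */

	hv = calloc(1, sizeof(*hv));            /* kernel: kzalloc(..., GFP_KERNEL_ACCOUNT) */
	if (!hv)
		return -ENOMEM;

	hv->vp_index = v->vcpu_idx;             /* default VP index mirrors the vCPU index */
	v->hyperv = hv;
	return 0;
}

static void hv_uninit(struct vcpu *v)           /* like kvm_hv_vcpu_uninit() */
{
	free(v->hyperv);                        /* kernel: kfree(hv_vcpu) */
	v->hyperv = NULL;
}

int main(void)
{
	struct vcpu v = { .vcpu_idx = 3, .hyperv = NULL };

	if (!to_hv(&v))                         /* every consumer NULL-checks first */
		printf("no Hyper-V context yet\n");

	if (!hv_init(&v))
		printf("vp_index = %u\n", to_hv(&v)->vp_index);

	hv_uninit(&v);
	return 0;
}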
1249 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
1251 if (!hv_vcpu->enforce_cpuid)
1257 return hv_vcpu->cpuid_cache.features_eax &
1260 return hv_vcpu->cpuid_cache.features_eax &
1263 return hv_vcpu->cpuid_cache.features_eax &
1266 return hv_vcpu->cpuid_cache.features_eax &
1269 return hv_vcpu->cpuid_cache.features_eax &
1272 return hv_vcpu->cpuid_cache.features_eax &
1280 return hv_vcpu->cpuid_cache.features_eax &
1290 return hv_vcpu->cpuid_cache.features_eax &
1296 return hv_vcpu->cpuid_cache.features_eax &
1300 return hv_vcpu->cpuid_cache.features_eax &
1305 return hv_vcpu->cpuid_cache.features_eax &
1308 return hv_vcpu->cpuid_cache.features_eax &
1312 return hv_vcpu->cpuid_cache.features_edx &
1316 return hv_vcpu->cpuid_cache.features_edx &
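The hv_check_msr_access() hits (source lines 1249-1316) show the CPUID-enforcement pattern that reappears in hv_check_hypercall_access() further down the listing: when the guest has not opted into enforcement every access is allowed, otherwise the MSR is mapped onto a feature bit cached from the Hyper-V CPUID leaves and the access is granted only if that bit is set. A minimal, self-contained sketch of that gating logic follows; the MSR numbers and feature-bit names are placeholders, not the kernel's HV_* definitions.

/*
 * Minimal sketch of the enforcement check: allow everything when enforcement
 * is off, otherwise gate each MSR on a feature bit cached from CPUID.
 * FAKE_MSR_* and FEAT_* are placeholders, not the kernel's HV_* names.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_MSR_VP_INDEX	0x1000u        /* placeholder MSR numbers  */
#define FAKE_MSR_VP_RUNTIME	0x1001u
#define FEAT_ACCESS_VP_INDEX	(1u << 1)      /* placeholder feature bits */
#define FEAT_ACCESS_VP_RUNTIME	(1u << 0)

struct hv_cpuid_cache {
	uint32_t features_eax;                 /* cached CPUID leaf registers */
	uint32_t features_edx;
};

struct hv_ctx {
	bool enforce_cpuid;                    /* guest opted into enforcement? */
	struct hv_cpuid_cache cpuid_cache;
};

static bool check_msr_access(const struct hv_ctx *hv, uint32_t msr)
{
	if (!hv->enforce_cpuid)
		return true;                   /* enforcement disabled: allow all */

	switch (msr) {
	case FAKE_MSR_VP_INDEX:
		return hv->cpuid_cache.features_eax & FEAT_ACCESS_VP_INDEX;
	case FAKE_MSR_VP_RUNTIME:
		return hv->cpuid_cache.features_eax & FEAT_ACCESS_VP_RUNTIME;
	}
	return false;                          /* unknown MSR: deny under enforcement */
}

int main(void)
{
	struct hv_ctx hv = {
		.enforce_cpuid = true,
		.cpuid_cache = { .features_eax = FEAT_ACCESS_VP_INDEX },
	};

	printf("VP_INDEX allowed:   %d\n", check_msr_access(&hv, FAKE_MSR_VP_INDEX));
	printf("VP_RUNTIME allowed: %d\n", check_msr_access(&hv, FAKE_MSR_VP_RUNTIME));
	return 0;
}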
1470 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1472 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1483 if (new_vp_index == hv_vcpu->vp_index)
1492 if (hv_vcpu->vp_index == vcpu->vcpu_idx)
1497 hv_vcpu->vp_index = new_vp_index;
1505 hv_vcpu->hv_vapic = data;
1522 hv_vcpu->hv_vapic = data;
1539 hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1640 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1642 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1647 data = hv_vcpu->vp_index;
1656 data = hv_vcpu->hv_vapic;
1659 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1885 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1888 if (!hv_vcpu)
1916 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1921 if (!tdp_enabled || !hv_vcpu)
1953 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1954 u64 *sparse_banks = hv_vcpu->sparse_banks;
2123 if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
2169 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2170 u64 *sparse_banks = hv_vcpu->sparse_banks;
2249 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2254 if (!hv_vcpu) {
2263 memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache));
2270 hv_vcpu->cpuid_cache.features_eax = entry->eax;
2271 hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
2272 hv_vcpu->cpuid_cache.features_edx = entry->edx;
2277 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
2278 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
2283 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
2287 hv_vcpu->cpuid_cache.nested_eax = entry->eax;
2288 hv_vcpu->cpuid_cache.nested_ebx = entry->ebx;
2294 struct kvm_vcpu_hv *hv_vcpu;
2307 hv_vcpu = to_hv_vcpu(vcpu);
2308 hv_vcpu->enforce_cpuid = enforce;
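The hits around source lines 2249-2308 appear to be kvm_hv_set_cpuid() and the enforce-CPUID setter: whenever the vCPU's CPUID entries change, the register words of the relevant Hyper-V leaves (features, enlightenments, syndbg, nested) are copied into hv_vcpu->cpuid_cache so the access checks above can test plain bitmasks instead of walking the CPUID array, while enforcement itself remains a separate opt-in flag. The sketch below reproduces just that cache-refill step under simplified assumptions; find_entry() stands in for kvm_find_cpuid_entry(), and the leaf numbers are the TLFS values, used here only for illustration.

/*
 * Sketch of the CPUID-cache refill step: clear the cache, then copy the
 * register words of selected Hyper-V leaves out of the guest CPUID entries.
 * find_entry() stands in for kvm_find_cpuid_entry(); only two leaves are
 * shown, the kernel also caches the syndbg and nested leaves.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LEAF_FEATURES		0x40000003u   /* cf. HYPERV_CPUID_FEATURES         */
#define LEAF_ENLIGHTENMENTS	0x40000004u   /* cf. HYPERV_CPUID_ENLIGHTMENT_INFO */

struct cpuid_entry {
	uint32_t function;
	uint32_t eax, ebx, ecx, edx;
};

struct hv_cpuid_cache {
	uint32_t features_eax, features_ebx, features_edx;
	uint32_t enlightenments_eax, enlightenments_ebx;
};

static const struct cpuid_entry *find_entry(const struct cpuid_entry *e,
					    size_t n, uint32_t function)
{
	for (size_t i = 0; i < n; i++)
		if (e[i].function == function)
			return &e[i];
	return NULL;
}

/* Refill the cache from the current CPUID entries; stale bits are cleared first. */
static void refresh_cpuid_cache(struct hv_cpuid_cache *cache,
				const struct cpuid_entry *entries, size_t n)
{
	const struct cpuid_entry *entry;

	memset(cache, 0, sizeof(*cache));

	entry = find_entry(entries, n, LEAF_FEATURES);
	if (entry) {
		cache->features_eax = entry->eax;
		cache->features_ebx = entry->ebx;
		cache->features_edx = entry->edx;
	}

	entry = find_entry(entries, n, LEAF_ENLIGHTENMENTS);
	if (entry) {
		cache->enlightenments_eax = entry->eax;
		cache->enlightenments_ebx = entry->ebx;
	}
}

int main(void)
{
	const struct cpuid_entry entries[] = {
		{ .function = LEAF_FEATURES, .eax = 0x3, .ebx = 0x1 },
	};
	struct hv_cpuid_cache cache;

	refresh_cpuid_cache(&cache, entries, sizeof(entries) / sizeof(entries[0]));
	printf("cached features_eax = 0x%x\n", (unsigned int)cache.features_eax);
	return 0;
}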
2419 static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
2421 if (!hv_vcpu->enforce_cpuid)
2426 return hv_vcpu->cpuid_cache.enlightenments_ebx &&
2427 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
2429 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
2431 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
2439 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
2440 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
2443 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2449 return hv_vcpu->cpuid_cache.enlightenments_eax &
2452 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2457 return hv_vcpu->cpuid_cache.enlightenments_eax &
2460 return hv_vcpu->cpuid_cache.features_ebx &
2471 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2510 if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
2521 if (unlikely(hv_vcpu->enforce_cpuid &&
2522 !(hv_vcpu->cpuid_cache.features_edx &