Lines matching defs:ctl: uses of svm->nested.ctl (struct vmcb_ctrl_area_cached), the cached copy of vmcb12's control area in KVM's nested SVM code
62 u64 cr3 = svm->nested.ctl.nested_cr3;
77 return svm->nested.ctl.nested_cr3;
95 svm->nested.ctl.nested_cr3);
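
Lines 62-95 all read the nested CR3 (vmcb12's nCR3) that was cached at nested VMRUN and serves as the root of the nested NPT page tables. Line 77 looks like the body of the TDP-CR3 accessor; a minimal sketch of its likely shape (the function name and the to_svm() conversion are assumptions, not verified against this tree):

    static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
    {
            struct vcpu_svm *svm = to_svm(vcpu);

            /* nCR3 as written by L1 into vmcb12, cached at nested VMRUN. */
            return svm->nested.ctl.nested_cr3;
    }
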
116 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
135 g = &svm->nested.ctl;
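
Line 135 takes a pointer to the cached controls inside what is presumably recalc_intercepts(): the intercept vectors that go into the active VMCB are the union of L0's and L1's intercepts, so an event exits if either hypervisor asked for it. A hedged sketch of that merge, following the c/h/g naming visible at line 135 (the vmcb01.ptr access and the fixups between the loops, omitted here, are assumptions from mainline):

    static void recalc_intercepts(struct vcpu_svm *svm)
    {
            struct vmcb_control_area *c, *h;
            struct vmcb_ctrl_area_cached *g;
            unsigned int i;

            c = &svm->vmcb->control;        /* active vmcb02 */
            h = &svm->vmcb01.ptr->control;  /* L0's baseline intercepts */
            g = &svm->nested.ctl;           /* cached vmcb12 controls */

            /* Start from L0's intercepts, then OR in L1's: intercept
             * whenever either hypervisor wants the event. */
            for (i = 0; i < MAX_INTERCEPT; i++)
                    c->intercepts[i] = h->intercepts[i];
            for (i = 0; i < MAX_INTERCEPT; i++)
                    c->intercepts[i] |= g->intercepts[i];
    }
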
190 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
204 (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
207 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
223 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
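
Lines 190-223 belong to the MSR-permission-map merge done at nested VMRUN: if the Hyper-V enlightened-VMCB clean bit says L1's bitmap is unchanged (lines 190-204), the merge can be skipped; otherwise each tracked 32-bit chunk of vmcb12's bitmap is read from msrpm_base_pa and OR-ed into KVM's own map, so an MSR access exits if either L0 or L1 intercepts it. A sketch of the merge loop (msrpm_offsets[] and its 0xffffffff sentinel are assumptions from mainline):

    for (i = 0; i < MSRPM_OFFSETS; i++) {
            u32 value, p;
            u64 offset;

            if (msrpm_offsets[i] == 0xffffffff)
                    break;          /* end of the tracked offsets */

            p      = msrpm_offsets[i];
            offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

            /* One 32-bit chunk of vmcb12's MSR permission map. */
            if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                    return false;

            /* Exit to L0 if either hypervisor intercepts the MSR. */
            svm->nested.msrpm[p] = svm->msrpm[p] | value;
    }
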
324 struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
326 return __nested_vmcb_check_controls(vcpu, ctl);
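
Lines 324-326 run the control-area consistency checks against the cached copy, both at VMRUN and when userspace restores nested state. The exact set of checks varies by kernel version; two that mainline performs, sketched with its CC() consistency-check macro:

    if (CC(!vmcb12_is_intercept(ctl, INTERCEPT_VMRUN)))
            return false;           /* L1 must intercept VMRUN */

    if (CC(ctl->asid == 0))
            return false;           /* ASID 0 is reserved for the host */
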
377 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
409 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
410 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
423 !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
432 svm->nested.ctl.int_ctl &= ~mask;
433 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
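
Lines 409-433 sync hardware-written state from vmcb02 back into the cached vmcb12 copy after running L2: event_inj/event_inj_err capture an injection the CPU left pending, and int_ctl is merged under a mask so only the bits the processor actually owns (V_IRQ and V_TPR in mainline) overwrite L1's values. The test at line 423 keeps a V_IRQ that L0 set to request an interrupt window from leaking into L1's view; the result is later flushed to guest memory at lines 1026-1028. A sketch close to mainline's shape:

    u32 mask = V_IRQ_MASK | V_TPR_MASK;

    /* V_IRQ set by L0 to open an interrupt window (L1 neither masked
     * interrupts nor intercepts VINTR itself) must not be copied back
     * into the cached vmcb12. */
    if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
        svm_is_intercept(svm, INTERCEPT_VINTR) &&
        !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
            mask &= ~V_IRQ_MASK;

    svm->nested.ctl.int_ctl &= ~mask;
    svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
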
592 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
649 (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
681 svm->nested.ctl.tsc_offset,
691 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
694 vmcb02->control.int_vector = svm->nested.ctl.int_vector;
695 vmcb02->control.int_state = svm->nested.ctl.int_state;
696 vmcb02->control.event_inj = svm->nested.ctl.event_inj;
697 vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;
708 vmcb02->control.next_rip = svm->nested.ctl.next_rip;
718 svm->soft_int_next_rip = svm->nested.ctl.next_rip;
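
Lines 649-718 are vmcb02 setup at nested VMRUN: most interrupt fields (int_vector, int_state, event_inj, next_rip) are copied straight from the cached vmcb12, while int_ctl at line 691 is assembled from two disjoint bit classes so that L1 controls its own V_TPR/V_IRQ bits but L0 keeps V_INTR_MASKING, with vGIF ownership (line 649) flipping between the two depending on whether it is exposed to L1. A sketch of the bit-ownership split (the nested_vgif_enabled() helper name is an assumption from mainline):

    u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
    u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

    /* If vGIF is exposed to L1, its V_GIF bits come from vmcb12;
     * otherwise L0 owns them. */
    if (nested_vgif_enabled(svm))
            int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
    else
            int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

    vmcb02->control.int_ctl =
            (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
            (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
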
727 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
733 pause_count12 = svm->nested.ctl.pause_filter_count;
737 pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
751 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
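
Lines 733-751 derive vmcb02's PAUSE filter from the cached vmcb12 values: when L0 leaves PAUSE filtering to the guest, L1's count and threshold are used verbatim; otherwise L0's values are the starting point, and if L1 also intercepts PAUSE (line 751) a zero count or threshold from L1 forces filtering off. Sketched after mainline:

    if (kvm_pause_in_guest(svm->vcpu.kvm)) {
            /* L0 doesn't intercept PAUSE: use L1's filter as-is. */
            vmcb02->control.pause_filter_count  = pause_count12;
            vmcb02->control.pause_filter_thresh = pause_thresh12;
    } else {
            /* Start from L0's values ... */
            vmcb02->control.pause_filter_count  = vmcb01->control.pause_filter_count;
            vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;

            /* ... but honor L1 disabling the filter. */
            if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
                    if (!pause_count12)
                            vmcb02->control.pause_filter_count = 0;
                    if (!pause_thresh12)
                            vmcb02->control.pause_filter_thresh = 0;
            }
    }
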
1026 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
1027 vmcb12->control.event_inj = svm->nested.ctl.event_inj;
1028 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
1063 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1104 svm->nested.ctl.nested_cr3 = 0;
1168 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
1254 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1268 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
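
Lines 1254-1268 decide, at MSR-intercept exit time, whether the access should be reflected to L1: the MSR number is mapped to an offset in the permission bitmap, the read/write bit comes from exit_info_1, and the relevant 32-bit chunk of L1's bitmap is fetched from msrpm_base_pa. A sketch close to mainline (svm_msrpm_offset() and the two-bits-per-MSR layout are mainline assumptions):

    u32 offset, msr, value;
    int write, mask;

    msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
    offset = svm_msrpm_offset(msr);
    write  = svm->vmcb->control.exit_info_1 & 1;
    mask   = 1 << ((2 * (msr & 0xf)) + write);  /* 2 bits per MSR: read, write */

    if (offset == MSR_INVALID)
            return NESTED_EXIT_DONE;

    offset *= 4;    /* offset is in 32-bit words, the read needs bytes */

    if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset,
                            &value, 4))
            return NESTED_EXIT_DONE;

    return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
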
1281 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1287 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
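
Lines 1281-1287 do the same for I/O: the intercepted port number indexes a bit in L1's I/O permission bitmap at iopm_base_pa, with port/8 selecting the byte and port%8 the starting bit; an access whose bits straddle a byte boundary needs a two-byte read. Sketched after mainline's shape:

    u16 port  = svm->vmcb->control.exit_info_1 >> 16;
    u8  size  = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
                SVM_IOIO_SIZE_SHIFT;
    gpa_t gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
    unsigned int start_bit = port % 8;
    unsigned int iopm_len  = (start_bit + size > 8) ? 2 : 1;
    u16 val = 0, mask = (0xf >> (4 - size)) << start_bit;

    if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
            return NESTED_EXIT_DONE;

    return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
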
1312 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1317 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1335 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1375 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
1416 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
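
Lines 1312-1416 all funnel through vmcb12_is_intercept(), a plain bit test over the cached intercepts[] words (the INTERCEPT_* enum numbers bits across the whole array); the exception path at line 1375 instead indexes the INTERCEPT_EXCEPTION word directly with BIT(vector). The helper, as a sketch of its mainline svm.h form:

    static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control,
                                           u32 bit)
    {
            WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
            return test_bit(bit, (unsigned long *)&control->intercepts);
    }
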
1581 struct vmcb_control_area *ctl;
1625 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
1626 if (!ctl)
1629 nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
1630 r = copy_to_user(&user_vmcb->control, ctl,
1632 kfree(ctl);
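
Lines 1581-1632 are the KVM_GET_NESTED_STATE side: since the cached copy is a packed subset, a scratch vmcb_control_area is allocated, the cache is expanded back into the architectural layout, copied out to userspace, and freed. Roughly, per mainline:

    ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
    if (!ctl)
            return -ENOMEM;

    /* Expand the packed cache back into the architectural layout. */
    nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
    r = copy_to_user(&user_vmcb->control, ctl,
                     sizeof(user_vmcb->control));
    kfree(ctl);
    if (r)
            return -EFAULT;
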
1650 struct vmcb_control_area *ctl;
1694 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1696 if (!ctl || !save)
1700 if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1706 __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
1750 nested_copy_vmcb_control_to_cache(svm, ctl);
1773 kfree(ctl);
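
Lines 1650-1773 are the inverse KVM_SET_NESTED_STATE path: the control area is copied in from userspace, validated through the same consistency checks VMRUN applies (via a temporary cached copy), and only then committed into svm->nested.ctl. A condensed sketch; the error-path labels and the parallel save-area handling visible at line 1696 are elided and assumed:

    ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
    if (!ctl)
            goto out_free;

    ret = -EFAULT;
    if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
            goto out_free;

    /* Validate with the same checks a nested VMRUN would apply. */
    ret = -EINVAL;
    __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
    if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
            goto out_free;

    /* Commit the validated controls into the cache. */
    nested_copy_vmcb_control_to_cache(svm, ctl);
    ret = 0;

    out_free:
            kfree(ctl);
            return ret;
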