Lines Matching defs:events
66 #include <trace/events/ipi.h>
67 #include <trace/events/kvm.h>
4977 * instruction boundary and with no events half-injected.
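This fragment (around line 4977) is from the comment noting that userspace may only inject an event when the vCPU is on an instruction boundary with nothing half-injected. The documented way a VMM (without an in-kernel irqchip) observes that condition is the ready_for_interrupt_injection field of the mmap'ed kvm_run structure. A minimal sketch, assuming vcpu_fd is an existing vCPU file descriptor and run its mmap'ed kvm_run; try_inject_irq() is a hypothetical helper:

	#include <errno.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: deliver IRQ 'vector' via the legacy KVM_INTERRUPT ioctl
	 * only when KVM reports the vCPU is at a safe injection point. */
	static int try_inject_irq(int vcpu_fd, struct kvm_run *run, __u32 vector)
	{
		struct kvm_interrupt irq = { .irq = vector };

		if (!run->ready_for_interrupt_injection || !run->if_flag) {
			/* Ask KVM to exit as soon as injection is possible. */
			run->request_interrupt_window = 1;
			return -EAGAIN;
		}
		return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
	}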
5148 struct kvm_vcpu_events *events)
5185 memset(events, 0, sizeof(*events));
5194 events->exception.injected = ex->injected;
5195 events->exception.pending = ex->pending;
5202 events->exception.injected |= ex->pending;
5204 events->exception.nr = ex->vector;
5205 events->exception.has_error_code = ex->has_error_code;
5206 events->exception.error_code = ex->error_code;
5207 events->exception_has_payload = ex->has_payload;
5208 events->exception_payload = ex->payload;
5210 events->interrupt.injected =
5212 events->interrupt.nr = vcpu->arch.interrupt.nr;
5213 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
5215 events->nmi.injected = vcpu->arch.nmi_injected;
5216 events->nmi.pending = kvm_get_nr_pending_nmis(vcpu);
5217 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu);
5219 /* events->sipi_vector is never valid when reporting to user space */
5222 events->smi.smm = is_smm(vcpu);
5223 events->smi.pending = vcpu->arch.smi_pending;
5224 events->smi.smm_inside_nmi =
5227 events->smi.latched_init = kvm_lapic_latched_init(vcpu);
5229 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
5233 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
5235 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5236 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
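The block above is kvm_vcpu_ioctl_x86_get_vcpu_events() snapshotting pending/injected event state into the userspace-visible struct kvm_vcpu_events (note lines 5194-5208: with KVM_CAP_EXCEPTION_PAYLOAD disabled, a pending exception is folded into the injected bit). A minimal userspace sketch of reading that snapshot through the documented KVM_GET_VCPU_EVENTS ioctl; dump_vcpu_events() is a hypothetical helper and vcpu_fd an assumed existing vCPU fd:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int dump_vcpu_events(int vcpu_fd)
	{
		struct kvm_vcpu_events ev;

		if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0)
			return -1;

		printf("exception: injected=%u pending=%u nr=%u err=0x%x\n",
		       ev.exception.injected, ev.exception.pending,
		       ev.exception.nr, ev.exception.error_code);
		printf("nmi: injected=%u pending=%u masked=%u\n",
		       ev.nmi.injected, ev.nmi.pending, ev.nmi.masked);
		printf("smm=%u smi_pending=%u flags=0x%x\n",
		       ev.smi.smm, ev.smi.pending, ev.flags);
		return 0;
	}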
5241 struct kvm_vcpu_events *events)
5243 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
5251 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
5254 if (events->exception.pending)
5255 events->exception.injected = 0;
5257 events->exception_has_payload = 0;
5259 events->exception.pending = 0;
5260 events->exception_has_payload = 0;
5263 if ((events->exception.injected || events->exception.pending) &&
5264 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
5268 if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
5269 (events->smi.smm || events->smi.pending) &&
5283 vcpu->arch.exception_from_userspace = events->exception.pending;
5287 vcpu->arch.exception.injected = events->exception.injected;
5288 vcpu->arch.exception.pending = events->exception.pending;
5289 vcpu->arch.exception.vector = events->exception.nr;
5290 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
5291 vcpu->arch.exception.error_code = events->exception.error_code;
5292 vcpu->arch.exception.has_payload = events->exception_has_payload;
5293 vcpu->arch.exception.payload = events->exception_payload;
5295 vcpu->arch.interrupt.injected = events->interrupt.injected;
5296 vcpu->arch.interrupt.nr = events->interrupt.nr;
5297 vcpu->arch.interrupt.soft = events->interrupt.soft;
5298 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
5300 events->interrupt.shadow);
5302 vcpu->arch.nmi_injected = events->nmi.injected;
5303 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
5305 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
5306 if (events->nmi.pending)
5309 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
5311 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
5313 vcpu->arch.apic->sipi_vector = events->sipi_vector;
5315 if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
5317 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
5319 kvm_smm_changed(vcpu, events->smi.smm);
5322 vcpu->arch.smi_pending = events->smi.pending;
5324 if (events->smi.smm) {
5325 if (events->smi.smm_inside_nmi)
5332 if (events->smi.smm || events->smi.pending ||
5333 events->smi.smm_inside_nmi)
5338 if (events->smi.latched_init)
5345 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
5348 if (events->triple_fault.pending)
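The block above is kvm_vcpu_ioctl_x86_set_vcpu_events(): exception and interrupt state is always consumed, while the NMI-pending, SIPI, shadow, SMM, payload and triple-fault groups are only consumed when the corresponding KVM_VCPUEVENT_VALID_* bit is set in events->flags (lines 5298, 5303, 5311, 5315, 5345). A hedged sketch of that flag gating from the userspace side, re-arming only the NMI state; queue_nmi() is a hypothetical helper:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int queue_nmi(int vcpu_fd)
	{
		struct kvm_vcpu_events ev;

		/* Round-trip so the unconditionally-consumed exception and
		 * interrupt fields keep their current values. */
		if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0)
			return -1;

		ev.nmi.pending = 1;
		/* Only the groups whose VALID bit is set are applied. */
		ev.flags = KVM_VCPUEVENT_VALID_NMI_PENDING;

		return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
	}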
5815 struct kvm_vcpu_events events;
5817 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
5820 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
5826 struct kvm_vcpu_events events;
5829 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
5832 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
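Lines 5815-5832 are the ioctl dispatch plumbing: KVM_GET_VCPU_EVENTS copies the snapshot out with copy_to_user(), KVM_SET_VCPU_EVENTS copies it in with copy_from_user(). A VMM typically pairs the two for save/restore across migration; a minimal sketch, with copy_vcpu_events() hypothetical and both fds assumed to be valid vCPU fds:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int copy_vcpu_events(int src_fd, int dst_fd)
	{
		struct kvm_vcpu_events ev;

		if (ioctl(src_fd, KVM_GET_VCPU_EVENTS, &ev) < 0)
			return -1;
		/* The flags filled in by GET already mark which groups are
		 * valid, so the snapshot can be fed back to SET unmodified. */
		return ioctl(dst_fd, KVM_SET_VCPU_EVENTS, &ev);
	}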
9643 * handling wake events.
10033 * priority. This handles both "pending" events, i.e. events that have never
10034 * been injected into the guest, and "injected" events, i.e. events that were
10042 * I.e. for exceptions and re-injected events, NOT invoking this on instruction
10045 * For simplicity, KVM uses a single path to inject all events (except events
10047 * instruction boundaries for asynchronous events. However, because VM-Exits
10076 * Process nested events first, as nested VM-Exit supersedes event
10086 * Re-inject exceptions and events *especially* if immediate entry+exit
10106 * new events in order to fully complete the previous instruction.
10130 * Otherwise, continue processing events even if VM-Exit occurred. The
10132 * there may now be events that can be injected into L1.
10146 * New events, other than exceptions, cannot be injected if KVM needs
10188 * Finally, inject interrupt events. If an event cannot be injected
10260 * Mode events (see kvm_inject_realmode_interrupt()).
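The comment fragments from lines 10033-10260 describe kvm_check_and_inject_events()'s two-phase model: events half-delivered on a previous VM-Enter ("injected") are replayed before any never-delivered ("pending") event is evaluated, and nested VM-Exits supersede both. A schematic, self-contained paraphrase of that ordering; every name below is hypothetical, not the kernel's actual code:

	/* Hypothetical sketch of the ordering described above. */
	struct vcpu_sketch {
		int nested_exit_pending;  /* L1 wants to intercept the event */
		int event_injected;       /* half-delivered event to replay  */
		int pending_vector;       /* highest-priority new event, -1 if none */
	};

	static int inject_events_sketch(struct vcpu_sketch *v)
	{
		/* 1. Nested VM-Exit supersedes event re-injection: the
		 *    "original" event may actually be destined for L1. */
		if (v->nested_exit_pending)
			return 1; /* deliver the VM-Exit to L1 */

		/* 2. Replay an event interrupted by a VM-Exit; the guest
		 *    must fully complete the previous instruction before
		 *    any new event can be taken. */
		if (v->event_injected)
			return 2; /* re-inject the previous event */

		/* 3. Only now, on an instruction boundary, inject the
		 *    highest-priority new pending event, if any. */
		return v->pending_vector >= 0 ? 3 : 0;
	}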
10929 * Evaluate nested events before exiting the halted state. This allows
11433 * forcing the guest into INIT/SIPI if those events are supposed to be
11808 vcpu, &vcpu->run->s.regs.events);
11828 struct kvm_vcpu_events events = vcpu->run->s.regs.events;
11830 if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events))
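The final fragments (lines 11808-11830) are the KVM_CAP_SYNC_REGS path: the same get/set helpers operate on vcpu->run->s.regs.events, so userspace can exchange event state through the shared kvm_run page instead of issuing ioctls. A hedged userspace sketch, assuming sync regs are supported and run points at the vCPU's mmap'ed struct kvm_run; sync_queue_nmi() is hypothetical:

	#include <linux/kvm.h>

	static void sync_queue_nmi(struct kvm_run *run)
	{
		run->s.regs.events.nmi.pending = 1;
		run->s.regs.events.flags = KVM_VCPUEVENT_VALID_NMI_PENDING;
		/* Tell KVM to consume the events block on the next KVM_RUN. */
		run->kvm_dirty_regs |= KVM_SYNC_X86_EVENTS;
	}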