Lines matching refs:irq — cross-reference hits for `irq` in the KVM/arm64 vGIC core (vgic.c); the number at the start of each entry is that line's position in the source file.

7 #include <linux/irq.h>
63 struct vgic_irq *irq = NULL;
68 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
69 if (irq->intid != intid)
76 vgic_get_irq_kref(irq);
79 irq = NULL;
84 return irq;
126 void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
130 if (!kref_put(&irq->refcount, vgic_irq_release))
133 list_del(&irq->lpi_list);
136 kfree(irq);
139 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
144 if (irq->intid < VGIC_MIN_LPI)
148 __vgic_put_lpi_locked(kvm, irq);
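The block above (vgic.c lines 63-148) is the LPI reference-counting path: the lookup walks the lpi_list under the lpi_list lock and takes a reference with vgic_get_irq_kref() before returning, while vgic_put_irq()/__vgic_put_lpi_locked() only unlink and kfree() the entry once the last reference drops, and skip non-LPIs (intid < VGIC_MIN_LPI) entirely because those are statically allocated. A minimal userspace sketch of that pattern follows; model_irq, model_dist and MODEL_MIN_LPI are hypothetical stand-ins, and a pthread mutex replaces the raw spinlock.

```c
/* Simplified model of the refcounted LPI lookup/put pattern; the names
 * (model_irq, model_dist, MODEL_MIN_LPI) are hypothetical, not kernel API. */
#include <pthread.h>
#include <stdlib.h>
#include <stdbool.h>

struct model_irq {
	unsigned int intid;
	unsigned int refcount;      /* stands in for the kref */
	struct model_irq *next;     /* stands in for the lpi_list linkage */
};

struct model_dist {
	pthread_mutex_t lpi_list_lock;
	struct model_irq *lpi_list;
};

#define MODEL_MIN_LPI 8192

/* Look up a dynamically allocated LPI and take a reference under the lock,
 * mirroring the list walk at vgic.c:68-79. */
static struct model_irq *model_get_lpi(struct model_dist *dist, unsigned int intid)
{
	struct model_irq *irq;

	pthread_mutex_lock(&dist->lpi_list_lock);
	for (irq = dist->lpi_list; irq; irq = irq->next) {
		if (irq->intid != intid)
			continue;
		irq->refcount++;            /* vgic_get_irq_kref() */
		break;
	}
	pthread_mutex_unlock(&dist->lpi_list_lock);
	return irq;                         /* NULL if not found */
}

/* Drop a reference; unlink and free only when the last one goes away, and
 * only for LPIs (SGIs/PPIs/SPIs are statically allocated and never freed). */
static void model_put_irq(struct model_dist *dist, struct model_irq *irq)
{
	struct model_irq **pp;

	if (irq->intid < MODEL_MIN_LPI)
		return;

	pthread_mutex_lock(&dist->lpi_list_lock);
	if (--irq->refcount == 0) {
		for (pp = &dist->lpi_list; *pp; pp = &(*pp)->next) {
			if (*pp == irq) {
				*pp = irq->next;
				break;
			}
		}
		free(irq);
	}
	pthread_mutex_unlock(&dist->lpi_list_lock);
}
```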
155 struct vgic_irq *irq, *tmp;
160 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
161 if (irq->intid >= VGIC_MIN_LPI) {
162 raw_spin_lock(&irq->irq_lock);
163 list_del(&irq->ap_list);
164 irq->vcpu = NULL;
165 raw_spin_unlock(&irq->irq_lock);
166 vgic_put_irq(vcpu->kvm, irq);
173 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
175 WARN_ON(irq_set_irqchip_state(irq->host_irq,
180 bool vgic_get_phys_line_level(struct vgic_irq *irq)
184 BUG_ON(!irq->hw);
186 if (irq->ops && irq->ops->get_input_level)
187 return irq->ops->get_input_level(irq->intid);
189 WARN_ON(irq_get_irqchip_state(irq->host_irq,
196 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
199 BUG_ON(!irq->hw);
200 WARN_ON(irq_set_irqchip_state(irq->host_irq,
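Lines 173-203 forward the state of a hardware-mapped interrupt to the physical GIC: pending and active are pushed with irq_set_irqchip_state(), and the line level is read either through an optional ops->get_input_level() callback or, failing that, with irq_get_irqchip_state(). A simplified model of that callback-with-fallback follows; the model_* names are hypothetical and a stub stands in for the irqchip query.

```c
/* Model of vgic_get_phys_line_level(): prefer a device-provided callback,
 * otherwise ask the (here simulated) physical interrupt controller.
 * model_irq/model_irq_ops are hypothetical stand-ins, not kernel types. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct model_irq_ops {
	bool (*get_input_level)(int intid);   /* optional */
};

struct model_irq {
	int intid;
	bool hw;                  /* mapped to a physical interrupt */
	int host_irq;
	const struct model_irq_ops *ops;
};

/* Stand-in for irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, ...). */
static bool fake_irqchip_pending(int host_irq)
{
	(void)host_irq;
	return false;
}

static bool model_get_phys_line_level(const struct model_irq *irq)
{
	assert(irq->hw);          /* mirrors BUG_ON(!irq->hw) */

	if (irq->ops && irq->ops->get_input_level)
		return irq->ops->get_input_level(irq->intid);

	return fake_irqchip_pending(irq->host_irq);
}
```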
206 * kvm_vgic_target_oracle - compute the target vcpu for an irq
208 * @irq: The irq to route. Must be already locked.
216 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
218 lockdep_assert_held(&irq->irq_lock);
221 if (irq->active)
222 return irq->vcpu ? : irq->target_vcpu;
230 if (irq->enabled && irq_is_pending(irq)) {
231 if (unlikely(irq->target_vcpu &&
232 !irq->target_vcpu->kvm->arch.vgic.enabled))
235 return irq->target_vcpu;
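vgic_target_oracle() (lines 206-235) picks the vCPU an interrupt should be queued on while its irq_lock is held: an active interrupt stays with the vCPU that is servicing it (falling back to its target if it is not queued anywhere), and a pending, enabled interrupt goes to its target vCPU unless that VM's distributor is disabled. The rule, restated as a small model with hypothetical types:

```c
/* Model of the target-selection rule in vgic_target_oracle().
 * model_vcpu/model_irq are hypothetical stand-ins for kernel structures. */
#include <stdbool.h>
#include <stddef.h>

struct model_vcpu {
	bool vgic_enabled;        /* stands in for kvm->arch.vgic.enabled */
};

struct model_irq {
	bool active;
	bool enabled;
	bool pending;
	struct model_vcpu *vcpu;         /* vCPU whose ap_list holds it, if any */
	struct model_vcpu *target_vcpu;  /* configured affinity */
};

/* Caller is assumed to hold the per-IRQ lock, as the kernel asserts. */
static struct model_vcpu *model_target_oracle(const struct model_irq *irq)
{
	/* An active interrupt must stay where it is being handled until EOI. */
	if (irq->active)
		return irq->vcpu ? irq->vcpu : irq->target_vcpu;

	/* A pending, enabled interrupt goes to its target, but only if that
	 * VM's distributor is enabled at all. */
	if (irq->enabled && irq->pending) {
		if (irq->target_vcpu && !irq->target_vcpu->vgic_enabled)
			return NULL;
		return irq->target_vcpu;
	}

	/* Neither active nor pending: nothing to deliver. */
	return NULL;
}
```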
313 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
315 if (irq->owner != owner)
318 switch (irq->config) {
320 return irq->line_level != level;
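vgic_validate_injection() (lines 313-320) filters injections: a caller that is not the registered owner is rejected, level-sensitive interrupts only accept an actual change of line level, and edge interrupts latch only on an asserted edge. A compact model, with hypothetical names:

```c
/* Model of the injection filter: edge interrupts latch only when asserted,
 * level interrupts only on a real level change. The types and names here
 * are illustrative, not the kernel's. */
#include <stdbool.h>

enum model_config { MODEL_EDGE, MODEL_LEVEL };

struct model_irq {
	enum model_config config;
	bool line_level;
	void *owner;
};

static bool model_validate_injection(const struct model_irq *irq,
				     bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case MODEL_LEVEL:
		return irq->line_level != level;   /* only real transitions */
	case MODEL_EDGE:
	default:
		return level;                      /* only asserted edges latch */
	}
}
```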
336 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
341 lockdep_assert_held(&irq->irq_lock);
344 vcpu = vgic_target_oracle(irq);
345 if (irq->vcpu || !vcpu) {
351 * Otherwise, if the irq is not pending and enabled, it does
355 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
374 * We must unlock the irq lock to take the ap_list_lock where
377 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
382 raw_spin_lock(&irq->irq_lock);
388 * 1) The irq lost its pending state or was disabled behind our
390 * 2) Someone changed the affinity on this irq behind our
396 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
397 raw_spin_unlock(&irq->irq_lock);
401 raw_spin_lock_irqsave(&irq->irq_lock, flags);
406 * Grab a reference to the irq to reflect the fact that it is
409 vgic_get_irq_kref(irq);
410 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
411 irq->vcpu = vcpu;
413 raw_spin_unlock(&irq->irq_lock);
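vgic_queue_irq_unlock() (lines 336-413) is the heart of the locking scheme: it is entered with the irq_lock held, but the lock order is ap_list_lock before irq_lock, so it drops the irq_lock, takes the target vCPU's ap_list_lock, retakes the irq_lock, and only then re-checks that the interrupt was neither queued elsewhere nor retargeted in the meantime before adding it to the ap_list and taking a reference. A sketch of that drop/retake/revalidate pattern follows, with hypothetical names and pthread mutexes in place of the raw spinlocks.

```c
/* Sketch of the "unlock inner, take outer, relock inner, revalidate"
 * pattern used by vgic_queue_irq_unlock(). All names are hypothetical. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct model_vcpu {
	pthread_mutex_t ap_list_lock;
	/* ... per-vCPU list of queued interrupts ... */
};

struct model_irq {
	pthread_mutex_t irq_lock;
	struct model_vcpu *vcpu;      /* non-NULL once queued on a vCPU */
	struct model_vcpu *target;    /* configured affinity */
};

/* Much-simplified stand-in for vgic_target_oracle(). */
static struct model_vcpu *model_oracle(struct model_irq *irq)
{
	return irq->vcpu ? NULL : irq->target;
}

/* Called with irq->irq_lock held; returns with it released, like the
 * kernel's vgic_queue_irq_unlock(). */
static bool model_queue_irq_unlock(struct model_irq *irq)
{
	struct model_vcpu *vcpu;

retry:
	vcpu = model_oracle(irq);
	if (!vcpu) {
		/* Already queued somewhere, or no valid target right now. */
		pthread_mutex_unlock(&irq->irq_lock);
		return false;
	}

	/* Lock order is ap_list_lock -> irq_lock, so drop the inner lock,
	 * take the outer one, then retake the inner one. */
	pthread_mutex_unlock(&irq->irq_lock);
	pthread_mutex_lock(&vcpu->ap_list_lock);
	pthread_mutex_lock(&irq->irq_lock);

	/* While no lock was held, the IRQ may have been queued elsewhere or
	 * retargeted; if so, back out and start over. */
	if (irq->vcpu || vcpu != model_oracle(irq)) {
		pthread_mutex_unlock(&irq->irq_lock);
		pthread_mutex_unlock(&vcpu->ap_list_lock);
		pthread_mutex_lock(&irq->irq_lock);
		goto retry;
	}

	irq->vcpu = vcpu;    /* list_add_tail() onto the ap_list in the kernel */
	pthread_mutex_unlock(&irq->irq_lock);
	pthread_mutex_unlock(&vcpu->ap_list_lock);
	return true;
}
```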
443 struct vgic_irq *irq;
457 irq = vgic_get_irq(kvm, vcpu, intid);
458 if (!irq)
461 raw_spin_lock_irqsave(&irq->irq_lock, flags);
463 if (!vgic_validate_injection(irq, level, owner)) {
465 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
466 vgic_put_irq(kvm, irq);
470 if (irq->config == VGIC_CONFIG_LEVEL)
471 irq->line_level = level;
473 irq->pending_latch = true;
475 vgic_queue_irq_unlock(kvm, irq, flags);
476 vgic_put_irq(kvm, irq);
481 /* @irq->irq_lock must be held */
482 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
501 irq->hw = true;
502 irq->host_irq = host_irq;
503 irq->hwintid = data->hwirq;
504 irq->ops = ops;
508 /* @irq->irq_lock must be held */
509 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
511 irq->hw = false;
512 irq->hwintid = 0;
513 irq->ops = NULL;
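kvm_vgic_map_irq()/kvm_vgic_unmap_irq() (lines 481-513) simply record or clear the physical backing of a virtual interrupt under its irq_lock: the hw flag, the host Linux IRQ number, the physical INTID taken from the host irq_data, and the optional resampling ops. As a trivial model with hypothetical types:

```c
/* Model of kvm_vgic_map_irq()/kvm_vgic_unmap_irq(): plain state updates
 * performed under the per-IRQ lock. Hypothetical types, not kernel API. */
#include <stdbool.h>
#include <stddef.h>

struct model_irq_ops;            /* optional resampling callbacks, opaque here */

struct model_irq {
	bool hw;                 /* true once backed by a physical interrupt */
	unsigned int host_irq;   /* Linux IRQ number used to talk to the host irqchip */
	unsigned int hwintid;    /* physical INTID that ends up in the list register */
	const struct model_irq_ops *ops;
};

/* Both helpers are assumed to run with the per-IRQ lock held, as the
 * kernel comments require. */
static void model_map_irq(struct model_irq *irq, unsigned int host_irq,
			  unsigned int hwintid, const struct model_irq_ops *ops)
{
	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = hwintid;
	irq->ops = ops;
}

static void model_unmap_irq(struct model_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
	irq->ops = NULL;
}
```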
519 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
523 BUG_ON(!irq);
525 raw_spin_lock_irqsave(&irq->irq_lock, flags);
526 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
527 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
528 vgic_put_irq(vcpu->kvm, irq);
544 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
547 if (!irq->hw)
550 raw_spin_lock_irqsave(&irq->irq_lock, flags);
551 irq->active = false;
552 irq->pending_latch = false;
553 irq->line_level = false;
554 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
556 vgic_put_irq(vcpu->kvm, irq);
561 struct vgic_irq *irq;
567 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
568 BUG_ON(!irq);
570 raw_spin_lock_irqsave(&irq->irq_lock, flags);
571 kvm_vgic_unmap_irq(irq);
572 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
573 vgic_put_irq(vcpu->kvm, irq);
580 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
584 raw_spin_lock_irqsave(&irq->irq_lock, flags);
585 if (irq->hw)
586 ret = irq->hwintid;
587 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
589 vgic_put_irq(vcpu->kvm, irq);
605 struct vgic_irq *irq;
616 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
617 raw_spin_lock_irqsave(&irq->irq_lock, flags);
618 if (irq->owner && irq->owner != owner)
621 irq->owner = owner;
622 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
638 struct vgic_irq *irq, *tmp;
645 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
649 raw_spin_lock(&irq->irq_lock);
651 BUG_ON(vcpu != irq->vcpu);
653 target_vcpu = vgic_target_oracle(irq);
660 list_del(&irq->ap_list);
661 irq->vcpu = NULL;
662 raw_spin_unlock(&irq->irq_lock);
668 * we remove the irq from the list, we drop
671 vgic_put_irq(vcpu->kvm, irq);
677 raw_spin_unlock(&irq->irq_lock);
683 raw_spin_unlock(&irq->irq_lock);
701 raw_spin_lock(&irq->irq_lock);
712 if (target_vcpu == vgic_target_oracle(irq)) {
715 list_del(&irq->ap_list);
716 irq->vcpu = target_vcpu;
717 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
721 raw_spin_unlock(&irq->irq_lock);
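The pruning loop (lines 638-721) walks a vCPU's ap_list and either drops interrupts that no longer belong there or migrates them to the vCPU the oracle now reports. Migration needs both vCPUs' ap_list_locks; to avoid deadlock they are taken in a fixed order (lowest vCPU id first), and the oracle is consulted again afterwards because the decision was made before either lock was reacquired. A sketch of that ordered double-lock plus revalidation, with hypothetical names:

```c
/* Sketch of migrating an entry between two per-vCPU lists: take both
 * ap_list locks in a fixed (index-based) order, then re-check the target
 * before moving. Names are hypothetical; mutexes stand in for spinlocks. */
#include <pthread.h>
#include <stdbool.h>

struct model_vcpu {
	int idx;                         /* vCPU id, used only for lock ordering */
	pthread_mutex_t ap_list_lock;
};

struct model_irq {
	pthread_mutex_t irq_lock;
	struct model_vcpu *vcpu;         /* vCPU whose list currently holds the IRQ */
	struct model_vcpu *target;       /* configured affinity */
};

/* Much-simplified oracle: placement follows the configured affinity. */
static struct model_vcpu *model_oracle(struct model_irq *irq)
{
	return irq->target;
}

static void model_migrate(struct model_irq *irq, struct model_vcpu *cur,
			  struct model_vcpu *target)
{
	struct model_vcpu *low  = cur->idx < target->idx ? cur : target;
	struct model_vcpu *high = cur->idx < target->idx ? target : cur;

	/* Always lock the lower-indexed vCPU first so concurrent migrations
	 * cannot deadlock against each other. */
	pthread_mutex_lock(&low->ap_list_lock);
	pthread_mutex_lock(&high->ap_list_lock);
	pthread_mutex_lock(&irq->irq_lock);

	/* Everything was dropped before the two ap_list locks were taken, so
	 * re-check the target; if it changed, leave the entry for the next
	 * pruning pass instead of moving it to a stale destination. */
	if (model_oracle(irq) == target)
		irq->vcpu = target;      /* list_del() + list_add_tail() in the kernel */

	pthread_mutex_unlock(&irq->irq_lock);
	pthread_mutex_unlock(&high->ap_list_lock);
	pthread_mutex_unlock(&low->ap_list_lock);
}
```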
746 struct vgic_irq *irq, int lr)
748 lockdep_assert_held(&irq->irq_lock);
751 vgic_v2_populate_lr(vcpu, irq, lr);
753 vgic_v3_populate_lr(vcpu, irq, lr);
777 struct vgic_irq *irq;
784 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
787 raw_spin_lock(&irq->irq_lock);
789 w = vgic_irq_get_lr_count(irq);
790 raw_spin_unlock(&irq->irq_lock);
802 struct vgic_irq *irq;
816 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
817 raw_spin_lock(&irq->irq_lock);
826 if (multi_sgi && irq->priority > prio) {
827 _raw_spin_unlock(&irq->irq_lock);
831 if (likely(vgic_target_oracle(irq) == vcpu)) {
832 vgic_populate_lr(vcpu, irq, count++);
834 if (irq->source)
835 prio = irq->priority;
838 raw_spin_unlock(&irq->irq_lock);
841 if (!list_is_last(&irq->ap_list,
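The list-register fill path (lines 777-841) first measures how many LRs the sorted ap_list would need, then populates LRs for interrupts whose oracle still points at this vCPU; when multi-source GICv2 SGIs are in flight, interrupts of lower priority than the last queued SGI are held back so the SGI's remaining sources are presented first. A condensed model of that cut-off, with hypothetical names and a fixed LR count:

```c
/* Sketch of the multi-SGI cut-off in the LR fill loop: the list is already
 * sorted by priority, so once a multi-source SGI has been queued we stop at
 * the first entry with a lower (numerically greater) priority. Everything
 * here is a hypothetical model, not the kernel structures. */
#include <stdbool.h>
#include <stdint.h>

struct model_entry {
	uint8_t priority;        /* GIC convention: lower value = higher priority */
	uint8_t source;          /* non-zero: GICv2 SGI with pending source CPUs left */
};

#define MODEL_NR_LR 4            /* number of hardware list registers, for the sketch */

/* Walk a priority-sorted ap_list and "use" list registers; returns how
 * many were filled. */
static int model_flush_lrs(const struct model_entry *list, int n, bool multi_sgi)
{
	uint8_t prio = 0xff;     /* lowest possible priority to start with */
	int count = 0;

	for (int i = 0; i < n && count < MODEL_NR_LR; i++) {
		/* Once a multi-source SGI has been queued, nothing of lower
		 * priority may be presented before its remaining sources. */
		if (multi_sgi && list[i].priority > prio)
			break;

		/* vgic_populate_lr() would program LR 'count' here. */
		count++;

		if (list[i].source)
			prio = list[i].priority;
	}
	return count;
}
```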
976 struct vgic_irq *irq;
991 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
992 raw_spin_lock(&irq->irq_lock);
993 pending = irq_is_pending(irq) && irq->enabled &&
994 !irq->active &&
995 irq->priority < vmcr.pmr;
996 raw_spin_unlock(&irq->irq_lock);
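The pending scan (lines 976-996) reports a deliverable interrupt only if it is pending, enabled, not already active, and of strictly higher priority than the current priority mask from the VMCR. The per-entry predicate, as a model with a hypothetical struct:

```c
/* Model of the per-entry test used when deciding whether a vCPU has a
 * deliverable interrupt. Hypothetical struct, not the kernel's vgic_irq. */
#include <stdbool.h>
#include <stdint.h>

struct model_irq {
	bool pending;
	bool enabled;
	bool active;
	uint8_t priority;
};

static bool model_irq_deliverable(const struct model_irq *irq, uint8_t pmr)
{
	/* Lower numeric value means higher priority; the PMR masks every
	 * interrupt at or below its own priority. */
	return irq->pending && irq->enabled && !irq->active &&
	       irq->priority < pmr;
}
```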
1026 struct vgic_irq *irq;
1033 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
1034 raw_spin_lock_irqsave(&irq->irq_lock, flags);
1035 map_is_active = irq->hw && irq->active;
1036 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
1037 vgic_put_irq(vcpu->kvm, irq);
1062 void vgic_irq_handle_resampling(struct vgic_irq *irq,
1065 if (vgic_irq_is_mapped_level(irq)) {
1068 if (unlikely(vgic_irq_needs_resampling(irq))) {
1069 resample = !(irq->active || irq->pending_latch);
1070 } else if (lr_pending || (lr_deactivated && irq->line_level)) {
1071 irq->line_level = vgic_get_phys_line_level(irq);
1072 resample = !irq->line_level;
1076 vgic_irq_set_phys_active(irq, false);
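vgic_irq_handle_resampling() (lines 1062-1076) deals with hardware-mapped, level-triggered interrupts after the guest has handled them: interrupts that carry their own resampling ops are resampled once they are neither active nor latched pending, otherwise the physical line level is re-read and, if it has dropped, the physical interrupt's active state is cleared so it can fire again. A condensed model, with hypothetical stand-ins for the irqchip-state helpers:

```c
/* Model of the resample decision for a mapped, level-triggered interrupt.
 * All names are hypothetical; model_read_phys_line()/model_clear_phys_active()
 * stand in for the irqchip-state helpers used by the kernel. */
#include <stdbool.h>

struct model_irq {
	bool mapped_level;       /* hw-mapped and level-triggered */
	bool needs_resampling;   /* device supplies its own resampling ops */
	bool active;
	bool pending_latch;
	bool line_level;
};

static bool model_read_phys_line(struct model_irq *irq)
{
	(void)irq;
	return false;            /* pretend the device has dropped the line */
}

static void model_clear_phys_active(struct model_irq *irq)
{
	(void)irq;               /* would deactivate the physical interrupt */
}

static void model_handle_resampling(struct model_irq *irq,
				    bool lr_deactivated, bool lr_pending)
{
	bool resample = false;

	if (!irq->mapped_level)
		return;

	if (irq->needs_resampling) {
		/* Device-managed resampling: fire again once the interrupt is
		 * neither active nor latched pending. */
		resample = !(irq->active || irq->pending_latch);
	} else if (lr_pending || (lr_deactivated && irq->line_level)) {
		/* Re-read the physical line; if it has dropped, let the
		 * physical interrupt fire again. */
		irq->line_level = model_read_phys_line(irq);
		resample = !irq->line_level;
	}

	if (resample)
		model_clear_phys_active(irq);
}
```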