Lines matching refs: irq
7 #include <linux/irq.h>
61 struct vgic_irq *irq = NULL;
66 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
67 if (irq->intid != intid)
74 vgic_get_irq_kref(irq);
77 irq = NULL;
82 return irq;
125 void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
129 if (!kref_put(&irq->refcount, vgic_irq_release))
132 list_del(&irq->lpi_list);
135 kfree(irq);
138 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
143 if (irq->intid < VGIC_MIN_LPI)
147 __vgic_put_lpi_locked(kvm, irq);
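The matches above (lines 61-82 and 125-147) show the LPI lookup and release path: the lookup walks dist->lpi_list_head under a lock and takes a reference with vgic_get_irq_kref(), while vgic_put_irq() short-circuits for non-LPI interrupts and otherwise drops the kref, unlinking and kfree()-ing the entry on the last put. A minimal userspace model of that lookup-with-refcount pattern follows; the types and helper names here are simplified stand-ins, not the kernel structures.

    #include <stdlib.h>
    #include <pthread.h>

    /* Simplified stand-ins for struct vgic_irq and dist->lpi_list_head. */
    struct lpi {
            struct lpi *next;
            unsigned int intid;
            int refcount;                   /* models irq->refcount (a kref) */
    };

    static struct lpi *lpi_list;
    static pthread_mutex_t lpi_list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Models the lookup loop: find by intid, take a reference under the lock. */
    static struct lpi *lpi_get(unsigned int intid)
    {
            struct lpi *found = NULL, *p;

            pthread_mutex_lock(&lpi_list_lock);
            for (p = lpi_list; p; p = p->next) {
                    if (p->intid == intid) {
                            p->refcount++;  /* models vgic_get_irq_kref() */
                            found = p;
                            break;
                    }
            }
            pthread_mutex_unlock(&lpi_list_lock);
            return found;
    }

    /* Models vgic_put_irq(): drop the reference, unlink and free on the last one. */
    static void lpi_put(struct lpi *lpi)
    {
            struct lpi **pp;

            pthread_mutex_lock(&lpi_list_lock);
            if (--lpi->refcount == 0) {
                    for (pp = &lpi_list; *pp; pp = &(*pp)->next) {
                            if (*pp == lpi) {
                                    *pp = lpi->next;  /* models list_del(&irq->lpi_list) */
                                    break;
                            }
                    }
                    free(lpi);                        /* models kfree(irq) */
            }
            pthread_mutex_unlock(&lpi_list_lock);
    }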
154 struct vgic_irq *irq, *tmp;
159 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
160 if (irq->intid >= VGIC_MIN_LPI) {
161 raw_spin_lock(&irq->irq_lock);
162 list_del(&irq->ap_list);
163 irq->vcpu = NULL;
164 raw_spin_unlock(&irq->irq_lock);
165 vgic_put_irq(vcpu->kvm, irq);
172 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
174 WARN_ON(irq_set_irqchip_state(irq->host_irq,
179 bool vgic_get_phys_line_level(struct vgic_irq *irq)
183 BUG_ON(!irq->hw);
185 if (irq->get_input_level)
186 return irq->get_input_level(irq->intid);
188 WARN_ON(irq_get_irqchip_state(irq->host_irq,
195 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
198 BUG_ON(!irq->hw);
199 WARN_ON(irq_set_irqchip_state(irq->host_irq,
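The three helpers matched at lines 172-199 are thin wrappers around the genirq irqchip-state accessors, used when a virtual interrupt is backed by a physical one. A hedged kernel-context fragment showing the underlying calls these lines rely on; the wrapper names here are only for illustration, while irq_get_irqchip_state()/irq_set_irqchip_state() and the IRQCHIP_STATE_* values are the real genirq API.

    #include <linux/interrupt.h>    /* irq_get_irqchip_state()/irq_set_irqchip_state() */
    #include <linux/irq.h>          /* IRQCHIP_STATE_PENDING, IRQCHIP_STATE_ACTIVE */
    #include <linux/bug.h>          /* WARN_ON() */

    /* Forward the virtual pending state to the physical interrupt. */
    static void phys_set_pending(unsigned int host_irq, bool pending)
    {
            WARN_ON(irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, pending));
    }

    /* Sample the physical line level when no get_input_level() callback is set. */
    static bool phys_get_line_level(unsigned int host_irq)
    {
            bool line_level;

            WARN_ON(irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, &line_level));
            return line_level;
    }

    /* Set or clear the active state of the physical interrupt. */
    static void phys_set_active(unsigned int host_irq, bool active)
    {
            WARN_ON(irq_set_irqchip_state(host_irq, IRQCHIP_STATE_ACTIVE, active));
    }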
205 * vgic_target_oracle - compute the target vcpu for an irq
207 * @irq: The irq to route. Must be already locked.
215 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
217 lockdep_assert_held(&irq->irq_lock);
220 if (irq->active)
221 return irq->vcpu ? : irq->target_vcpu;
229 if (irq->enabled && irq_is_pending(irq)) {
230 if (unlikely(irq->target_vcpu &&
231 !irq->target_vcpu->kvm->arch.vgic.enabled))
234 return irq->target_vcpu;
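Pieced together, the matches from vgic_target_oracle() (lines 205-234) encode a simple routing rule: an active interrupt stays where it is (irq->vcpu if already queued, otherwise its target), a pending-and-enabled interrupt goes to its target vcpu unless that VM's vgic is disabled, and anything else goes nowhere. A compact userspace model of that decision, using stand-in types rather than the kernel structs:

    #include <stdbool.h>
    #include <stddef.h>

    struct model_vcpu { bool vgic_enabled; };

    struct model_irq {
            bool active, enabled, pending;
            struct model_vcpu *vcpu;        /* vcpu it is currently queued on, or NULL */
            struct model_vcpu *target_vcpu; /* configured affinity */
    };

    /* Models vgic_target_oracle(): where should this (locked) irq be queued? */
    static struct model_vcpu *target_oracle(struct model_irq *irq)
    {
            /* An active interrupt must stay on the vcpu that is handling it. */
            if (irq->active)
                    return irq->vcpu ? irq->vcpu : irq->target_vcpu;

            /* A pending, enabled interrupt goes to its target, if the vgic is on. */
            if (irq->enabled && irq->pending) {
                    if (irq->target_vcpu && !irq->target_vcpu->vgic_enabled)
                            return NULL;
                    return irq->target_vcpu;
            }

            /* Neither active nor pending: no vcpu needs it right now. */
            return NULL;
    }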
312 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
314 if (irq->owner != owner)
317 switch (irq->config) {
319 return irq->line_level != level;
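vgic_validate_injection() (lines 312-319) decides whether an injection is a genuine new event: the caller's owner cookie must match, and for level-sensitive interrupts only a change of line level counts. A small model of that check; the edge-triggered branch is not among the matched lines, and the assumption here is that it simply returns the injected level, i.e. only an assertion latches:

    #include <stdbool.h>

    enum trig { TRIG_EDGE, TRIG_LEVEL };

    struct inj_irq {
            enum trig config;
            bool line_level;        /* last observed level, level-sensitive only */
            void *owner;            /* opaque owner cookie, see kvm_vgic_set_owner() below */
    };

    /* Models vgic_validate_injection(): is this injection a new event? */
    static bool validate_injection(struct inj_irq *irq, bool level, void *owner)
    {
            if (irq->owner != owner)        /* only the registered owner may inject */
                    return false;

            switch (irq->config) {
            case TRIG_LEVEL:
                    return irq->line_level != level;  /* only a level change matters */
            case TRIG_EDGE:
                    return level;   /* assumed: only an asserting injection latches */
            }
            return false;
    }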
335 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
340 lockdep_assert_held(&irq->irq_lock);
343 vcpu = vgic_target_oracle(irq);
344 if (irq->vcpu || !vcpu) {
350 * Otherwise, if the irq is not pending and enabled, it does
354 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
373 * We must unlock the irq lock to take the ap_list_lock where
376 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
381 raw_spin_lock(&irq->irq_lock);
387 * 1) The irq lost its pending state or was disabled behind our
389 * 2) Someone changed the affinity on this irq behind our
395 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
396 raw_spin_unlock(&irq->irq_lock);
400 raw_spin_lock_irqsave(&irq->irq_lock, flags);
405 * Grab a reference to the irq to reflect the fact that it is
408 vgic_get_irq_kref(irq);
409 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
410 irq->vcpu = vcpu;
412 raw_spin_unlock(&irq->irq_lock);
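vgic_queue_irq_unlock() (lines 335-412) has to take the target vcpu's ap_list lock, but the documented order is ap_list lock before irq_lock, and the caller arrives holding only the irq_lock. The matches show the resulting dance: drop the irq_lock, take both locks in order, then re-run the oracle to detect whether the interrupt was queued, disabled, or retargeted in the unlocked window, retrying if so. A stripped-down sketch of that drop/reacquire/revalidate pattern using two pthread mutexes; it is illustrative only, the kernel uses raw spinlocks plus a vcpu kick at the end.

    #include <pthread.h>

    struct vcpu_model {
            pthread_mutex_t ap_list_lock;
            /* ... per-vcpu ap_list head would live here ... */
    };

    struct irq_model {
            pthread_mutex_t irq_lock;
            struct vcpu_model *vcpu;        /* vcpu it is queued on, or NULL */
            struct vcpu_model *target;      /* stand-in for vgic_target_oracle() */
    };

    /* Called with irq->irq_lock held; returns with it released. */
    static void queue_irq_unlock(struct irq_model *irq)
    {
            struct vcpu_model *vcpu;

    retry:
            vcpu = irq->target;
            if (irq->vcpu || !vcpu) {
                    /* Already queued somewhere, or nowhere to go: bail out. */
                    pthread_mutex_unlock(&irq->irq_lock);
                    return;
            }

            /* Lock order is ap_list_lock -> irq_lock, so drop and reacquire. */
            pthread_mutex_unlock(&irq->irq_lock);

            pthread_mutex_lock(&vcpu->ap_list_lock);
            pthread_mutex_lock(&irq->irq_lock);

            /* Revalidate: the irq may have been queued or retargeted while unlocked. */
            if (irq->vcpu || vcpu != irq->target) {
                    pthread_mutex_unlock(&irq->irq_lock);
                    pthread_mutex_unlock(&vcpu->ap_list_lock);
                    pthread_mutex_lock(&irq->irq_lock);
                    goto retry;
            }

            /* Both locks held in the right order: actually queue it. */
            irq->vcpu = vcpu;
            /* ... list_add_tail(&irq->ap_list, &vcpu->ap_list_head) ... */

            pthread_mutex_unlock(&irq->irq_lock);
            pthread_mutex_unlock(&vcpu->ap_list_lock);
    }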
442 struct vgic_irq *irq;
456 irq = vgic_get_irq(kvm, vcpu, intid);
457 if (!irq)
460 raw_spin_lock_irqsave(&irq->irq_lock, flags);
462 if (!vgic_validate_injection(irq, level, owner)) {
464 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
465 vgic_put_irq(kvm, irq);
469 if (irq->config == VGIC_CONFIG_LEVEL)
470 irq->line_level = level;
472 irq->pending_latch = true;
474 vgic_queue_irq_unlock(kvm, irq, flags);
475 vgic_put_irq(kvm, irq);
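The matches at lines 442-475 belong to kvm_vgic_inject_irq(), the external entry point that ties the above together: look up the struct vgic_irq, validate the injection against owner and trigger type, update line_level or pending_latch, then queue and drop the reference. A hedged usage sketch for a level-sensitive SPI, assuming the (kvm, cpuid, intid, level, owner) signature used by kernels of this vintage; the device function and INTID below are made up for illustration.

    #include <linux/kvm_host.h>
    #include <kvm/arm_vgic.h>

    #define MY_DEVICE_SPI   40      /* hypothetical SPI number, illustration only */

    /* Raise or lower a level-sensitive SPI owned by a hypothetical device model. */
    static int my_device_set_irq(struct kvm *kvm, void *owner, bool level)
    {
            /*
             * For SPIs the routing comes from the distributor rather than the
             * cpuid argument, so 0 is passed here; a PPI would pass the vcpu id.
             */
            return kvm_vgic_inject_irq(kvm, 0, MY_DEVICE_SPI, level, owner);
    }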
480 /* @irq->irq_lock must be held */
481 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
500 irq->hw = true;
501 irq->host_irq = host_irq;
502 irq->hwintid = data->hwirq;
503 irq->get_input_level = get_input_level;
507 /* @irq->irq_lock must be held */
508 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
510 irq->hw = false;
511 irq->hwintid = 0;
512 irq->get_input_level = NULL;
518 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
522 BUG_ON(!irq);
524 raw_spin_lock_irqsave(&irq->irq_lock, flags);
525 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
526 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
527 vgic_put_irq(vcpu->kvm, irq);
543 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
546 if (!irq->hw)
549 raw_spin_lock_irqsave(&irq->irq_lock, flags);
550 irq->active = false;
551 irq->pending_latch = false;
552 irq->line_level = false;
553 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
555 vgic_put_irq(vcpu->kvm, irq);
560 struct vgic_irq *irq;
566 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
567 BUG_ON(!irq);
569 raw_spin_lock_irqsave(&irq->irq_lock, flags);
570 kvm_vgic_unmap_irq(irq);
571 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
572 vgic_put_irq(vcpu->kvm, irq);
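The map/unmap matches (lines 480-572) link a virtual interrupt to a physical one (irq->hw, host_irq, hwintid, get_input_level), kvm_vgic_reset_mapped_irq() clears the pending/active/level state of such an interrupt, and kvm_vgic_unmap_phys_irq() severs the link again. A hedged sketch of the pairing, loosely modelled on how the arch timer forwards its interrupt; the local function names and callback body are illustrative, while the kvm_vgic_* signatures follow what the matched lines suggest for this era of the code.

    #include <linux/kvm_host.h>
    #include <kvm/arm_vgic.h>

    /*
     * Hypothetical sampling callback: report the current level of the emulated
     * line so the vgic can resync line_level without querying the irqchip.
     */
    static bool my_get_input_level(int vintid)
    {
            /* ... return whether the emulated device asserts vintid ... */
            return false;
    }

    /* Forward host_irq into the guest as vintid. */
    static int my_map_forwarded_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
                                    u32 vintid)
    {
            return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid,
                                         my_get_input_level);
    }

    /* Tear the mapping down again, e.g. when the device stops being forwarded. */
    static void my_unmap_forwarded_irq(struct kvm_vcpu *vcpu, u32 vintid)
    {
            kvm_vgic_unmap_phys_irq(vcpu, vintid);
    }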
589 struct vgic_irq *irq;
600 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
601 raw_spin_lock_irqsave(&irq->irq_lock, flags);
602 if (irq->owner && irq->owner != owner)
605 irq->owner = owner;
606 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
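The owner matches at lines 589-606 implement a simple claim protocol: an interrupt can be claimed once, and later injections must present the same opaque owner cookie (compare vgic_validate_injection() above). A minimal model of the claim logic, assuming the claimed-by-someone-else case returns -EEXIST as the matched condition implies:

    #include <errno.h>
    #include <stddef.h>

    struct owned_irq {
            void *owner;    /* opaque cookie identifying the in-kernel user */
    };

    /* Models kvm_vgic_set_owner(): first claim wins, same-owner re-claims are fine. */
    static int set_owner(struct owned_irq *irq, void *owner)
    {
            if (irq->owner && irq->owner != owner)
                    return -EEXIST;         /* already claimed by somebody else */
            irq->owner = owner;
            return 0;
    }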
622 struct vgic_irq *irq, *tmp;
629 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
633 raw_spin_lock(&irq->irq_lock);
635 BUG_ON(vcpu != irq->vcpu);
637 target_vcpu = vgic_target_oracle(irq);
644 list_del(&irq->ap_list);
645 irq->vcpu = NULL;
646 raw_spin_unlock(&irq->irq_lock);
652 * we remove the irq from the list, we drop
655 vgic_put_irq(vcpu->kvm, irq);
661 raw_spin_unlock(&irq->irq_lock);
667 raw_spin_unlock(&irq->irq_lock);
685 raw_spin_lock(&irq->irq_lock);
696 if (target_vcpu == vgic_target_oracle(irq)) {
699 list_del(&irq->ap_list);
700 irq->vcpu = target_vcpu;
701 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
705 raw_spin_unlock(&irq->irq_lock);
730 struct vgic_irq *irq, int lr)
732 lockdep_assert_held(&irq->irq_lock);
735 vgic_v2_populate_lr(vcpu, irq, lr);
737 vgic_v3_populate_lr(vcpu, irq, lr);
761 struct vgic_irq *irq;
768 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
771 raw_spin_lock(&irq->irq_lock);
773 w = vgic_irq_get_lr_count(irq);
774 raw_spin_unlock(&irq->irq_lock);
786 struct vgic_irq *irq;
800 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
801 raw_spin_lock(&irq->irq_lock);
810 if (multi_sgi && irq->priority > prio) {
811 raw_spin_unlock(&irq->irq_lock);
815 if (likely(vgic_target_oracle(irq) == vcpu)) {
816 vgic_populate_lr(vcpu, irq, count++);
818 if (irq->source)
819 prio = irq->priority;
822 raw_spin_unlock(&irq->irq_lock);
825 if (!list_is_last(&irq->ap_list,
957 struct vgic_irq *irq;
972 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
973 raw_spin_lock(&irq->irq_lock);
974 pending = irq_is_pending(irq) && irq->enabled &&
975 !irq->active &&
976 irq->priority < vmcr.pmr;
977 raw_spin_unlock(&irq->irq_lock);
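The loop matched at lines 957-977 walks the AP list and asks, per interrupt, whether it could actually be signalled: it must be pending, enabled, not already active, and of higher priority than the vcpu's current priority mask (vmcr.pmr). The predicate from the matched lines as a stand-alone helper over simplified types; note that on the GIC a numerically lower priority value is more urgent, hence the '<' comparison.

    #include <stdbool.h>
    #include <stdint.h>

    struct pend_irq {
            bool pending, enabled, active;
            uint8_t priority;
    };

    /* Would this interrupt be signalled given the vcpu's current priority mask? */
    static bool irq_can_fire(const struct pend_irq *irq, uint8_t pmr)
    {
            return irq->pending && irq->enabled && !irq->active &&
                   irq->priority < pmr;
    }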
1007 struct vgic_irq *irq;
1014 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
1015 raw_spin_lock_irqsave(&irq->irq_lock, flags);
1016 map_is_active = irq->hw && irq->active;
1017 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
1018 vgic_put_irq(vcpu->kvm, irq);