Lines matching refs: info
251 /* Get info for IRQ */
260 static void set_info_for_irq(unsigned int irq, struct irq_info *info)
263 legacy_info_ptrs[irq] = info;
265 irq_set_chip_data(irq, info);
282 static void channels_on_cpu_dec(struct irq_info *info)
284 if (!info->is_accounted)
287 info->is_accounted = 0;
289 if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
292 WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0));
295 static void channels_on_cpu_inc(struct irq_info *info)
297 if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
300 if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
304 info->is_accounted = 1;
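
The two helpers above keep a per-CPU count of bound event channels without ever wrapping: atomic_add_unless() refuses to decrement a counter already at 0, the increment side carries a matching upper bound (INT_MAX in the mainline driver), and is_accounted guards against double accounting. A minimal userspace sketch of the same saturating pattern, using C11 atomics and a hypothetical MAX_CPUS in place of nr_cpu_ids:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define MAX_CPUS 8                  /* hypothetical stand-in for nr_cpu_ids */

    static atomic_int channels_on_cpu[MAX_CPUS];

    /* Add "delta" unless the counter already holds "forbidden" -- the
     * semantics of the kernel's atomic_add_unless(). */
    static bool add_unless(atomic_int *v, int delta, int forbidden)
    {
        int old = atomic_load(v);

        while (old != forbidden)
            if (atomic_compare_exchange_weak(v, &old, old + delta))
                return true;            /* "old" is refreshed on failure */
        return false;
    }

    static void channel_account(unsigned int cpu)
    {
        if (cpu < MAX_CPUS)             /* kernel: WARN_ON_ONCE and bail out */
            add_unless(&channels_on_cpu[cpu], 1, INT_MAX);  /* no overflow */
    }

    static void channel_unaccount(unsigned int cpu)
    {
        if (cpu < MAX_CPUS)
            add_unless(&channels_on_cpu[cpu], -1, 0);       /* never below 0 */
    }
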
316 struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
318 unsigned int irq = info->irq;
320 /* Remove the info pointer only now, with no potential users left. */
323 kfree(info);
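
delayed_free_irq() runs as an rcu_work item: the info pointer is unpublished first (the comment at line 320) and the kfree() happens only after an RCU grace period, so lockless readers that already fetched the pointer can finish safely. A hedged userspace analogue of the unpublish-then-defer-free pattern using liburcu's call_rcu(); the table and struct names here are illustrative, and the kernel variant additionally defers to process context via INIT_RCU_WORK()/queue_rcu_work():

    #include <stdlib.h>
    #include <urcu.h>               /* liburcu: rcu_assign_pointer(), call_rcu() */

    struct my_info {
        unsigned int irq;
        struct rcu_head rcu;        /* embedded callback head, like ->rwork */
    };

    static struct my_info *table[256];  /* read locklessly under rcu_read_lock() */

    static void free_cb(struct rcu_head *head)
    {
        free(caa_container_of(head, struct my_info, rcu));
    }

    static void remove_info(unsigned int irq)
    {
        struct my_info *info = table[irq];

        rcu_assign_pointer(table[irq], NULL);   /* unpublish first... */
        call_rcu(&info->rcu, free_cb);          /* ...free after a grace period */
    }
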
329 static int xen_irq_info_common_setup(struct irq_info *info,
336 BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
338 info->type = type;
339 info->evtchn = evtchn;
340 info->cpu = cpu;
341 info->mask_reason = EVT_MASK_REASON_EXPLICIT;
342 raw_spin_lock_init(&info->lock);
344 ret = set_evtchn_to_irq(evtchn, info->irq);
348 irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
353 static int xen_irq_info_evtchn_setup(struct irq_info *info,
359 ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
360 info->u.interdomain = dev;
367 static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
370 info->u.ipi = ipi;
372 per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
375 return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
378 static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
381 info->u.virq = virq;
383 per_cpu(virq_to_irq, cpu)[virq] = info->irq;
385 return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
388 static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
392 info->u.pirq.pirq = pirq;
393 info->u.pirq.gsi = gsi;
394 info->u.pirq.domid = domid;
395 info->u.pirq.flags = flags;
397 return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
400 static void xen_irq_info_cleanup(struct irq_info *info)
402 set_evtchn_to_irq(info->evtchn, -1);
403 xen_evtchn_port_remove(info->evtchn, info->cpu);
404 info->evtchn = 0;
405 channels_on_cpu_dec(info);
413 const struct irq_info *info = NULL;
416 info = info_for_irq(irq);
417 if (!info)
420 return info->evtchn;
425 struct irq_info *info = evtchn_to_info(evtchn);
427 return info ? info->irq : -1;
441 static enum ipi_vector ipi_from_irq(struct irq_info *info)
443 BUG_ON(info == NULL);
444 BUG_ON(info->type != IRQT_IPI);
446 return info->u.ipi;
449 static unsigned int virq_from_irq(struct irq_info *info)
451 BUG_ON(info == NULL);
452 BUG_ON(info->type != IRQT_VIRQ);
454 return info->u.virq;
459 struct irq_info *info = info_for_irq(irq);
461 BUG_ON(info == NULL);
462 BUG_ON(info->type != IRQT_PIRQ);
464 return info->u.pirq.pirq;
469 struct irq_info *info = evtchn_to_info(evtchn);
471 return info ? info->cpu : 0;
474 static void do_mask(struct irq_info *info, u8 reason)
478 raw_spin_lock_irqsave(&info->lock, flags);
480 if (!info->mask_reason)
481 mask_evtchn(info->evtchn);
483 info->mask_reason |= reason;
485 raw_spin_unlock_irqrestore(&info->lock, flags);
488 static void do_unmask(struct irq_info *info, u8 reason)
492 raw_spin_lock_irqsave(&info->lock, flags);
494 info->mask_reason &= ~reason;
496 if (!info->mask_reason)
497 unmask_evtchn(info->evtchn);
499 raw_spin_unlock_irqrestore(&info->lock, flags);
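
do_mask()/do_unmask() implement reason-counted masking: independent reasons (explicit, EOI pending, temporary rebind) may each require the channel to stay masked, and the real mask_evtchn()/unmask_evtchn() calls happen only on the empty-to-non-empty transitions of the bitmask, under the per-info raw spinlock. The transition logic as a standalone sketch, with stub mask_hw()/unmask_hw() in place of the event-channel primitives:

    #include <pthread.h>
    #include <stdint.h>

    #define REASON_EXPLICIT    0x01
    #define REASON_EOI_PENDING 0x02
    #define REASON_TEMPORARY   0x04

    struct channel {
        pthread_spinlock_t lock;    /* pthread_spin_init() at setup time */
        uint8_t mask_reason;        /* bitmask of currently active reasons */
    };

    static void mask_hw(struct channel *c)   { (void)c; /* real masking op */ }
    static void unmask_hw(struct channel *c) { (void)c; /* real unmasking op */ }

    static void do_mask(struct channel *c, uint8_t reason)
    {
        pthread_spin_lock(&c->lock);
        if (!c->mask_reason)        /* first reason: actually mask */
            mask_hw(c);
        c->mask_reason |= reason;
        pthread_spin_unlock(&c->lock);
    }

    static void do_unmask(struct channel *c, uint8_t reason)
    {
        pthread_spin_lock(&c->lock);
        c->mask_reason &= ~reason;
        if (!c->mask_reason)        /* last reason dropped: unmask */
            unmask_hw(c);
        pthread_spin_unlock(&c->lock);
    }

The same pattern recurs at lines 1795/1805 and 1919/1921 below, where EVT_MASK_REASON_TEMPORARY brackets an affinity change.
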
511 struct irq_info *info = info_for_irq(irq);
512 BUG_ON(info->type != IRQT_PIRQ);
514 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
517 static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
521 struct irq_data *data = irq_get_irq_data(info->irq);
527 xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
529 channels_on_cpu_dec(info);
530 info->cpu = cpu;
531 channels_on_cpu_inc(info);
559 static void lateeoi_list_del(struct irq_info *info)
561 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
565 list_del_init(&info->eoi_list);
569 static void lateeoi_list_add(struct irq_info *info)
571 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
577 if (now < info->eoi_time)
578 delay = info->eoi_time - now;
586 if (!elem || info->eoi_time < elem->eoi_time) {
587 list_add(&info->eoi_list, &eoi->eoi_list);
588 mod_delayed_work_on(info->eoi_cpu, system_wq,
592 if (elem->eoi_time <= info->eoi_time)
595 list_add(&info->eoi_list, &elem->eoi_list);
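
lateeoi_list_add() keeps each CPU's deferred-EOI list sorted by ascending eoi_time and re-arms the per-CPU delayed work only when the new entry becomes the earliest deadline; the worker (line 666 below) then always pops expired entries from the front. The sorted insert in isolation, using BSD TAILQ macros as a stand-in for list_head:

    #include <stdint.h>
    #include <sys/queue.h>          /* TAILQ_* doubly linked list macros */

    struct eoi_entry {
        uint64_t eoi_time;          /* absolute deadline, in jiffies */
        TAILQ_ENTRY(eoi_entry) link;
    };

    TAILQ_HEAD(eoi_list, eoi_entry);

    /* Insert keeping the queue sorted by eoi_time. Returns nonzero when
     * the entry became the new head, i.e. the caller must re-arm its
     * timer for the earlier deadline (mod_delayed_work_on() in the
     * driver). */
    static int eoi_list_add_sorted(struct eoi_list *q, struct eoi_entry *e)
    {
        struct eoi_entry *it;

        TAILQ_FOREACH(it, q, link)
            if (e->eoi_time < it->eoi_time)
                break;

        if (it)
            TAILQ_INSERT_BEFORE(it, e, link);
        else
            TAILQ_INSERT_TAIL(q, e, link);

        return TAILQ_FIRST(q) == e;
    }
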
601 static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
607 evtchn = info->evtchn;
608 if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
612 struct xenbus_device *dev = info->u.interdomain;
618 if ((1 << info->spurious_cnt) < (HZ << 2)) {
619 if (info->spurious_cnt != 0xFF)
620 info->spurious_cnt++;
622 if (info->spurious_cnt > threshold) {
623 delay = 1 << (info->spurious_cnt - 1 - threshold);
626 if (!info->eoi_time)
627 info->eoi_cpu = smp_processor_id();
628 info->eoi_time = get_jiffies_64() + delay;
635 info->spurious_cnt = 0;
638 cpu = info->eoi_cpu;
639 if (info->eoi_time &&
640 (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
641 lateeoi_list_add(info);
645 info->eoi_time = 0;
648 smp_store_release(&info->is_active, 0);
649 do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
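
The spurious branch in xen_irq_lateeoi_locked() is an exponential backoff: spurious_cnt keeps growing while 2^spurious_cnt stays below 4*HZ (saturating at 0xFF), and once it exceeds the per-device threshold (1 unless the xenbus device overrides it) the EOI, and with it the unmask, is delayed by 2^(spurious_cnt - 1 - threshold) jiffies. A small demo of the resulting delays, assuming HZ=250 and the default threshold:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int HZ = 250, threshold = 1;
        unsigned int cnt = 0;

        for (int event = 1; event <= 12; event++) {
            /* The two checks from xen_irq_lateeoi_locked(). */
            if ((1u << cnt) < (HZ << 2) && cnt != 0xFF)
                cnt++;
            if (cnt > threshold)
                printf("spurious #%d: delay %u jiffies\n",
                       event, 1u << (cnt - 1 - threshold));
        }
        /* Delays grow 1, 2, 4, ... and cap at 256 jiffies (~1s at HZ=250),
         * because cnt stops growing once 1 << cnt reaches HZ << 2. */
        return 0;
    }
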
655 struct irq_info *info;
666 info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
669 if (info == NULL)
672 if (now < info->eoi_time) {
673 mod_delayed_work_on(info->eoi_cpu, system_wq,
675 info->eoi_time - now);
679 list_del_init(&info->eoi_list);
683 info->eoi_time = 0;
685 xen_irq_lateeoi_locked(info, false);
704 struct irq_info *info;
708 info = info_for_irq(irq);
710 if (info)
711 xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
719 struct irq_info *info;
721 info = kzalloc(sizeof(*info), GFP_KERNEL);
722 if (info) {
723 info->irq = irq;
724 info->type = IRQT_UNBOUND;
725 info->refcnt = -1;
726 INIT_RCU_WORK(&info->rwork, delayed_free_irq);
728 set_info_for_irq(irq, info);
735 INIT_LIST_HEAD(&info->eoi_list);
736 list_add_tail(&info->list, &xen_irq_list_head);
739 return info;
745 struct irq_info *info = NULL;
748 info = xen_irq_init(irq);
749 if (!info)
753 return info;
759 struct irq_info *info;
776 info = xen_irq_init(irq);
777 if (!info)
780 return info;
783 static void xen_free_irq(struct irq_info *info)
785 if (WARN_ON(!info))
788 if (!list_empty(&info->eoi_list))
789 lateeoi_list_del(info);
791 list_del(&info->list);
793 WARN_ON(info->refcnt > 0);
795 queue_rcu_work(system_wq, &info->rwork);
799 static void event_handler_exit(struct irq_info *info)
801 smp_store_release(&info->is_active, 0);
802 clear_evtchn(info->evtchn);
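
event_handler_exit() clears is_active with release semantics before clearing the pending bit; the release pairs with the xchg_acquire() in handle_irq_for_port() (line 1688 below), so whichever CPU next wins the is_active latch also observes everything the previous handler wrote. The latch in miniature with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct evt {
        atomic_bool is_active;      /* at most one handler per channel */
    };

    /* Returns true if the caller won the latch and should run the
     * handler; mirrors the kernel's xchg_acquire(&info->is_active, 1). */
    static bool handler_enter(struct evt *e)
    {
        return !atomic_exchange_explicit(&e->is_active, true,
                                         memory_order_acquire);
    }

    /* Mirrors smp_store_release(&info->is_active, 0): all stores made by
     * the handler become visible to the next winner of handler_enter(). */
    static void handler_exit(struct evt *e)
    {
        atomic_store_explicit(&e->is_active, false, memory_order_release);
    }
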
808 struct irq_info *info = info_for_irq(irq);
810 BUG_ON(info->type != IRQT_PIRQ);
816 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
818 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
823 struct irq_info *info = info_for_irq(data->irq);
824 evtchn_port_t evtchn = info ? info->evtchn : 0;
831 event_handler_exit(info);
848 struct irq_info *info = info_for_irq(irq);
852 BUG_ON(info->type != IRQT_PIRQ);
859 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
874 info->evtchn = evtchn;
875 bind_evtchn_to_cpu(info, 0, false);
882 do_unmask(info, EVT_MASK_REASON_EXPLICIT);
902 struct irq_info *info = info_for_irq(irq);
905 BUG_ON(info->type != IRQT_PIRQ);
910 do_mask(info, EVT_MASK_REASON_EXPLICIT);
911 xen_irq_info_cleanup(info);
927 struct irq_info *info;
929 list_for_each_entry(info, &xen_irq_list_head, list) {
930 if (info->type != IRQT_PIRQ)
933 if (info->u.pirq.gsi == gsi)
934 return info->irq;
941 static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
946 if (!info) {
951 if (info->refcnt > 0) {
952 info->refcnt--;
953 if (info->refcnt != 0)
957 evtchn = info->evtchn;
960 unsigned int cpu = info->cpu;
963 if (!info->is_static)
966 switch (info->type) {
968 per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
971 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
972 per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
975 dev = info->u.interdomain;
983 xen_irq_info_cleanup(info);
989 xen_free_irq(info);
1005 struct irq_info *info;
1018 info = xen_allocate_irq_gsi(gsi);
1019 if (!info)
1022 irq_op.irq = info->irq;
1030 xen_free_irq(info);
1035 ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
1038 __unbind_from_irq(info, info->irq);
1042 pirq_query_unmask(info->irq);
1059 irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
1062 irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
1065 ret = info->irq;
1092 struct irq_info *info;
1101 info = xen_irq_init(irq + i);
1102 if (!info) {
1109 ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
1124 info = info_for_irq(irq + nvec);
1125 __unbind_from_irq(info, irq + nvec);
1135 struct irq_info *info = info_for_irq(irq);
1145 if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
1146 unmap_irq.pirq = info->u.pirq.pirq;
1147 unmap_irq.domid = info->u.pirq.domid;
1153 if (rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)
1155 info->u.pirq.domid, info->u.pirq.pirq);
1162 xen_free_irq(info);
1173 struct irq_info *info;
1177 list_for_each_entry(info, &xen_irq_list_head, list) {
1178 if (info->type != IRQT_PIRQ)
1180 irq = info->irq;
1181 if (info->u.pirq.pirq == pirq)
1202 struct irq_info *info;
1209 info = evtchn_to_info(evtchn);
1211 if (!info) {
1212 info = xen_allocate_irq_dynamic();
1213 if (!info)
1216 irq_set_chip_and_handler_name(info->irq, chip,
1219 ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
1221 __unbind_from_irq(info, info->irq);
1231 bind_evtchn_to_cpu(info, 0, false);
1232 } else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
1233 if (shared && !WARN_ON(info->refcnt < 0))
1234 info->refcnt++;
1237 ret = info->irq;
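
For shared event channels, bind_evtchn_to_irq_chip() reuses an existing binding instead of failing: the first caller allocates the irq_info (refcnt starts at -1, i.e. not refcounted, line 725), and later shared binders merely bump refcnt once it is non-negative. Get-or-create in outline; evtchn_table, make_binding() and the pretend irq numbering are illustrative only:

    #include <stdbool.h>
    #include <stdlib.h>

    struct binding {
        int irq;
        int refcnt;                 /* -1: not refcounted; >0: shared users */
    };

    static struct binding *evtchn_table[4096];  /* illustrative port lookup */

    static struct binding *make_binding(unsigned int port)
    {
        struct binding *b = calloc(1, sizeof(*b));

        if (b) {
            b->irq = (int)port;     /* pretend irq == port for the sketch */
            b->refcnt = -1;         /* not refcounted until opted in */
        }
        return b;
    }

    static int bind_evtchn(unsigned int port, bool shared)
    {
        struct binding *b = evtchn_table[port];

        if (!b) {
            b = make_binding(port); /* first binder creates the mapping */
            if (!b)
                return -1;
            evtchn_table[port] = b;
        } else if (shared && b->refcnt >= 0) {
            b->refcnt++;            /* one more shared user of the port */
        }
        return b->irq;
    }
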
1261 struct irq_info *info;
1269 info = xen_allocate_irq_dynamic();
1270 if (!info)
1273 irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
1282 ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
1284 __unbind_from_irq(info, info->irq);
1291 bind_evtchn_to_cpu(info, cpu, true);
1292 ret = info->irq;
1294 info = info_for_irq(ret);
1295 WARN_ON(info == NULL || info->type != IRQT_IPI);
1369 struct irq_info *info;
1377 info = xen_allocate_irq_dynamic();
1378 if (!info)
1382 irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
1385 irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
1400 ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
1402 __unbind_from_irq(info, info->irq);
1410 bind_evtchn_to_cpu(info, cpu, percpu);
1411 ret = info->irq;
1413 info = info_for_irq(ret);
1414 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
1425 struct irq_info *info;
1428 info = info_for_irq(irq);
1429 __unbind_from_irq(info, irq);
1554 struct irq_info *info = info_for_irq(irq);
1556 if (WARN_ON(!info))
1582 struct irq_info *info = evtchn_to_info(evtchn);
1584 if (!info)
1587 WARN_ON(info->refcnt != -1);
1589 info->refcnt = 1;
1590 info->is_static = is_static;
1598 struct irq_info *info;
1606 info = evtchn_to_info(evtchn);
1608 if (!info)
1612 if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
1615 info->refcnt++;
1626 struct irq_info *info = evtchn_to_info(evtchn);
1628 if (WARN_ON(!info))
1630 unbind_from_irq(info->irq);
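
evtchn_make_refcounted(), evtchn_get() and evtchn_put() form an opt-in reference count on top of that binding: refcnt stays -1 until make_refcounted flips it to 1, get rejects disabled or dead counts (<= 0) as well as saturation at SHRT_MAX, and the final put unbinds the IRQ. The guards in miniature (locking elided; the driver serializes these paths itself):

    #include <limits.h>
    #include <stdbool.h>

    struct chn {
        short refcnt;               /* -1 means "not refcounted" */
    };

    static void chn_make_refcounted(struct chn *c)
    {
        c->refcnt = 1;              /* driver also WARNs unless it was -1 */
    }

    static bool chn_get(struct chn *c)
    {
        if (c->refcnt <= 0 || c->refcnt == SHRT_MAX)
            return false;           /* disabled, dead, or saturated */
        c->refcnt++;
        return true;
    }

    /* Returns true when the last reference drops; the caller must then
     * tear the binding down (unbind_from_irq() in the driver). */
    static bool chn_put(struct chn *c)
    {
        return --c->refcnt == 0;
    }
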
1660 struct irq_info *info = evtchn_to_info(port);
1663 if (!info)
1688 if (xchg_acquire(&info->is_active, 1))
1691 dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
1696 info->eoi_cpu = smp_processor_id();
1697 info->irq_epoch = __this_cpu_read(irq_epoch);
1698 info->eoi_time = get_jiffies_64() + event_eoi_delay;
1701 generic_handle_irq(info->irq);
1747 struct irq_info *info = info_for_irq(irq);
1749 if (WARN_ON(!info))
1762 BUG_ON(info->type == IRQT_UNBOUND);
1764 info->irq = irq;
1765 (void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
1769 bind_evtchn_to_cpu(info, info->cpu, false);
1776 static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
1779 evtchn_port_t evtchn = info ? info->evtchn : 0;
1795 do_mask(info, EVT_MASK_REASON_TEMPORARY);
1803 bind_evtchn_to_cpu(info, tcpu, false);
1805 do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1853 struct irq_info *info = info_for_irq(data->irq);
1854 evtchn_port_t evtchn = info ? info->evtchn : 0;
1857 do_unmask(info, EVT_MASK_REASON_EXPLICIT);
1862 struct irq_info *info = info_for_irq(data->irq);
1863 evtchn_port_t evtchn = info ? info->evtchn : 0;
1866 do_mask(info, EVT_MASK_REASON_EXPLICIT);
1871 struct irq_info *info = info_for_irq(data->irq);
1872 evtchn_port_t evtchn = info ? info->evtchn : 0;
1875 event_handler_exit(info);
1886 struct irq_info *info = info_for_irq(data->irq);
1887 evtchn_port_t evtchn = info ? info->evtchn : 0;
1890 do_mask(info, EVT_MASK_REASON_EOI_PENDING);
1902 struct irq_info *info = info_for_irq(data->irq);
1903 evtchn_port_t evtchn = info ? info->evtchn : 0;
1906 do_mask(info, EVT_MASK_REASON_EXPLICIT);
1907 event_handler_exit(info);
1913 struct irq_info *info = info_for_irq(data->irq);
1914 evtchn_port_t evtchn = info ? info->evtchn : 0;
1919 do_mask(info, EVT_MASK_REASON_TEMPORARY);
1921 do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1930 struct irq_info *info;
1932 list_for_each_entry(info, &xen_irq_list_head, list) {
1933 if (info->type != IRQT_PIRQ)
1936 pirq = info->u.pirq.pirq;
1937 gsi = info->u.pirq.gsi;
1938 irq = info->irq;
1954 xen_free_irq(info);
1968 struct irq_info *info;
1974 info = info_for_irq(irq);
1976 BUG_ON(virq_from_irq(info) != virq);
1987 xen_irq_info_virq_setup(info, cpu, evtchn, virq);
1989 bind_evtchn_to_cpu(info, cpu, false);
1997 struct irq_info *info;
2003 info = info_for_irq(irq);
2005 BUG_ON(ipi_from_irq(info) != ipi);
2015 xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
2017 bind_evtchn_to_cpu(info, cpu, false);
2024 struct irq_info *info = info_for_irq(irq);
2025 evtchn_port_t evtchn = info ? info->evtchn : 0;
2028 event_handler_exit(info);
2078 struct irq_info *info = info_for_irq(irq);
2081 if (WARN_ON(!info))
2084 irq_status.irq = info->u.pirq.pirq;
2095 struct irq_info *info;
2101 list_for_each_entry(info, &xen_irq_list_head, list) {
2103 info->evtchn = 0;
2105 channels_on_cpu_dec(info);