Lines matching refs:its_dev
108 u64 (*get_msi_base)(struct its_device *its_dev);
233 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
234 return d->hwirq - its_dev->event_map.lpi_base;
237 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
240 struct its_node *its = its_dev->its;
242 return its->collections + its_dev->event_map.col_map[event];
245 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
248 if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
251 return &its_dev->event_map.vlpi_maps[event];
257 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
260 return dev_event_to_vlpi_map(its_dev, event);
296 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
297 cpu = its_dev->event_map.col_map[its_get_event_id(d)];
1479 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1481 WARN_ON(!is_v4_1(its_dev->its));
1495 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1499 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1502 its_send_inv(its_dev, its_get_event_id(d));
1504 its_send_vinv(its_dev, its_get_event_id(d));
1509 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1517 if (is_v4_1(its_dev->its))
1520 map = dev_event_to_vlpi_map(its_dev, event);
1537 its_send_vmovi(its_dev, event);
1604 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1610 node = its_dev->its->numa_node;
1642 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1660 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1676 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1685 prev_cpu = its_dev->event_map.col_map[id];
1698 target_col = &its_dev->its->collections[cpu];
1699 its_send_movi(its_dev, target_col, id);
1700 its_dev->event_map.col_map[id] = cpu;
1713 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1715 struct its_node *its = its_dev->its;
1722 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1726 its = its_dev->its;
1727 addr = its->get_msi_base(its_dev);
1740 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1748 its_send_vint(its_dev, event);
1750 its_send_vclear(its_dev, event);
1753 its_send_int(its_dev, event);
1755 its_send_clear(its_dev, event);
1841 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1848 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1850 if (!its_dev->event_map.vm) {
1853 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1860 its_dev->event_map.vm = info->map->vm;
1861 its_dev->event_map.vlpi_maps = maps;
1862 } else if (its_dev->event_map.vm != info->map->vm) {
1868 its_dev->event_map.vlpi_maps[event] = *info->map;
1872 its_send_vmovi(its_dev, event);
1875 its_map_vm(its_dev->its, info->map->vm);
1887 its_send_discard(its_dev, event);
1890 its_send_vmapti(its_dev, event);
1893 its_dev->event_map.nr_vlpis++;
1897 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1903 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1907 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1911 if (!its_dev->event_map.vm || !map) {
1920 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1926 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1930 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1932 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1938 its_send_discard(its_dev, event);
1942 its_send_mapti(its_dev, d->hwirq, event);
1948 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1954 if (!--its_dev->event_map.nr_vlpis) {
1955 its_dev->event_map.vm = NULL;
1956 kfree(its_dev->event_map.vlpi_maps);
1960 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1966 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1968 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1982 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1986 if (!is_v4(its_dev->its))
3281 struct its_device *its_dev = NULL, *tmp;
3288 its_dev = tmp;
3295 return its_dev;
3478 static void its_free_device(struct its_device *its_dev)
3482 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3483 list_del(&its_dev->entry);
3484 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3485 kfree(its_dev->event_map.col_map);
3486 kfree(its_dev->itt);
3487 kfree(its_dev);
3510 struct its_device *its_dev;
3537 its_dev = its_find_device(its, dev_id);
3538 if (its_dev) {
3544 its_dev->shared = true;
3549 its_dev = its_create_device(its, dev_id, nvec, true);
3550 if (!its_dev) {
3556 its_dev->shared = true;
3561 info->scratchpad[0].ptr = its_dev;
3597 struct its_device *its_dev = info->scratchpad[0].ptr;
3598 struct its_node *its = its_dev->its;
3604 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3608 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3618 hwirq + i, &its_irq_chip, its_dev);
3624 (int)(hwirq + i - its_dev->event_map.lpi_base),
3634 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3643 its_dev->event_map.col_map[event] = cpu;
3647 its_send_mapti(its_dev, d->hwirq, event);
3654 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3657 its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3659 its_send_discard(its_dev, event);
3666 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3667 struct its_node *its = its_dev->its;
3670 bitmap_release_region(its_dev->event_map.lpi_map,
3687 if (!its_dev->shared &&
3688 bitmap_empty(its_dev->event_map.lpi_map,
3689 its_dev->event_map.nr_lpis)) {
3690 its_lpi_free(its_dev->event_map.lpi_map,
3691 its_dev->event_map.lpi_base,
3692 its_dev->event_map.nr_lpis);
3695 its_send_mapd(its_dev, 0);
3696 its_free_device(its_dev);
4707 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4709 struct its_node *its = its_dev->its;
4718 return its->pre_its_base + (its_dev->device_id << 2);
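
Taken together, the matches follow one recurring pattern: an irq_data carries the its_device as its chip data, the event ID is the hwirq's offset from the device's lpi_base, and that event indexes col_map to reach the target collection on the owning ITS node. The following is a minimal self-contained sketch of that lookup chain, with trimmed stand-in struct definitions (the real types in drivers/irqchip/irq-gic-v3-its.c and include/linux/irq.h carry many more fields, and the real code goes through the irq_data_get_irq_chip_data() accessor rather than touching chip_data directly):

	/* Trimmed stand-ins for the kernel types referenced above. */
	struct its_collection { int target_cpu; };

	struct its_node {
		struct its_collection *collections;	/* one entry per CPU */
	};

	struct event_lpi_map {
		unsigned long lpi_base;		/* first hwirq of the device's LPI block */
		int nr_lpis;
		unsigned short *col_map;	/* event -> collection index */
	};

	struct its_device {
		struct its_node *its;
		struct event_lpi_map event_map;
	};

	struct irq_data {
		unsigned long hwirq;
		void *chip_data;		/* holds the its_device, per the matches above */
	};

	/* Event ID = offset of this interrupt within the device's LPI block. */
	static unsigned int its_get_event_id(struct irq_data *d)
	{
		struct its_device *its_dev = d->chip_data;

		return d->hwirq - its_dev->event_map.lpi_base;
	}

	/* The event indexes col_map to find the collection on the owning ITS. */
	static struct its_collection *dev_event_to_col(struct its_device *its_dev,
						       unsigned int event)
	{
		struct its_node *its = its_dev->its;

		return its->collections + its_dev->event_map.col_map[event];
	}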