Lines matching refs: devid (a simplified usage sketch follows the listing)
132 return p->devid;
140 int devid;
143 devid = get_pci_device_id(dev);
145 devid = get_acpihid_device_id(dev, NULL);
147 return devid;
186 static struct iommu_dev_data *alloc_dev_data(u16 devid)
195 dev_data->devid = devid;
202 static struct iommu_dev_data *search_dev_data(u16 devid)
212 if (dev_data->devid == devid)
221 u16 devid = pci_dev_id(pdev);
223 if (devid == alias)
227 amd_iommu_rlookup_table[devid];
229 amd_iommu_dev_table[devid].data,
273 static struct iommu_dev_data *find_dev_data(u16 devid)
276 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
278 dev_data = search_dev_data(devid);
281 dev_data = alloc_dev_data(devid);
298 int devid;
300 devid = get_acpihid_device_id(dev, &entry);
301 if (devid < 0)
302 return ERR_PTR(devid);
305 if ((devid == p->devid) && p->group)
352 int devid;
357 devid = get_device_id(dev);
358 if (devid < 0)
362 if (devid > amd_iommu_last_bdf)
365 if (amd_iommu_rlookup_table[devid] == NULL)
374 int devid;
379 devid = get_device_id(dev);
380 if (devid < 0)
381 return devid;
383 dev_data = find_dev_data(devid);
399 iommu = amd_iommu_rlookup_table[dev_data->devid];
410 int devid;
412 devid = get_device_id(dev);
413 if (devid < 0)
416 amd_iommu_rlookup_table[devid] = NULL;
417 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
470 static void dump_dte_entry(u16 devid)
476 amd_iommu_dev_table[devid].data[i]);
491 int devid, vmg_tag, flags;
495 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
500 pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
501 devid & 0xff);
510 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
521 int devid, flags_rmp, vmg_tag, flags;
525 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
531 pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
532 devid & 0xff);
541 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
549 static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
555 pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
556 devid & 0xff);
565 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
576 int type, devid, flags, tag;
584 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
601 amd_iommu_report_page_fault(devid, pasid, address, flags);
608 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
610 dump_dte_entry(devid);
615 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
620 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
633 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
638 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
651 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
787 pr_debug("%s: devid=%#x, ga_tag=%#x\n",
931 static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
934 cmd->data[0] = devid;
969 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
990 cmd->data[0] = devid;
992 cmd->data[1] = devid;
1018 static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
1025 cmd->data[0] = devid;
1028 cmd->data[1] = devid;
1038 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
1043 cmd->data[0] = devid;
1060 static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1063 cmd->data[0] = devid;
1158 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1162 build_inv_dte(&cmd, devid);
1169 u32 devid;
1171 for (devid = 0; devid <= 0xffff; ++devid)
1172 iommu_flush_dte(iommu, devid);
1216 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1220 build_inv_irt(&cmd, devid);
1227 u32 devid;
1229 for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
1230 iommu_flush_irt(iommu, devid);
1257 iommu = amd_iommu_rlookup_table[dev_data->devid];
1259 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1280 iommu = amd_iommu_rlookup_table[dev_data->devid];
1286 ret = iommu_flush_dte(iommu, dev_data->devid);
1290 alias = amd_iommu_alias_table[dev_data->devid];
1291 if (alias != dev_data->devid) {
1908 static void set_dte_entry(u16 devid, struct protection_domain *domain,
1923 flags = amd_iommu_dev_table[devid].data[1];
1929 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1964 old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
1965 amd_iommu_dev_table[devid].data[1] = flags;
1966 amd_iommu_dev_table[devid].data[0] = pte_root;
1974 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1980 static void clear_dte_entry(u16 devid)
1983 amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV;
1984 amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
1986 amd_iommu_apply_erratum_63(devid);
1996 iommu = amd_iommu_rlookup_table[dev_data->devid];
2009 set_dte_entry(dev_data->devid, domain, &pgtable,
2021 iommu = amd_iommu_rlookup_table[dev_data->devid];
2026 clear_dte_entry(dev_data->devid);
2228 int ret, devid;
2233 devid = get_device_id(dev);
2234 if (devid < 0)
2235 return ERR_PTR(devid);
2237 iommu = amd_iommu_rlookup_table[devid];
2270 int devid = get_device_id(dev);
2276 iommu = amd_iommu_rlookup_table[devid];
2322 set_dte_entry(dev_data->devid, domain, pgtable,
2526 int devid;
2531 devid = get_device_id(dev);
2532 if (devid < 0)
2538 iommu = amd_iommu_rlookup_table[devid];
2565 iommu = amd_iommu_rlookup_table[dev_data->devid];
2671 int devid;
2673 devid = get_device_id(dev);
2674 if (devid < 0)
2681 if (devid < entry->devid_start || devid > entry->devid_end)
2920 iommu = amd_iommu_rlookup_table[dev_data->devid];
2922 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3087 iommu = amd_iommu_rlookup_table[dev_data->devid];
3089 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
3195 static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3199 dte = amd_iommu_dev_table[devid].data[2];
3206 amd_iommu_dev_table[devid].data[2] = dte;
3209 static struct irq_remap_table *get_irq_table(u16 devid)
3213 if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
3214 "%s: no iommu for devid %x\n", __func__, devid))
3217 table = irq_lookup_table[devid];
3218 if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
3248 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
3251 irq_lookup_table[devid] = table;
3252 set_dte_irq_entry(devid, table);
3253 iommu_flush_dte(iommu, devid);
3269 static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
3279 iommu = amd_iommu_rlookup_table[devid];
3283 table = irq_lookup_table[devid];
3287 alias = amd_iommu_alias_table[devid];
3290 set_remap_table_entry(iommu, devid, table);
3302 table = irq_lookup_table[devid];
3308 set_remap_table_entry(iommu, devid, table);
3319 set_remap_table_entry(iommu, devid, table);
3321 if (devid != alias)
3337 static int alloc_irq_index(u16 devid, int count, bool align,
3343 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3348 table = alloc_irq_table(devid, pdev);
3387 static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
3396 iommu = amd_iommu_rlookup_table[devid];
3400 table = get_irq_table(devid);
3425 iommu_flush_irt(iommu, devid);
3431 static int modify_irte(u16 devid, int index, union irte *irte)
3437 iommu = amd_iommu_rlookup_table[devid];
3441 table = get_irq_table(devid);
3449 iommu_flush_irt(iommu, devid);
3455 static void free_irte(u16 devid, int index)
3461 iommu = amd_iommu_rlookup_table[devid];
3465 table = get_irq_table(devid);
3473 iommu_flush_irt(iommu, devid);
3479 u8 vector, u32 dest_apicid, int devid)
3493 u8 vector, u32 dest_apicid, int devid)
3507 static void irte_activate(void *entry, u16 devid, u16 index)
3512 modify_irte(devid, index, irte);
3515 static void irte_ga_activate(void *entry, u16 devid, u16 index)
3520 modify_irte_ga(devid, index, irte, NULL);
3523 static void irte_deactivate(void *entry, u16 devid, u16 index)
3528 modify_irte(devid, index, irte);
3531 static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
3536 modify_irte_ga(devid, index, irte, NULL);
3539 static void irte_set_affinity(void *entry, u16 devid, u16 index,
3546 modify_irte(devid, index, irte);
3549 static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
3560 modify_irte_ga(devid, index, irte, NULL);
3615 return get_ioapic_devid(info->devid);
3618 return get_hpet_devid(info->devid);
3629 int devid)
3631 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3648 int devid;
3653 devid = get_devid(info);
3654 if (devid < 0)
3656 return get_irq_domain_for_devid(info, devid);
3671 int devid, int index, int sub_handle)
3676 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3681 data->irq_2_irte.devid = devid;
3685 irq_cfg->dest_apicid, devid);
3743 int i, ret, devid;
3759 devid = get_devid(info);
3760 if (devid < 0)
3771 table = alloc_irq_table(devid, NULL);
3779 iommu = amd_iommu_rlookup_table[devid];
3792 index = alloc_irq_index(devid, nr_irqs, align,
3795 index = alloc_irq_index(devid, nr_irqs, false, NULL);
3827 irq_data->hwirq = (devid << 16) + i;
3830 irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3843 free_irte(devid, index + i);
3862 free_irte(irte_info->devid, irte_info->index);
3880 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3886 iommu->irte_ops->activate(data->entry, irte_info->devid,
3897 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3900 iommu->irte_ops->deactivate(data->entry, irte_info->devid,
3932 return modify_irte_ga(ir_data->irq_2_irte.devid,
3962 return modify_irte_ga(ir_data->irq_2_irte.devid,
3975 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
3997 iommu = amd_iommu_rlookup_table[irte_info->devid];
4034 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
4046 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
4108 int devid = ir_data->irq_2_irte.devid;
4116 iommu = amd_iommu_rlookup_table[devid];
4120 table = get_irq_table(devid);
4139 iommu_flush_irt(iommu, devid);
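
A minimal, stand-alone sketch of the devid pattern visible in the listing: a 16-bit device id (a PCI bus/dev/fn, or an id derived from an ACPI-HID entry) is resolved once and then used to index per-device tables such as the rlookup table and the device table (DTE). The struct layout, table types, and function bodies below are simplified placeholders that only mirror the shape of the listed symbols (get_device_id at lines 140-147, the checks at lines 357-365, set_dte_entry at line 1908); they are not the kernel's definitions.

/*
 * Simplified model of devid resolution and devid-indexed table updates.
 * All names ending in _model, plus the table sizes and DTE layout, are
 * illustrative stand-ins, not the driver's real code.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_DEV_TABLE_ENTRIES	0x10000		/* full 16-bit devid space */
#define PCI_BUS_NUM(devid)	(((devid) >> 8) & 0xff)
#define PCI_SLOT(devid)		(((devid) >> 3) & 0x1f)
#define PCI_FUNC(devid)		((devid) & 0x07)

struct dev_table_entry { uint64_t data[4]; };	/* stand-in for the real DTE */

static struct dev_table_entry dev_table[MAX_DEV_TABLE_ENTRIES];
static void *rlookup_table[MAX_DEV_TABLE_ENTRIES];	/* devid -> owning IOMMU */

/* Mirrors the flow of get_device_id() (lines 140-147): negative means error. */
static int get_device_id_model(int dev_is_pci, int pci_devid, int acpihid_devid)
{
	int devid;

	if (dev_is_pci)
		devid = pci_devid;	/* stands in for get_pci_device_id(dev) */
	else
		devid = acpihid_devid;	/* stands in for get_acpihid_device_id(dev, NULL) */

	return devid;
}

/* Mirrors the devid-indexed DTE update done by set_dte_entry() (line 1908). */
static void set_dte_model(uint16_t devid, uint64_t pte_root, uint64_t flags)
{
	dev_table[devid].data[1] = flags;
	dev_table[devid].data[0] = pte_root;
}

int main(void)
{
	static int fake_iommu;			/* placeholder for a struct amd_iommu */
	int devid;

	rlookup_table[0x0300] = &fake_iommu;	/* pretend an IOMMU owns 03:00.0 */

	devid = get_device_id_model(1, 0x0300, -1);

	/* Same shape as the checks at lines 357-365: bad id or no owning IOMMU. */
	if (devid < 0 || devid >= MAX_DEV_TABLE_ENTRIES || !rlookup_table[devid]) {
		fprintf(stderr, "devid %d is not managed by any IOMMU\n", devid);
		return 1;
	}

	set_dte_model((uint16_t)devid, 0x1 /* fake page-table root */, 0x0);

	printf("devid %04x -> %02x:%02x.%x, dte[0]=%#llx\n",
	       devid, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
	       (unsigned long long)dev_table[devid].data[0]);
	return 0;
}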