Lines matching refs:devid (AMD IOMMU driver, drivers/iommu/amd/iommu.c)

104 			return p->devid;
143 u32 devid = get_acpihid_device_id(dev, NULL);
145 seg = PCI_SBDF_TO_SEGID(devid);
152 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
156 pci_seg->rlookup_table[devid] = iommu;
159 static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
165 return pci_seg->rlookup_table[devid];
173 int devid = get_device_sbdf_id(dev);
175 if (devid < 0)
177 return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
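
The fragments at lines 173-177 form the devid-to-IOMMU lookup path: get_device_sbdf_id() returns a negative errno or a 32-bit value packing the PCI segment in the upper 16 bits and the 16-bit devid (bus << 8 | devfn) in the lower half, which PCI_SBDF_TO_SEGID()/PCI_SBDF_TO_DEVID() split before indexing the per-segment rlookup table. A minimal sketch of how those pieces fit together (only the devid-matching lines appear above; the error check is filled in here):

static struct amd_iommu *rlookup_sketch(struct device *dev)
{
	int sbdf = get_device_sbdf_id(dev);	/* seg << 16 | devid, or < 0 on error */

	if (sbdf < 0)
		return NULL;

	return __rlookup_amd_iommu(PCI_SBDF_TO_SEGID(sbdf),
				   PCI_SBDF_TO_DEVID(sbdf));
}
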
185 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
195 dev_data->devid = devid;
202 static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
213 if (dev_data->devid == devid)
224 u16 devid = pci_dev_id(pdev);
226 if (devid == alias)
236 dev_table[devid].data,
282 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
286 dev_data = search_dev_data(iommu, devid);
289 dev_data = alloc_dev_data(iommu, devid);
306 int devid;
308 devid = get_acpihid_device_id(dev, &entry);
309 if (devid < 0)
310 return ERR_PTR(devid);
313 if ((devid == p->devid) && p->group)
353 int devid, sbdf;
361 devid = PCI_SBDF_TO_DEVID(sbdf);
369 if (devid > pci_seg->last_bdf)
378 int devid, sbdf;
387 devid = PCI_SBDF_TO_DEVID(sbdf);
388 dev_data = find_dev_data(iommu, devid);
415 int devid, sbdf;
421 devid = PCI_SBDF_TO_DEVID(sbdf);
422 pci_seg->rlookup_table[devid] = NULL;
423 memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
453 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
459 pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
474 int devid, vmg_tag, flags;
478 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
483 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
484 devid & 0xff);
495 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
506 int devid, flags_rmp, vmg_tag, flags;
510 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
516 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
517 devid & 0xff);
528 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
543 u16 devid, u16 domain_id,
549 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
550 devid & 0xff);
565 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
566 PCI_FUNC(devid), domain_id);
584 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
596 int type, devid, flags, tag;
604 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
621 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
628 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
630 dump_dte_entry(iommu, devid);
635 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
640 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
653 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
658 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
671 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
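
All of the event-log reporters above (lines 474-671) handle devid the same way: the requester ID is pulled from event[0] with EVENT_DEVID_SHIFT/EVENT_DEVID_MASK, its low byte is the devfn passed to pci_get_domain_bus_and_slot(), and faults are printed as seg:bus:dev.fn. A condensed sketch of that pattern (the message text here is illustrative, not the driver's exact wording):

static void report_event_sketch(struct amd_iommu *iommu, u32 *event)
{
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
					   PCI_BUS_NUM(devid), devid & 0xff);
	if (pdev)
		dev_err(&pdev->dev, "IOMMU event logged for this device\n");
	else
		pr_err("IOMMU event for device %04x:%02x:%02x.%x\n",
		       iommu->pci_seg->id, PCI_BUS_NUM(devid),
		       PCI_SLOT(devid), PCI_FUNC(devid));

	pci_dev_put(pdev);	/* safe to call with NULL */
}
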
816 pr_debug("%s: devid=%#x, ga_tag=%#x\n",
981 static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
984 cmd->data[0] = devid;
1044 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
1050 cmd->data[0] = devid;
1052 cmd->data[1] = devid;
1076 static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
1083 cmd->data[0] = devid;
1086 cmd->data[1] = devid;
1096 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
1101 cmd->data[0] = devid;
1118 static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1121 cmd->data[0] = devid;
1216 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1220 build_inv_dte(&cmd, devid);
1227 u32 devid;
1230 for (devid = 0; devid <= last_bdf; ++devid)
1231 iommu_flush_dte(iommu, devid);
1276 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1280 build_inv_irt(&cmd, devid);
1287 u32 devid;
1293 for (devid = 0; devid <= last_bdf; devid++)
1294 iommu_flush_irt(iommu, devid);
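
The build_inv_* helpers above all follow the same shape: the target devid lands in cmd.data[0] (and, for the IOTLB variants, data[1] as well), after which the command is queued to the hardware. iommu_queue_command() is assumed from the surrounding driver, since it does not itself reference devid and so is not matched here. A sketch of the single-device flush, which the flush-all loops at lines 1230-1231 and 1293-1294 simply repeat for every devid up to the segment's last_bdf:

static int flush_dte_sketch(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);	/* cmd.data[0] = devid */

	return iommu_queue_command(iommu, &cmd);
}
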
1325 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1359 ret = iommu_flush_dte(iommu, dev_data->devid);
1364 alias = pci_seg->alias_table[dev_data->devid];
1365 if (alias != dev_data->devid) {
1580 static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
1603 flags = dev_table[devid].data[1];
1639 dev_table[devid].data[2] |=
1650 old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
1651 dev_table[devid].data[1] = flags;
1652 dev_table[devid].data[0] = pte_root;
1664 static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
1669 dev_table[devid].data[0] = DTE_FLAG_V;
1672 dev_table[devid].data[0] |= DTE_FLAG_TV;
1674 dev_table[devid].data[1] &= DTE_FLAG_MASK;
1676 amd_iommu_apply_erratum_63(iommu, devid);
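
Lines 1664-1676 are clear_dte_entry(), which resets a device table entry while keeping it valid. Only its devid-bearing lines are matched above; a sketch of the whole function, with get_dev_table() and amd_iommu_snp_en assumed from parts of the driver this search does not show:

static void clear_dte_entry_sketch(struct amd_iommu *iommu, u16 devid)
{
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	/* keep the entry valid, drop the translation */
	dev_table[devid].data[0] = DTE_FLAG_V;

	if (!amd_iommu_snp_en)
		dev_table[devid].data[0] |= DTE_FLAG_TV;

	dev_table[devid].data[1] &= DTE_FLAG_MASK;

	amd_iommu_apply_erratum_63(iommu, devid);
}
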
1703 set_dte_entry(iommu, dev_data->devid, domain,
1722 clear_dte_entry(iommu, dev_data->devid);
1982 set_dte_entry(iommu, dev_data->devid, domain,
2349 int devid, sbdf;
2355 devid = PCI_SBDF_TO_DEVID(sbdf);
2365 if (devid < entry->devid_start || devid > entry->devid_end)
2619 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
2784 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
2844 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
2854 build_inv_irt(&cmd, devid);
2870 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
2876 dte = dev_table[devid].data[2];
2883 dev_table[devid].data[2] = dte;
2886 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
2891 if (WARN_ONCE(!pci_seg->rlookup_table[devid],
2892 "%s: no iommu for devid %x:%x\n",
2893 __func__, pci_seg->id, devid))
2896 table = pci_seg->irq_lookup_table[devid];
2897 if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
2898 __func__, pci_seg->id, devid))
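
get_irq_table() (lines 2886-2898) is almost entirely visible above; both the per-segment rlookup_table and irq_lookup_table are indexed by devid. A sketch with only the NULL returns and the pci_seg local filled in:

static struct irq_remap_table *get_irq_table_sketch(struct amd_iommu *iommu, u16 devid)
{
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	struct irq_remap_table *table;

	if (WARN_ONCE(!pci_seg->rlookup_table[devid],
		      "%s: no iommu for devid %x:%x\n",
		      __func__, pci_seg->id, devid))
		return NULL;

	table = pci_seg->irq_lookup_table[devid];
	if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
		      __func__, pci_seg->id, devid))
		return NULL;

	return table;
}
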
2928 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
2933 pci_seg->irq_lookup_table[devid] = table;
2934 set_dte_irq_entry(iommu, devid, table);
2935 iommu_flush_dte(iommu, devid);
2957 u16 devid, struct pci_dev *pdev)
2968 table = pci_seg->irq_lookup_table[devid];
2972 alias = pci_seg->alias_table[devid];
2975 set_remap_table_entry(iommu, devid, table);
2987 table = pci_seg->irq_lookup_table[devid];
2993 set_remap_table_entry(iommu, devid, table);
3004 set_remap_table_entry(iommu, devid, table);
3006 if (devid != alias)
3022 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
3029 table = alloc_irq_table(iommu, devid, pdev);
3068 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3076 table = get_irq_table(iommu, devid);
3096 iommu_flush_irt_and_complete(iommu, devid);
3102 u16 devid, int index, union irte *irte)
3107 table = get_irq_table(iommu, devid);
3115 iommu_flush_irt_and_complete(iommu, devid);
3120 static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
3125 table = get_irq_table(iommu, devid);
3133 iommu_flush_irt_and_complete(iommu, devid);
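
modify_irte_ga(), modify_irte() and free_irte() (lines 3068-3133) share one flow per devid: look up the remap table with get_irq_table(), update the entry, then invalidate that devid's IRT via iommu_flush_irt_and_complete(). A sketch of the non-GA update, with the table lock and entry layout assumed from the surrounding driver rather than taken from this listing:

static int modify_irte_sketch(struct amd_iommu *iommu, u16 devid,
			      int index, union irte *irte)
{
	struct irq_remap_table *table;
	unsigned long flags;

	table = get_irq_table(iommu, devid);
	if (!table)
		return -ENOMEM;

	raw_spin_lock_irqsave(&table->lock, flags);
	table->table[index] = irte->val;	/* assumed 32-bit entry layout */
	raw_spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt_and_complete(iommu, devid);

	return 0;
}
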
3138 u8 vector, u32 dest_apicid, int devid)
3152 u8 vector, u32 dest_apicid, int devid)
3166 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3171 modify_irte(iommu, devid, index, irte);
3174 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3179 modify_irte_ga(iommu, devid, index, irte);
3182 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3187 modify_irte(iommu, devid, index, irte);
3190 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3195 modify_irte_ga(iommu, devid, index, irte);
3198 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3205 modify_irte(iommu, devid, index, irte);
3208 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3219 modify_irte_ga(iommu, devid, index, irte);
3273 return get_ioapic_devid(info->devid);
3275 return get_hpet_devid(info->devid);
3304 int devid, int index, int sub_handle)
3312 data->irq_2_irte.devid = devid;
3316 irq_cfg->dest_apicid, devid);
3360 int i, ret, devid, seg, sbdf;
3373 devid = PCI_SBDF_TO_DEVID(sbdf);
3374 iommu = __rlookup_amd_iommu(seg, devid);
3385 table = alloc_irq_table(iommu, devid, NULL);
3405 index = alloc_irq_index(iommu, devid, nr_irqs, align,
3408 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
3441 irq_data->hwirq = (devid << 16) + i;
3444 irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3457 free_irte(iommu, devid, index + i);
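
Line 3441 encodes both the devid and the IRTE slot into the hwirq as (devid << 16) + i. Two hypothetical decode helpers (not present in the driver) make that packing explicit:

static inline u16 hwirq_to_devid_sketch(irq_hw_number_t hwirq)
{
	return hwirq >> 16;	/* devid stored in the upper half */
}

static inline u16 hwirq_to_index_sketch(irq_hw_number_t hwirq)
{
	return hwirq & 0xffff;	/* IRTE index within the table */
}
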
3476 free_irte(data->iommu, irte_info->devid, irte_info->index);
3500 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
3514 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
3522 int devid = -1;
3528 devid = get_ioapic_devid(fwspec->param[0]);
3530 devid = get_hpet_devid(fwspec->param[0]);
3532 if (devid < 0)
3534 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
3568 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3598 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3615 dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
3670 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
3779 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,