Lines matching refs: devid
109 u16 devid;
127 u16 devid;
145 u16 devid;
287 static inline void update_last_devid(u16 devid)
289 if (devid > amd_iommu_last_bdf)
290 amd_iommu_last_bdf = devid;
584 update_last_devid(dev->devid);
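
Lines 287-290 plus the call at 584 are a running maximum: every devid discovered while parsing the ACPI tables widens amd_iommu_last_bdf, and the later whole-table loops (999, 2238, 2248, 2261) use it as their upper bound. A minimal user-space sketch of the same pattern:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t amd_iommu_last_bdf;   /* highest devid seen so far */

    /* Widen the tracked range whenever a larger devid shows up. */
    static void update_last_devid(uint16_t devid)
    {
        if (devid > amd_iommu_last_bdf)
            amd_iommu_last_bdf = devid;
    }

    int main(void)
    {
        uint16_t seen[] = { 0x0008, 0x00a0, 0x0010 };
        for (unsigned i = 0; i < 3; i++)
            update_last_devid(seen[i]);
        printf("last bdf: %#x\n", amd_iommu_last_bdf);   /* -> 0xa0 */
        return 0;
    }
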
920 static void set_dev_entry_bit(u16 devid, u8 bit)
925 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
928 static int get_dev_entry_bit(u16 devid, u8 bit)
933 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
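
The matches at 920-933 show only the read-modify-write statements of set_dev_entry_bit()/get_dev_entry_bit(); the computation of i and _bit is not matched. A self-contained sketch, assuming (as the data[i]/_bit naming suggests) a 256-bit device-table entry stored as four 64-bit words, with the high bits of the bit argument selecting the word and the low six bits the position:

    #include <stdint.h>
    #include <stdio.h>

    struct dev_table_entry { uint64_t data[4]; };   /* 256-bit DTE */
    static struct dev_table_entry dev_table[256];   /* toy table */

    static void set_dev_entry_bit(uint16_t devid, uint8_t bit)
    {
        int i    = (bit >> 6) & 0x03;   /* assumed split: word index... */
        int _bit = bit & 0x3f;          /* ...and bit position in the word */
        dev_table[devid].data[i] |= (1ULL << _bit);
    }

    static int get_dev_entry_bit(uint16_t devid, uint8_t bit)
    {
        int i    = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;
        return (dev_table[devid].data[i] & (1ULL << _bit)) >> _bit;
    }

    int main(void)
    {
        set_dev_entry_bit(3, 0x60);                   /* word 1, bit 32 */
        printf("%d\n", get_dev_entry_bit(3, 0x60));   /* -> 1 */
        return 0;
    }
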
941 u32 lo, hi, devid, old_devtb_size;
999 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1000 old_dev_tbl_cpy[devid] = old_devtb[devid];
1001 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
1002 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
1005 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
1006 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
1009 if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
1012 old_dev_tbl_cpy[devid].data[1] &= ~tmp;
1015 old_dev_tbl_cpy[devid].data[0] &= ~tmp;
1019 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
1020 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
1021 int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
1025 pr_err("Wrong old irq remapping flag: %#x\n", devid);
1029 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
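
Lines 941-1029 belong to the kdump path that copies the previous kernel's device table: entries that were valid and had a domain id are kept, with the guest-virtualization (GV/GCR3) bits masked out, and data[2] is carried over only when its IRQ-remap enable, INTCTL, and table-length fields look sane (the pr_err at 1025 rejects the table otherwise). A condensed sketch; the mask values here are made up, standing in for DTE_FLAG_V, DTE_FLAG_GV, DEV_DOMID_MASK, and DTE_IRQ_REMAP_ENABLE:

    #include <stdint.h>

    struct dte { uint64_t data[4]; };

    /* Illustrative bit positions only; the real constants differ. */
    #define DTE_FLAG_V        (1ULL << 0)
    #define DTE_FLAG_GV       (1ULL << 55)
    #define DEV_DOMID_MASK    0xffffULL
    #define IRQ_REMAP_ENABLE  (1ULL << 0)

    static void copy_old_dev_table(struct dte *dst, const struct dte *src,
                                   uint16_t last_bdf)
    {
        for (uint32_t devid = 0; devid <= last_bdf; ++devid) {
            uint64_t dom_id = src[devid].data[1] & DEV_DOMID_MASK;
            int dte_v = (src[devid].data[0] & DTE_FLAG_V) != 0;

            dst[devid] = src[devid];          /* raw copy (line 1000) */
            if (dte_v && dom_id &&
                (src[devid].data[0] & DTE_FLAG_GV)) {
                /* guest (GCR3) state cannot survive the kexec: mask it */
                dst[devid].data[0] &= ~DTE_FLAG_GV;
            }
            /* the real loop also validates the INTCTL and table-length
             * fields before keeping the interrupt-remapping word */
            if (!(src[devid].data[2] & IRQ_REMAP_ENABLE))
                dst[devid].data[2] = 0;
        }
    }

    int main(void)
    {
        struct dte oldt[2] = { { { DTE_FLAG_V, 5, IRQ_REMAP_ENABLE, 0 } } };
        struct dte newt[2] = { { { 0 } } };
        copy_old_dev_table(newt, oldt, 1);
        return 0;
    }
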
1037 void amd_iommu_apply_erratum_63(u16 devid)
1041 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
1042 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
1045 set_dev_entry_bit(devid, DEV_ENTRY_IW);
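
Lines 1037-1045 are the erratum 63 workaround: the entry's two SYSMGT bits are read back as a 2-bit field, and one particular encoding forces the IW (write-permission) bit on. The comparison between 1042 and 1045 is not among the matches, so the == 0x01 below is an assumption, as are the bit numbers:

    #include <stdint.h>

    struct dte { uint64_t data[4]; };
    static struct dte dev_table[256];

    /* Stand-ins for the kernel's DEV_ENTRY_* bit numbers. */
    #define DEV_ENTRY_SYSMGT1  0x68
    #define DEV_ENTRY_SYSMGT2  0x69
    #define DEV_ENTRY_IW       0x3e

    static void set_bit_(uint16_t devid, uint8_t bit)
    {
        dev_table[devid].data[(bit >> 6) & 3] |= 1ULL << (bit & 0x3f);
    }

    static int get_bit_(uint16_t devid, uint8_t bit)
    {
        return (dev_table[devid].data[(bit >> 6) & 3] >> (bit & 0x3f)) & 1;
    }

    static void apply_erratum_63(uint16_t devid)
    {
        uint8_t sysmgt = get_bit_(devid, DEV_ENTRY_SYSMGT1) |
                         (get_bit_(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)   /* assumed test */
            set_bit_(devid, DEV_ENTRY_IW);
    }

    int main(void)
    {
        set_bit_(0, DEV_ENTRY_SYSMGT1);   /* sysmgt field = 01b */
        apply_erratum_63(0);
        return get_bit_(0, DEV_ENTRY_IW) ? 0 : 1;
    }
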
1049 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
1051 amd_iommu_rlookup_table[devid] = iommu;
1059 u16 devid, u32 flags, u32 ext_flags)
1062 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
1064 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
1066 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
1068 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
1070 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
1072 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
1074 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
1076 amd_iommu_apply_erratum_63(devid);
1078 set_iommu_for_device(iommu, devid);
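
Lines 1059-1078 translate IVHD flag bits into device-table-entry bits, run the erratum 63 fixup, and finally bind the device to its IOMMU via the rlookup table (1049-1051). Only the set_dev_entry_bit() calls are matched; the guarding flag tests are assumptions, and the kernel's if-chain is compressed into a table here. Flag names mimic the kernel's ACPI_DEVFLAG_* constants, but the values are made up:

    #include <stdint.h>
    #include <stddef.h>

    /* Assumed IVHD flag bits (kernel: ACPI_DEVFLAG_INITPASS etc.). */
    #define FLAG_INITPASS (1u << 0)
    #define FLAG_EXTINT   (1u << 1)
    #define FLAG_NMI      (1u << 2)
    #define FLAG_SYSMGT1  (1u << 4)
    #define FLAG_SYSMGT2  (1u << 5)
    #define FLAG_LINT0    (1u << 6)
    #define FLAG_LINT1    (1u << 7)

    struct iommu { int id; };
    static struct iommu *rlookup[65536];   /* devid -> owning IOMMU */
    static uint32_t dte_bits[65536];       /* toy stand-in for the DTE */

    static void set_dev_entry_from_acpi(struct iommu *iommu, uint16_t devid,
                                        uint32_t flags)
    {
        /* one DTE bit per IVHD flag, mirroring lines 1062-1074 */
        static const struct { uint32_t flag; int bit; } map[] = {
            { FLAG_INITPASS, 0 }, { FLAG_EXTINT,  1 }, { FLAG_NMI,   2 },
            { FLAG_SYSMGT1,  3 }, { FLAG_SYSMGT2, 4 },
            { FLAG_LINT0,    5 }, { FLAG_LINT1,   6 },
        };
        for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
            if (flags & map[i].flag)
                dte_bits[devid] |= 1u << map[i].bit;

        /* erratum 63 fixup would run here (1076); then the device is
         * bound to its IOMMU, i.e. set_iommu_for_device() (1078) */
        rlookup[devid] = iommu;
    }

    int main(void)
    {
        struct iommu iommu0 = { 0 };
        set_dev_entry_from_acpi(&iommu0, 0x40a5, FLAG_NMI | FLAG_LINT0);
        return rlookup[0x40a5] == &iommu0 ? 0 : 1;
    }
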
1081 int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
1100 *devid = entry->devid;
1110 entry->devid = *devid;
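
In add_special_device() (1081-1110), devid is an in/out parameter: when the IOAPIC/HPET id is already on the list, the caller gets the previously recorded devid back (1100); otherwise the caller's devid is stored in the new entry (1110). A sketch of that lookup-or-insert shape, with a fixed-size array standing in for the kernel's linked list:

    #include <stdint.h>
    #include <stddef.h>

    struct special_dev { uint8_t type, id; uint16_t devid; };
    static struct special_dev specials[32];
    static size_t nr_specials;

    /* Returns 0 on success; *devid may be rewritten, mirroring line 1100. */
    static int add_special_device(uint8_t type, uint8_t id, uint16_t *devid)
    {
        for (size_t i = 0; i < nr_specials; i++) {
            if (specials[i].type == type && specials[i].id == id) {
                *devid = specials[i].devid;   /* report the existing one */
                return 0;
            }
        }
        if (nr_specials == sizeof(specials) / sizeof(specials[0]))
            return -1;
        specials[nr_specials].type  = type;
        specials[nr_specials].id    = id;
        specials[nr_specials].devid = *devid; /* record the new one */
        nr_specials++;
        return 0;
    }

    int main(void)
    {
        uint16_t devid = 0x00a0;
        return add_special_device(1 /* e.g. IOAPIC */, 4, &devid);
    }
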
1118 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
1132 *devid = entry->devid;
1142 entry->devid = *devid;
1144 entry->root_devid = (entry->devid & (~0x7));
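
At 1144 the root devid is the entry's devid with the low three bits cleared. Since a 16-bit devid is laid out bus[15:8] dev[7:3] fn[2:0], masking with ~0x7 strips the PCI function number, so every function of a multi-function device shares the root at function 0. A quick check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t devid = (0x40 << 8) | (0x02 << 3) | 0x5;   /* 40:02.5 */
        uint16_t root  = devid & ~0x7;                      /* -> 40:02.0 */

        printf("%02x:%02x.%x -> %02x:%02x.%x\n",
               devid >> 8, (devid >> 3) & 0x1f, devid & 0x7,
               root  >> 8, (root  >> 3) & 0x1f, root  & 0x7);
        return 0;
    }
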
1161 &early_ioapic_map[i].devid,
1170 &early_hpet_map[i].devid,
1179 &early_acpihid_map[i].devid,
1197 u16 devid = 0, devid_start = 0, devid_to = 0;
1242 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1244 PCI_BUS_NUM(e->devid),
1245 PCI_SLOT(e->devid),
1246 PCI_FUNC(e->devid),
1249 devid = e->devid;
1250 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
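
Each DUMP_printk in this parser renders the 16-bit devid as bus:slot.function via PCI_BUS_NUM()/PCI_SLOT()/PCI_FUNC(). The field layout those macros assume also explains iommu->devid & 0xff at 1823: the low byte is exactly the devfn that pci_get_domain_bus_and_slot() takes. A standalone decode:

    #include <stdint.h>
    #include <stdio.h>

    /* layout: bus[15:8] slot[7:3] fn[2:0] */
    #define BUS_NUM(d)  (((d) >> 8) & 0xff)
    #define SLOT(d)     (((d) >> 3) & 0x1f)
    #define FUNC(d)     ((d) & 0x07)

    int main(void)
    {
        uint16_t devid = 0x40a5;
        printf("devid: %02x:%02x.%x\n",
               BUS_NUM(devid), SLOT(devid), FUNC(devid));
        return 0;   /* prints "devid: 40:14.5" */
    }
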
1255 "devid: %02x:%02x.%x flags: %02x\n",
1256 PCI_BUS_NUM(e->devid),
1257 PCI_SLOT(e->devid),
1258 PCI_FUNC(e->devid),
1261 devid_start = e->devid;
1268 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1270 PCI_BUS_NUM(e->devid),
1271 PCI_SLOT(e->devid),
1272 PCI_FUNC(e->devid),
1278 devid = e->devid;
1280 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1282 amd_iommu_alias_table[devid] = devid_to;
1287 "devid: %02x:%02x.%x flags: %02x "
1289 PCI_BUS_NUM(e->devid),
1290 PCI_SLOT(e->devid),
1291 PCI_FUNC(e->devid),
1297 devid_start = e->devid;
1305 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1307 PCI_BUS_NUM(e->devid),
1308 PCI_SLOT(e->devid),
1309 PCI_FUNC(e->devid),
1312 devid = e->devid;
1313 set_dev_entry_from_acpi(iommu, devid, e->flags,
1318 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
1320 PCI_BUS_NUM(e->devid),
1321 PCI_SLOT(e->devid),
1322 PCI_FUNC(e->devid),
1325 devid_start = e->devid;
1332 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
1333 PCI_BUS_NUM(e->devid),
1334 PCI_SLOT(e->devid),
1335 PCI_FUNC(e->devid));
1337 devid = e->devid;
1338 for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
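
The *_RANGE_START cases (1261, 1297, 1325) only latch devid_start (and, for aliases, devid_to); nothing is written to the device table until the matching DEV_RANGE_END at 1332-1338, whose loop applies the latched settings to every devid in [devid_start, devid]. The single-entry DEV_ALIAS case at 1282 shows what that loop does per device for aliases. A sketch of the latch-then-apply state machine, reduced to one range type:

    #include <stdint.h>
    #include <stddef.h>

    enum { DEV_SELECT_RANGE_START = 1, DEV_RANGE_END = 2 };

    struct ivhd_entry { uint8_t type; uint16_t devid; uint32_t flags; };

    static uint32_t applied[65536];   /* toy stand-in for DTE updates */

    static void parse(const struct ivhd_entry *e, size_t n)
    {
        uint16_t devid_start = 0;
        uint32_t flags = 0;

        for (size_t i = 0; i < n; i++) {
            switch (e[i].type) {
            case DEV_SELECT_RANGE_START:
                devid_start = e[i].devid;   /* latch; write nothing yet */
                flags = e[i].flags;
                break;
            case DEV_RANGE_END:
                /* apply to the whole inclusive range (lines 1337-1338) */
                for (uint32_t d = devid_start; d <= e[i].devid; ++d)
                    applied[d] |= flags;
                break;
            }
        }
    }

    int main(void)
    {
        struct ivhd_entry tbl[] = {
            { DEV_SELECT_RANGE_START, 0x0010, 0x5 },
            { DEV_RANGE_END,          0x0017, 0x0 },
        };
        parse(tbl, 2);
        return applied[0x0013] == 0x5 ? 0 : 1;
    }
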
1351 u16 devid;
1355 devid = (e->ext >> 8) & 0xffff;
1367 PCI_BUS_NUM(devid),
1368 PCI_SLOT(devid),
1369 PCI_FUNC(devid));
1371 ret = add_special_device(type, handle, &devid, false);
1376 * add_special_device might update the devid in case a
1380 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
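
For IVHD special-device entries (1351-1380) the devid is not taken from the usual field but from bits 8-23 of the extended word, hence the shift-and-mask at 1355. Note the truncated comment at 1376: add_special_device() may rewrite devid when the handle was already registered (e.g. via the command line), so the set_dev_entry_from_acpi() call at 1380 deliberately uses the possibly-updated value. The extraction itself:

    #include <stdint.h>

    /* bits 8..23 of the extended word carry the source devid */
    static uint16_t special_devid(uint32_t ext)
    {
        return (uint16_t)((ext >> 8) & 0xffff);
    }

    int main(void)
    {
        return special_devid(0x00a0ff00u) == 0xa0ff ? 0 : 1;
    }
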
1385 u16 devid;
1427 devid = e->devid;
1430 PCI_BUS_NUM(devid),
1431 PCI_SLOT(devid),
1432 PCI_FUNC(devid));
1436 ret = add_acpi_hid_device(hid, uid, &devid, false);
1441 * add_special_device might update the devid in case a
1445 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1565 iommu->devid = h->devid;
1662 amd_iommu_rlookup_table[iommu->devid] = NULL;
1679 u16 devid = ivhd->devid;
1685 if (ivhd->devid == devid)
1713 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
1714 PCI_FUNC(h->devid), h->cap_ptr,
1822 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1823 iommu->devid & 0xff);
2098 pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
2099 iommu->devid, iommu->dev->irq);
2172 e->devid_start = e->devid_end = m->devid;
2181 e->devid_start = m->devid;
2236 u32 devid;
2238 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2239 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2240 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
2246 u32 devid;
2248 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2249 amd_iommu_dev_table[devid].data[0] = 0ULL;
2250 amd_iommu_dev_table[devid].data[1] = 0ULL;
2256 u32 devid;
2261 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2262 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
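
Lines 2236-2262 are three whole-table passes at (re)initialization: mark every possible devid valid and translation-enabled, wipe data[0]/data[1], and, when interrupt remapping is active, set the IRQ-table-enable bit everywhere. The loop counter is u32 even though devids are u16 for a reason: with amd_iommu_last_bdf == 0xffff a u16 counter would wrap after the last iteration and loop forever. The shape of the passes, with stand-in bit numbers:

    #include <stdint.h>

    struct dte { uint64_t data[4]; };
    static struct dte dev_table[65536];
    static uint16_t last_bdf = 0xffff;

    /* stand-ins for DEV_ENTRY_VALID / DEV_ENTRY_TRANSLATION */
    #define ENTRY_VALID        0x00
    #define ENTRY_TRANSLATION  0x01

    static void init_device_table(void)
    {
        uint32_t devid;   /* u32 on purpose: u16 would wrap at 0xffff */

        for (devid = 0; devid <= last_bdf; ++devid) {
            dev_table[devid].data[0] |= 1ULL << ENTRY_VALID;
            dev_table[devid].data[0] |= 1ULL << ENTRY_TRANSLATION;
        }
    }

    static void uninit_device_table(void)
    {
        for (uint32_t devid = 0; devid <= last_bdf; ++devid) {
            dev_table[devid].data[0] = 0ULL;
            dev_table[devid].data[1] = 0ULL;
        }
    }

    int main(void)
    {
        init_device_table();
        uninit_device_table();
        return 0;
    }
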
2534 int devid, id = mpc_ioapic_id(idx);
2536 devid = get_ioapic_devid(id);
2537 if (devid < 0) {
2541 } else if (devid == IOAPIC_SB_DEVID) {
3055 u32 devid;
3078 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3083 early_ioapic_map[i].devid = devid;
3093 u32 devid;
3116 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3121 early_hpet_map[i].devid = devid;
3193 early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
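
The final hunks (3055-3193) parse the ivrs_ioapic=, ivrs_hpet=, and ivrs_acpihid= early command-line overrides; IVRS_GET_SBDF_ID packs segment, bus, device, and function into one 32-bit id. Assuming the packing follows the seg:bus:dev.fn layout used throughout this file (segment in the high 16 bits, then the usual bus/slot/function split):

    #include <stdint.h>
    #include <stdio.h>

    /* assumed packing: seg[31:16] bus[15:8] dev[7:3] fn[2:0] */
    static uint32_t sbdf_id(uint32_t seg, uint32_t bus,
                            uint32_t dev, uint32_t fn)
    {
        return ((seg & 0xffff) << 16) | ((bus & 0xff) << 8) |
               ((dev & 0x1f) << 3) | (fn & 0x7);
    }

    int main(void)
    {
        printf("%#x\n", sbdf_id(0x0000, 0x40, 0x14, 0x5));   /* -> 0x40a5 */
        return 0;
    }
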