Lines matching refs: pe

(Cross-reference listing: each entry below is a source line number followed by that line's text. The fragments are the lines referencing the identifier `pe` in the powernv PHB driver, arch/powerpc/platforms/powernv/pci-ioda.c.)
53 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
56 void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
68 if (pe->flags & PNV_IODA_PE_DEV)
69 strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
70 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
72 pci_domain_nr(pe->pbus), pe->pbus->number);
74 else if (pe->flags & PNV_IODA_PE_VF)
76 pci_domain_nr(pe->parent_dev->bus),
77 (pe->rid & 0xff00) >> 8,
78 PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
82 level, pfix, pe->pe_number, &vaf);
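The pe_level_printk() fragments above (lines 56-82) build the log prefix from the PE type: a device PE prints its device name (line 69), a bus PE prints domain:bus (line 72), and a VF PE decodes its RID into bus/slot/function (lines 76-78). A minimal standalone sketch of that RID decoding, with the usual PCI_SLOT/PCI_FUNC macros reproduced so the example compiles on its own; the RID value is hypothetical:

/* Decode an RID (bus << 8 | devfn) the way the VF branch does. */
#include <stdio.h>

#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
        unsigned int rid = 0x0230;          /* hypothetical: bus 0x02, devfn 0x30 */
        unsigned int bus = (rid & 0xff00) >> 8;
        unsigned int devfn = rid & 0xff;

        printf("%02x:%02x.%d\n", bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        return 0;                           /* prints 02:06.0 */
}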
160 int run = 0, pe, i;
165 for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
166 if (test_bit(pe, phb->ioda.pe_alloc)) {
178 for (i = pe; i < pe + count; i++) {
182 ret = &phb->ioda.pe_array[pe];
189 void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
191 struct pnv_phb *phb = pe->phb;
192 unsigned int pe_num = pe->pe_number;
194 WARN_ON(pe->pdev);
195 WARN_ON(pe->npucomp); /* NPUs for nvlink are not supposed to be freed */
196 kfree(pe->npucomp);
197 memset(pe, 0, sizeof(struct pnv_ioda_pe));
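pnv_ioda_alloc_pe() (lines 160-182) scans the PE-allocation bitmap from the top down for a run of `count` consecutive free PE numbers, claims the whole run, and returns the lowest PE in it; pnv_ioda_free_pe() zeroes the structure and releases the PE number (saved at line 192 before the memset wipes it). A self-contained sketch of the scan, with a plain unsigned long standing in for the phb->ioda.pe_alloc bitmap and locking elided:

#include <stdio.h>

static int alloc_pe_run(unsigned long *bitmap, int total, int count)
{
        int pe, i, run = 0;

        for (pe = total - 1; pe >= 0; pe--) {
                if (*bitmap & (1UL << pe)) {      /* in use: restart the run */
                        run = 0;
                        continue;
                }
                if (++run < count)
                        continue;
                for (i = pe; i < pe + count; i++) /* claim the whole run */
                        *bitmap |= 1UL << i;
                return pe;                        /* lowest PE of the run */
        }
        return -1;                                /* no room */
}

int main(void)
{
        unsigned long map = 0x0f;                 /* PEs 0-3 already taken */
        printf("got PE#%d\n", alloc_pe_run(&map, 32, 2));  /* -> PE#30 */
        return 0;
}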
382 struct pnv_ioda_pe *master_pe, *pe;
420 pe = &phb->ioda.pe_array[i];
422 phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
424 pe->flags |= PNV_IODA_PE_MASTER;
425 INIT_LIST_HEAD(&pe->slaves);
426 master_pe = pe;
428 pe->flags |= PNV_IODA_PE_SLAVE;
429 pe->master = master_pe;
430 list_add_tail(&pe->list, &master_pe->slaves);
530 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
535 if (pe->flags & PNV_IODA_PE_SLAVE) {
536 pe = pe->master;
537 if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
540 pe_no = pe->pe_number;
554 if (!(pe->flags & PNV_IODA_PE_MASTER))
557 list_for_each_entry(slave, &pe->slaves, list) {
570 struct pnv_ioda_pe *pe, *slave;
574 pe = &phb->ioda.pe_array[pe_no];
575 if (pe->flags & PNV_IODA_PE_SLAVE) {
576 pe = pe->master;
577 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
578 pe_no = pe->pe_number;
589 if (!(pe->flags & PNV_IODA_PE_MASTER))
593 list_for_each_entry(slave, &pe->slaves, list) {
610 struct pnv_ioda_pe *slave, *pe;
623 pe = &phb->ioda.pe_array[pe_no];
624 if (pe->flags & PNV_IODA_PE_SLAVE) {
625 pe = pe->master;
626 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
627 pe_no = pe->pe_number;
642 if (!(pe->flags & PNV_IODA_PE_MASTER))
645 list_for_each_entry(slave, &pe->slaves, list) {
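Three of the freeze-handling paths above (lines 535-540, 575-578, 624-627) repeat the same idiom: compound (M64) PEs keep their state on the master, so an operation arriving at a slave is first redirected to pe->master and then fanned out over the master's slaves list. Reduced to its core, with simplified non-kernel types:

/* Simplified stand-ins for struct pnv_ioda_pe and its flag bits. */
struct pe {
        unsigned int flags;
#define PE_MASTER 0x1                   /* PNV_IODA_PE_MASTER */
#define PE_SLAVE  0x2                   /* PNV_IODA_PE_SLAVE  */
        struct pe *master;              /* valid only when PE_SLAVE is set */
};

/* Redirect any compound-PE member to the PE that owns the state; the
 * kernel fragments WARN if the result is not flagged as a master. */
static struct pe *resolve_master(struct pe *pe)
{
        if (pe->flags & PE_SLAVE)
                pe = pe->master;
        return pe;
}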
730 struct pnv_ioda_pe *pe,
742 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
744 if (pe->flags & PNV_IODA_PE_MASTER) {
745 list_for_each_entry(slave, &pe->slaves, list)
758 ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
763 if (pe->flags & PNV_IODA_PE_MASTER) {
764 list_for_each_entry(slave, &pe->slaves, list) {
765 ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
771 if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
772 pdev = pe->pbus->self;
773 else if (pe->flags & PNV_IODA_PE_DEV)
774 pdev = pe->pdev->bus->self;
776 else if (pe->flags & PNV_IODA_PE_VF)
777 pdev = pe->parent_dev;
785 ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
797 struct pnv_ioda_pe *pe,
807 pe->pe_number,
814 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
818 rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
819 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
821 pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
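pnv_ioda_set_peltv() (lines 730-785) programs the PE Lookup Table Vector: the PE is entered into its own PELTV and those of its slaves, and the code then climbs the upstream bridge chain so every parent PE is allowed to freeze this one; pnv_ioda_unset_peltv() undoes the self-entry with OPAL_REMOVE_PE_FROM_DOMAIN (lines 818-819). A hedged sketch of the parent walk, assuming kernel context; pe_for_pdev() is a hypothetical helper standing in for the pci_dn-based lookup the real code uses, and error handling is elided:

struct pnv_ioda_pe *parent;

while (pdev) {
        parent = pe_for_pdev(pdev);     /* hypothetical: PE owning pdev, or NULL */
        if (parent)
                pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
        pdev = pdev->bus->self;         /* climb one bridge level */
}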
824 int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
832 if (pe->pbus) {
837 parent = pe->pbus->self;
838 if (pe->flags & PNV_IODA_PE_BUS_ALL)
839 count = resource_size(&pe->pbus->busn_res);
851 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
856 rid_end = pe->rid + (count << 8);
859 if (pe->flags & PNV_IODA_PE_VF)
860 parent = pe->parent_dev;
863 parent = pe->pdev->bus->self;
867 rid_end = pe->rid + 1;
871 for (rid = pe->rid; rid < rid_end; rid++)
879 pnv_ioda_unset_peltv(phb, pe, parent);
881 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
884 pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc);
886 pe->pbus = NULL;
887 pe->pdev = NULL;
889 pe->parent_dev = NULL;
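Both the deconfigure path above and the configure path below compute the same RID window: an RID is bus << 8 | devfn, so a PE that owns `count` buses (PNV_IODA_PE_BUS_ALL, lines 838-839) covers count << 8 consecutive RIDs, while a single-function PE covers exactly one (line 867). A standalone worked example of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int rid = 0x0200;                  /* bus 0x02, devfn 0x00 */
        unsigned int count = 3;                     /* hypothetical: buses 0x02..0x04 */
        unsigned int rid_end = rid + (count << 8);  /* first RID past the PE */

        printf("bus PE:    RIDs %04x..%04x\n", rid, rid_end - 1); /* 0200..04ff */
        printf("device PE: RIDs %04x..%04x\n", rid, rid);         /* just one   */
        return 0;
}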
895 int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
901 if (pe->pbus) {
906 if (pe->flags & PNV_IODA_PE_BUS_ALL)
907 count = resource_size(&pe->pbus->busn_res);
919 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
924 rid_end = pe->rid + (count << 8);
929 rid_end = pe->rid + 1;
938 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
941 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
950 pnv_ioda_set_peltv(phb, pe, true);
953 for (rid = pe->rid; rid < rid_end; rid++)
954 phb->ioda.pe_rmap[rid] = pe->pe_number;
958 pe->mve_number = 0;
962 pe->mve_number = pe->pe_number;
963 rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
965 pe_err(pe, "OPAL error %ld setting up MVE %x\n",
966 rc, pe->mve_number);
967 pe->mve_number = -1;
970 pe->mve_number, OPAL_ENABLE_MVE);
972 pe_err(pe, "OPAL error %ld enabling MVE %x\n",
973 rc, pe->mve_number);
974 pe->mve_number = -1;
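Read together, lines 938-974 give the bring-up order for a PE: bind the RID range to the PE in hardware, program the freeze domain, fill the kernel's reverse map so later lookups can find the PE, and, on IODA1 PHBs only, bind and enable an MSI Validation Entry. In outline, with kernel context assumed and return-code checks elided:

opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
                bcomp, dcomp, fcomp, OPAL_MAP_PE);  /* bind RID range */
pnv_ioda_set_peltv(phb, pe, true);                  /* freeze domain  */
for (rid = pe->rid; rid < rid_end; rid++)
        phb->ioda.pe_rmap[rid] = pe->pe_number;     /* reverse map    */
/* IODA1 only (lines 958-974): */
opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
opal_pci_set_mve_enable(phb->opal_id, pe->mve_number, OPAL_ENABLE_MVE);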
986 struct pnv_ioda_pe *pe;
996 pe = pnv_ioda_alloc_pe(phb, 1);
997 if (!pe) {
1010 pdn->pe_number = pe->pe_number;
1011 pe->flags = PNV_IODA_PE_DEV;
1012 pe->pdev = dev;
1013 pe->pbus = NULL;
1014 pe->mve_number = -1;
1015 pe->rid = dev->bus->number << 8 | pdn->devfn;
1016 pe->device_count++;
1018 pe_info(pe, "Associated device to PE\n");
1020 if (pnv_ioda_configure_pe(phb, pe)) {
1022 pnv_ioda_free_pe(pe);
1024 pe->pdev = NULL;
1030 list_add_tail(&pe->list, &phb->ioda.pe_list);
1032 return pe;
1044 struct pnv_ioda_pe *pe = NULL;
1053 pe = &phb->ioda.pe_array[pe_num];
1059 pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];
1062 if (!pe)
1063 pe = pnv_ioda_pick_m64_pe(bus, all);
1066 if (!pe)
1067 pe = pnv_ioda_alloc_pe(phb, 1);
1069 if (!pe) {
1075 pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
1076 pe->pbus = bus;
1077 pe->pdev = NULL;
1078 pe->mve_number = -1;
1079 pe->rid = bus->busn_res.start << 8;
1082 pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n",
1084 pe->pe_number);
1086 pe_info(pe, "Secondary bus %pad associated with PE#%x\n",
1087 &bus->busn_res.start, pe->pe_number);
1089 if (pnv_ioda_configure_pe(phb, pe)) {
1091 pnv_ioda_free_pe(pe);
1092 pe->pbus = NULL;
1097 list_add_tail(&pe->list, &phb->ioda.pe_list);
1099 return pe;
1106 struct pnv_ioda_pe *pe;
1130 pe = &phb->ioda.pe_array[pe_num];
1131 if (!pe->pdev)
1134 if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
1145 phb->ioda.pe_rmap[rid] = pe->pe_number;
1146 pe->device_count++;
1167 return pe;
1182 struct pnv_ioda_pe *pe;
1199 list_for_each_entry(pe, &phb->ioda.pe_list, list)
1200 pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV);
1210 struct pnv_ioda_pe *pe);
1216 struct pnv_ioda_pe *pe;
1219 pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
1220 if (!pe) {
1226 pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
1227 pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff);
1234 if (WARN_ON(!pe))
1237 pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number);
1244 if (!pe->dma_setup_done && !pci_is_bridge(pdev)) {
1247 pnv_pci_ioda1_setup_dma_pe(phb, pe);
1250 pnv_pci_ioda2_setup_dma_pe(phb, pe);
1259 pdn->pe_number = pe->pe_number;
1260 pe->device_count++;
1263 pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
1264 set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
1267 if (pe->table_group.group)
1268 iommu_add_device(&pe->table_group, &pdev->dev);
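pnv_pci_ioda_dma_dev_setup() (lines 1216-1268) resolves the device's bus/devfn to a PE through the reverse map filled in by pnv_ioda_configure_pe(), performs the PE's one-time DMA window setup, and then points the device at the PE's IOMMU table and bypass offset. A standalone sketch of the lookup itself, mirroring pnv_pci_bdfn_to_pe() with simplified types:

#include <stddef.h>

#define IODA_INVALID_PE (-1)

struct phb_stub {
        int pe_rmap[0x10000];                 /* RID (bus << 8 | devfn) -> PE# */
        struct pe_stub { int pe_number; } pe_array[512];
};

static struct pe_stub *bdfn_to_pe(struct phb_stub *phb, unsigned int bdfn)
{
        int pe_number = phb->pe_rmap[bdfn & 0xffff];

        if (pe_number == IODA_INVALID_PE)
                return NULL;                  /* device not mapped to a PE yet */
        return &phb->pe_array[pe_number];
}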
1288 static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe)
1307 table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL,
1323 rc = opal_pci_map_pe_dma_window(pe->phb->opal_id,
1324 pe->pe_number,
1326 (pe->pe_number << 1) + 0,
1332 pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n");
1336 pe_err(pe, "Error configuring 64-bit DMA bypass\n");
1345 struct pnv_ioda_pe *pe;
1350 pe = &phb->ioda.pe_array[pdn->pe_number];
1351 if (pe->tce_bypass_enabled) {
1352 u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
1365 /* pe->pdev should be set if it's a single device, pe->pbus if not */
1366 (pe->device_count == 1 || !pe->pbus) &&
1369 s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe);
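The dma_set_mask path (lines 1345-1369) can route a device around the TCE tables entirely: if the PE's bypass window is enabled and the device's DMA mask reaches tce_bypass_base plus the top of RAM, DMA goes direct; failing that, a PE with a single device may still get the TVE#0 trick from pnv_pci_ioda_dma_64bit_bypass(). A worked example of the mask check, using the 1ull << 59 base set at line 2083 and a hypothetical RAM size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t bypass_base = 1ull << 59;          /* pe->tce_bypass_base */
        uint64_t ram_top = 64ull << 30;             /* hypothetical 64 GiB */
        uint64_t top = bypass_base + ram_top - 1;   /* as at line 1352 */

        printf("48-bit mask ok: %d\n", ((1ull << 48) - 1) >= top);  /* 0 */
        printf("64-bit mask ok: %d\n", ~0ull >= top);               /* 1 */
        return 0;
}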
1393 struct pnv_ioda_pe *pe = container_of(tgl->table_group,
1395 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
1483 static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
1486 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
1487 unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
1493 static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
1497 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
1502 start |= (pe->pe_number & 0xFF);
1520 static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
1522 struct pnv_phb *phb = pe->phb;
1525 pnv_pci_phb3_tce_invalidate_pe(pe);
1528 pe->pe_number, 0, 0, 0);
1537 struct pnv_ioda_pe *pe = container_of(tgl->table_group,
1539 struct pnv_phb *phb = pe->phb;
1557 pnv_pci_phb3_tce_invalidate(pe, rm, shift,
1562 pe->pe_number, 1u << shift,
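Lines 1483-1502 show the two PHB3 invalidation flavours: a whole-PE kill, which is just an opcode with the PE number in the low byte, and a ranged kill that additionally encodes the page addresses. A standalone illustration of the whole-PE encoding; the opcode constant here is illustrative only, not the real hardware bit:

#include <stdio.h>
#include <stdint.h>

#define TCE_KILL_INVAL_PE (1ull << 62)      /* illustrative opcode bit */

int main(void)
{
        unsigned int pe_number = 0x42;
        uint64_t val = TCE_KILL_INVAL_PE | (pe_number & 0xFF); /* cf. line 1487 */

        printf("kill register value: %016llx\n", (unsigned long long)val);
        return 0;
}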
1631 static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe)
1637 if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) {
1638 pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight);
1643 if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) {
1644 pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight);
1645 } else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) {
1648 list_for_each_entry(pdev, &pe->pbus->devices, bus_list)
1650 } else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) {
1651 pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight);
1658 struct pnv_ioda_pe *pe)
1671 weight = pnv_pci_ioda_pe_dma_weight(pe);
1701 pe_warn(pe, "No available DMA32 segments\n");
1710 iommu_register_group(&pe->table_group, phb->hose->global_number,
1711 pe->pe_number);
1712 pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
1715 pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n",
1717 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
1733 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
1742 pe->pe_number,
1747 pe_err(pe, " Failed to configure 32-bit TCE table, err %lld\n",
1755 phb->ioda.dma32_segmap[i] = pe->pe_number;
1763 pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
1764 pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
1767 pe->dma_setup_done = true;
1774 pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
1782 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1784 struct pnv_phb *phb = pe->phb;
1791 pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n",
1800 pe->pe_number,
1801 (pe->pe_number << 1) + num,
1807 pe_err(pe, "Failed to configure TCE table, err %lld\n", rc);
1812 tbl, &pe->table_group);
1813 pnv_pci_ioda2_tce_invalidate_pe(pe);
1818 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
1820 uint16_t window_id = (pe->pe_number << 1) + 1;
1823 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
1828 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1829 pe->pe_number,
1831 pe->tce_bypass_base,
1834 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1835 pe->pe_number,
1837 pe->tce_bypass_base,
1841 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
1843 pe->tce_bypass_enabled = enable;
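Lines 1326, 1800-1801 and 1820 reveal the IODA2 window numbering: each PE's DMA windows are numbered from a pe_number << 1 base, with window 0 holding the default TCE table and window 1 used for the 64-bit bypass (or a second TCE window). A standalone worked example:

#include <stdio.h>

int main(void)
{
        unsigned int pe_number = 5;

        printf("TCE window:    %u\n", (pe_number << 1) + 0);  /* 10 */
        printf("bypass window: %u\n", (pe_number << 1) + 1);  /* 11 */
        return 0;
}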
1850 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1852 int nid = pe->phb->hose->node;
1853 __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
1876 static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
1918 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT,
1921 pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
1929 if (window_size > pe->phb->ioda.m32_pci_base) {
1930 res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift;
1933 iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end);
1935 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
1937 pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
1944 pnv_pci_ioda2_set_bypass(pe, true);
1951 if (pe->pdev)
1952 set_iommu_table_base(&pe->pdev->dev, tbl);
1960 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1962 struct pnv_phb *phb = pe->phb;
1965 pe_info(pe, "Removing DMA window #%d\n", num);
1967 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
1968 (pe->pe_number << 1) + num,
1972 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
1974 pnv_pci_ioda2_tce_invalidate_pe(pe);
2028 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
2033 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
2034 dev->dev.archdata.dma_offset = pe->tce_bypass_base;
2036 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
2037 pnv_ioda_setup_bus_dma(pe, dev->subordinate);
2043 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2046 struct iommu_table *tbl = pe->table_group.tables[0];
2048 pnv_pci_ioda2_set_bypass(pe, false);
2049 pnv_pci_ioda2_unset_window(&pe->table_group, 0);
2050 if (pe->pbus)
2051 pnv_ioda_setup_bus_dma(pe, pe->pbus);
2052 else if (pe->pdev)
2053 set_iommu_table_base(&pe->pdev->dev, NULL);
2059 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2062 pnv_pci_ioda2_setup_default_config(pe);
2063 if (pe->pbus)
2064 pnv_ioda_setup_bus_dma(pe, pe->pbus);
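The take/release ownership pair (lines 2043-2064) is the VFIO handshake: when userspace takes the IOMMU group, the kernel's default DMA setup is torn down so the guest can program its own windows; on release, the default 32-bit window and bypass are rebuilt and the PE's devices re-attached. In outline, using only calls visible in the fragments above (kernel context assumed, locking and error handling elided):

/* take ownership */
pnv_pci_ioda2_set_bypass(pe, false);              /* stop direct DMA        */
pnv_pci_ioda2_unset_window(&pe->table_group, 0);  /* drop default window    */
/* ... detach the kernel iommu table from the PE's devices ... */

/* release ownership */
pnv_pci_ioda2_setup_default_config(pe);           /* rebuild window+bypass  */
pnv_ioda_setup_bus_dma(pe, pe->pbus);             /* re-point child devices */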
2078 struct pnv_ioda_pe *pe)
2083 pe->tce_bypass_base = 1ull << 59;
2086 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
2090 pe->table_group.tce32_start = 0;
2091 pe->table_group.tce32_size = phb->ioda.m32_pci_base;
2092 pe->table_group.max_dynamic_windows_supported =
2094 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
2095 pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb);
2097 rc = pnv_pci_ioda2_setup_default_config(pe);
2102 pe->table_group.ops = &pnv_pci_ioda2_ops;
2103 iommu_register_group(&pe->table_group, phb->hose->global_number,
2104 pe->pe_number);
2106 pe->dma_setup_done = true;
2167 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2173 if (pe == NULL)
2177 if (pe->mve_number < 0)
2185 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2195 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2207 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2224 msg->address_hi, msg->address_lo, data, pe->pe_number);
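pnv_pci_ioda_msi_setup() (lines 2167-2224) binds one of the PHB's XIVE entries to the device's PE, then asks OPAL for the matching MSI address/data pair, with separate 32-bit and 64-bit variants. In outline, kernel context assumed and return-code checks elided:

opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
if (is_64)
        opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
                        &addr64, &data);          /* fills address_hi/lo */
else
        opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
                        &addr32, &data);          /* address_hi stays 0  */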
2255 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
2258 struct pnv_phb *phb = pe->phb;
2274 phb->ioda.io_segmap[index] = pe->pe_number;
2276 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
2279 __func__, rc, index, pe->pe_number);
2298 phb->ioda.m32_segmap[index] = pe->pe_number;
2300 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
2303 __func__, rc, index, pe->pe_number);
2318 static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
2328 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
2330 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
2332 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);
2339 if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
2342 pnv_ioda_setup_pe_res(pe,
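pnv_ioda_setup_pe_res() (lines 2255-2303) carves the PHB's IO and M32 windows into equal segments: every segment a resource overlaps is recorded in io_segmap/m32_segmap and handed to the PE via opal_pci_map_pe_mmio_window(). A standalone worked example of the index arithmetic, with hypothetical sizes:

#include <stdio.h>

int main(void)
{
        unsigned long seg_size = 0x1000000;   /* hypothetical 16 MiB segments */
        unsigned long start = 0x3000000;      /* resource offset in window */
        unsigned long end = 0x5ffffff;

        for (unsigned long index = start / seg_size; index <= end / seg_size; index++)
                printf("segment %lu -> this PE\n", index);  /* 3, 4, 5 */
        return 0;
}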
2373 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num];
2378 pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n",
2379 pe->rid, pe->device_count,
2380 (pe->flags & PNV_IODA_PE_DEV) ? "dev " : "",
2381 (pe->flags & PNV_IODA_PE_BUS) ? "bus " : "",
2382 (pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "",
2383 (pe->flags & PNV_IODA_PE_MASTER) ? "master " : "",
2384 (pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "",
2385 (pe->flags & PNV_IODA_PE_VF) ? "vf " : "");
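For reference, the flag bits decoded by that printout, with meanings inferred from how they are used throughout this listing:

PNV_IODA_PE_DEV      /* PE owns a single PCI function                    */
PNV_IODA_PE_BUS      /* PE owns one bus                                  */
PNV_IODA_PE_BUS_ALL  /* PE owns a bus plus all subordinate buses         */
PNV_IODA_PE_MASTER   /* head of a compound (M64) PE group                */
PNV_IODA_PE_SLAVE    /* compound-group member; state lives in pe->master */
PNV_IODA_PE_VF       /* PE owns an SR-IOV virtual function               */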
2571 struct pnv_ioda_pe *pe;
2588 pe = pnv_ioda_setup_bus_PE(bus, all);
2589 if (!pe)
2592 pnv_ioda_setup_pe_seg(pe);
2628 struct pnv_ioda_pe *pe;
2638 pe = pnv_ioda_setup_dev_PE(dev);
2639 if (!pe)
2648 struct pnv_ioda_pe *pe = container_of(table_group,
2650 struct pnv_phb *phb = pe->phb;
2654 pe_info(pe, "Removing DMA window #%d\n", num);
2656 if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
2659 rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
2662 pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
2674 static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
2676 struct iommu_table *tbl = pe->table_group.tables[0];
2679 if (!pe->dma_setup_done)
2682 rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
2687 if (pe->table_group.group) {
2688 iommu_group_put(pe->table_group.group);
2689 WARN_ON(pe->table_group.group);
2696 void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
2698 struct iommu_table *tbl = pe->table_group.tables[0];
2701 if (!pe->dma_setup_done)
2704 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
2706 pe_warn(pe, "OPAL error %lld release DMA window\n", rc);
2708 pnv_pci_ioda2_set_bypass(pe, false);
2709 if (pe->table_group.group) {
2710 iommu_group_put(pe->table_group.group);
2711 WARN_ON(pe->table_group.group);
2717 static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
2721 struct pnv_phb *phb = pe->phb;
2726 if (map[idx] != pe->pe_number)
2733 pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n",
2740 static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
2742 struct pnv_phb *phb = pe->phb;
2745 pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
2747 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
2751 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
2756 static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
2758 struct pnv_phb *phb = pe->phb;
2761 pe_info(pe, "Releasing PE\n");
2764 list_del(&pe->list);
2769 pnv_pci_ioda1_release_pe_dma(pe);
2772 pnv_pci_ioda2_release_pe_dma(pe);
2780 pnv_ioda_release_pe_seg(pe);
2781 pnv_ioda_deconfigure_pe(pe->phb, pe);
2784 if (pe->flags & PNV_IODA_PE_MASTER) {
2785 list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
2797 if (phb->ioda.root_pe_idx == pe->pe_number)
2800 pnv_ioda_free_pe(pe);
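pnv_ioda_release_pe() (lines 2756-2800) is the mirror image of bring-up and runs the steps in reverse: DMA windows, then MMIO segments, then the hardware RID/PELTV binding, then the compound-PE links, and finally the PE number itself, unless it is the PHB's reserved root PE (line 2797). In outline, using only calls from the fragments above:

list_del(&pe->list);                     /* off the PHB's pe_list   */
pnv_pci_ioda2_release_pe_dma(pe);        /* or the IODA1 variant    */
pnv_ioda_release_pe_seg(pe);             /* unmap IO/M32 segments   */
pnv_ioda_deconfigure_pe(pe->phb, pe);    /* clear RID map and PELTV */
/* unlink slaves, then free the PE number - except for the root PE */
pnv_ioda_free_pe(pe);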
2807 struct pnv_ioda_pe *pe;
2834 pe = &phb->ioda.pe_array[pdn->pe_number];
2837 WARN_ON(--pe->device_count < 0);
2838 if (pe->device_count == 0)
2839 pnv_ioda_release_pe(pe);
2845 struct eeh_pe *eehpe = edev ? edev->pe : NULL;
2862 struct pnv_ioda_pe *pe;
2864 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
2865 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
2868 if (!pe->pbus)
2871 if (bus->number == ((pe->rid >> 8) & 0xFF)) {
2872 pe->pbus = bus;
3010 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);