Lines Matching defs:ioc

225 struct ioc {
253 struct ioc *next; /* Linked list of discovered iocs */
255 unsigned int hw_path; /* the hardware path this ioc is associated with */
260 static struct ioc *ioc_list;
295 #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \
299 idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
304 ioc->res_hint = res_idx + (size >> 3); \
310 u##size *res_ptr = (u##size *)&((ioc)->res_map[ioc->res_hint & ~((size >> 3) - 1)]); \
311 u##size *res_end = (u##size *)&(ioc)->res_map[ioc->res_size]; \
312 CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
313 res_ptr = (u##size *)&(ioc)->res_map[0]; \
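
Taken together, the two macros implement a hinted, two-pass bitmap scan: CCIO_FIND_FREE_MAPPING starts at ioc->res_hint (rounded down to a word boundary) and runs to the end of the resource map, then wraps to index 0; CCIO_SEARCH_LOOP claims the first size-bit word whose masked bits are all clear and records the next hint. A minimal userspace model of the 8-bit case (names and sizes are hypothetical):

/* Userspace model of the hinted two-pass bitmap search the macros
 * above expand to: scan from the hint to the end of the map, then
 * wrap and scan from the start. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RES_SIZE 16 /* bytes in the toy resource map */

static uint8_t res_map[RES_SIZE];
static unsigned int res_hint;

/* Claim one free byte-wide slot (mask 0xff, i.e. an 8-page run);
 * return its byte index, or -1 if the map is full. */
static int find_free_mapping8(void)
{
    unsigned int i;

    for (i = res_hint; i < RES_SIZE; i++)   /* first pass: hint..end */
        if (res_map[i] == 0)
            goto found;
    for (i = 0; i < res_hint; i++)          /* second pass: wrap to 0 */
        if (res_map[i] == 0)
            goto found;
    return -1;
found:
    res_map[i] = 0xff;  /* models *res_ptr |= *mask_ptr */
    res_hint = i + 1;   /* models ioc->res_hint = res_idx + (size >> 3) */
    return (int)i;
}

int main(void)
{
    memset(res_map, 0xff, 8);   /* pretend the low half is busy */
    printf("first free slot: %d\n", find_free_mapping8()); /* -> 8 */
    printf("next free slot:  %d\n", find_free_mapping8()); /* -> 9 */
    return 0;
}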
331 * ccio_alloc_range - Allocate pages in the ioc's resource map.
332 * @ioc: The I/O Controller.
336 * This function searches the resource map of the ioc to locate a range
340 ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
374 CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
376 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
379 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
381 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
384 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
397 __func__, res_idx, ioc->res_hint);
406 ioc->avg_search[ioc->avg_idx++] = cr_start;
407 ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
408 ioc->used_pages += pages_needed;
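
The mask arguments above encode a size-class scheme: a request is rounded up to the narrowest of an 8-, 16-, 32-, or 64-bit word so that a single compare tests the whole run at once, and each successful search also feeds the avg_search ring buffer (CCIO_SEARCH_SAMPLE entries) and the used_pages counter. A sketch of the class selection; the driver appears to treat larger runs as fatal, here they just return -1:

/* Model of the size-class choice in ccio_alloc_range(). */
#include <stdio.h>

static int mask_bits(unsigned long pages_needed)
{
    if (pages_needed <= 8)  return 8;   /* CCIO_FIND_FREE_MAPPING(ioc, idx, 0xff, 8) */
    if (pages_needed <= 16) return 16;  /* ..., 0xffff, 16 */
    if (pages_needed <= 32) return 32;  /* ..., ~(unsigned int)0, 32 */
    if (pages_needed <= 64) return 64;  /* ..., ~0UL, 64 (64-bit kernels only) */
    return -1;                          /* larger requests are not searched */
}

int main(void)
{
    printf("%d %d %d\n", mask_bits(3), mask_bits(12), mask_bits(33)); /* 8 16 64 */
    return 0;
}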
416 #define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
417 u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
422 * ccio_free_range - Free pages from the ioc's resource map.
423 * @ioc: The I/O Controller.
431 ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
444 ioc->used_pages -= pages_mapped;
451 CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
453 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
456 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
458 CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
461 CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
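
Freeing is the mirror image: CCIO_FREE_MAPPINGS re-reads the word at res_idx with the same mask width and clears the bits that ccio_alloc_range set. A model of the 8-bit case, with the driver's consistency check expressed as an assert:

/* Model of CCIO_FREE_MAPPINGS with an 8-bit mask: verify the run is
 * actually allocated, then clear it. */
#include <stdint.h>
#include <assert.h>
#include <stdio.h>

static uint8_t res_map[16];

static void free_mapping8(unsigned int res_idx, uint8_t mask)
{
    assert((res_map[res_idx] & mask) == mask); /* stands in for a BUG_ON */
    res_map[res_idx] &= ~mask;                 /* models *res_ptr &= ~(mask) */
}

int main(void)
{
    res_map[2] = 0xff;          /* as left behind by an allocation */
    free_mapping8(2, 0xff);
    printf("slot 2 now: %#x\n", res_map[2]); /* -> 0 */
    return 0;
}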
618 * @ioc: The I/O Controller.
627 ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
629 u32 chain_size = 1 << ioc->chainid_shift;
635 WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
643 * @ioc: The I/O Controller.
660 ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
671 char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);
673 BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
687 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
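
ccio_clear_io_tlb purges the I/O TLB one chain at a time: the chain size is 1 << ioc->chainid_shift, the offset bits of iovp are masked off, and CMD_TLB_PURGE is issued repeatedly while stepping the page address by one chain. A userspace model; the opcode value here is illustrative, not the hardware's:

#include <stdint.h>
#include <stdio.h>

#define CMD_TLB_PURGE 0x33        /* illustrative opcode, not the real value */

static void clear_io_tlb(uint32_t iovp, size_t byte_cnt, unsigned chainid_shift)
{
    uint32_t chain_size = 1u << chainid_shift;

    iovp &= ~(uint32_t)0xfff;     /* keep the page number, drop the offset */
    byte_cnt += chain_size;       /* round the range up to whole chains */
    while (byte_cnt > chain_size) {
        /* stands in for WRITE_U32(CMD_TLB_PURGE | iovp, ...->io_command) */
        printf("purge chain at iovp %#x (cmd %#x)\n", iovp, CMD_TLB_PURGE | iovp);
        iovp += chain_size;
        byte_cnt -= chain_size;
    }
}

int main(void)
{
    clear_io_tlb(0x4000123, 3 * 8192, 13); /* purges three 8 KiB chains */
    return 0;
}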
728 struct ioc *ioc;
736 ioc = GET_IOC(dev);
737 if (!ioc)
747 spin_lock_irqsave(&ioc->res_lock, flags);
750 ioc->msingle_calls++;
751 ioc->msingle_pages += size >> IOVP_SHIFT;
754 idx = ccio_alloc_range(ioc, dev, size);
757 pdir_start = &(ioc->pdir_base[idx]);
778 spin_unlock_irqrestore(&ioc->res_lock, flags);
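
The map_single path is: round the request up to I/O pages, take res_lock, bump the mapping statistics, let ccio_alloc_range pick a pdir index, then fill one pdir entry per page starting at pdir_start. A stand-in model of that flow; the real driver writes entries through ccio_io_pdir_entry with hint bits, and the allocator here is a trivial bump allocator:

#include <stdint.h>
#include <stdio.h>

#define IOVP_SHIFT 12
#define IOVP_SIZE  (1UL << IOVP_SHIFT)

static uint64_t pdir_base[256];          /* stand-in for ioc->pdir_base */

static unsigned alloc_range(size_t size) /* stand-in for ccio_alloc_range() */
{
    static unsigned next;                /* trivially bump-allocates */
    unsigned idx = next;
    next += (size + IOVP_SIZE - 1) >> IOVP_SHIFT;
    return idx;
}

static uint64_t map_single(uint64_t paddr, size_t size)
{
    uint64_t offset = paddr & (IOVP_SIZE - 1);
    unsigned idx = alloc_range(offset + size);
    uint64_t *pdir_start = &pdir_base[idx];
    uint64_t pa = paddr & ~(IOVP_SIZE - 1);
    long nr = (long)((offset + size + IOVP_SIZE - 1) >> IOVP_SHIFT);

    while (nr-- > 0) {
        *pdir_start++ = pa;  /* the driver calls ccio_io_pdir_entry() here */
        pa += IOVP_SIZE;
    }
    return ((uint64_t)idx << IOVP_SHIFT) | offset; /* iova keeps the offset */
}

int main(void)
{
    uint64_t iova = map_single(0x12345678, 8192);
    printf("iova %#llx\n", (unsigned long long)iova);
    return 0;
}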
806 struct ioc *ioc;
811 ioc = GET_IOC(dev);
812 if (!ioc) {
813 WARN_ON(!ioc);
824 spin_lock_irqsave(&ioc->res_lock, flags);
827 ioc->usingle_calls++;
828 ioc->usingle_pages += size >> IOVP_SHIFT;
831 ccio_mark_invalid(ioc, iova, size);
832 ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
833 spin_unlock_irqrestore(&ioc->res_lock, flags);
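
Unmapping reverses the same steps under the same lock: ccio_mark_invalid clears the pdir entries for the range (triggering the TLB purge above), then ccio_free_range returns the bits to the resource map, one bit per I/O page. A compact stand-in model:

#include <stdint.h>
#include <stddef.h>

#define IOVP_SHIFT 12
#define IOVP_SIZE  (1UL << IOVP_SHIFT)

static uint64_t pdir_base[256];
static uint8_t  res_bitmap[256 / 8];

static void unmap_range(uint64_t iova, size_t size)
{
    unsigned idx = (unsigned)(iova >> IOVP_SHIFT);
    size_t   nr  = (size + IOVP_SIZE - 1) >> IOVP_SHIFT;

    for (size_t i = 0; i < nr; i++)
        pdir_base[idx + i] = 0;  /* models ccio_mark_invalid() */
    for (size_t i = 0; i < nr; i++)
        res_bitmap[(idx + i) >> 3] &= ~(1u << ((idx + i) & 7)); /* ccio_free_range() */
}

int main(void) { unmap_range(0x8000, 2 * IOVP_SIZE); return 0; }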
911 struct ioc *ioc;
919 ioc = GET_IOC(dev);
920 if (!ioc)
937 spin_lock_irqsave(&ioc->res_lock, flags);
940 ioc->msg_calls++;
951 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);
961 filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);
963 spin_unlock_irqrestore(&ioc->res_lock, flags);
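
The scatter-gather path leans on the shared parisc iommu helpers: iommu_coalesce_chunks merges entries that can share one mapping and reserves IOVA through the ccio_alloc_range callback, after which iommu_fill_pdir writes the pdir entries via ccio_io_pdir_entry (the unmap side is built from repeated single unmaps). A toy model of the coalescing idea, with hypothetical types:

#include <stdint.h>
#include <stdio.h>

struct chunk { uint64_t addr; size_t len; };

/* Merge adjacent chunks in place; return how many runs remain. */
static size_t coalesce(struct chunk *c, size_t n)
{
    size_t out = 0;
    for (size_t i = 1; i < n; i++) {
        if (c[out].addr + c[out].len == c[i].addr) /* contiguous with the run */
            c[out].len += c[i].len;                /* grow the current run */
        else
            c[++out] = c[i];                       /* start a new run */
    }
    return n ? out + 1 : 0;
}

int main(void)
{
    struct chunk sg[] = { {0x1000, 0x1000}, {0x2000, 0x1000}, {0x9000, 0x1000} };
    printf("%zu runs\n", coalesce(sg, 3)); /* -> 2 */
    return 0;
}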
990 struct ioc *ioc;
993 ioc = GET_IOC(dev);
994 if (!ioc) {
995 WARN_ON(!ioc);
1003 ioc->usg_calls++;
1009 ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
1036 struct ioc *ioc = ioc_list;
1038 while (ioc != NULL) {
1039 unsigned int total_pages = ioc->res_size << 3;
1045 seq_printf(m, "%s\n", ioc->name);
1048 (ioc->cujo20_bug ? "yes" : "no"));
1055 total_pages - ioc->used_pages, ioc->used_pages,
1056 (int)(ioc->used_pages * 100 / total_pages));
1060 ioc->res_size, total_pages);
1063 min = max = ioc->avg_search[0];
1065 avg += ioc->avg_search[j];
1066 if(ioc->avg_search[j] > max)
1067 max = ioc->avg_search[j];
1068 if(ioc->avg_search[j] < min)
1069 min = ioc->avg_search[j];
1076 ioc->msingle_calls, ioc->msingle_pages,
1077 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1080 min = ioc->usingle_calls - ioc->usg_calls;
1081 max = ioc->usingle_pages - ioc->usg_pages;
1086 ioc->msg_calls, ioc->msg_pages,
1087 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1090 ioc->usg_calls, ioc->usg_pages,
1091 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1094 ioc = ioc->next;
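
The statistics printed here are integer fixed point: utilization is used_pages * 100 / total_pages, and the per-call averages are scaled by 1000 (so 1300 reads as 1.3 pages per call); the subtractions at lines 1080-1081 appear to separate the true single-page counts from those generated by the sg path, since sg unmapping is built on single unmaps. For example:

#include <stdio.h>

int main(void)
{
    unsigned long total_pages = 32768, used_pages = 1234;
    unsigned long msingle_calls = 700, msingle_pages = 910;

    printf("utilization: %lu%%\n", used_pages * 100 / total_pages);   /* 3% */
    printf("avg %lu/1000 pages per map_single()\n",
           msingle_pages * 1000 / msingle_calls); /* 1300 -> 1.3 pages/call */
    return 0;
}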
1102 struct ioc *ioc = ioc_list;
1104 while (ioc != NULL) {
1105 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
1106 ioc->res_size, false);
1108 ioc = ioc->next;
1117 * ccio_find_ioc - Find the ioc in the ioc_list
1118 * @hw_path: The hardware path of the ioc.
1120 * This function searches the ioc_list for an ioc that matches
1123 static struct ioc * ccio_find_ioc(int hw_path)
1126 struct ioc *ioc;
1128 ioc = ioc_list;
1130 if (ioc->hw_path == hw_path)
1131 return ioc;
1133 ioc = ioc->next;
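
ccio_find_ioc is a plain linear walk of the discovery list keyed on hw_path. A model, assuming the driver bounds the walk with a MAX_IOC-style limit:

#include <stddef.h>

struct ioc { struct ioc *next; unsigned int hw_path; };

#define MAX_IOC 8   /* assumption: the driver caps the walk similarly */

static struct ioc *find_ioc(struct ioc *list, unsigned int hw_path)
{
    for (int i = 0; list && i < MAX_IOC; i++, list = list->next)
        if (list->hw_path == hw_path)
            return list;
    return NULL;
}

int main(void)
{
    struct ioc b = { 0, 10 }, a = { &b, 8 };
    return find_ioc(&a, 10) == &b ? 0 : 1;
}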
1165 struct ioc *ioc = ccio_get_iommu(dev);
1168 ioc->cujo20_bug = 1;
1169 res_ptr = ioc->res_map;
1172 while (idx < ioc->res_size) {
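
The CUJO 2.0 workaround poisons the resource map rather than patching hardware: starting from the faulting I/O page, it marks a byte of the bitmap (eight pages) busy at a fixed stride so those IOVAs are never handed out. A model, assuming a 256 MB stride for the driver's CUJO_20_STEP:

#include <stdint.h>

#define IOVP_SHIFT   12
#define CUJO_20_STEP 0x10000000UL      /* assumption: 256 MB stride */

static uint8_t res_map[4096];
static const unsigned res_size = sizeof(res_map);

static void cujo20_fixup(uint64_t iovp)
{
    unsigned idx = (unsigned)((iovp >> IOVP_SHIFT) >> 3); /* byte in the bitmap */

    while (idx < res_size) {
        res_map[idx] |= 0xff;   /* poison eight pages at this address */
        idx += (unsigned)((CUJO_20_STEP >> IOVP_SHIFT) >> 3);
    }
}

int main(void) { cujo20_fixup(0); return res_map[0] == 0xff ? 0 : 1; }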
1223 * @ioc: The I/O Controller.
1230 ccio_ioc_init(struct ioc *ioc)
1277 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1279 BUG_ON(ioc->pdir_size > 8 * 1024 * 1024); /* max pdir size <= 8MB */
1282 BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));
1285 __func__, ioc->ioc_regs,
1290 ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
1291 get_order(ioc->pdir_size));
1292 if(NULL == ioc->pdir_base) {
1295 memset(ioc->pdir_base, 0, ioc->pdir_size);
1297 BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
1298 DBG_INIT(" base %p\n", ioc->pdir_base);
1301 ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
1302 DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
1304 ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
1305 get_order(ioc->res_size));
1306 if(NULL == ioc->res_map) {
1309 memset(ioc->res_map, 0, ioc->res_size);
1312 ioc->res_hint = 16;
1315 spin_lock_init(&ioc->res_lock);
1321 ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
1322 DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);
1327 WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
1328 &ioc->ioc_regs->io_chain_id_mask);
1330 WRITE_U32(virt_to_phys(ioc->pdir_base),
1331 &ioc->ioc_regs->io_pdir_base);
1336 WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);
1341 WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
1342 WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);
1345 WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
1346 &ioc->ioc_regs->io_command);
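
The sizing chain in ccio_ioc_init is: one u64 pdir entry per I/O page (pdir_size, capped at 8 MB), one resource-map bit per pdir entry (res_size = entries >> 3), and a chainid_shift derived from the IOVA space order that places the hardware chain-ID field. A worked example for a hypothetical 256 MB IOVA space with 4 KB I/O pages:

#include <stdio.h>
#include <stdint.h>

#define IOVP_SHIFT 12
#define IOVP_SIZE  (1UL << IOVP_SHIFT)

int main(void)
{
    unsigned long iova_space_size = 256UL << 20;   /* 256 MB of IOVA */
    unsigned long pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(uint64_t);
    unsigned long res_size  = (pdir_size / sizeof(uint64_t)) >> 3;

    printf("pdir: %lu KB (one u64 per I/O page)\n", pdir_size >> 10);    /* 512 KB */
    printf("res_map: %lu KB (one bit per pdir entry)\n", res_size >> 10); /* 8 KB */
    return 0;
}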
1383 static int __init ccio_init_resources(struct ioc *ioc)
1385 struct resource *res = ioc->mmio_region;
1389 snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
1391 ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
1392 ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
1445 * Some other issues: one of the resources in the ioc may be unassigned.
1452 struct ioc *ioc = ccio_get_iommu(dev);
1453 if (!ioc)
1456 parent = ioc->mmio_region;
1468 &ioc->ioc_regs->io_io_low);
1470 &ioc->ioc_regs->io_io_high);
1474 &ioc->ioc_regs->io_io_low_hv);
1476 &ioc->ioc_regs->io_io_high_hv);
1489 struct ioc *ioc = ccio_get_iommu(dev);
1491 if (!ioc) {
1493 } else if ((ioc->mmio_region->start <= res->start) &&
1494 (res->end <= ioc->mmio_region->end)) {
1495 parent = ioc->mmio_region;
1496 } else if (((ioc->mmio_region + 1)->start <= res->start) &&
1497 (res->end <= (ioc->mmio_region + 1)->end)) {
1498 parent = ioc->mmio_region + 1;
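
Both resource-claiming paths resolve to the same question: which of the ioc's two MMIO windows fully contains the child range. A minimal model of that containment test:

#include <stdint.h>
#include <stddef.h>

struct region { uint64_t start, end; };

static struct region *pick_parent(struct region win[2],
                                  uint64_t start, uint64_t end)
{
    for (int i = 0; i < 2; i++)
        if (win[i].start <= start && end <= win[i].end)
            return &win[i];
    return NULL; /* not inside either window */
}

int main(void)
{
    struct region win[2] = { {0xf0000000, 0xf7ffffff}, {0xf8000000, 0xffffffff} };
    return pick_parent(win, 0xf9000000, 0xf90fffff) == &win[1] ? 0 : 1;
}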
1522 struct ioc *ioc, **ioc_p = &ioc_list;
1525 ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
1526 if (ioc == NULL) {
1531 ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";
1533 printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
1539 *ioc_p = ioc;
1541 ioc->hw_path = dev->hw_path;
1542 ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
1543 if (!ioc->ioc_regs) {
1544 kfree(ioc);
1547 ccio_ioc_init(ioc);
1548 if (ccio_init_resources(ioc)) {
1549 iounmap(ioc->ioc_regs);
1550 kfree(ioc);
1559 hba->iommu = ioc;
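
The probe sequence visible above is: kzalloc the ioc, pick its name from the hversion, append it to ioc_list, ioremap the register page, then run ccio_ioc_init and ccio_init_resources, unwinding with iounmap and kfree on failure. A sketch of that shape with stand-in functions; unlike the listing, it publishes the new ioc only after setup succeeds:

#include <stdlib.h>

struct ioc { struct ioc *next; void *regs; };
static struct ioc *ioc_list;

static void *map_regs(void) { return malloc(4096); }      /* ioremap() stand-in */
static int   init_resources(struct ioc *ioc) { (void)ioc; return 0; }

static int probe(void)
{
    struct ioc **ioc_p = &ioc_list, *ioc;

    while (*ioc_p)                   /* append at the tail of the list */
        ioc_p = &(*ioc_p)->next;

    ioc = calloc(1, sizeof(*ioc));   /* kzalloc() stand-in */
    if (!ioc)
        return -1;
    ioc->regs = map_regs();
    if (!ioc->regs)
        goto out_free;
    if (init_resources(ioc))
        goto out_unmap;
    *ioc_p = ioc;                    /* publish on the discovery list */
    return 0;

out_unmap:
    free(ioc->regs);                 /* iounmap() stand-in */
out_free:
    free(ioc);
    return -1;
}

int main(void) { return probe(); }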