Lines matching defs:ioc (struct ioc references in the PA-RISC SBA IOMMU driver, sba_iommu.c)
191 * @ioc: IO MMU structure which owns the pdir we are interested in.
198 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
201 u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]);
202 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);
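
The two pointer computations above just round downward: the pdir index to the start of its bitmap word, and the resource-map byte offset to an unsigned-long boundary. A minimal standalone sketch of that arithmetic (names and values illustrative, not from the driver):

#include <assert.h>

#define BITS_PER_LONG (8 * (unsigned int)sizeof(unsigned long))

int main(void)
{
	unsigned int pide = 0x1234;	/* arbitrary example index */

	/* first pdir index in the same bitmap word */
	unsigned int word_start = pide & ~(BITS_PER_LONG - 1);
	/* byte offset of the unsigned long in res_map holding this bit */
	unsigned int byte_off = (pide >> 3) & ~(unsigned int)(sizeof(unsigned long) - 1);

	assert(word_start % BITS_PER_LONG == 0);
	assert(byte_off % sizeof(unsigned long) == 0);
	return 0;
}
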
224 * @ioc: IO MMU structure which owns the pdir we are interested in.
230 sba_check_pdir(struct ioc *ioc, char *msg)
232 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
233 u32 *rptr = (u32 *) ioc->res_map; /* resource map ptr */
234 u64 *pptr = ioc->pdir_base; /* pdir ptr */
250 sba_dump_pdir_entry(ioc, msg, pide);
267 * @ioc: IO MMU structure which owns the pdir we are interested in.
274 sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
306 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
307 #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
310 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
311 #define SBA_IOVP(ioc,iova) (iova)
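
With a nonzero ibase (the first macro variant), an IOVA is the OR of the window base, the page-aligned translation offset (iovp), and the in-page byte offset; SBA_IOVP masks the base back off. A round-trip sketch with made-up values (the mask here is chosen to keep just the page-number bits, purely for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ibase     = 0xfffffffc00000000ULL;	/* assumed window base */
	uint64_t iovp_mask = 0x00000000fffff000ULL;	/* strips base and byte offset */
	uint64_t iovp      = 0x12000;			/* page-aligned translation index */
	uint64_t offset    = 0x345;			/* byte offset within the page */

	uint64_t iova = ibase | iovp | offset;		/* what SBA_IOVA() composes */
	uint64_t back = iova & iovp_mask;		/* what SBA_IOVP() recovers */

	printf("iova=%#llx iovp=%#llx\n",
	       (unsigned long long)iova, (unsigned long long)back);
	return back == iovp ? 0 : 1;
}
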
319 static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
322 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
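
The helper maps a (word pointer, bit offset) pair in res_map back to a pdir index: the byte distance into the map times 8 (the << 3), plus the bit position within that word. An illustrative restatement, not the driver's code:

static unsigned long pide_of(unsigned char *res_map,
			     unsigned long *res_ptr, unsigned int bitshiftcnt)
{
	/* bytes into the map * 8 bits/byte, plus the bit within the word */
	return (((unsigned long)res_ptr - (unsigned long)res_map) << 3)
		+ bitshiftcnt;
}
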
328 * @ioc: IO MMU structure which owns the pdir we are interested in.
336 sba_search_bitmap(struct ioc *ioc, struct device *dev,
339 unsigned long *res_ptr = ioc->res_hint;
340 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
349 BUG_ON(ioc->ibase & ~IOVP_MASK);
350 shift = ioc->ibase >> IOVP_SHIFT;
358 tpide = ptr_to_pide(ioc, res_ptr, 0);
370 ioc->res_bitshift = 0;
379 uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
393 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
411 ioc->res_bitshift = bitshiftcnt + bits_wanted;
416 ioc->res_hint = (unsigned long *) ioc->res_map;
417 ioc->res_bitshift = 0;
419 ioc->res_hint = res_ptr;
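
The search scans res_map starting at res_hint for a run of free bits and, on reaching the end, resets the hint to the start of the map for a second pass. A minimal first-fit sketch of the single-page case (the driver additionally handles multi-bit runs, power-of-two alignment, and the rotating hint):

#include <limits.h>

static int find_and_set_free_bit(unsigned long *map, unsigned long nbits)
{
	const unsigned long bpl = sizeof(unsigned long) * CHAR_BIT;

	for (unsigned long i = 0; i < nbits; i++) {
		unsigned long *w = &map[i / bpl];
		unsigned long  m = 1UL << (i % bpl);

		if (!(*w & m)) {
			*w |= m;	/* claim the page */
			return (int)i;	/* this is the pide */
		}
	}
	return -1;	/* map full: caller resets the hint and retries */
}
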
427 * @ioc: IO MMU structure which owns the pdir we are interested in.
434 sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
442 pide = sba_search_bitmap(ioc, dev, pages_needed);
443 if (pide >= (ioc->res_size << 3)) {
444 pide = sba_search_bitmap(ioc, dev, pages_needed);
445 if (pide >= (ioc->res_size << 3))
447 __FILE__, ioc->ioc_hpa);
452 if (0x00 != ((u8 *) ioc->pdir_base)[pide * sizeof(u64) + 7]) {
453 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
459 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
460 ioc->res_bitshift);
469 ioc->avg_search[ioc->avg_idx++] = cr_start;
470 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
472 ioc->used_pages += pages_needed;
481 * @ioc: IO MMU structure which owns the pdir we are interested in.
485 * clear bits in the ioc's resource map
488 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
490 unsigned long iovp = SBA_IOVP(ioc, iova);
493 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
505 ioc->used_pages -= bits_not_wanted;
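
Freeing reverses the allocation: SBA_IOVP strips the window base, the resulting pdir index locates the resource-map word, and the corresponding run of bits is cleared. A simplified single-word sketch (assumes the run does not cross a word boundary, as the driver arranges for its small, aligned allocations):

#include <limits.h>

static void clear_run(unsigned long *map, unsigned int pide, unsigned int bits)
{
	const unsigned int bpl = (unsigned int)sizeof(unsigned long) * CHAR_BIT;

	/* assumes 0 < bits < bpl and the run stays inside one word */
	map[pide / bpl] &= ~(((1UL << bits) - 1) << (pide % bpl));
}
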
519 #define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
592 * @ioc: IO MMU structure which owns the pdir we are interested in.
607 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
609 u32 iovp = (u32) SBA_IOVP(ioc,iova);
610 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
620 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
661 WRITE_REG(SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
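
Invalidation is two steps: clobber the valid indication in the pdir entry, then write the IOVA to the IOC's purge register (IOC_PCOM) so the IO TLB drops any cached translation. A shape-only sketch; the real entry format and register protocol are hardware-specific, and the valid-bit position below is an assumption for illustration:

#include <stdint.h>

static void mark_invalid(volatile uint64_t *pdir_ptr,
			 volatile uint64_t *pcom_reg, uint64_t iova)
{
	*pdir_ptr &= ~1ULL;	/* assumed: low bit = valid */
	*pcom_reg = iova;	/* purge the stale IO TLB entry */
}
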
673 struct ioc *ioc;
681 ioc = GET_IOC(dev);
682 if (!ioc)
689 return (int)(mask >= (ioc->ibase - 1 +
690 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE)));
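
The test accepts a device only if its DMA mask reaches the top of the IOVA window: ibase plus one IOVP_SIZE page per pdir entry. With assumed example numbers, a 2 MiB pdir holds 262144 u64 entries, which at 4 KiB per page spans 1 GiB above ibase. A sketch of the same comparison:

static int mask_covers_window(unsigned long long mask,
			      unsigned long long ibase,
			      unsigned long pdir_size,	/* bytes */
			      unsigned long iovp_size)	/* bytes per IO page */
{
	unsigned long entries = pdir_size / sizeof(unsigned long long);

	return mask >= ibase - 1 + (unsigned long long)entries * iovp_size;
}
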
707 struct ioc *ioc;
714 ioc = GET_IOC(dev);
715 if (!ioc)
724 spin_lock_irqsave(&ioc->res_lock, flags);
726 sba_check_pdir(ioc,"Check before sba_map_single()");
730 ioc->msingle_calls++;
731 ioc->msingle_pages += size >> IOVP_SHIFT;
733 pide = sba_alloc_range(ioc, dev, size);
739 pdir_start = &(ioc->pdir_base[pide]);
765 sba_check_pdir(ioc,"Check after sba_map_single()");
767 spin_unlock_irqrestore(&ioc->res_lock, flags);
770 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
797 struct ioc *ioc;
806 ioc = GET_IOC(dev);
807 if (!ioc) {
808 WARN_ON(!ioc);
816 spin_lock_irqsave(&ioc->res_lock, flags);
819 ioc->usingle_calls++;
820 ioc->usingle_pages += size >> IOVP_SHIFT;
823 sba_mark_invalid(ioc, iova, size);
829 d = &(ioc->saved[ioc->saved_cnt]);
832 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
833 int cnt = ioc->saved_cnt;
835 sba_free_range(ioc, d->iova, d->size);
838 ioc->saved_cnt = 0;
840 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
843 sba_free_range(ioc, iova, size);
848 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
851 spin_unlock_irqrestore(&ioc->res_lock, flags);
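
With DELAYED_RESOURCE_CNT configured, unmap batches its frees: each call invalidates the pdir entry immediately but only queues the range; when the queue fills, every queued range is freed and a single IOC_PCOM read flushes all the purges at once. A sketch of that pattern (names and the queue depth are assumptions, not the driver's):

#define DELAYED_CNT 16	/* assumed; the driver uses DELAYED_RESOURCE_CNT */

struct deferred { unsigned long iova, size; };

static struct deferred queue[DELAYED_CNT];
static int saved_cnt;

static void unmap_deferred(unsigned long iova, unsigned long size,
			   void (*free_range)(unsigned long, unsigned long),
			   void (*flush_purges)(void))
{
	queue[saved_cnt].iova = iova;
	queue[saved_cnt].size = size;
	if (++saved_cnt >= DELAYED_CNT) {
		while (saved_cnt--)	/* drain the whole batch */
			free_range(queue[saved_cnt].iova, queue[saved_cnt].size);
		saved_cnt = 0;
		flush_purges();		/* one IOC_PCOM read covers the batch */
	}
}
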
942 struct ioc *ioc;
948 ioc = GET_IOC(dev);
949 if (!ioc)
960 spin_lock_irqsave(&ioc->res_lock, flags);
963 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
965 sba_dump_sg(ioc, sglist, nents);
971 ioc->msg_calls++;
982 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
992 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
998 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
1000 sba_dump_sg(ioc, sglist, nents);
1005 spin_unlock_irqrestore(&ioc->res_lock, flags);
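
sba_map_sg is two passes over the scatterlist: iommu_coalesce_chunks groups entries that can share one IOVA range (calling sba_alloc_range per group), and iommu_fill_pdir then writes one pdir entry per IO page. A toy illustration of the first pass that merges only physically contiguous entries (the struct and helper are hypothetical and much simpler than the shared parisc iommu helpers):

struct toy_sg { unsigned long addr, len; };

static int coalesce(struct toy_sg *sgl, int nents)
{
	int out = 0;

	for (int i = 0; i < nents; i++) {
		if (out && sgl[out - 1].addr + sgl[out - 1].len == sgl[i].addr)
			sgl[out - 1].len += sgl[i].len;	/* extend current chunk */
		else
			sgl[out++] = sgl[i];		/* start a new chunk */
	}
	return out;	/* number of chunks needing IOVA ranges */
}
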
1026 struct ioc *ioc;
1034 ioc = GET_IOC(dev);
1035 if (!ioc) {
1036 WARN_ON(!ioc);
1041 ioc->usg_calls++;
1045 spin_lock_irqsave(&ioc->res_lock, flags);
1046 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1047 spin_unlock_irqrestore(&ioc->res_lock, flags);
1055 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1056 ioc->usingle_calls--; /* kluge since call is unmap_sg() */
1065 spin_lock_irqsave(&ioc->res_lock, flags);
1066 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1067 spin_unlock_irqrestore(&ioc->res_lock, flags);
1109 PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
1220 struct ioc *ioc;
1232 lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
1238 setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1241 .ioc = ioc,
1263 sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1276 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
1277 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1279 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1289 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1292 __func__, ioc->ioc_hpa, iova_space_size >> 20,
1295 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1296 get_order(ioc->pdir_size));
1297 if (!ioc->pdir_base)
1300 memset(ioc->pdir_base, 0, ioc->pdir_size);
1303 __func__, ioc->pdir_base, ioc->pdir_size);
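
The sizing rule visible above is one u64 pdir entry per IOVP_SIZE page of IOVA space. Worked example with an assumed 4 KiB IO page: a 1 GiB window needs 1 GiB / 4 KiB = 262144 entries, i.e. a 2 MiB pdir. As a sketch:

#include <stdint.h>

#define IOVP_SIZE_SK (1UL << 12)	/* assumed 4 KiB IO page */

static unsigned long pdir_bytes(unsigned long iova_space_size)
{
	return (iova_space_size / IOVP_SIZE_SK) * sizeof(uint64_t);
}
/* pdir_bytes(1UL << 30) == 2 MiB */
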
1306 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1307 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1310 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1313 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1314 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1319 ioc->imask = iova_space_mask;
1321 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1323 sba_dump_tlb(ioc->ioc_hpa);
1325 setup_ibase_imask(sba, ioc, ioc_num);
1327 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1334 ioc->imask |= 0xFFFFFFFF00000000UL;
1348 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1354 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1360 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1377 ioc->pdir_size /= 2;
1378 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
1384 sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1423 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1427 ioc->ioc_hpa,
1432 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1435 __func__, ioc->pdir_base, pdir_size);
1439 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1440 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1443 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1446 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1456 ioc->ibase = 0;
1457 ioc->imask = iova_space_mask; /* save it */
1459 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1463 __func__, ioc->ibase, ioc->imask);
1471 setup_ibase_imask(sba, ioc, ioc_num);
1476 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1477 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1491 WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
1497 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1499 ioc->ibase = 0; /* used by SBA_IOVA and related macros */
1591 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1603 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1619 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1620 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1629 void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
1657 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1659 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1661 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1662 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1665 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1666 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1670 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1672 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
1697 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */
1708 sba_dev->ioc[i].res_size = res_size;
1709 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1712 iterate_pages(sba_dev->ioc[i].res_map, res_size,
1716 if (NULL == sba_dev->ioc[i].res_map)
1722 memset(sba_dev->ioc[i].res_map, 0, res_size);
1724 sba_dev->ioc[i].res_hint = (unsigned long *)
1725 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
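
The resource map tracks allocation state with one bit per pdir entry, so (assuming the entry count computed at line 1697 is converted to a byte count before the allocation above) its size is the entry count divided by 8. A one-line sketch of the arithmetic:

static unsigned long res_map_bytes(unsigned long pdir_entries)
{
	return pdir_entries / 8;	/* one bit per pdir entry */
}
/* e.g. 262144 entries -> 32 KiB resource map */
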
1729 sba_dev->ioc[i].res_map[0] = 0x80;
1730 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1739 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
1740 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
1749 iterate_pages(sba_dev->ioc[i].res_map, res_size,
1751 iterate_pages(sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
1756 __func__, i, res_size, sba_dev->ioc[i].res_map);
1780 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
1781 int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
1792 (int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
1796 ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */
1812 total_pages - ioc->used_pages, ioc->used_pages,
1813 (int)(ioc->used_pages * 100 / total_pages));
1815 min = max = ioc->avg_search[0];
1817 avg += ioc->avg_search[i];
1818 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1819 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1826 ioc->msingle_calls, ioc->msingle_pages,
1827 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1830 min = ioc->usingle_calls;
1831 max = ioc->usingle_pages - ioc->usg_pages;
1836 ioc->msg_calls, ioc->msg_pages,
1837 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1840 ioc->usg_calls, ioc->usg_pages,
1841 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1851 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
1853 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
1854 ioc->res_size, false);
1945 spin_lock_init(&(sba_dev->ioc[i].res_lock));
2003 return &(sba->ioc[iocnum]);