Lines Matching defs:tbl
(hits for the identifier tbl, one per source line number; they appear to come from the sPAPR TCE VFIO IOMMU driver, drivers/vfio/vfio_iommu_spapr_tce.c)

207 struct iommu_table *tbl = container->tables[i];
209 if (tbl) {
210 unsigned long entry = ioba >> tbl->it_page_shift;
211 unsigned long start = tbl->it_offset;
212 unsigned long end = start + tbl->it_size;
215 *ptbl = tbl;
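
The hits at 207-215 are a window check: the out-parameter at 215 and the tce_iommu_find_table() calls further down (716, 880, 947) suggest this is the helper that maps a bus address to the table whose window contains it. A stand-alone sketch of that arithmetic, with a hypothetical cut-down struct (tbl_sketch, ioba_in_window are illustrative names) standing in for the real iommu_table:

#include <stdbool.h>
#include <stdio.h>

struct tbl_sketch {
        unsigned long it_offset;     /* window start, in IOMMU pages  */
        unsigned long it_size;       /* window length, in IOMMU pages */
        unsigned int  it_page_shift; /* log2 of the IOMMU page size   */
};

/* Return true when the bus address ioba falls inside the table's window. */
static bool ioba_in_window(const struct tbl_sketch *tbl, unsigned long ioba)
{
        unsigned long entry = ioba >> tbl->it_page_shift;
        unsigned long start = tbl->it_offset;
        unsigned long end = start + tbl->it_size;

        return entry >= start && entry < end;
}

int main(void)
{
        /* 64K IOMMU pages, window at entry 0, 16M entries (1TB) long */
        struct tbl_sketch tbl = { 0, 16UL << 20, 16 };

        printf("%d\n", ioba_in_window(&tbl, 0x10000));   /* inside  */
        printf("%d\n", ioba_in_window(&tbl, 1UL << 44)); /* outside */
        return 0;
}
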
337 struct iommu_table *tbl,
340 struct iommu_table *tbl);
360 struct iommu_table *tbl = container->tables[i];
362 if (!tbl)
365 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
366 tce_iommu_free_table(container, tbl);
410 struct iommu_table *tbl, unsigned long entry)
415 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
421 tbl->it_page_shift, &hpa, &mem);
432 struct iommu_table *tbl,
441 if (tbl->it_indirect_levels && tbl->it_userspace) {
450 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
454 entry |= tbl->it_level_size - 1;
463 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
472 tce_iommu_unuse_page_v2(container, tbl, entry);
479 iommu_tce_kill(tbl, firstentry, pages);
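
At 441-454, the clearing path appears to skip whole levels of a multi-level table when no userspace view was ever allocated for them, by rounding the entry index up to the end of its it_level_size-aligned block. A small stand-alone illustration of that rounding, assuming it_level_size is a power of two and using made-up values:

#include <stdio.h>

int main(void)
{
        unsigned long it_level_size = 512;  /* entries per level, 2^9, assumed */
        unsigned long entry = 1234;

        entry |= it_level_size - 1;         /* jump to the end of this block   */
        printf("skip to entry %lu, next loop iteration starts at %lu\n",
               entry, entry + 1);
        return 0;
}
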
500 struct iommu_table *tbl,
509 unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
516 tbl->it_page_shift)) {
523 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
528 __func__, entry << tbl->it_page_shift,
536 tce += IOMMU_PAGE_SIZE(tbl);
540 tce_iommu_clear(container, tbl, entry, i);
542 iommu_tce_kill(tbl, entry, pages);
548 struct iommu_table *tbl,
558 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
561 tce, tbl->it_page_shift, &hpa, &mem);
566 tbl->it_page_shift)) {
572 hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
579 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
583 tce_iommu_unuse_page_v2(container, tbl, entry + i);
585 __func__, entry << tbl->it_page_shift,
591 tce_iommu_unuse_page_v2(container, tbl, entry + i);
595 tce += IOMMU_PAGE_SIZE(tbl);
599 tce_iommu_clear(container, tbl, entry, i);
601 iommu_tce_kill(tbl, entry, pages);
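
Lines 509 and 572 carry the sub-page offset of the guest-supplied tce over into the host physical address, which matters when the IOMMU page is smaller than the host page (for example 4K TCEs backed by 64K host pages). A stand-alone sketch of that masking, with hypothetical stand-ins for IOMMU_PAGE_MASK() and PAGE_MASK and made-up addresses:

#include <stdio.h>

#define HOST_PAGE_SHIFT   16                          /* 64K host pages */
#define HOST_PAGE_MASK    (~((1UL << HOST_PAGE_SHIFT) - 1))
#define IOMMU_PAGE_SHIFT  12                          /* 4K IOMMU pages */
#define IOMMU_PAGE_MASK_  (~((1UL << IOMMU_PAGE_SHIFT) - 1))

int main(void)
{
        unsigned long tce = 0x12345000;               /* guest address  */
        unsigned long hpa = 0xabcd0000;               /* host page base */

        /* offset of the 4K IOMMU page within its 64K host page */
        unsigned long offset = tce & IOMMU_PAGE_MASK_ & ~HOST_PAGE_MASK;

        printf("offset = 0x%lx, hpa | offset = 0x%lx\n", offset, hpa | offset);
        return 0;
}
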
635 struct iommu_table *tbl)
637 unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
639 iommu_tce_table_put(tbl);
649 struct iommu_table *tbl = NULL;
673 page_shift, window_size, levels, &tbl);
677 BUG_ON(!tbl->it_ops->free);
686 ret = table_group->ops->set_window(table_group, num, tbl);
691 container->tables[num] = tbl;
694 *start_addr = tbl->it_offset << tbl->it_page_shift;
703 tce_iommu_free_table(container, tbl);
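
Around 637 and 694, values move between three units: bus addresses, TCE entries (shifted by it_page_shift), and host pages of the table's backing allocation (shifted by PAGE_SHIFT). A toy illustration with made-up values; the page count computed at 637 is presumably fed into locked-memory accounting:

#include <stdio.h>

int main(void)
{
        unsigned int  it_page_shift = 16;              /* 64K IOMMU pages   */
        unsigned int  host_page_shift = 16;            /* 64K host pages    */
        unsigned long it_offset = 1UL << 28;           /* first entry index */
        unsigned long it_allocated_size = 1UL << 21;   /* 2M of TCE memory  */

        /* line 694: window start address handed back to the caller */
        printf("window bus address: 0x%lx\n", it_offset << it_page_shift);

        /* line 637: host pages backing the table */
        printf("host pages backing the table: %lu\n",
               it_allocated_size >> host_page_shift);
        return 0;
}
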
712 struct iommu_table *tbl;
716 num = tce_iommu_find_table(container, start_addr, &tbl);
720 BUG_ON(!tbl->it_size);
740 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
741 tce_iommu_free_table(container, tbl);
857 struct iommu_table *tbl = NULL;
880 num = tce_iommu_find_table(container, param.iova, &tbl);
884 if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
885 (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
901 ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
906 ret = tce_iommu_build_v2(container, tbl,
907 param.iova >> tbl->it_page_shift,
909 param.size >> tbl->it_page_shift,
912 ret = tce_iommu_build(container, tbl,
913 param.iova >> tbl->it_page_shift,
915 param.size >> tbl->it_page_shift,
918 iommu_flush_tce(tbl);
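
The map path at 884-915 first rejects a size or vaddr that is not aligned to the table's IOMMU page size, then converts iova and size into an entry index and a page count with the same shift. A stand-alone version of those checks and conversions, using a hypothetical 64K page shift and an illustrative helper (iommu_page_aligned is not a kernel function):

#include <stdbool.h>
#include <stdio.h>

static bool iommu_page_aligned(unsigned long v, unsigned int page_shift)
{
        unsigned long iommu_page_mask = ~((1UL << page_shift) - 1);

        return (v & ~iommu_page_mask) == 0;
}

int main(void)
{
        unsigned int  shift = 16;                       /* 64K IOMMU pages */
        unsigned long iova  = 0x40000;
        unsigned long size  = 0x20000;
        unsigned long vaddr = 0x7f0000010000UL;

        /* lines 884-885: size and vaddr must be IOMMU-page aligned */
        if (!iommu_page_aligned(size, shift) || !iommu_page_aligned(vaddr, shift)) {
                puts("EINVAL: not aligned to the IOMMU page size");
                return 1;
        }

        /* lines 906-915: shift down to an entry index and a page count */
        printf("first entry = %lu, pages = %lu\n", iova >> shift, size >> shift);
        return 0;
}
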
924 struct iommu_table *tbl = NULL;
947 num = tce_iommu_find_table(container, param.iova, &tbl);
951 if (param.size & ~IOMMU_PAGE_MASK(tbl))
954 ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
955 param.size >> tbl->it_page_shift);
959 ret = tce_iommu_clear(container, tbl,
960 param.iova >> tbl->it_page_shift,
961 param.size >> tbl->it_page_shift);
962 iommu_flush_tce(tbl);
1147 struct iommu_table *tbl = container->tables[i];
1149 if (!tbl)
1152 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
1153 if (tbl->it_map)
1154 iommu_release_ownership(tbl);
1166 struct iommu_table *tbl = table_group->tables[i];
1168 if (!tbl || !tbl->it_map)
1171 rc = iommu_take_ownership(tbl);
1219 struct iommu_table *tbl = container->tables[i];
1221 if (!tbl)
1224 ret = table_group->ops->set_window(table_group, i, tbl);