Lines matching refs: container (VFIO sPAPR TCE IOMMU driver, drivers/vfio/vfio_iommu_spapr_tce.c)

48 * A container needs to remember which preregistered region it has
57 * The container descriptor supports only a single group per container.
58 * Required by the API as the container is not supplied with the IOMMU group
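For orientation, here is a sketch of the container descriptor these matches revolve around. Every field is taken from usage visible in this listing (lock, enabled, v2, def_window_pending, locked_pages, mm, tables[], group_list, prereg_list); the ordering and the IOMMU_TABLE_GROUP_MAX_TABLES bound follow the upstream definition, but treat this as a reconstruction, not verbatim source:

struct tce_container {
        struct mutex lock;
        bool enabled;
        bool v2;
        bool def_window_pending;
        unsigned long locked_pages;
        struct mm_struct *mm;           /* grabbed in tce_iommu_mm_set() */
        struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
        struct list_head group_list;    /* attached tce_iommu_group entries */
        struct list_head prereg_list;   /* tce_iommu_prereg entries */
};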
73 static long tce_iommu_mm_set(struct tce_container *container)
75 if (container->mm) {
76 if (container->mm == current->mm)
81 container->mm = current->mm;
82 mmgrab(container->mm);
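The matches above elide this helper's early returns; a plausible reconstruction of the whole function follows, assuming the upstream body (the -EPERM path for a foreign mm is inferred from the check at line 76):

static long tce_iommu_mm_set(struct tce_container *container)
{
        if (container->mm) {
                /* Already bound to an mm; only that process may reuse it */
                if (container->mm == current->mm)
                        return 0;
                return -EPERM;
        }
        BUG_ON(!current->mm);
        container->mm = current->mm;
        mmgrab(container->mm);          /* pin the mm_struct, not its pages */

        return 0;
}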
87 static long tce_iommu_prereg_free(struct tce_container *container,
92 ret = mm_iommu_put(container->mm, tcemem->mem);
102 static long tce_iommu_unregister_pages(struct tce_container *container,
113 mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
117 list_for_each_entry(tcemem, &container->prereg_list, next) {
127 ret = tce_iommu_prereg_free(container, tcemem);
129 mm_iommu_put(container->mm, mem);
134 static long tce_iommu_register_pages(struct tce_container *container,
146 mem = mm_iommu_get(container->mm, vaddr, entries);
148 list_for_each_entry(tcemem, &container->prereg_list, next) {
155 ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
167 list_add(&tcemem->next, &container->prereg_list);
169 container->enabled = true;
174 mm_iommu_put(container->mm, mem);
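Filling in the elided context around lines 146-174, a hedged reconstruction of the registration path: reuse an existing preregistered region via mm_iommu_get(), reject duplicates already on prereg_list, otherwise pin a new region with mm_iommu_new(). The exact validity checks and error labels are assumptions and may differ from upstream:

static long tce_iommu_register_pages(struct tce_container *container,
                __u64 vaddr, __u64 size)
{
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem = NULL;
        struct tce_iommu_prereg *tcemem;
        unsigned long entries = size >> PAGE_SHIFT;

        if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
                        ((vaddr + size) < vaddr))
                return -EINVAL;

        mem = mm_iommu_get(container->mm, vaddr, entries);
        if (mem) {
                /* Region already pinned; refuse a duplicate list entry */
                list_for_each_entry(tcemem, &container->prereg_list, next) {
                        if (tcemem->mem == mem) {
                                ret = -EBUSY;
                                goto put_exit;
                        }
                }
        } else {
                ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
                if (ret)
                        return ret;
        }

        tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
        if (!tcemem) {
                ret = -ENOMEM;
                goto put_exit;
        }

        tcemem->mem = mem;
        list_add(&tcemem->next, &container->prereg_list);

        container->enabled = true;

        return 0;

put_exit:
        mm_iommu_put(container->mm, mem);
        return ret;
}

The unregister path (lines 102-129) walks the same prereg_list to find the matching entry before dropping both references.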
196 static inline bool tce_groups_attached(struct tce_container *container)
198 return !list_empty(&container->group_list);
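This helper is short enough that the two matched lines are essentially the whole body; completed form:

static inline bool tce_groups_attached(struct tce_container *container)
{
        /* True once at least one IOMMU group is attached */
        return !list_empty(&container->group_list);
}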
201 static long tce_iommu_find_table(struct tce_container *container,
207 struct iommu_table *tbl = container->tables[i];
224 static int tce_iommu_find_free_table(struct tce_container *container)
229 if (!container->tables[i])
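A likely completion of the slot search, assuming the loop runs over the tables[] array up to IOMMU_TABLE_GROUP_MAX_TABLES and reports -ENOSPC when every slot is taken:

static int tce_iommu_find_free_table(struct tce_container *container)
{
        int i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                if (!container->tables[i])
                        return i;       /* first empty slot */
        }

        return -ENOSPC;
}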
236 static int tce_iommu_enable(struct tce_container *container)
243 if (container->enabled)
271 * So we do not allow enabling a container without a group attached
275 if (!tce_groups_attached(container))
278 tcegrp = list_first_entry(&container->group_list,
287 ret = tce_iommu_mm_set(container);
292 ret = account_locked_vm(container->mm, locked, true);
296 container->locked_pages = locked;
298 container->enabled = true;
303 static void tce_iommu_disable(struct tce_container *container)
305 if (!container->enabled)
308 container->enabled = false;
310 BUG_ON(!container->mm);
311 account_locked_vm(container->mm, container->locked_pages, false);
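Almost all of tce_iommu_disable() is visible in the matches; the completed function only adds the early return and undoes the account_locked_vm() charge taken in tce_iommu_enable() (line 292):

static void tce_iommu_disable(struct tce_container *container)
{
        if (!container->enabled)
                return;

        container->enabled = false;

        BUG_ON(!container->mm);
        account_locked_vm(container->mm, container->locked_pages, false);
}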
316 struct tce_container *container;
323 container = kzalloc(sizeof(*container), GFP_KERNEL);
324 if (!container)
327 mutex_init(&container->lock);
328 INIT_LIST_HEAD_RCU(&container->group_list);
329 INIT_LIST_HEAD_RCU(&container->prereg_list);
331 container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
333 return container;
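A reconstruction of the open path around lines 316-333; the initial argument check against VFIO_SPAPR_TCE_IOMMU / VFIO_SPAPR_TCE_v2_IOMMU is inferred from the v2 assignment at line 331 and the VFIO ioctl contract, so treat the error branches as assumptions:

static void *tce_iommu_open(unsigned long arg)
{
        struct tce_container *container;

        if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
                pr_err("tce_vfio: Wrong IOMMU type\n");
                return ERR_PTR(-EINVAL);
        }

        container = kzalloc(sizeof(*container), GFP_KERNEL);
        if (!container)
                return ERR_PTR(-ENOMEM);

        mutex_init(&container->lock);
        INIT_LIST_HEAD_RCU(&container->group_list);
        INIT_LIST_HEAD_RCU(&container->prereg_list);

        container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

        return container;
}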
336 static int tce_iommu_clear(struct tce_container *container,
339 static void tce_iommu_free_table(struct tce_container *container,
344 struct tce_container *container = iommu_data;
349 while (tce_groups_attached(container)) {
350 tcegrp = list_first_entry(&container->group_list,
360 struct iommu_table *tbl = container->tables[i];
365 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
366 tce_iommu_free_table(container, tbl);
369 list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
370 WARN_ON(tce_iommu_prereg_free(container, tcemem));
372 tce_iommu_disable(container);
373 if (container->mm)
374 mmdrop(container->mm);
375 mutex_destroy(&container->lock);
377 kfree(container);
380 static void tce_iommu_unuse_page(struct tce_container *container,
389 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
396 mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
409 static void tce_iommu_unuse_page_v2(struct tce_container *container,
420 ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
431 static int tce_iommu_clear(struct tce_container *container,
463 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
471 if (container->v2) {
472 tce_iommu_unuse_page_v2(container, tbl, entry);
476 tce_iommu_unuse_page(container, oldhpa);
499 static long tce_iommu_build(struct tce_container *container,
515 if (!tce_page_is_contained(container->mm, hpa,
523 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
526 tce_iommu_unuse_page(container, hpa);
534 tce_iommu_unuse_page(container, hpa);
540 tce_iommu_clear(container, tbl, entry, i);
547 static long tce_iommu_build_v2(struct tce_container *container,
560 ret = tce_iommu_prereg_ua_to_hpa(container,
565 if (!tce_page_is_contained(container->mm, hpa,
579 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
583 tce_iommu_unuse_page_v2(container, tbl, entry + i);
591 tce_iommu_unuse_page_v2(container, tbl, entry + i);
599 tce_iommu_clear(container, tbl, entry, i);
606 static long tce_iommu_create_table(struct tce_container *container,
621 ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
634 static void tce_iommu_free_table(struct tce_container *container,
640 account_locked_vm(container->mm, pages, false);
643 static long tce_iommu_create_window(struct tce_container *container,
652 num = tce_iommu_find_free_table(container);
657 tcegrp = list_first_entry(&container->group_list,
672 ret = tce_iommu_create_table(container, table_group, num,
683 list_for_each_entry(tcegrp, &container->group_list, next) {
691 container->tables[num] = tbl;
699 list_for_each_entry(tcegrp, &container->group_list, next) {
703 tce_iommu_free_table(container, tbl);
708 static long tce_iommu_remove_window(struct tce_container *container,
716 num = tce_iommu_find_table(container, start_addr, &tbl);
723 list_for_each_entry(tcegrp, &container->group_list, next) {
740 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
741 tce_iommu_free_table(container, tbl);
742 container->tables[num] = NULL;
747 static long tce_iommu_create_default_window(struct tce_container *container)
754 if (!container->def_window_pending)
757 if (!tce_groups_attached(container))
760 tcegrp = list_first_entry(&container->group_list,
766 ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
771 container->def_window_pending = false;
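Reconstructing the deferred default-window creation around lines 747-771: nothing happens unless group attach marked a default window pending (line 1306), and the pending flag is only cleared once the 32-bit window at tce32_start has actually been created. Identifiers outside the matched lines (table_group, start_addr) are assumptions:

static long tce_iommu_create_default_window(struct tce_container *container)
{
        long ret;
        __u64 start_addr = 0;
        struct tce_iommu_group *tcegrp;
        struct iommu_table_group *table_group;

        if (!container->def_window_pending)
                return 0;

        if (!tce_groups_attached(container))
                return -ENODEV;

        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -ENODEV;

        ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
                        table_group->tce32_start, 1, &start_addr);
        if (!ret)
                container->def_window_pending = false;

        return ret;
}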
779 struct tce_container *container = iommu_data;
802 BUG_ON(!container);
803 if (container->mm && container->mm != current->mm)
812 if (!tce_groups_attached(container))
815 tcegrp = list_first_entry(&container->group_list,
837 container->v2) {
861 if (!container->enabled)
876 ret = tce_iommu_create_default_window(container);
880 num = tce_iommu_find_table(container, param.iova, &tbl);
905 if (container->v2)
906 ret = tce_iommu_build_v2(container, tbl,
912 ret = tce_iommu_build(container, tbl,
927 if (!container->enabled)
943 ret = tce_iommu_create_default_window(container);
947 num = tce_iommu_find_table(container, param.iova, &tbl);
959 ret = tce_iommu_clear(container, tbl,
969 if (!container->v2)
975 ret = tce_iommu_mm_set(container);
989 mutex_lock(&container->lock);
990 ret = tce_iommu_register_pages(container, param.vaddr,
992 mutex_unlock(&container->lock);
999 if (!container->v2)
1002 if (!container->mm)
1018 mutex_lock(&container->lock);
1019 ret = tce_iommu_unregister_pages(container, param.vaddr,
1021 mutex_unlock(&container->lock);
1026 if (container->v2)
1029 mutex_lock(&container->lock);
1030 ret = tce_iommu_enable(container);
1031 mutex_unlock(&container->lock);
1036 if (container->v2)
1039 mutex_lock(&container->lock);
1040 tce_iommu_disable(container);
1041 mutex_unlock(&container->lock);
1048 list_for_each_entry(tcegrp, &container->group_list, next) {
1060 if (!container->v2)
1063 ret = tce_iommu_mm_set(container);
1067 if (!tce_groups_attached(container))
1082 mutex_lock(&container->lock);
1084 ret = tce_iommu_create_default_window(container);
1086 ret = tce_iommu_create_window(container,
1091 mutex_unlock(&container->lock);
1101 if (!container->v2)
1104 ret = tce_iommu_mm_set(container);
1108 if (!tce_groups_attached(container))
1123 if (container->def_window_pending && !remove.start_addr) {
1124 container->def_window_pending = false;
1128 mutex_lock(&container->lock);
1130 ret = tce_iommu_remove_window(container, remove.start_addr);
1132 mutex_unlock(&container->lock);
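To ground the ioctl switch above, a hypothetical userspace sketch of the v2 flow it serves; container_fd, buf and size are illustrative names and error handling is minimal. The ioctls and structures (VFIO_IOMMU_SPAPR_REGISTER_MEMORY, VFIO_IOMMU_SPAPR_TCE_CREATE) are the real UAPI from linux/vfio.h:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Preregister a buffer and create a DMA window on a container already
 * set to VFIO_SPAPR_TCE_v2_IOMMU (hypothetical helper). */
static int spapr_prereg_and_window(int container_fd, void *buf, uint64_t size)
{
        struct vfio_iommu_spapr_register_memory reg = {
                .argsz = sizeof(reg),
                .vaddr = (uint64_t)(uintptr_t)buf,      /* must be page-aligned */
                .size = size,                           /* must be page-aligned */
        };
        struct vfio_iommu_spapr_tce_create create = {
                .argsz = sizeof(create),
                .page_shift = 16,               /* 64K IOMMU pages */
                .window_size = size,
                .levels = 1,
        };

        /* Lands in tce_iommu_register_pages() (lines 989-992 above) */
        if (ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg))
                return -1;

        /* Lands in tce_iommu_create_window() (lines 1082-1091 above) */
        if (ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create))
                return -1;

        /* create.start_addr now holds the IOVA base of the new window */
        return 0;
}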
1141 static void tce_iommu_release_ownership(struct tce_container *container,
1147 struct iommu_table *tbl = container->tables[i];
1152 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
1156 container->tables[i] = NULL;
1160 static int tce_iommu_take_ownership(struct tce_container *container,
1182 container->tables[i] = table_group->tables[i];
1187 static void tce_iommu_release_ownership_ddw(struct tce_container *container,
1198 if (container->tables[i])
1204 static long tce_iommu_take_ownership_ddw(struct tce_container *container,
1219 struct iommu_table *tbl = container->tables[i];
1244 struct tce_container *container = iommu_data;
1248 mutex_lock(&container->lock);
1258 if (tce_groups_attached(container) && (!table_group->ops ||
1266 list_for_each_entry(tcegrp, &container->group_list, next) {
1294 if (container->v2) {
1298 ret = tce_iommu_take_ownership(container, table_group);
1300 if (!container->v2) {
1304 ret = tce_iommu_take_ownership_ddw(container, table_group);
1305 if (!tce_groups_attached(container) && !container->tables[0])
1306 container->def_window_pending = true;
1311 list_add(&tcegrp->next, &container->group_list);
1319 mutex_unlock(&container->lock);
1327 struct tce_container *container = iommu_data;
1332 mutex_lock(&container->lock);
1334 list_for_each_entry(tcegrp, &container->group_list, next) {
1354 tce_iommu_release_ownership(container, table_group);
1356 tce_iommu_release_ownership_ddw(container, table_group);
1359 mutex_unlock(&container->lock);