Lines Matching refs:ioas
347 rc = iopt_table_enforce_dev_resv_regions(&hwpt->ioas->iopt, idev->dev,
374 iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
391 iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
443 if (hwpt->ioas != old_hwpt->ioas) {
446 &hwpt->ioas->iopt, cur->dev, NULL);
460 if (hwpt->ioas != old_hwpt->ioas) {
462 iopt_remove_reserved_iova(&old_hwpt->ioas->iopt,
482 iopt_remove_reserved_iova(&hwpt->ioas->iopt, cur->dev);
498 struct iommufd_ioas *ioas, u32 *pt_id,
517 mutex_lock(&ioas->mutex);
518 list_for_each_entry(hwpt, &ioas->hwpt_list, hwpt_item) {
542 hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev,
561 mutex_unlock(&ioas->mutex);
567 mutex_unlock(&ioas->mutex);
592 struct iommufd_ioas *ioas =
595 destroy_hwpt = iommufd_device_auto_get_domain(idev, ioas, pt_id,
687 * a valid cur_ioas (access->ioas). A caller passing in a valid new_ioas should
694 struct iommufd_ioas *cur_ioas = access->ioas;
707 * Set ioas to NULL to block any further iommufd_access_pin_pages().
710 access->ioas = NULL;
715 access->ioas = cur_ioas;
731 access->ioas = new_ioas;
739 struct iommufd_ioas *ioas = iommufd_get_ioas(access->ictx, id);
742 if (IS_ERR(ioas))
743 return PTR_ERR(ioas);
744 rc = iommufd_access_change_ioas(access, ioas);
745 iommufd_put_object(&ioas->obj);
755 if (access->ioas)
822 if (WARN_ON(!access->ioas)) {
836 if (WARN_ON(access->ioas)) {
852 if (!access->ioas) {
881 struct iommufd_ioas *ioas =
886 xa_lock(&ioas->iopt.access_list);
887 xa_for_each(&ioas->iopt.access_list, index, access) {
890 xa_unlock(&ioas->iopt.access_list);
895 xa_lock(&ioas->iopt.access_list);
897 xa_unlock(&ioas->iopt.access_list);
979 * ioas alignment is >= PAGE_SIZE and the iova is PAGE_SIZE aligned. However
1004 if (!access->ioas) {
1008 iopt = &access->ioas->iopt;
1087 if (!access->ioas) {
1091 iopt = &access->ioas->iopt;
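
The matches around source lines 687-745 trace how an access object is switched from its current IOAS to a new one: the current `access->ioas` is saved, `access->ioas` is set to NULL to block further `iommufd_access_pin_pages()`, the old pointer is restored on failure, and the lookup-by-id wrapper takes and drops an object reference around the change. A minimal sketch of that pattern is below, reconstructed from the fragments above; the helpers marked as assumed (the iopt add/remove bookkeeping) and the exact locking are not shown in the matches and are illustrative guesses, not verbatim code from this tree.

	/*
	 * Sketch of the access->ioas switch traced by the matches above.
	 * Only iommufd_get_ioas(), iommufd_put_object(), and
	 * iommufd_access_change_ioas() appear in the matched lines; the
	 * iopt_*_access() helpers here are assumptions for illustration.
	 */
	static int iommufd_access_change_ioas(struct iommufd_access *access,
					      struct iommufd_ioas *new_ioas)
	{
		struct iommufd_ioas *cur_ioas = access->ioas;
		int rc;

		if (cur_ioas == new_ioas)
			return 0;

		/* Set ioas to NULL to block any further iommufd_access_pin_pages() */
		access->ioas = NULL;

		if (new_ioas) {
			rc = iopt_add_access(&new_ioas->iopt, access);	/* assumed helper */
			if (rc) {
				/* Restore the old ioas so the access stays usable */
				access->ioas = cur_ioas;
				return rc;
			}
		}

		if (cur_ioas)
			iopt_remove_access(&cur_ioas->iopt, access);	/* assumed helper */

		access->ioas = new_ioas;
		return 0;
	}

	/* Lookup-by-id wrapper matching the fragments at source lines 739-745 */
	static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id)
	{
		struct iommufd_ioas *ioas = iommufd_get_ioas(access->ictx, id);
		int rc;

		if (IS_ERR(ioas))
			return PTR_ERR(ioas);
		rc = iommufd_access_change_ioas(access, ioas);
		iommufd_put_object(&ioas->obj);
		return rc;
	}

The same null-then-restore idea explains the later matches: the pin/unpin and RW paths (source lines 1004, 1087) bail out when `access->ioas` is NULL, which is exactly what the temporary NULL assignment during a change relies on.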