Lines Matching refs:group
75 #define for_each_group_device(group, pos) \
76 list_for_each_entry(pos, &(group)->devices, list)
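This macro is a thin wrapper around list_for_each_entry() over group->devices; callers must hold group->mutex. A minimal usage sketch (the lookup helper is hypothetical, not part of iommu.c):

/* Hypothetical lookup: find the group_device entry for a struct device. */
static struct group_device *
find_group_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *gdev;

	lockdep_assert_held(&group->mutex);	/* the list is mutex-protected */
	for_each_group_device(group, gdev)
		if (gdev->dev == dev)
			return gdev;
	return NULL;
}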
80 ssize_t (*show)(struct iommu_group *group, char *buf);
81 ssize_t (*store)(struct iommu_group *group,
104 struct iommu_group *group);
110 static int __iommu_device_set_domain(struct iommu_group *group,
114 static int __iommu_group_set_domain_internal(struct iommu_group *group,
117 static int __iommu_group_set_domain(struct iommu_group *group,
120 return __iommu_group_set_domain_internal(group, new_domain, 0);
122 static void __iommu_group_set_domain_nofail(struct iommu_group *group,
126 group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED));
129 static int iommu_setup_default_domain(struct iommu_group *group,
133 static ssize_t iommu_group_store_type(struct iommu_group *group,
135 static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
137 static void __iommu_group_free_device(struct iommu_group *group,
391 struct iommu_group *group;
412 group = ops->device_group(dev);
413 if (WARN_ON_ONCE(group == NULL))
414 group = ERR_PTR(-EINVAL);
415 if (IS_ERR(group)) {
416 ret = PTR_ERR(group);
419 dev->iommu_group = group;
441 struct iommu_group *group = dev->iommu_group;
444 lockdep_assert_held(&group->mutex);
450 * If there are still other devices in the group they are not affected
461 * If this is the last driver to use the group then we must free the
464 if (list_empty(&group->devices)) {
465 if (group->default_domain) {
466 iommu_domain_free(group->default_domain);
467 group->default_domain = NULL;
469 if (group->blocking_domain) {
470 iommu_domain_free(group->blocking_domain);
471 group->blocking_domain = NULL;
473 group->domain = NULL;
487 struct iommu_group *group;
502 /* Device is probed already if in a group */
510 group = dev->iommu_group;
511 gdev = iommu_group_alloc_device(group, dev);
512 mutex_lock(&group->mutex);
522 list_add_tail(&gdev->list, &group->devices);
523 WARN_ON(group->default_domain && !group->domain);
524 if (group->default_domain)
525 iommu_create_device_direct_mappings(group->default_domain, dev);
526 if (group->domain) {
527 ret = __iommu_device_set_domain(group, dev, group->domain, 0);
530 } else if (!group->default_domain && !group_list) {
531 ret = iommu_setup_default_domain(group, 0);
534 } else if (!group->default_domain) {
540 if (list_empty(&group->entry))
541 list_add_tail(&group->entry, group_list);
543 mutex_unlock(&group->mutex);
552 __iommu_group_free_device(group, gdev);
555 mutex_unlock(&group->mutex);
556 iommu_group_put(group);
579 static void __iommu_group_free_device(struct iommu_group *group,
584 sysfs_remove_link(group->devices_kobj, grp_dev->name);
587 trace_remove_device_from_group(group->id, dev);
590 * If the group has become empty then ownership must have been
594 if (list_empty(&group->devices))
595 WARN_ON(group->owner_cnt ||
596 group->domain != group->default_domain);
605 struct iommu_group *group = dev->iommu_group;
608 mutex_lock(&group->mutex);
609 for_each_group_device(group, device) {
614 __iommu_group_free_device(group, device);
621 mutex_unlock(&group->mutex);
627 iommu_group_put(group);
632 struct iommu_group *group = dev->iommu_group;
634 if (group)
681 struct iommu_group *group = to_iommu_group(kobj);
685 ret = attr->show(group, buf);
694 struct iommu_group *group = to_iommu_group(kobj);
698 ret = attr->store(group, buf, count);
707 static int iommu_group_create_file(struct iommu_group *group,
710 return sysfs_create_file(&group->kobj, &attr->attr);
713 static void iommu_group_remove_file(struct iommu_group *group,
716 sysfs_remove_file(&group->kobj, &attr->attr);
719 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
721 return sysfs_emit(buf, "%s\n", group->name);
800 int iommu_get_group_resv_regions(struct iommu_group *group,
806 mutex_lock(&group->mutex);
807 for_each_group_device(group, device) {
824 mutex_unlock(&group->mutex);
829 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
837 iommu_get_group_resv_regions(group, &group_resv_regions);
851 static ssize_t iommu_group_show_type(struct iommu_group *group,
856 mutex_lock(&group->mutex);
857 if (group->default_domain) {
858 switch (group->default_domain->type) {
876 mutex_unlock(&group->mutex);
891 struct iommu_group *group = to_iommu_group(kobj);
893 pr_debug("Releasing group %d\n", group->id);
895 if (group->iommu_data_release)
896 group->iommu_data_release(group->iommu_data);
898 ida_free(&iommu_group_ida, group->id);
901 WARN_ON(group->default_domain);
902 WARN_ON(group->blocking_domain);
904 kfree(group->name);
905 kfree(group);
914 * iommu_group_alloc - Allocate a new group
917 * group. The iommu group represents the minimum granularity of the iommu.
919 * group in order to hold the group until devices are added. Use
921 * group to be automatically reclaimed once it has no devices or external
926 struct iommu_group *group;
929 group = kzalloc(sizeof(*group), GFP_KERNEL);
930 if (!group)
933 group->kobj.kset = iommu_group_kset;
934 mutex_init(&group->mutex);
935 INIT_LIST_HEAD(&group->devices);
936 INIT_LIST_HEAD(&group->entry);
937 xa_init(&group->pasid_array);
941 kfree(group);
944 group->id = ret;
946 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
947 NULL, "%d", group->id);
949 kobject_put(&group->kobj);
953 group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
954 if (!group->devices_kobj) {
955 kobject_put(&group->kobj); /* triggers .release & free */
960 * The devices_kobj holds a reference on the group kobject, so
961 * as long as that exists so will the group. We can therefore
964 kobject_put(&group->kobj);
966 ret = iommu_group_create_file(group,
969 kobject_put(group->devices_kobj);
973 ret = iommu_group_create_file(group, &iommu_group_attr_type);
975 kobject_put(group->devices_kobj);
979 pr_debug("Allocated group %d\n", group->id);
981 return group;
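A minimal sketch of how a driver's ->device_group() callback typically uses iommu_group_alloc(), per the comment above; the callback name is hypothetical:

/* Hypothetical ->device_group() giving each device its own group. */
static struct iommu_group *my_device_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_alloc();

	if (IS_ERR(group))
		return group;		/* ERR_PTR propagated to the caller */

	/* One reference is held; dropped later via iommu_group_put(). */
	return group;
}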
986 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
987 * @group: the group
989 * iommu drivers can store data in the group for use when doing iommu
991 * should hold a group reference.
993 void *iommu_group_get_iommudata(struct iommu_group *group)
995 return group->iommu_data;
1000 * iommu_group_set_iommudata - set iommu_data for a group
1001 * @group: the group
1005 * iommu drivers can store data in the group for use when doing iommu
1007 * the group has been allocated. Caller should hold a group reference.
1009 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
1012 group->iommu_data = iommu_data;
1013 group->iommu_data_release = release;
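A sketch of the iommu_data pattern these two helpers support, assuming a driver-private per-group struct (all names below are hypothetical):

struct my_group_data {
	u32 stream_id;
};

static void my_group_data_release(void *iommu_data)
{
	kfree(iommu_data);		/* called when the group is released */
}

static int my_init_group(struct iommu_group *group)
{
	struct my_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	iommu_group_set_iommudata(group, data, my_group_data_release);
	return 0;
}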
1018 * iommu_group_set_name - set name for a group
1019 * @group: the group
1022 * Allow iommu driver to set a name for a group. When set it will
1023 * appear in a name attribute file under the group in sysfs.
1025 int iommu_group_set_name(struct iommu_group *group, const char *name)
1029 if (group->name) {
1030 iommu_group_remove_file(group, &iommu_group_attr_name);
1031 kfree(group->name);
1032 group->name = NULL;
1037 group->name = kstrdup(name, GFP_KERNEL);
1038 if (!group->name)
1041 ret = iommu_group_create_file(group, &iommu_group_attr_name);
1043 kfree(group->name);
1044 group->name = NULL;
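Usage is a single call; as the body above shows, a second call first removes the old name attribute before installing the new one. A sketch (the name string is arbitrary):

	int ret = iommu_group_set_name(group, "my-smmu-group");

	if (ret)	/* kstrdup() or sysfs attribute creation failed */
		return ret;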
1119 static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
1131 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
1142 ret = sysfs_create_link_nowarn(group->devices_kobj,
1158 trace_add_device_to_group(group->id, dev);
1160 dev_info(dev, "Adding to iommu group %d\n", group->id);
1170 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
1175 * iommu_group_add_device - add a device to an iommu group
1176 * @group: the group into which to add the device (reference should be held)
1180 * group. Adding a device increments the group reference count.
1182 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
1186 gdev = iommu_group_alloc_device(group, dev);
1190 iommu_group_ref_get(group);
1191 dev->iommu_group = group;
1193 mutex_lock(&group->mutex);
1194 list_add_tail(&gdev->list, &group->devices);
1195 mutex_unlock(&group->mutex);
1201 * iommu_group_remove_device - remove a device from its current group
1205 * its current group. This decrements the iommu group reference count.
1209 struct iommu_group *group = dev->iommu_group;
1211 if (!group)
1214 dev_info(dev, "Removing from iommu group %d\n", group->id);
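The two calls are symmetric; a sketch of the pairing, assuming the caller already holds a reference on the group:

	int ret = iommu_group_add_device(group, dev);	/* takes an extra group ref */

	if (ret)
		return ret;

	/* ... device lifetime ... */

	iommu_group_remove_device(dev);		/* drops the ref taken at add time */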
1221 * iommu_group_for_each_dev - iterate over each device in the group
1222 * @group: the group
1226 * This function is called by group users to iterate over group devices.
1227 * Callers should hold a reference count to the group during callback.
1228 * The group->mutex is held across callbacks, which will block calls to
1231 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
1237 mutex_lock(&group->mutex);
1238 for_each_group_device(group, device) {
1243 mutex_unlock(&group->mutex);
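A sketch of the callback pattern: group->mutex is held across the callback (so it must not re-enter APIs that take it), and a non-zero return stops the walk. The helpers below are hypothetical:

static int count_one(struct device *dev, void *data)
{
	(*(unsigned int *)data)++;
	return 0;		/* non-zero would abort the iteration */
}

static unsigned int group_size(struct iommu_group *group)
{
	unsigned int n = 0;

	iommu_group_for_each_dev(group, &n, count_one);
	return n;
}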
1250 * iommu_group_get - Return the group for a device and increment reference
1251 * @dev: get the group that this device belongs to
1253 * This function is called by iommu drivers and users to get the group
1254 * for the specified device. If found, the group is returned and the group
1259 struct iommu_group *group = dev->iommu_group;
1261 if (group)
1262 kobject_get(group->devices_kobj);
1264 return group;
1269 * iommu_group_ref_get - Increment reference on a group
1270 * @group: the group to use, must not be NULL
1273 * existing group. Returns the given group for convenience.
1275 struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
1277 kobject_get(group->devices_kobj);
1278 return group;
1283 * iommu_group_put - Decrement group reference
1284 * @group: the group to use
1287 * iommu group. Once the reference count is zero, the group is released.
1289 void iommu_group_put(struct iommu_group *group)
1291 if (group)
1292 kobject_put(group->devices_kobj);
1481 * matched using the group ID, the PASID valid bit and the PASID
1482 * value. Otherwise only the group ID matches request and
1508 * iommu_group_id - Return ID for a group
1509 * @group: the group to ID
1511 * Return the unique ID for the group matching the sysfs group number.
1513 int iommu_group_id(struct iommu_group *group)
1515 return group->id;
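A sketch of the reference-counting contract formed by iommu_group_get()/iommu_group_ref_get()/iommu_group_put(): every get must be balanced by a put, and the references pin the devices_kobj (and hence the group):

static int inspect_dev_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);	/* +1 ref or NULL */

	if (!group)
		return -ENODEV;

	dev_info(dev, "iommu group %d\n", iommu_group_id(group));

	iommu_group_put(group);			/* balance the get */
	return 0;
}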
1536 * that may already have a group.
1542 struct iommu_group *group;
1553 group = get_pci_alias_group(tmp, devfns);
1554 if (group) {
1556 return group;
1576 struct iommu_group *group;
1581 group = iommu_group_get(&pdev->dev);
1582 if (group)
1583 return group;
1591 group = get_pci_alias_group(tmp, devfns);
1592 if (group) {
1594 return group;
1597 group = get_pci_function_alias_group(tmp, devfns);
1598 if (group) {
1600 return group;
1610 struct iommu_group *group;
1615 * the IOMMU group if we find one along the way.
1622 data->group = iommu_group_get(&pdev->dev);
1624 return data->group != NULL;
1629 * iommu-group per device.
1639 * to find or create an IOMMU group for a device.
1646 struct iommu_group *group = NULL;
1654 * be aliased due to topology in order to have its own IOMMU group.
1656 * group, use it.
1659 return data.group;
1667 * group, use it.
1678 group = iommu_group_get(&pdev->dev);
1679 if (group)
1680 return group;
1685 * device or another device aliases us, use the same group.
1687 group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1688 if (group)
1689 return group;
1696 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1697 if (group)
1698 return group;
1700 /* No shared group found, allocate new */
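Taken together, the fragments above trace the lookup order pci_device_group() uses before falling back to a new allocation. A condensed sketch of that order, reusing the static helpers named above (the DMA-alias walk and the non-isolated upstream bus check are elided):

static struct iommu_group *pci_group_sketch(struct pci_dev *pdev,
					    unsigned long *devfns)
{
	struct iommu_group *group;

	/* 1. A DMA alias of this device may already carry a group
	 *    (walked via pci_for_each_dma_alias(), see data.group above). */

	/* 2. Peer devices that alias us, or that we alias, share a group. */
	group = get_pci_alias_group(pdev, devfns);
	if (group)
		return group;

	/* 3. Multifunction devices without ACS isolation share a group. */
	group = get_pci_function_alias_group(pdev, devfns);
	if (group)
		return group;

	/* 4. No shared group found, allocate new. */
	return iommu_group_alloc();
}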
1705 /* Get the IOMMU group for device on fsl-mc bus */
1709 struct iommu_group *group;
1711 group = iommu_group_get(cont_dev);
1712 if (!group)
1713 group = iommu_group_alloc();
1714 return group;
1733 struct iommu_group *group, int req_type)
1735 if (group->default_domain && group->default_domain->type == req_type)
1736 return group->default_domain;
1745 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
1748 list_first_entry(&group->devices, struct group_device, list)
1752 lockdep_assert_held(&group->mutex);
1755 return __iommu_group_alloc_default_domain(bus, group, req_type);
1758 dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type);
1765 dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA);
1769 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
1770 iommu_def_domain_type, group->name);
1774 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1776 return group->default_domain;
1812 static int iommu_get_default_domain_type(struct iommu_group *group,
1819 lockdep_assert_held(&group->mutex);
1821 for_each_group_device(group, gdev) {
1835 "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1857 struct iommu_group *group, *next;
1865 list_for_each_entry_safe(group, next, &group_list, entry) {
1868 mutex_lock(&group->mutex);
1871 list_del_init(&group->entry);
1875 * that the cross-group default domain type and the setup of the
1878 ret = iommu_setup_default_domain(group, 0);
1880 mutex_unlock(&group->mutex);
1883 mutex_unlock(&group->mutex);
1889 * to take group->mutex, resulting in a deadlock.
1891 for_each_group_device(group, gdev)
1929 * for a group
1930 * @group: Group to query
1933 * msi_device_has_isolated_msi() for devices in a group. However nothing
1937 bool iommu_group_has_isolated_msi(struct iommu_group *group)
1942 mutex_lock(&group->mutex);
1943 for_each_group_device(group, group_dev)
1945 mutex_unlock(&group->mutex);
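A sketch of how a caller such as VFIO gates userspace interrupt safety on this query (the allow_unsafe flag is hypothetical):

static int check_msi_isolation(struct iommu_group *group, bool allow_unsafe)
{
	/* Without isolated MSI, a userspace-owned device could spoof
	 * interrupt messages; refuse unless explicitly overridden. */
	if (!iommu_group_has_isolated_msi(group) && !allow_unsafe)
		return -EPERM;
	return 0;
}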
2020 * Put the group's domain back to the appropriate core-owned domain - either the
2023 static void __iommu_group_set_core_domain(struct iommu_group *group)
2027 if (group->owner)
2028 new_domain = group->blocking_domain;
2030 new_domain = group->default_domain;
2032 __iommu_group_set_domain_nofail(group, new_domain);
2065 struct iommu_group *group;
2068 group = iommu_group_get(dev);
2069 if (!group)
2073 * Lock the group to make sure the device-count doesn't
2076 mutex_lock(&group->mutex);
2078 if (list_count_nodes(&group->devices) != 1)
2081 ret = __iommu_attach_group(domain, group);
2084 mutex_unlock(&group->mutex);
2085 iommu_group_put(group);
2101 struct iommu_group *group;
2103 group = iommu_group_get(dev);
2104 if (!group)
2107 mutex_lock(&group->mutex);
2108 if (WARN_ON(domain != group->domain) ||
2109 WARN_ON(list_count_nodes(&group->devices) != 1))
2111 __iommu_group_set_core_domain(group);
2114 mutex_unlock(&group->mutex);
2115 iommu_group_put(group);
2122 struct iommu_group *group;
2124 group = iommu_group_get(dev);
2125 if (!group)
2128 domain = group->domain;
2130 iommu_group_put(group);
2138 * guarantees that the group and its default domain are valid and correct.
2146 struct iommu_group *group)
2148 if (group->domain && group->domain != group->default_domain &&
2149 group->domain != group->blocking_domain)
2152 return __iommu_group_set_domain(group, domain);
2156 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
2158 * @group: IOMMU group that will be attached
2164 * the group. In this case attaching a different domain to the
2165 * group may succeed.
2167 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2171 mutex_lock(&group->mutex);
2172 ret = __iommu_attach_group(domain, group);
2173 mutex_unlock(&group->mutex);
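A sketch of the attach/detach lifecycle around these calls, assuming dev is a member of the group and the domain comes from iommu_domain_alloc() on its bus:

	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
	int ret;

	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_group(domain, group); /* fails if another domain is attached */
	if (ret) {
		iommu_domain_free(domain);
		return ret;
	}

	/* ... iommu_map()/iommu_unmap() on the domain ... */

	iommu_detach_group(domain, group);	/* return to the core-owned domain */
	iommu_domain_free(domain);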
2180 * iommu_group_replace_domain - replace the domain that a group is attached to
2182 * @group: IOMMU group that will be attached to the new domain
2184 * This API allows the group to switch domains without being forced to go to
2190 int iommu_group_replace_domain(struct iommu_group *group,
2198 mutex_lock(&group->mutex);
2199 ret = __iommu_group_set_domain(group, new_domain);
2200 mutex_unlock(&group->mutex);
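iommu_group_replace_domain() appears to be an internal interface (for consumers such as iommufd) rather than a general driver API; a minimal sketch of the hitless switch it provides, assuming both domains are already configured:

	/* Switch directly from the current domain to new_domain; on failure
	 * the group stays attached to its previous domain (see the comment
	 * at __iommu_group_set_domain_internal() below). */
	ret = iommu_group_replace_domain(group, new_domain);
	if (ret)
		return ret;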
2205 static int __iommu_device_set_domain(struct iommu_group *group,
2221 new_domain == group->blocking_domain)) {
2228 if (new_domain == group->default_domain)
2241 group->blocking_domain &&
2242 group->blocking_domain != new_domain)
2243 __iommu_attach_device(group->blocking_domain, dev);
2250 * If 0 is returned the group's domain is new_domain. If an error is returned
2251 * then the group's domain will be set back to the existing domain unless
2252 * IOMMU_SET_DOMAIN_MUST_SUCCEED was given, in which case the group's
2258 * devices in a group. Ideally we'd have a single device which represents the
2259 * requestor ID of the group, but we also allow IOMMU drivers to create policy
2261 * members, but we wish to group them at a higher level (ex. untrusted
2264 static int __iommu_group_set_domain_internal(struct iommu_group *group,
2273 lockdep_assert_held(&group->mutex);
2275 if (group->domain == new_domain)
2284 for_each_group_device(group, gdev) {
2290 group->domain = NULL;
2298 * either new_domain or group->domain, never something else.
2301 for_each_group_device(group, gdev) {
2302 ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
2307 * Keep trying the other devices in the group. If a
2318 group->domain = new_domain;
2327 for_each_group_device(group, gdev) {
2333 * group->domain as NULL and let release clean everything up.
2335 if (group->domain)
2337 group, gdev->dev, group->domain,
2347 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2349 mutex_lock(&group->mutex);
2350 __iommu_group_set_core_domain(group);
2351 mutex_unlock(&group->mutex);
2935 * iommu_setup_default_domain - Set the default_domain for the group
2936 * @group: Group to change
2939 * Allocate a default domain and set it as the current domain on the group. If
2940 * the group already has a default domain it will be changed to the target_type.
2944 static int iommu_setup_default_domain(struct iommu_group *group,
2947 struct iommu_domain *old_dom = group->default_domain;
2954 lockdep_assert_held(&group->mutex);
2956 req_type = iommu_get_default_domain_type(group, target_type);
2962 * we ignore the failure and leave group->default_domain NULL.
2967 dom = iommu_group_alloc_default_domain(group, req_type);
2970 if (group->default_domain)
2972 group->default_domain = NULL;
2976 if (group->default_domain == dom)
2985 for_each_group_device(group, gdev) {
2995 group->default_domain = dom;
2996 if (!group->domain) {
3001 * in group->default_domain so it is freed after.
3004 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
3008 ret = __iommu_group_set_domain(group, dom);
3020 for_each_group_device(group, gdev) {
3035 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
3039 group->default_domain = old_dom;
3046 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
3050 * group->mutex is used here to guarantee that the device release path
3053 static ssize_t iommu_group_store_type(struct iommu_group *group,
3062 if (WARN_ON(!group) || !group->default_domain)
3076 mutex_lock(&group->mutex);
3079 group->default_domain->type == IOMMU_DOMAIN_DMA) {
3080 ret = iommu_dma_init_fq(group->default_domain);
3084 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
3090 if (list_empty(&group->devices) || group->owner_cnt) {
3095 ret = iommu_setup_default_domain(group, req_type);
3103 * group->mutex, resulting in a deadlock.
3105 mutex_unlock(&group->mutex);
3108 for_each_group_device(group, gdev)
3113 mutex_unlock(&group->mutex);
3117 static bool iommu_is_default_domain(struct iommu_group *group)
3119 if (group->domain == group->default_domain)
3128 if (group->default_domain &&
3129 group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
3130 group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
3145 struct iommu_group *group = iommu_group_get(dev);
3148 if (!group)
3151 mutex_lock(&group->mutex);
3152 if (group->owner_cnt) {
3153 if (group->owner || !iommu_is_default_domain(group) ||
3154 !xa_empty(&group->pasid_array)) {
3160 group->owner_cnt++;
3163 mutex_unlock(&group->mutex);
3164 iommu_group_put(group);
3179 struct iommu_group *group = iommu_group_get(dev);
3181 if (!group)
3184 mutex_lock(&group->mutex);
3185 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
3186 group->owner_cnt--;
3188 mutex_unlock(&group->mutex);
3189 iommu_group_put(group);
3192 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
3195 list_first_entry(&group->devices, struct group_device, list);
3197 if (group->blocking_domain)
3200 group->blocking_domain =
3202 if (!group->blocking_domain) {
3207 group->blocking_domain = __iommu_domain_alloc(
3209 if (!group->blocking_domain)
3215 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner)
3219 if ((group->domain && group->domain != group->default_domain) ||
3220 !xa_empty(&group->pasid_array))
3223 ret = __iommu_group_alloc_blocking_domain(group);
3226 ret = __iommu_group_set_domain(group, group->blocking_domain);
3230 group->owner = owner;
3231 group->owner_cnt++;
3236 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3237 * @group: The group.
3242 * prohibited. Only a single owner may exist for a group.
3244 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
3251 mutex_lock(&group->mutex);
3252 if (group->owner_cnt) {
3257 ret = __iommu_take_dma_ownership(group, owner);
3259 mutex_unlock(&group->mutex);
3270 * Claim the DMA ownership of a device. Multiple devices in the same group may
3276 struct iommu_group *group;
3282 group = iommu_group_get(dev);
3283 if (!group)
3286 mutex_lock(&group->mutex);
3287 if (group->owner_cnt) {
3288 if (group->owner != owner) {
3292 group->owner_cnt++;
3296 ret = __iommu_take_dma_ownership(group, owner);
3298 mutex_unlock(&group->mutex);
3299 iommu_group_put(group);
3305 static void __iommu_release_dma_ownership(struct iommu_group *group)
3307 if (WARN_ON(!group->owner_cnt || !group->owner ||
3308 !xa_empty(&group->pasid_array)))
3311 group->owner_cnt = 0;
3312 group->owner = NULL;
3313 __iommu_group_set_domain_nofail(group, group->default_domain);
3317 * iommu_group_release_dma_owner() - Release DMA ownership of a group
3318 * @group: The group
3322 void iommu_group_release_dma_owner(struct iommu_group *group)
3324 mutex_lock(&group->mutex);
3325 __iommu_release_dma_ownership(group);
3326 mutex_unlock(&group->mutex);
3338 struct iommu_group *group = iommu_group_get(dev);
3340 mutex_lock(&group->mutex);
3341 if (group->owner_cnt > 1)
3342 group->owner_cnt--;
3344 __iommu_release_dma_ownership(group);
3345 mutex_unlock(&group->mutex);
3346 iommu_group_put(group);
3351 * iommu_group_dma_owner_claimed() - Query group dma ownership status
3352 * @group: The group.
3354 * This provides status query on a given group. It is racy and only for
3357 bool iommu_group_dma_owner_claimed(struct iommu_group *group)
3361 mutex_lock(&group->mutex);
3362 user = group->owner_cnt;
3363 mutex_unlock(&group->mutex);
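A sketch of the ownership handshake these helpers implement, roughly what a VFIO-style driver does when handing a group to userspace; the owner cookie is any unique pointer identifying the claimant:

static int take_group_for_user(struct iommu_group *group, void *owner_cookie)
{
	int ret;

	/* Fails if a kernel driver (or another owner) already owns DMA. */
	ret = iommu_group_claim_dma_owner(group, owner_cookie);
	if (ret)
		return ret;

	/* The group now sits in its blocking domain (see
	 * __iommu_take_dma_ownership() above) until a user domain attaches. */
	return 0;
}

static void put_group_for_user(struct iommu_group *group)
{
	iommu_group_release_dma_owner(group);	/* back to the default domain */
}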
3370 struct iommu_group *group, ioasid_t pasid)
3375 for_each_group_device(group, device) {
3384 static void __iommu_remove_group_pasid(struct iommu_group *group,
3390 for_each_group_device(group, device) {
3407 struct iommu_group *group;
3414 group = iommu_group_get(dev);
3415 if (!group)
3418 mutex_lock(&group->mutex);
3419 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
3425 ret = __iommu_set_group_pasid(domain, group, pasid);
3427 __iommu_remove_group_pasid(group, pasid);
3428 xa_erase(&group->pasid_array, pasid);
3431 mutex_unlock(&group->mutex);
3432 iommu_group_put(group);
3450 struct iommu_group *group = iommu_group_get(dev);
3452 mutex_lock(&group->mutex);
3453 __iommu_remove_group_pasid(group, pasid);
3454 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
3455 mutex_unlock(&group->mutex);
3457 iommu_group_put(group);
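Most lines of the surrounding functions don't match "group", but this is the PASID attach/detach path; a sketch of the usage pattern, assuming the iommu_attach_device_pasid()/iommu_detach_device_pasid() entry points that wrap it:

	/* Install domain on PASID `pasid` of dev; recorded in the group's
	 * pasid_array via xa_cmpxchg() as shown above. */
	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret)
		return ret;

	/* ... PASID-tagged DMA / SVA runs here ... */

	iommu_detach_device_pasid(domain, dev, pasid);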
3480 struct iommu_group *group;
3482 group = iommu_group_get(dev);
3483 if (!group)
3486 xa_lock(&group->pasid_array);
3487 domain = xa_load(&group->pasid_array, pasid);
3490 xa_unlock(&group->pasid_array);
3491 iommu_group_put(group);