Lines Matching refs:group

105  * removes the device from the dummy group and cannot be nested.
109 struct iommu_group *group;
112 group = iommu_group_get(dev);
116 * With noiommu enabled, an IOMMU group will be created for a device
121 if (group || !noiommu || iommu_present(dev->bus))
122 return group;
124 group = iommu_group_alloc();
125 if (IS_ERR(group))
128 iommu_group_set_name(group, "vfio-noiommu");
129 iommu_group_set_iommudata(group, &noiommu, NULL);
130 ret = iommu_group_add_device(group, dev);
132 iommu_group_put(group);
137 * Where to taint? At this point we've added an IOMMU group for a
145 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
148 return group;
152 void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
155 if (iommu_group_get_iommudata(group) == &noiommu)
159 iommu_group_put(group);
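
The fragments above are from vfio_iommu_group_get()/vfio_iommu_group_put(), which wrap iommu_group_get()/iommu_group_put() and fabricate a "vfio-noiommu" group when no IOMMU is present. A minimal sketch of how a VFIO bus driver is expected to pair them around probe/remove; my_probe()/my_remove() and the surrounding driver are hypothetical, only the two vfio helpers come from the listing:

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/vfio.h>

/* Illustrative sketch; my_probe()/my_remove() are not from the listing. */
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct iommu_group *group;

	/* Returns the real IOMMU group, or a fake "vfio-noiommu" one. */
	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	/* ... set up and register the vfio device ... */
	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	/* ... unregister the vfio device ... */

	/* Drops the reference; also tears down a fake noiommu group. */
	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
}
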
262 static int vfio_alloc_group_minor(struct vfio_group *group)
264 return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
274 static void vfio_group_get(struct vfio_group *group);
279 * it's freed via kref. Must support container/group/device being
300 static void vfio_group_unlock_and_free(struct vfio_group *group)
305 * that the group is no longer in vfio.group_list.
307 iommu_group_unregister_notifier(group->iommu_group, &group->nb);
308 kfree(group);
316 struct vfio_group *group, *tmp;
320 group = kzalloc(sizeof(*group), GFP_KERNEL);
321 if (!group)
324 kref_init(&group->kref);
325 INIT_LIST_HEAD(&group->device_list);
326 mutex_init(&group->device_lock);
327 INIT_LIST_HEAD(&group->unbound_list);
328 mutex_init(&group->unbound_lock);
329 atomic_set(&group->container_users, 0);
330 atomic_set(&group->opened, 0);
331 init_waitqueue_head(&group->container_q);
332 group->iommu_group = iommu_group;
334 group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
336 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
338 group->nb.notifier_call = vfio_iommu_group_notifier;
344 * do anything unless it can find the group in vfio.group_list, so
347 ret = iommu_group_register_notifier(iommu_group, &group->nb);
349 kfree(group);
355 /* Did we race creating this group? */
359 vfio_group_unlock_and_free(group);
364 minor = vfio_alloc_group_minor(group);
366 vfio_group_unlock_and_free(group);
372 group, "%s%d", group->noiommu ? "noiommu-" : "",
376 vfio_group_unlock_and_free(group);
380 group->minor = minor;
381 group->dev = dev;
383 list_add(&group->vfio_next, &vfio.group_list);
387 return group;
393 struct vfio_group *group = container_of(kref, struct vfio_group, kref);
395 struct iommu_group *iommu_group = group->iommu_group;
397 WARN_ON(!list_empty(&group->device_list));
398 WARN_ON(group->notifier.head);
401 &group->unbound_list, unbound_next) {
406 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
407 list_del(&group->vfio_next);
408 vfio_free_group_minor(group->minor);
409 vfio_group_unlock_and_free(group);
413 static void vfio_group_put(struct vfio_group *group)
415 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
420 struct vfio_group *group;
429 vfio_group_put(do_work->group);
433 static void vfio_group_schedule_put(struct vfio_group *group)
442 do_work->group = group;
446 /* Assume group_lock or group reference is held */
447 static void vfio_group_get(struct vfio_group *group)
449 kref_get(&group->kref);
454 * sure the group pointer is valid under lock and get a reference.
456 static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
458 struct vfio_group *target = group;
461 list_for_each_entry(group, &vfio.group_list, vfio_next) {
462 if (group == target) {
463 vfio_group_get(group);
465 return group;
476 struct vfio_group *group;
479 list_for_each_entry(group, &vfio.group_list, vfio_next) {
480 if (group->iommu_group == iommu_group) {
481 vfio_group_get(group);
483 return group;
493 struct vfio_group *group;
496 group = idr_find(&vfio.group_idr, minor);
497 if (!group) {
501 vfio_group_get(group);
504 return group;
510 struct vfio_group *group;
516 group = vfio_group_get_from_iommu(iommu_group);
519 return group;
525 /* Device reference always implies a group reference */
538 static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
543 mutex_lock(&group->device_lock);
544 list_for_each_entry(device, &group->device_list, group_next) {
546 mutex_unlock(&group->device_lock);
550 mutex_unlock(&group->device_lock);
557 * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
563 * then all of the downstream devices will be part of the same IOMMU group as
587 * A vfio group is viable for use by userspace if all devices are in
596 * group. The second is to test if the device exists on the group
602 struct vfio_group *group = data;
608 mutex_lock(&group->unbound_lock);
609 list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
615 mutex_unlock(&group->unbound_lock);
620 device = vfio_group_get_device(group, dev);
632 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
637 device = vfio_group_get_device(group, dev);
644 if (!atomic_read(&group->container_users))
648 dev_WARN(dev, "Device added to live group %d!\n",
649 iommu_group_id(group->iommu_group));
654 static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
656 /* We don't care what happens when the group isn't in use */
657 if (!atomic_read(&group->container_users))
660 return vfio_dev_viable(dev, group);
666 struct vfio_group *group = container_of(nb, struct vfio_group, nb);
672 * risk racing a group being removed. Ignore spurious notifies.
674 group = vfio_group_try_get(group);
675 if (!group)
680 vfio_group_nb_add_dev(group, dev);
692 dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
693 iommu_group_id(group->iommu_group));
696 dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
697 iommu_group_id(group->iommu_group), dev->driver->name);
698 BUG_ON(vfio_group_nb_verify(group, dev));
701 dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
702 __func__, iommu_group_id(group->iommu_group),
706 dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
707 iommu_group_id(group->iommu_group));
709 * XXX An unbound device in a live group is ok, but we'd
716 mutex_lock(&group->unbound_lock);
718 &group->unbound_list, unbound_next) {
725 mutex_unlock(&group->unbound_lock);
730 * If we're the last reference to the group, the group will be
731 * released, which includes unregistering the iommu group notifier.
736 vfio_group_schedule_put(group);
757 struct vfio_group *group;
763 group = vfio_group_get_from_iommu(iommu_group);
764 if (!group) {
765 group = vfio_create_group(iommu_group);
766 if (IS_ERR(group)) {
768 return PTR_ERR(group);
778 existing_device = vfio_group_get_device(group, device->dev);
780 dev_WARN(device->dev, "Device already exists on group %d\n",
783 vfio_group_put(group);
787 /* Our reference on group is moved to the device */
788 device->group = group;
793 mutex_lock(&group->device_lock);
794 list_add(&device->group_next, &group->device_list);
795 group->dev_counter++;
796 mutex_unlock(&group->device_lock);
834 struct vfio_group *group;
837 group = vfio_group_get_from_dev(dev);
838 if (!group)
841 device = vfio_group_get_device(group, dev);
842 vfio_group_put(group);
848 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
853 mutex_lock(&group->device_lock);
854 list_for_each_entry(it, &group->device_list, group_next) {
872 mutex_unlock(&group->device_lock);
891 struct vfio_group *group = device->group;
898 * When the device is removed from the group, the group suddenly
900 * completes), but it's not present in the group. This is bad news
901 * for any external users that need to re-acquire a group reference
909 mutex_lock(&group->unbound_lock);
910 list_add(&unbound->unbound_next, &group->unbound_list);
911 mutex_unlock(&group->unbound_lock);
938 mutex_lock(&group->device_lock);
940 group->dev_counter--;
941 mutex_unlock(&group->device_lock);
944 * In order to support multiple devices per group, devices can be
945 * plucked from the group while other devices in the group are still
946 * in use. The container persists with this group and those remaining
948 * by binding this device to another driver while the group is still in
950 * or potentially the only, device in the group there can be no other
951 * in-use devices in the group. The user has done their due diligence
953 * we need to make sure the group is detached from the container.
957 if (list_empty(&group->device_list))
958 wait_event(group->container_q, !group->container);
961 vfio_group_put(group);
1037 struct vfio_group *group;
1040 list_for_each_entry(group, &container->group_list, container_next) {
1041 ret = driver->ops->attach_group(data, group->iommu_group);
1049 list_for_each_entry_continue_reverse(group, &container->group_list,
1051 driver->ops->detach_group(data, group->iommu_group);
1067 * the group can be assigned to specific users. Therefore, only by
1068 * adding a group to a container does the user get the privilege of
1252 static void __vfio_group_unset_container(struct vfio_group *group)
1254 struct vfio_container *container = group->container;
1262 group->iommu_group);
1264 group->container = NULL;
1265 wake_up(&group->container_q);
1266 list_del(&group->container_next);
1268 /* Detaching the last group deprivileges a container, remove iommu */
1284 * the group, we know that still exists, therefore the only valid
1287 static int vfio_group_unset_container(struct vfio_group *group)
1289 int users = atomic_cmpxchg(&group->container_users, 1, 0);
1296 __vfio_group_unset_container(group);
1303 * implicitly removes the group from the container. That is, if the
1304 * group file descriptor is closed, as well as any device file descriptors,
1305 * the group is free.
1307 static void vfio_group_try_dissolve_container(struct vfio_group *group)
1309 if (0 == atomic_dec_if_positive(&group->container_users))
1310 __vfio_group_unset_container(group);
1313 static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1320 if (atomic_read(&group->container_users))
1323 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1343 container->noiommu != group->noiommu) {
1351 group->iommu_group);
1356 group->container = container;
1357 container->noiommu = group->noiommu;
1358 list_add(&group->container_next, &container->group_list);
1360 /* Get a reference on the container and mark a user within the group */
1362 atomic_inc(&group->container_users);
1370 static bool vfio_group_viable(struct vfio_group *group)
1372 return (iommu_group_for_each_dev(group->iommu_group,
1373 group, vfio_dev_viable) == 0);
1376 static int vfio_group_add_container_user(struct vfio_group *group)
1378 if (!atomic_inc_not_zero(&group->container_users))
1381 if (group->noiommu) {
1382 atomic_dec(&group->container_users);
1385 if (!group->container->iommu_driver || !vfio_group_viable(group)) {
1386 atomic_dec(&group->container_users);
1395 static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1401 if (0 == atomic_read(&group->container_users) ||
1402 !group->container->iommu_driver || !vfio_group_viable(group))
1405 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1408 device = vfio_device_get_from_name(group, buf);
1446 atomic_inc(&group->container_users);
1450 if (group->noiommu)
1460 struct vfio_group *group = filep->private_data;
1479 if (vfio_group_viable(group))
1482 if (group->container)
1501 ret = vfio_group_set_container(group, fd);
1505 ret = vfio_group_unset_container(group);
1515 ret = vfio_group_get_device_fd(group, buf);
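
The ioctl-handler fragments above (the GET_STATUS viability/container checks, SET_CONTAINER, UNSET_CONTAINER and GET_DEVICE_FD) implement the group side of the documented userspace flow. A hedged userspace sketch of that flow; the group number and device name are examples only:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Illustrative userspace sketch; group 26 and the device name are examples. */
int open_vfio_device(void)
{
	struct vfio_group_status status = { .argsz = sizeof(status) };
	int container, group, device;

	container = open("/dev/vfio/vfio", O_RDWR);
	group = open("/dev/vfio/26", O_RDWR);

	/* Mirrors the vfio_group_viable() check in the handler above. */
	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
		return -1;

	/* vfio_group_set_container(), then the container's SET_IOMMU. */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	/* vfio_group_get_device_fd() resolves the name within the group. */
	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
	return device;
}
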
1526 struct vfio_group *group;
1529 group = vfio_group_get_from_minor(iminor(inode));
1530 if (!group)
1533 if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
1534 vfio_group_put(group);
1538 /* Do we need multiple instances of the group open? Seems not. */
1539 opened = atomic_cmpxchg(&group->opened, 0, 1);
1541 vfio_group_put(group);
1546 if (group->container) {
1547 atomic_dec(&group->opened);
1548 vfio_group_put(group);
1553 if (WARN_ON(group->notifier.head))
1554 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
1556 filep->private_data = group;
1563 struct vfio_group *group = filep->private_data;
1567 vfio_group_try_dissolve_container(group);
1569 atomic_dec(&group->opened);
1571 vfio_group_put(group);
1593 vfio_group_try_dissolve_container(device->group);
1660 * - attaching group(s) to it;
1665 * 2. User space passes a group fd to an external user.
1668 * - the group is initialized;
1672 * the VFIO group from disposal before KVM exits.
1678 * vfio_group_put_external_user() to release the VFIO group.
1683 struct vfio_group *group = filep->private_data;
1689 ret = vfio_group_add_container_user(group);
1693 vfio_group_get(group);
1695 return group;
1703  * - A VFIO group is associated with the device;
1704 * - IOMMU is set for the group.
1706 * increments the container user counter to prevent the VFIO group
1708 * to the VFIO group.
1710 * When the external user finishes using the VFIO group, it calls
1711 * vfio_group_put_external_user() to release the VFIO group and
1715 * Return error PTR or pointer to VFIO group.
1720 struct vfio_group *group;
1723 group = vfio_group_get_from_dev(dev);
1724 if (!group)
1727 ret = vfio_group_add_container_user(group);
1729 vfio_group_put(group);
1733 return group;
1737 void vfio_group_put_external_user(struct vfio_group *group)
1739 vfio_group_try_dissolve_container(group);
1740 vfio_group_put(group);
1747 struct vfio_group *group = filep->private_data;
1749 return (filep->f_op == &vfio_group_fops) && (group == test_group);
1753 int vfio_external_user_iommu_id(struct vfio_group *group)
1755 return iommu_group_id(group->iommu_group);
1759 long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
1761 return vfio_ioctl_check_extension(group->container, arg);
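
The external-user fragments above (vfio_group_get_external_user() and friends) define how another kernel subsystem, e.g. the kvm-vfio device, holds a group while it is in use. A minimal sketch under that assumption; my_attach_group() is hypothetical:

#include <linux/file.h>
#include <linux/vfio.h>

/* Illustrative sketch modelled on kvm-vfio; my_attach_group() is made up. */
static int my_attach_group(int group_fd)
{
	struct fd f = fdget(group_fd);
	struct vfio_group *group;
	int iommu_id;

	if (!f.file)
		return -EBADF;

	/* Succeeds only for a vfio group file whose container is set up. */
	group = vfio_group_get_external_user(f.file);
	fdput(f);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_id = vfio_external_user_iommu_id(group);
	/* ... use iommu_id; keep the group reference while attached ... */

	/* On detach, release the container user and the group reference. */
	vfio_group_put_external_user(group);
	return 0;
}
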
1899 struct vfio_group *group;
1909 group = vfio_group_get_from_dev(dev);
1910 if (!group)
1913 if (group->dev_counter > 1) {
1918 ret = vfio_group_add_container_user(group);
1922 container = group->container;
1926 group->iommu_group, user_pfn,
1931 vfio_group_try_dissolve_container(group);
1934 vfio_group_put(group);
1951 struct vfio_group *group;
1961 group = vfio_group_get_from_dev(dev);
1962 if (!group)
1965 ret = vfio_group_add_container_user(group);
1969 container = group->container;
1977 vfio_group_try_dissolve_container(group);
1980 vfio_group_put(group);
1987 * VFIO group.
1991 * so as to prevent the VFIO group from disposal in the middle of the call.
1992 * But it can keep the reference to the VFIO group for several calls into
1994  * After finishing use of the VFIO group, the caller needs to release the
1995 * VFIO group by calling vfio_group_put_external_user().
1997 * @group [in] : VFIO group
2006 int vfio_group_pin_pages(struct vfio_group *group,
2014 if (!group || !user_iova_pfn || !phys_pfn || !npage)
2017 if (group->dev_counter > 1)
2023 container = group->container;
2027 group->iommu_group, user_iova_pfn,
2037 * Unpin a set of guest IOVA PFNs for a VFIO group.
2041 * so as to prevent the VFIO group from disposal in the middle of the call.
2042 * But it can keep the reference to the VFIO group for several calls into
2044  * After finishing use of the VFIO group, the caller needs to release the
2045 * VFIO group by calling vfio_group_put_external_user().
2047 * @group [in] : vfio group
2054 int vfio_group_unpin_pages(struct vfio_group *group,
2061 if (!group || !user_iova_pfn || !npage)
2067 container = group->container;
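
A minimal sketch of pairing the two calls above, assuming the vfio_group_pin_pages()/vfio_group_unpin_pages() prototypes exported by this kernel and a caller that already holds an external-user group reference (as vfio_ap does); my_pin_one() and the single-PFN case are illustrative:

#include <linux/iommu.h>
#include <linux/vfio.h>

/* Illustrative sketch; my_pin_one() is not from the listing. */
static int my_pin_one(struct vfio_group *group, unsigned long iova_pfn)
{
	unsigned long phys_pfn;
	int ret;

	/* Returns the number of pages pinned, or a negative errno. */
	ret = vfio_group_pin_pages(group, &iova_pfn, 1,
				   IOMMU_READ | IOMMU_WRITE, &phys_pfn);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... program hardware with phys_pfn ... */

	vfio_group_unpin_pages(group, &iova_pfn, 1);
	return 0;
}
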
2092 * so as to prevent the VFIO group from disposal in the middle of the call.
2093 * But it can keep the reference to the VFIO group for several calls into
2095  * After finishing use of the VFIO group, the caller needs to release the
2096 * VFIO group by calling vfio_group_put_external_user().
2098 * @group [in] : VFIO group
2105 int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
2112 if (!group || !data || len <= 0)
2115 container = group->container;
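
A short sketch of the vfio_dma_rw() call whose group/container handling is listed above; my_read_guest(), guest_iova and buf are placeholders:

/* Illustrative sketch only. */
static int my_read_guest(struct vfio_group *group, dma_addr_t guest_iova,
			 void *buf, size_t len)
{
	/* write == false: copy from the guest IOVA range into buf. */
	return vfio_dma_rw(group, guest_iova, buf, len, false);
}
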
2128 static int vfio_register_iommu_notifier(struct vfio_group *group,
2136 ret = vfio_group_add_container_user(group);
2140 container = group->container;
2148 vfio_group_try_dissolve_container(group);
2153 static int vfio_unregister_iommu_notifier(struct vfio_group *group,
2160 ret = vfio_group_add_container_user(group);
2164 container = group->container;
2172 vfio_group_try_dissolve_container(group);
2177 void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
2179 group->kvm = kvm;
2180 blocking_notifier_call_chain(&group->notifier,
2185 static int vfio_register_group_notifier(struct vfio_group *group,
2202 ret = vfio_group_add_container_user(group);
2206 ret = blocking_notifier_chain_register(&group->notifier, nb);
2212 if (!ret && set_kvm && group->kvm)
2213 blocking_notifier_call_chain(&group->notifier,
2214 VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
2216 vfio_group_try_dissolve_container(group);
2221 static int vfio_unregister_group_notifier(struct vfio_group *group,
2226 ret = vfio_group_add_container_user(group);
2230 ret = blocking_notifier_chain_unregister(&group->notifier, nb);
2232 vfio_group_try_dissolve_container(group);
2240 struct vfio_group *group;
2246 group = vfio_group_get_from_dev(dev);
2247 if (!group)
2252 ret = vfio_register_iommu_notifier(group, events, nb);
2255 ret = vfio_register_group_notifier(group, events, nb);
2261 vfio_group_put(group);
2269 struct vfio_group *group;
2275 group = vfio_group_get_from_dev(dev);
2276 if (!group)
2281 ret = vfio_unregister_iommu_notifier(group, nb);
2284 ret = vfio_unregister_group_notifier(group, nb);
2290 vfio_group_put(group);
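
The last two blocks of fragments are the group-handling internals of vfio_register_notifier()/vfio_unregister_notifier(). A hedged sketch of a caller registering for DMA-unmap events; my_dma_unmap_cb() and my_register() are hypothetical, and the type/event constants are assumed to be the VFIO_IOMMU_NOTIFY ones from linux/vfio.h:

#include <linux/notifier.h>
#include <linux/vfio.h>

/* Illustrative sketch; callback and register helper are made up. */
static int my_dma_unmap_cb(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		/* ... unpin anything inside the range going away ... */
		pr_debug("vfio unmap: iova 0x%llx size 0x%llx\n",
			 unmap->iova, unmap->size);
	}
	return NOTIFY_OK;
}

static int my_register(struct device *dev, struct notifier_block *nb)
{
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	nb->notifier_call = my_dma_unmap_cb;
	/* Internally takes and drops a container user reference (above). */
	return vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events, nb);
}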