Lines matching refs:device in drivers/vfio/vfio_main.c (each match is prefixed with its source line number)
15 #include <linux/device.h>
55 MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
60 int vfio_assign_device_set(struct vfio_device *device, void *set_id)
103 device->dev_set = dev_set;
104 list_add_tail(&device->dev_set_list, &dev_set->device_list);
110 static void vfio_release_device_set(struct vfio_device *device)
112 struct vfio_device_set *dev_set = device->dev_set;
118 list_del(&device->dev_set_list);
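The matches above are from vfio_assign_device_set() and vfio_release_device_set(): devices that pass the same set_id pointer share one vfio_device_set, whose lock serializes their open/close paths. A minimal sketch, assuming a hypothetical driver whose sibling functions share reset state (my_vfio_init and the choice of set_id are illustrative only):

#include <linux/vfio.h>

static int my_vfio_init(struct vfio_device *vdev)
{
	/*
	 * set_id is compared only by pointer value; the parent device
	 * is a stable pointer shared by all sibling functions, so they
	 * all land in the same vfio_device_set.
	 */
	return vfio_assign_device_set(vdev, vdev->dev->parent);
}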
146 struct device *dev)
163 void vfio_device_put_registration(struct vfio_device *device)
165 if (refcount_dec_and_test(&device->refcount))
166 complete(&device->comp);
169 bool vfio_device_try_get_registration(struct vfio_device *device)
171 return refcount_inc_not_zero(&device->refcount);
178 static void vfio_device_release(struct device *dev)
180 struct vfio_device *device =
181 container_of(dev, struct vfio_device, device);
183 vfio_release_device_set(device);
184 ida_free(&vfio.device_ida, device->index);
186 if (device->ops->release)
187 device->ops->release(device);
189 kvfree(device);
192 static int vfio_init_device(struct vfio_device *device, struct device *dev,
203 * Driver may provide an @init callback to cover device private data.
207 struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
210 struct vfio_device *device;
216 device = kvzalloc(size, GFP_KERNEL);
217 if (!device)
220 ret = vfio_init_device(device, dev, ops);
223 return device;
226 kvfree(device);
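_vfio_alloc_device() is normally reached through the vfio_alloc_device() wrapper macro, which embeds the core struct vfio_device inside a driver-private structure and runs ops->init on it. A sketch of that embedding pattern, where struct my_vfio_dev and my_ops are hypothetical:

#include <linux/vfio.h>

struct my_vfio_dev {
	struct vfio_device vdev;	/* must be the first member */
	void __iomem *regs;		/* driver-private state */
};

static struct my_vfio_dev *my_alloc(struct device *dev,
				    const struct vfio_device_ops *my_ops)
{
	/* returns ERR_PTR() on failure, like _vfio_alloc_device() */
	return vfio_alloc_device(my_vfio_dev, vdev, dev, my_ops);
}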
234 static int vfio_init_device(struct vfio_device *device, struct device *dev,
245 device->index = ret;
246 init_completion(&device->comp);
247 device->dev = dev;
248 device->ops = ops;
251 ret = ops->init(device);
256 device_initialize(&device->device);
257 device->device.release = vfio_device_release;
258 device->device.class = vfio.device_class;
259 device->device.parent = device->dev;
263 vfio_release_device_set(device);
264 ida_free(&vfio.device_ida, device->index);
268 static int __vfio_register_dev(struct vfio_device *device,
274 (!device->ops->bind_iommufd ||
275 !device->ops->unbind_iommufd ||
276 !device->ops->attach_ioas ||
277 !device->ops->detach_ioas)))
281 * If the driver doesn't specify a set then the device is added to a
284 if (!device->dev_set)
285 vfio_assign_device_set(device, device);
287 ret = dev_set_name(&device->device, "vfio%d", device->index);
291 ret = vfio_device_set_group(device, type);
300 if (type == VFIO_IOMMU && !vfio_device_is_noiommu(device) &&
301 !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) {
306 ret = vfio_device_add(device);
311 refcount_set(&device->refcount, 1);
313 vfio_device_group_register(device);
317 vfio_device_remove_group(device);
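The check at source lines 274-277 means a cdev-capable kernel refuses to register a physical device whose ops lack the iommufd callbacks. A hedged sketch of an ops table that satisfies it, using the stock vfio_iommufd_physical_* helpers; the my_* handlers are hypothetical and are sketched in the other examples in this listing:

static const struct vfio_device_ops my_ops = {
	.name		= "my-vfio-driver",
	.init		= my_vfio_init,
	.open_device	= my_open_device,
	.close_device	= my_close_device,
	/* required for the device cdev (iommufd) path: */
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
	.detach_ioas	= vfio_iommufd_physical_detach_ioas,
};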
321 int vfio_register_group_dev(struct vfio_device *device)
323 return __vfio_register_dev(device, VFIO_IOMMU);
328 * Register a virtual device without IOMMU backing. The user of this
329 * device must not be able to directly trigger unmediated DMA.
331 int vfio_register_emulated_iommu_dev(struct vfio_device *device)
333 return __vfio_register_dev(device, VFIO_EMULATED_IOMMU);
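A hedged probe sketch tying allocation and registration together for an emulated-IOMMU (mdev-style) device; my_mdev_probe and my_mdev_ops are hypothetical names:

#include <linux/mdev.h>
#include <linux/vfio.h>

static int my_mdev_probe(struct mdev_device *mdev)
{
	struct my_vfio_dev *mvdev;
	int ret;

	mvdev = vfio_alloc_device(my_vfio_dev, vdev, &mdev->dev,
				  &my_mdev_ops);
	if (IS_ERR(mvdev))
		return PTR_ERR(mvdev);

	ret = vfio_register_emulated_iommu_dev(&mvdev->vdev);
	if (ret) {
		vfio_put_device(&mvdev->vdev);
		return ret;
	}
	dev_set_drvdata(&mdev->dev, mvdev);
	return 0;
}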
338 * Decrement the device reference count and wait for the device to be
339 * removed. Open file descriptors for the device... */
340 void vfio_unregister_group_dev(struct vfio_device *device)
347 * Prevent new device opened by userspace via the
350 vfio_device_group_unregister(device);
354 * new device opened by userspace in the cdev path.
356 vfio_device_del(device);
358 vfio_device_put_registration(device);
359 rc = try_wait_for_completion(&device->comp);
361 if (device->ops->request)
362 device->ops->request(device, i++);
365 rc = wait_for_completion_timeout(&device->comp,
369 &device->comp, HZ * 10);
372 dev_warn(device->dev,
375 "blocked until device is released",
382 vfio_device_remove_group(device);
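vfio_unregister_group_dev() blocks on device->comp until every open file descriptor is released, invoking the driver's ->request op periodically to ask users to let go. The matching removal sketch (names hypothetical, continuing the probe example above):

static void my_mdev_remove(struct mdev_device *mdev)
{
	struct my_vfio_dev *mvdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&mvdev->vdev);	/* waits for users */
	vfio_put_device(&mvdev->vdev);			/* drops last ref */
}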
387 void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
393 lockdep_assert_held(&device->dev_set->lock);
415 device->put_kvm = pfn;
416 device->kvm = kvm;
419 void vfio_device_put_kvm(struct vfio_device *device)
421 lockdep_assert_held(&device->dev_set->lock);
423 if (!device->kvm)
426 if (WARN_ON(!device->put_kvm))
429 device->put_kvm(device->kvm);
430 device->put_kvm = NULL;
434 device->kvm = NULL;
439 static bool vfio_assert_device_open(struct vfio_device *device)
441 return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
445 vfio_allocate_device_file(struct vfio_device *device)
453 df->device = device;
461 struct vfio_device *device = df->device;
465 lockdep_assert_held(&device->dev_set->lock);
467 if (!try_module_get(device->dev->driver->owner))
473 ret = vfio_device_group_use_iommu(device);
477 if (device->ops->open_device) {
478 ret = device->ops->open_device(device);
488 vfio_device_group_unuse_iommu(device);
490 module_put(device->dev->driver->owner);
496 struct vfio_device *device = df->device;
499 lockdep_assert_held(&device->dev_set->lock);
501 if (device->ops->close_device)
502 device->ops->close_device(device);
506 vfio_device_group_unuse_iommu(device);
507 module_put(device->dev->driver->owner);
512 struct vfio_device *device = df->device;
515 lockdep_assert_held(&device->dev_set->lock);
518 * Only the group path allows the device to be opened multiple
519 * times. The device cdev path doesn't have a secure way for it.
521 if (device->open_count != 0 && !df->group)
524 device->open_count++;
525 if (device->open_count == 1) {
528 device->open_count--;
536 struct vfio_device *device = df->device;
538 lockdep_assert_held(&device->dev_set->lock);
540 vfio_assert_device_open(device);
541 if (device->open_count == 1)
543 device->open_count--;
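As the refcount logic above shows, ops->open_device runs only on the 0 -> 1 transition of open_count and ops->close_device only on 1 -> 0, both under dev_set->lock. A sketch of the matching driver callbacks; MY_CTRL and the register writes are hypothetical:

#include <linux/io.h>

#define MY_CTRL	0x0	/* hypothetical control register */

static int my_open_device(struct vfio_device *vdev)
{
	struct my_vfio_dev *mvdev =
		container_of(vdev, struct my_vfio_dev, vdev);

	writel(1, mvdev->regs + MY_CTRL);	/* first open: power up */
	return 0;
}

static void my_close_device(struct vfio_device *vdev)
{
	struct my_vfio_dev *mvdev =
		container_of(vdev, struct my_vfio_dev, vdev);

	writel(0, mvdev->regs + MY_CTRL);	/* last close: quiesce */
}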
550 static inline int vfio_device_pm_runtime_get(struct vfio_device *device)
552 struct device *dev = device->dev;
571 static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
573 struct device *dev = device->dev;
585 struct vfio_device *device = df->device;
592 vfio_device_put_registration(device);
601 * @cur_fsm - The current state the device is in
614 int vfio_mig_get_next_state(struct vfio_device *device,
786 (state_flags_table[cur_fsm] & device->migration_flags) !=
791 (state_flags_table[new_fsm] & device->migration_flags) !=
801 while ((state_flags_table[*next_fsm] & device->migration_flags) !=
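vfio_mig_get_next_state() computes one legal FSM arc at a time, so a driver's migration_set_state op typically loops until it reaches the requested state. A minimal sketch, assuming a hypothetical mig_state field in my_vfio_dev and a my_mig_step() helper:

static struct file *
my_set_state(struct vfio_device *vdev, enum vfio_device_mig_state new_state)
{
	struct my_vfio_dev *mvdev =
		container_of(vdev, struct my_vfio_dev, vdev);
	enum vfio_device_mig_state next;
	int ret;

	while (mvdev->mig_state != new_state) {
		ret = vfio_mig_get_next_state(vdev, mvdev->mig_state,
					      new_state, &next);
		if (ret)
			return ERR_PTR(ret);

		ret = my_mig_step(mvdev, next);	/* one-arc transition */
		if (ret)
			return ERR_PTR(ret);
		mvdev->mig_state = next;
	}
	return NULL;	/* arcs that produce a data FD would return it */
}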
840 vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
850 if (!device->mig_ops)
866 ret = device->mig_ops->migration_get_state(device,
875 filp = device->mig_ops->migration_set_state(device, mig.device_state);
890 vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device,
898 if (!device->mig_ops)
906 ret = device->mig_ops->migration_get_data_size(device, &stop_copy_length);
917 static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
922 .flags = device->migration_flags,
926 if (!device->mig_ops)
990 vfio_ioctl_device_feature_logging_start(struct vfio_device *device,
1006 if (!device->log_ops)
1059 ret = device->log_ops->log_start(device, &root, nnodes,
1066 device->log_ops->log_stop(device);
1075 vfio_ioctl_device_feature_logging_stop(struct vfio_device *device,
1081 if (!device->log_ops)
1089 return device->log_ops->log_stop(device);
1096 struct vfio_device *device = opaque;
1098 return device->log_ops->log_read_and_clear(device, iova, length, iter);
1102 vfio_ioctl_device_feature_logging_report(struct vfio_device *device,
1114 if (!device->log_ops)
1139 ret = iova_bitmap_for_each(iter, device,
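The three logging ioctls above dispatch through device->log_ops, which a driver that implements dirty-page tracking fills in, typically from its init callback. A hedged sketch (field names per struct vfio_log_ops in <linux/vfio.h>; the my_log_* handlers are hypothetical):

static const struct vfio_log_ops my_log_ops = {
	.log_start		= my_log_start,
	.log_stop		= my_log_stop,
	.log_read_and_clear	= my_log_read_and_clear,
};

static int my_init_logging(struct vfio_device *vdev)
{
	vdev->log_ops = &my_log_ops;
	return 0;
}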
1146 static int vfio_ioctl_device_feature(struct vfio_device *device,
1173 device, feature.flags, arg->data,
1177 device, feature.flags, arg->data,
1181 device, feature.flags, arg->data,
1185 device, feature.flags, arg->data,
1189 device, feature.flags, arg->data,
1193 device, feature.flags, arg->data,
1196 if (unlikely(!device->ops->device_feature))
1198 return device->ops->device_feature(device, feature.flags,
1208 struct vfio_device *device = df->device;
1219 ret = vfio_device_pm_runtime_get(device);
1238 ret = vfio_ioctl_device_feature(device, uptr);
1242 if (unlikely(!device->ops->ioctl))
1245 ret = device->ops->ioctl(device, cmd, arg);
1249 vfio_device_pm_runtime_put(device);
1257 struct vfio_device *device = df->device;
1263 if (unlikely(!device->ops->read))
1266 return device->ops->read(device, buf, count, ppos);
1274 struct vfio_device *device = df->device;
1280 if (unlikely(!device->ops->write))
1283 return device->ops->write(device, buf, count, ppos);
1289 struct vfio_device *device = df->device;
1295 if (unlikely(!device->ops->mmap))
1298 return device->ops->mmap(device, vma);
1318 return df->device;
1323 * @file: VFIO group file or VFIO device file
1335 * @file: VFIO group file or VFIO device file
1343 struct vfio_device *device;
1350 device = vfio_device_from_file(file);
1351 if (device)
1352 return device_iommu_capable(device->dev,
1366 * iommufd successfully in the vfio device cdev path.
1375 * @file: VFIO group file or VFIO device file
1378 * When a VFIO device is first opened the KVM will be available in
1379 * device->kvm if one was associated with the file.
1522 * @device [in] : device
1533 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
1536 /* group->container cannot change while a vfio device is open */
1537 if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device)))
1539 if (!device->ops->dma_unmap)
1541 if (vfio_device_has_container(device))
1542 return vfio_device_container_pin_pages(device, iova,
1544 if (device->iommufd_access) {
1556 device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE),
1569 * @device [in] : device
1574 void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
1576 if (WARN_ON(!vfio_assert_device_open(device)))
1578 if (WARN_ON(!device->ops->dma_unmap))
1581 if (vfio_device_has_container(device)) {
1582 vfio_device_container_unpin_pages(device, iova, npage);
1585 if (device->iommufd_access) {
1588 iommufd_access_unpin_pages(device->iommufd_access,
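A usage sketch for the pin/unpin pair: pin one page of the device's IOVA space for software-mediated access, touch it, and release it. vfio_pin_pages() returns the number of pages pinned; the helper name is hypothetical:

#include <linux/highmem.h>
#include <linux/iommu.h>

static int my_peek_guest_page(struct vfio_device *vdev, dma_addr_t iova)
{
	struct page *page;
	void *va;
	int ret;

	ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	va = kmap_local_page(page);
	/* inspect or update the mediated structure at 'va' here */
	kunmap_local(va);

	vfio_unpin_pages(vdev, iova, 1);
	return 0;
}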
1598 * behalf of the device.
1604 * not a real device DMA, it is not necessary to pin the user space memory.
1606 * @device [in] : VFIO device
1613 int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
1616 if (!data || len <= 0 || !vfio_assert_device_open(device))
1619 if (vfio_device_has_container(device))
1620 return vfio_device_container_dma_rw(device, iova,
1623 if (device->iommufd_access) {
1634 return iommufd_access_rw(device->iommufd_access, iova, data,
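For a one-off copy, vfio_dma_rw() avoids pinning altogether. A sketch of reading a 32-bit descriptor field from guest IOVA space (the final argument selects write vs. read; the helper name is hypothetical):

static int my_read_desc_field(struct vfio_device *vdev, dma_addr_t iova,
			      u32 *val)
{
	return vfio_dma_rw(vdev, iova, val, sizeof(*val), false);
}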