Lines matching refs:private (each match is shown with its line number in the source file)
21 static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
27 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
28 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
29 if (private->state == VFIO_CCW_STATE_NOT_OPER)
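The matches up to line 29 fall in vfio_ccw_mdev_reset(). The listing as a whole appears to come from the mdev ops file of the s390 vfio-ccw driver (drivers/s390/cio/vfio_ccw_ops.c in mainline); the sketches interleaved below reconstruct the likely context around each group of matches, so any identifier, flag, or return value not visible in the listing itself is an assumption rather than a quotation. A minimal sketch of the reset helper:

static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
        /*
         * Let the FSM close and then reopen the subchannel; if the
         * device does not come back operational, report the reset as
         * failed.  (Return values assumed.)
         */
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
        if (private->state == VFIO_CCW_STATE_NOT_OPER)
                return -EINVAL;

        return 0;
}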
37 struct vfio_ccw_private *private =
41 if (!cp_iova_pinned(&private->cp, iova, length))
44 vfio_ccw_mdev_reset(private);
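Lines 37-44 look like the driver's dma_unmap callback (vfio_ccw_dma_unmap() in mainline): if the invalidated IOVA range overlaps pages pinned for the current channel program, the device is reset so the pins are dropped. Sketch, with the signature assumed:

static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
        struct vfio_ccw_private *private =
                container_of(vdev, struct vfio_ccw_private, vdev);

        /* Nothing to do unless the unmapped range touches pinned pages. */
        if (!cp_iova_pinned(&private->cp, iova, length))
                return;

        /* Resetting the device drops the channel program's pins. */
        vfio_ccw_mdev_reset(private);
}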
49 struct vfio_ccw_private *private =
52 mutex_init(&private->io_mutex);
53 private->state = VFIO_CCW_STATE_STANDBY;
54 INIT_LIST_HEAD(&private->crw);
55 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
56 INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
58 private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
60 if (!private->cp.guest_cp)
63 private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
65 if (!private->io_region)
68 private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
70 if (!private->cmd_region)
73 private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
75 if (!private->schib_region)
78 private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
80 if (!private->crw_region)
86 kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
88 kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
90 kmem_cache_free(vfio_ccw_io_region, private->io_region);
92 kfree(private->cp.guest_cp);
94 mutex_destroy(&private->io_mutex);
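Lines 49-94 are the init_dev callback (vfio_ccw_mdev_init_dev() in mainline): initialize the mutex, list and work items, then allocate the guest channel-program array and the four I/O regions, unwinding in reverse order on failure. A sketch of that structure; the GFP flags, label names and error code are assumptions, and only the calls on the matched lines are taken from the listing:

static int vfio_ccw_mdev_init_dev(struct vfio_device *vdev)
{
        struct vfio_ccw_private *private =
                container_of(vdev, struct vfio_ccw_private, vdev);

        mutex_init(&private->io_mutex);
        private->state = VFIO_CCW_STATE_STANDBY;
        INIT_LIST_HEAD(&private->crw);
        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);

        private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
                                       GFP_KERNEL);
        if (!private->cp.guest_cp)
                goto out_free_private;

        private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
                                               GFP_KERNEL | GFP_DMA);
        if (!private->io_region)
                goto out_free_cp;

        private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->cmd_region)
                goto out_free_io;

        private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
                                                  GFP_KERNEL | GFP_DMA);
        if (!private->schib_region)
                goto out_free_cmd;

        private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->crw_region)
                goto out_free_schib;

        return 0;

        /* Unwind in reverse order of allocation. */
out_free_schib:
        kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
out_free_cmd:
        kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
out_free_io:
        kmem_cache_free(vfio_ccw_io_region, private->io_region);
out_free_cp:
        kfree(private->cp.guest_cp);
out_free_private:
        mutex_destroy(&private->io_mutex);
        return -ENOMEM;
}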
102 struct vfio_ccw_private *private;
105 private = vfio_alloc_device(vfio_ccw_private, vdev, &mdev->dev,
107 if (IS_ERR(private))
108 return PTR_ERR(private);
110 dev_set_drvdata(&parent->dev, private);
117 ret = vfio_register_emulated_iommu_dev(&private->vdev);
120 dev_set_drvdata(&mdev->dev, private);
125 vfio_put_device(&private->vdev);
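Lines 102-125 are the mdev probe path (vfio_ccw_mdev_probe() in mainline): allocate the private structure that embeds the vfio_device, publish it as driver data of the parent, register it as an emulated-IOMMU vfio device, then publish it on the mdev as well. How the parent is looked up, the ops table name and the error unwind are not in the listing and are assumptions:

static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
{
        /* Assumed: the vfio_ccw_parent hangs off the subchannel's drvdata. */
        struct subchannel *sch = to_subchannel(mdev->dev.parent);
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private;
        int ret;

        private = vfio_alloc_device(vfio_ccw_private, vdev, &mdev->dev,
                                    &vfio_ccw_dev_ops);
        if (IS_ERR(private))
                return PTR_ERR(private);

        dev_set_drvdata(&parent->dev, private);

        ret = vfio_register_emulated_iommu_dev(&private->vdev);
        if (ret)
                goto err_put;

        dev_set_drvdata(&mdev->dev, private);
        return 0;

err_put:
        dev_set_drvdata(&parent->dev, NULL);
        vfio_put_device(&private->vdev);
        return ret;
}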
131 struct vfio_ccw_private *private =
135 list_for_each_entry_safe(crw, temp, &private->crw, next) {
140 kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
141 kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
142 kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
143 kmem_cache_free(vfio_ccw_io_region, private->io_region);
144 kfree(private->cp.guest_cp);
145 mutex_destroy(&private->io_mutex);
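Lines 131-145 are the release_dev callback (vfio_ccw_mdev_release_dev() in mainline), the teardown counterpart of init_dev: drop any queued CRWs, free the four regions and the guest channel-program array, and destroy the mutex. Sketch, with the body of the list loop assumed:

static void vfio_ccw_mdev_release_dev(struct vfio_device *vdev)
{
        struct vfio_ccw_private *private =
                container_of(vdev, struct vfio_ccw_private, vdev);
        struct vfio_ccw_crw *crw, *temp;

        /* Free CRWs that were queued for the guest but never consumed. */
        list_for_each_entry_safe(crw, temp, &private->crw, next) {
                list_del(&crw->next);
                kfree(crw);
        }

        kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
        kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
        kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
        kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private->cp.guest_cp);
        mutex_destroy(&private->io_mutex);
}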
152 struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
159 vfio_unregister_group_dev(&private->vdev);
162 vfio_put_device(&private->vdev);
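Lines 152-162 are the mdev remove path (vfio_ccw_mdev_remove() in mainline): fetch the private structure from the parent's driver data, unregister the vfio group device, and drop the last reference. The parent lookup and the drvdata clearing are assumptions:

static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
        /* Assumed: same parent lookup as in the probe sketch above. */
        struct subchannel *sch = to_subchannel(mdev->dev.parent);
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);

        vfio_unregister_group_dev(&private->vdev);

        dev_set_drvdata(&parent->dev, NULL);
        vfio_put_device(&private->vdev);
}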
167 struct vfio_ccw_private *private =
172 if (private->state == VFIO_CCW_STATE_NOT_OPER)
175 ret = vfio_ccw_register_async_dev_regions(private);
179 ret = vfio_ccw_register_schib_dev_regions(private);
183 ret = vfio_ccw_register_crw_dev_regions(private);
187 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
188 if (private->state == VFIO_CCW_STATE_NOT_OPER) {
196 vfio_ccw_unregister_dev_regions(private);
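Lines 167-196 are the open_device callback (vfio_ccw_mdev_open_device() in mainline): refuse to open a non-operational device, register the async, schib and crw capability regions, then let the FSM open the subchannel, unregistering the regions again if anything fails. Sketch; return codes and the goto label are assumptions:

static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
        struct vfio_ccw_private *private =
                container_of(vdev, struct vfio_ccw_private, vdev);
        int ret;

        /* A device that is not operational cannot be opened again. */
        if (private->state == VFIO_CCW_STATE_NOT_OPER)
                return -EINVAL;

        ret = vfio_ccw_register_async_dev_regions(private);
        if (ret)
                return ret;

        ret = vfio_ccw_register_schib_dev_regions(private);
        if (ret)
                goto out_unregister;

        ret = vfio_ccw_register_crw_dev_regions(private);
        if (ret)
                goto out_unregister;

        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
        if (private->state == VFIO_CCW_STATE_NOT_OPER) {
                ret = -EINVAL;
                goto out_unregister;
        }

        return ret;

out_unregister:
        vfio_ccw_unregister_dev_regions(private);
        return ret;
}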
202 struct vfio_ccw_private *private =
205 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
206 vfio_ccw_unregister_dev_regions(private);
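Lines 202-206 are its counterpart, the close_device callback (vfio_ccw_mdev_close_device() in mainline); only the signature is assumed:

static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
{
        struct vfio_ccw_private *private =
                container_of(vdev, struct vfio_ccw_private, vdev);

        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
        vfio_ccw_unregister_dev_regions(private);
}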
209 static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
220 mutex_lock(&private->io_mutex);
221 region = private->io_region;
226 mutex_unlock(&private->io_mutex);
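Lines 209-226 are vfio_ccw_mdev_read_io_region(), which copies the I/O region out to userspace under io_mutex. A sketch; the bounds check, offset masking and copy_to_user() handling are assumptions:

static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
                                            char __user *buf, size_t count,
                                            loff_t *ppos)
{
        loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
        struct ccw_io_region *region;
        int ret;

        if (pos + count > sizeof(*region))
                return -EINVAL;

        mutex_lock(&private->io_mutex);
        region = private->io_region;
        if (copy_to_user(buf, (void *)region + pos, count))
                ret = -EFAULT;
        else
                ret = count;
        mutex_unlock(&private->io_mutex);
        return ret;
}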
235 struct vfio_ccw_private *private =
239 if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
244 return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
247 return private->region[index].ops->read(private, buf, count,
254 static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
265 if (!mutex_trylock(&private->io_mutex))
268 region = private->io_region;
274 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
278 mutex_unlock(&private->io_mutex);
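Lines 254-278 are vfio_ccw_mdev_write_io_region(): copy the request in, kick the FSM with an I/O request event, and return either the region's return code or the byte count. Unlike the read side it only trylocks io_mutex, so a second writer cannot block behind an in-flight request. Sketch; everything outside the matched lines is an assumption:

static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
                                             const char __user *buf,
                                             size_t count, loff_t *ppos)
{
        loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
        struct ccw_io_region *region;
        int ret;

        if (pos + count > sizeof(*region))
                return -EINVAL;

        if (!mutex_trylock(&private->io_mutex))
                return -EAGAIN;

        region = private->io_region;
        if (copy_from_user((void *)region + pos, buf, count)) {
                ret = -EFAULT;
                goto out_unlock;
        }

        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
        ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
        mutex_unlock(&private->io_mutex);
        return ret;
}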
287 struct vfio_ccw_private *private =
291 if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
296 return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
299 return private->region[index].ops->write(private, buf, count,
306 static int vfio_ccw_mdev_get_device_info(struct vfio_ccw_private *private,
310 info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
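Line 310 is the only match inside vfio_ccw_mdev_get_device_info(); the region count it reports is the fixed VFIO_CCW_NUM_REGIONS plus however many optional regions were registered at open time. The flags and IRQ count in this sketch are assumptions:

static int vfio_ccw_mdev_get_device_info(struct vfio_ccw_private *private,
                                         struct vfio_device_info *info)
{
        info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
        info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
        info->num_irqs = VFIO_CCW_NUM_IRQS;

        return 0;
}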
316 static int vfio_ccw_mdev_get_region_info(struct vfio_ccw_private *private,
338 VFIO_CCW_NUM_REGIONS + private->num_regions)
343 private->num_regions);
348 info->size = private->region[i].size;
349 info->flags = private->region[i].flags;
351 cap_type.type = private->region[i].type;
352 cap_type.subtype = private->region[i].subtype;
396 static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
408 ctx = &private->io_trigger;
411 ctx = &private->crw_trigger;
414 ctx = &private->req_trigger;
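Lines 408-414 show vfio_ccw_mdev_set_irqs() choosing which eventfd context an IRQ index maps to: the I/O trigger, the CRW trigger, or the device-request trigger. A compressed sketch of that selection plus the eventfd wiring; the VFIO_IRQ_SET_DATA_NONE/BOOL branches of the real handler are omitted and everything outside the matched lines is assumed:

static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
                                  uint32_t flags, uint32_t index,
                                  void __user *data)
{
        struct eventfd_ctx **ctx;

        if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
                return -EINVAL;

        /* Pick the eventfd context that backs the requested IRQ index. */
        switch (index) {
        case VFIO_CCW_IO_IRQ_INDEX:
                ctx = &private->io_trigger;
                break;
        case VFIO_CCW_CRW_IRQ_INDEX:
                ctx = &private->crw_trigger;
                break;
        case VFIO_CCW_REQ_IRQ_INDEX:
                ctx = &private->req_trigger;
                break;
        default:
                return -EINVAL;
        }

        /* Only the eventfd data type is handled in this sketch. */
        if ((flags & VFIO_IRQ_SET_DATA_TYPE_MASK) == VFIO_IRQ_SET_DATA_EVENTFD) {
                int32_t fd;

                if (get_user(fd, (int32_t __user *)data))
                        return -EFAULT;

                /* Tear down any previously registered eventfd. */
                if (*ctx)
                        eventfd_ctx_put(*ctx);
                *ctx = NULL;

                if (fd == -1)
                        return 0;

                *ctx = eventfd_ctx_fdget(fd);
                if (IS_ERR(*ctx)) {
                        int err = PTR_ERR(*ctx);

                        *ctx = NULL;
                        return err;
                }
                return 0;
        }

        return -EINVAL;
}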
470 int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
477 region = krealloc(private->region,
478 (private->num_regions + 1) * sizeof(*region),
483 private->region = region;
484 private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
485 private->region[private->num_regions].subtype = subtype;
486 private->region[private->num_regions].ops = ops;
487 private->region[private->num_regions].size = size;
488 private->region[private->num_regions].flags = flags;
489 private->region[private->num_regions].data = data;
491 private->num_regions++;
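Lines 470-491 are vfio_ccw_register_dev_region(), which grows the region array by one with krealloc() and fills in the new slot; all optional regions share VFIO_REGION_TYPE_CCW and differ only by subtype. Sketch; the parameter list and error code are assumptions:

int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
                                 unsigned int subtype,
                                 const struct vfio_ccw_regops *ops,
                                 size_t size, u32 flags, void *data)
{
        struct vfio_ccw_region *region;

        region = krealloc(private->region,
                          (private->num_regions + 1) * sizeof(*region),
                          GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        private->region = region;
        private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
        private->region[private->num_regions].subtype = subtype;
        private->region[private->num_regions].ops = ops;
        private->region[private->num_regions].size = size;
        private->region[private->num_regions].flags = flags;
        private->region[private->num_regions].data = data;

        private->num_regions++;

        return 0;
}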
496 void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
500 for (i = 0; i < private->num_regions; i++)
501 private->region[i].ops->release(private, &private->region[i]);
502 private->num_regions = 0;
503 kfree(private->region);
504 private->region = NULL;
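Lines 496-504 are the matching vfio_ccw_unregister_dev_regions(): call each region's release op, then free and reset the array. Only the loop variable declaration is added here:

void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
{
        int i;

        for (i = 0; i < private->num_regions; i++)
                private->region[i].ops->release(private, &private->region[i]);
        private->num_regions = 0;
        kfree(private->region);
        private->region = NULL;
}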
511 struct vfio_ccw_private *private =
529 ret = vfio_ccw_mdev_get_device_info(private, &info);
547 ret = vfio_ccw_mdev_get_region_info(private, &info, arg);
592 return vfio_ccw_mdev_set_irqs(private, hdr.flags, hdr.index,
596 return vfio_ccw_mdev_reset(private);
605 struct vfio_ccw_private *private =
609 if (private->req_trigger) {
615 eventfd_signal(private->req_trigger, 1);
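The last three matches (605-615) are the device-request callback (vfio_ccw_mdev_request() in mainline): if userspace registered a request eventfd, signal it so the device can be released gracefully. The rate-limited notice and the no-eventfd branch in this sketch are assumptions:

static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
{
        struct vfio_ccw_private *private =
                container_of(vdev, struct vfio_ccw_private, vdev);
        struct device *dev = vdev->dev;

        if (private->req_trigger) {
                if (!(count % 10))
                        dev_notice_ratelimited(dev,
                                               "Relaying device request to user (#%u)\n",
                                               count);

                eventfd_signal(private->req_trigger, 1);
        } else if (count == 0) {
                dev_notice(dev,
                           "No device request channel registered, blocked until released by user\n");
        }
}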