Lines Matching refs:vgpu

66 	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
68 void (*release)(struct intel_vgpu *vgpu,
93 struct intel_vgpu *vgpu;
102 struct intel_vgpu *vgpu;
112 struct intel_vgpu *vgpu;
137 static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
139 return intel_vgpu_vdev(vgpu);
151 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
154 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
155 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
171 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
174 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
217 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
221 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
224 struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
228 ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
237 gvt_unpin_guest_page(vgpu, gfn, size);
244 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
247 struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
250 gvt_unpin_guest_page(vgpu, gfn, size);
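
The gvt_dma_map_page()/gvt_dma_unmap_page() references above follow a pin-then-map pattern: the guest page is pinned first, the device mapping is created second, and the pin is released again if the mapping step fails. A minimal sketch of that unwind shape is below; vgpu_ctx, pin_guest_page, map_for_device and unpin_guest_page are hypothetical stand-ins for the driver's helpers, not the kernel API.

	struct vgpu_ctx;                          /* opaque vGPU handle (stand-in) */

	/* Assumed helpers, analogous to the pin and DMA-map steps above. */
	int pin_guest_page(struct vgpu_ctx *v, unsigned long gfn,
			   unsigned long size, void **page);
	int map_for_device(struct vgpu_ctx *v, void *page,
			   unsigned long size, unsigned long long *dma_addr);
	void unpin_guest_page(struct vgpu_ctx *v, unsigned long gfn,
			      unsigned long size);

	/* Pin first, then map; on a mapping failure the pin must be undone
	 * so no guest page reference is leaked. */
	int dma_map_guest_page(struct vgpu_ctx *v, unsigned long gfn,
			       unsigned long size, unsigned long long *dma_addr)
	{
		void *page;
		int ret;

		ret = pin_guest_page(v, gfn, size, &page);
		if (ret)
			return ret;

		ret = map_for_device(v, page, size, dma_addr);
		if (ret)
			unpin_guest_page(v, gfn, size);   /* unwind on error */

		return ret;
	}
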
253 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
256 struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
272 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
274 struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
290 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
295 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
301 new->vgpu = vgpu;
340 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
343 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
351 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
355 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
365 gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
366 __gvt_cache_remove_entry(vgpu, dma);
371 static void gvt_cache_init(struct intel_vgpu *vgpu)
373 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
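
The cache references above (__gvt_cache_find_dma_addr, __gvt_cache_find_gfn, __gvt_cache_add) keep each mapping in two trees, one keyed by guest frame number and one keyed by DMA address, so both directions of the translation are a logarithmic lookup. A simplified standalone sketch of the gfn-keyed lookup follows, using a plain binary search tree instead of the kernel's rb_node; dma_cache_entry and cache_find_gfn are illustrative names.

	#include <stddef.h>

	/* Simplified stand-in for struct gvt_dma: one entry per mapping,
	 * indexed both by guest frame number and by DMA address. */
	struct dma_cache_entry {
		unsigned long gfn;
		unsigned long long dma_addr;
		unsigned long size;
		struct dma_cache_entry *left, *right;  /* gfn-keyed tree links */
	};

	/* Same walk shape as __gvt_cache_find_gfn(): descend left or right
	 * on key comparison, return the node on an exact match. */
	struct dma_cache_entry *cache_find_gfn(struct dma_cache_entry *root,
					       unsigned long gfn)
	{
		while (root) {
			if (gfn < root->gfn)
				root = root->left;
			else if (gfn > root->gfn)
				root = root->right;
			else
				return root;
		}
		return NULL;
	}
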
449 static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
452 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
460 gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
469 static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
479 static int handle_edid_regs(struct intel_vgpu *vgpu,
505 intel_gvt_ops->emulate_hotplug(vgpu, true);
507 intel_gvt_ops->emulate_hotplug(vgpu, false);
550 static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
557 (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
561 ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
573 static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
584 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
589 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
611 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
612 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
626 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
630 /* Each vgpu has its own opregion, although VFIO would create another
634 base = vgpu_opregion(vgpu)->va;
643 ret = intel_vgpu_register_reg(vgpu,
654 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
655 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
671 ret = intel_vgpu_register_reg(vgpu,
682 static void kvmgt_put_vfio_device(void *vgpu)
684 struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
694 struct intel_vgpu *vgpu = NULL;
711 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
712 if (IS_ERR_OR_NULL(vgpu)) {
713 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
714 gvt_err("failed to create intel vgpu: %d\n", ret);
718 INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
720 kvmgt_vdev(vgpu)->mdev = mdev;
721 mdev_set_drvdata(mdev, vgpu);
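
The create path above distinguishes a NULL return from vgpu_create (mapped to -EFAULT) from an errno encoded in the pointer itself (decoded with PTR_ERR). For reference, a minimal restatement of the kernel's ERR_PTR convention that IS_ERR_OR_NULL relies on; this is simplified and omits the __force/unlikely annotations of the real include/linux/err.h.

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	static inline void *ERR_PTR(long error)     { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)   { return IS_ERR_VALUE((unsigned long)ptr); }

	/* IS_ERR_OR_NULL() is also true for NULL, which is why the caller maps
	 * a NULL vgpu to -EFAULT and everything else through PTR_ERR(). */
	static inline int IS_ERR_OR_NULL(const void *ptr)
	{
		return !ptr || IS_ERR_VALUE((unsigned long)ptr);
	}
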
733 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
735 if (handle_valid(vgpu->handle))
738 intel_gvt_ops->vgpu_destroy(vgpu);
748 struct intel_vgpu *vgpu = vdev->vgpu;
760 entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
764 gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
766 __gvt_cache_remove_entry(vgpu, entry);
794 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
795 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
841 intel_gvt_ops->vgpu_activate(vgpu);
861 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
863 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
873 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
875 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
876 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
880 if (!handle_valid(vgpu->handle))
886 intel_gvt_ops->vgpu_release(vgpu);
901 info = (struct kvmgt_guest_info *)vgpu->handle;
904 intel_vgpu_release_msi_eventfd_ctx(vgpu);
908 vgpu->handle = 0;
913 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
915 __intel_vgpu_release(vgpu);
923 __intel_vgpu_release(vdev->vgpu);
926 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
931 start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
933 mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
938 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
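
intel_vgpu_get_bar_addr() above rebuilds the guest-programmed BAR base from the emulated config space: the low dword masked down to the address bits, plus the next dword shifted up when the BAR advertises the 64-bit memory type. A standalone sketch of that calculation; bar_base_addr and cfg are illustrative names, and the constants carry the standard PCI register values.

	#include <stdint.h>

	#define PCI_BASE_ADDRESS_MEM_MASK	(~0x0fULL)
	#define PCI_BASE_ADDRESS_MEM_TYPE_MASK	0x06
	#define PCI_BASE_ADDRESS_MEM_TYPE_64	0x04

	/* Rebuild a BAR base from emulated config space: 'cfg' points at the
	 * virtual config space, 'bar' is the BAR register offset. */
	uint64_t bar_base_addr(const uint8_t *cfg, int bar)
	{
		uint32_t lo = *(const uint32_t *)(cfg + bar);
		uint64_t addr = lo & PCI_BASE_ADDRESS_MEM_MASK;

		/* 64-bit memory BARs keep the upper 32 bits in the next register. */
		if ((lo & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64)
			addr |= (uint64_t)*(const uint32_t *)(cfg + bar + 4) << 32;

		return addr;
	}
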
953 static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
956 u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
960 ret = intel_gvt_ops->emulate_mmio_write(vgpu,
963 ret = intel_gvt_ops->emulate_mmio_read(vgpu,
968 static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
970 return off >= vgpu_aperture_offset(vgpu) &&
971 off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
974 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
979 if (!intel_vgpu_in_aperture(vgpu, off) ||
980 !intel_vgpu_in_aperture(vgpu, off + count)) {
985 aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
1004 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1005 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1019 ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
1022 ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
1026 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
1030 ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
1044 return vdev->region[index].ops->rw(vgpu, buf, count,
1053 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1055 struct intel_gvt *gvt = vgpu->gvt;
1063 intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
1222 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1242 if (!intel_vgpu_in_aperture(vgpu, req_start))
1245 vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1248 pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1253 static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
1261 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
1269 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
1276 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
1283 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
1297 kvmgt_vdev(vgpu)->msi_trigger = trigger;
1299 intel_vgpu_release_msi_eventfd_ctx(vgpu);
1304 static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
1308 int (*func)(struct intel_vgpu *vgpu, unsigned int index,
1342 return func(vgpu, index, start, count, flags, data);
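
intel_vgpu_set_irqs() above selects a handler from the VFIO irq index and action flags, then dispatches through the func pointer shown at line 1342. A sketch of that dispatch shape under the uapi constants from <linux/vfio.h>; the handler names and set_irqs itself are placeholders, not the driver's functions, and only the trigger actions are shown (the driver also wires up INTx mask/unmask handlers).

	#include <stdint.h>
	#include <stddef.h>
	#include <linux/vfio.h>

	struct vgpu_ctx;   /* opaque stand-in for struct intel_vgpu */

	typedef int (*irq_set_fn)(struct vgpu_ctx *v, unsigned int index,
				  unsigned int start, unsigned int count,
				  uint32_t flags, void *data);

	/* Placeholder handlers, one per (index, action) pair. */
	int set_intx_trigger(struct vgpu_ctx *v, unsigned int index,
			     unsigned int start, unsigned int count,
			     uint32_t flags, void *data);
	int set_msi_trigger(struct vgpu_ctx *v, unsigned int index,
			    unsigned int start, unsigned int count,
			    uint32_t flags, void *data);

	int set_irqs(struct vgpu_ctx *v, uint32_t flags, unsigned int index,
		     unsigned int start, unsigned int count, void *data)
	{
		irq_set_fn func = NULL;

		switch (index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
			if ((flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) ==
			    VFIO_IRQ_SET_ACTION_TRIGGER)
				func = set_intx_trigger;
			break;
		case VFIO_PCI_MSI_IRQ_INDEX:
			if ((flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) ==
			    VFIO_IRQ_SET_ACTION_TRIGGER)
				func = set_msi_trigger;
			break;
		}

		if (!func)
			return -1;   /* no handler for this index/action */

		return func(v, index, start, count, flags, data);
	}
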
1348 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1349 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1352 gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1394 info.size = vgpu->gvt->device_info.cfg_space_size;
1400 info.size = vgpu->cfg_space.bar[info.index].size;
1420 info.size = gvt_aperture_sz(vgpu->gvt);
1432 PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1433 sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1546 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1568 int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1584 ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1590 intel_gvt_ops->vgpu_reset(vgpu);
1603 ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
1616 dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
1631 struct intel_vgpu *vgpu = (struct intel_vgpu *)
1633 return sprintf(buf, "%d\n", vgpu->id);
1762 intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
1787 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
1794 mutex_lock(&vgpu->gvt->lock);
1795 for_each_active_vgpu(vgpu->gvt, itr, id) {
1806 mutex_unlock(&vgpu->gvt->lock);
1813 struct intel_vgpu *vgpu;
1817 vgpu = mdev_get_drvdata(mdev);
1818 if (handle_valid(vgpu->handle))
1821 vdev = kvmgt_vdev(vgpu);
1828 if (__kvmgt_vgpu_exist(vgpu, kvm))
1835 vgpu->handle = (unsigned long)info;
1836 info->vgpu = vgpu;
1841 gvt_cache_init(vgpu);
1849 0444, vgpu->debugfs,
1861 gvt_cache_destroy(info->vgpu);
1869 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1871 vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
1873 if (!vgpu->vdev)
1876 kvmgt_vdev(vgpu)->vgpu = vgpu;
1884 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1885 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1892 vdev->region[i].ops->release(vgpu,
1904 struct intel_vgpu *vgpu;
1911 vgpu = info->vgpu;
1912 vdev = kvmgt_vdev(vgpu);
1915 * When guest is poweroff, msi_trigger is set to NULL, but vgpu's
1917 * poweroff. If this vgpu is still used in next vm, this vgpu's pipe
1918 * may be enabled, then once this vgpu is active, it will get inject
1952 struct intel_vgpu *vgpu;
1960 vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
1961 vdev = kvmgt_vdev(vgpu);
1965 entry = __gvt_cache_find_gfn(vgpu, gfn);
1967 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1971 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1976 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1977 __gvt_cache_remove_entry(vgpu, entry);
1979 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1983 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1995 gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
2012 vdev = kvmgt_vdev(info->vgpu);
2015 entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
2029 gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
2031 __gvt_cache_remove_entry(entry->vgpu, entry);
2036 struct intel_vgpu *vgpu;
2043 vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
2044 vdev = kvmgt_vdev(vgpu);
2047 entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
2063 return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,