Lines matching refs:vgpu — grep-style cross-reference listing. Each entry below is the line number in the searched source file followed by the matching source line; the kvmgt_*/intel_vgpu_*/gvt_* identifiers indicate the i915 GVT-g KVMGT driver (apparently drivers/gpu/drm/i915/gvt/kvmgt.c).
71 size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
73 void (*release)(struct intel_vgpu *vgpu,
97 struct intel_vgpu *vgpu;
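The first matches come from the per-region ops table: every emulated VFIO region carries an rw and a release callback (source lines 71-73), and later matches (lines 546-551) fill in type/subtype/ops/size/flags/data fields. A minimal sketch of that shape; the struct names and field order are assumptions, only the two callback prototypes and the six assigned fields come from the matches:

```c
#include <linux/types.h>

struct intel_vgpu;	/* opaque here; defined by the driver */
struct vfio_region;

/* Callback pair matched at lines 71-73. */
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
		     size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

/* Fields assigned at lines 546-551. */
struct vfio_region {
	u32 type;
	u32 subtype;
	const struct intel_vgpu_regops *ops;
	size_t size;
	u32 flags;
	void *data;
};
```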
128 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
131 vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
136 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
152 ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
173 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
177 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
180 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
184 ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
193 gvt_unpin_guest_page(vgpu, gfn, size);
200 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
203 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
206 gvt_unpin_guest_page(vgpu, gfn, size);
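Lines 128-206 form a pin-then-map pipeline: guest frames are pinned through the vfio_device (vfio_pin_pages/vfio_unpin_pages), then mapped for device DMA against the i915 DRM device, with the pin undone if the map fails (line 193). A single-page sketch, assuming the vfio_device-based vfio_pin_pages() signature the matches imply; the real code loops to pin 'size' worth of pages and checks they are physically contiguous:

```c
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/vfio.h>

/* Pin one guest frame and map it for DMA; unwind the pin on
 * mapping failure, mirroring lines 184-193.
 */
static int example_map_guest_page(struct vfio_device *vdev,
				  struct device *dev, unsigned long gfn,
				  dma_addr_t *dma_addr)
{
	struct page *page;
	int ret;

	ret = vfio_pin_pages(vdev, gfn << PAGE_SHIFT, 1,
			     IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret >= 0 ? -EFAULT : ret;

	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
				 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		vfio_unpin_pages(vdev, gfn << PAGE_SHIFT, 1);
		return -ENOMEM;
	}
	return 0;
}
```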
209 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
212 struct rb_node *node = vgpu->dma_addr_cache.rb_node;
228 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
230 struct rb_node *node = vgpu->gfn_cache.rb_node;
246 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
256 new->vgpu = vgpu;
263 link = &vgpu->gfn_cache.rb_node;
274 rb_insert_color(&new->gfn_node, &vgpu->gfn_cache);
278 link = &vgpu->dma_addr_cache.rb_node;
289 rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache);
291 vgpu->nr_cache_entries++;
295 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
298 rb_erase(&entry->gfn_node, &vgpu->gfn_cache);
299 rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache);
301 vgpu->nr_cache_entries--;
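Lines 209-291 describe a cache that indexes each mapping twice: one rb-tree keyed by gfn and one keyed by dma_addr, so either direction resolves in O(log n). A sketch of the entry and the gfn-side lookup; the entry layout is assumed from the fields the matches touch (vgpu, gfn_node, dma_addr_node, gfn, dma_addr, size) plus a refcount used by the map/unmap paths further down:

```c
#include <linux/kref.h>
#include <linux/kvm_types.h>	/* gfn_t */
#include <linux/rbtree.h>
#include <linux/types.h>

struct gvt_dma {
	struct intel_vgpu *vgpu;	/* back-pointer, line 97 */
	struct rb_node gfn_node;	/* in the gfn-keyed tree */
	struct rb_node dma_addr_node;	/* in the dma_addr-keyed tree */
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;		/* assumed; see the unmap path below */
};

/* gfn-side lookup, modeled on lines 228-244. */
static struct gvt_dma *example_cache_find_gfn(struct rb_root *root, gfn_t gfn)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}
```

__gvt_cache_add (lines 246-291) walks both trees the same way, then rb_link_node()/rb_insert_color() links the single allocation into each and bumps nr_cache_entries; __gvt_cache_remove_entry erases from both.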
304 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
310 mutex_lock(&vgpu->cache_lock);
311 node = rb_first(&vgpu->gfn_cache);
313 mutex_unlock(&vgpu->cache_lock);
317 gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
318 __gvt_cache_remove_entry(vgpu, dma);
319 mutex_unlock(&vgpu->cache_lock);
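gvt_cache_destroy (lines 304-319) drains the cache by repeatedly removing the tree's first node, dropping and retaking the lock each round so the unmap path never holds it longer than one entry. A reconstruction from the matched lines (only the loop framing is assumed); gvt_cache_init (lines 323-328, just below) resets the roots, the counter, and the lock:

```c
static void example_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node;

	for (;;) {
		mutex_lock(&vgpu->cache_lock);
		node = rb_first(&vgpu->gfn_cache);
		if (!node) {
			/* line 313: unlock and stop once the tree is empty */
			mutex_unlock(&vgpu->cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->cache_lock);
	}
}
```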
323 static void gvt_cache_init(struct intel_vgpu *vgpu)
325 vgpu->gfn_cache = RB_ROOT;
326 vgpu->dma_addr_cache = RB_ROOT;
327 vgpu->nr_cache_entries = 0;
328 mutex_init(&vgpu->cache_lock);
399 static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
404 void *base = vgpu->region[i].data;
408 if (pos >= vgpu->region[i].size || iswrite) {
409 gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
412 count = min(count, (size_t)(vgpu->region[i].size - pos));
418 static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
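The OpRegion handler (lines 399-412) is the simplest rw callback: decode the region index from the file offset, reject writes and out-of-range reads, clamp the count, and copy from the backing buffer. A sketch assuming vfio-pci's standard region-offset encoding (index in the high bits of *ppos):

```c
#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/vfio.h>

/* vfio-pci's region-offset encoding (assumed to match the driver). */
#define VFIO_PCI_OFFSET_SHIFT		40
#define VFIO_PCI_OFFSET_TO_INDEX(off)	((off) >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK		(((u64)1 << VFIO_PCI_OFFSET_SHIFT) - 1)

static size_t example_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
				      size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->region[i].size || iswrite)
		return -EINVAL;	/* read-only region, bounds-checked (line 408) */

	count = min(count, (size_t)(vgpu->region[i].size - pos));
	memcpy(buf, base + pos, count);
	return count;
}
```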
428 static int handle_edid_regs(struct intel_vgpu *vgpu,
454 intel_vgpu_emulate_hotplug(vgpu, true);
456 intel_vgpu_emulate_hotplug(vgpu, false);
499 static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
505 struct vfio_edid_region *region = vgpu->region[i].data;
509 ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
521 static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
532 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
539 region = krealloc(vgpu->region,
540 (vgpu->num_regions + 1) * sizeof(*region),
545 vgpu->region = region;
546 vgpu->region[vgpu->num_regions].type = type;
547 vgpu->region[vgpu->num_regions].subtype = subtype;
548 vgpu->region[vgpu->num_regions].ops = ops;
549 vgpu->region[vgpu->num_regions].size = size;
550 vgpu->region[vgpu->num_regions].flags = flags;
551 vgpu->region[vgpu->num_regions].data = data;
552 vgpu->num_regions++;
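intel_vgpu_register_reg (lines 532-552) appends to a heap-allocated region array with krealloc. A reconstruction from the matched lines; only the error-path framing is assumed:

```c
#include <linux/slab.h>

static int example_register_reg(struct intel_vgpu *vgpu,
				unsigned int type, unsigned int subtype,
				const struct intel_vgpu_regops *ops,
				size_t size, u32 flags, void *data)
{
	struct vfio_region *region;

	/* grow by one slot; krealloc preserves the existing entries */
	region = krealloc(vgpu->region,
			  (vgpu->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vgpu->region = region;
	vgpu->region[vgpu->num_regions].type = type;
	vgpu->region[vgpu->num_regions].subtype = subtype;
	vgpu->region[vgpu->num_regions].ops = ops;
	vgpu->region[vgpu->num_regions].size = size;
	vgpu->region[vgpu->num_regions].flags = flags;
	vgpu->region[vgpu->num_regions].data = data;
	vgpu->num_regions++;
	return 0;
}
```

intel_gvt_set_opregion and intel_gvt_set_edid (lines 556-601) are the two callers, registering the OpRegion and per-port EDID regions through this helper.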
556 int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
561 /* Each vgpu has its own opregion, although VFIO would create another
565 base = vgpu_opregion(vgpu)->va;
574 ret = intel_vgpu_register_reg(vgpu,
583 int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
585 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
601 ret = intel_vgpu_register_reg(vgpu,
615 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
620 mutex_lock(&vgpu->cache_lock);
622 entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
626 gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
628 __gvt_cache_remove_entry(vgpu, entry);
630 mutex_unlock(&vgpu->cache_lock);
633 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
639 mutex_lock(&vgpu->gvt->lock);
640 for_each_active_vgpu(vgpu->gvt, itr, id) {
644 if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
650 mutex_unlock(&vgpu->gvt->lock);
656 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
659 if (__kvmgt_vgpu_exist(vgpu))
662 vgpu->track_node.track_write = kvmgt_page_track_write;
663 vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region;
664 ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
665 &vgpu->track_node);
671 set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
673 debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
674 &vgpu->nr_cache_entries);
676 intel_gvt_activate_vgpu(vgpu);
681 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
685 trigger = vgpu->msi_trigger;
688 vgpu->msi_trigger = NULL;
694 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
696 intel_gvt_release_vgpu(vgpu);
698 clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
700 debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs);
702 kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
703 &vgpu->track_node);
705 kvmgt_protect_table_destroy(vgpu);
706 gvt_cache_destroy(vgpu);
708 WARN_ON(vgpu->nr_cache_entries);
710 vgpu->gfn_cache = RB_ROOT;
711 vgpu->dma_addr_cache = RB_ROOT;
713 intel_vgpu_release_msi_eventfd_ctx(vgpu);
716 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
721 start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
723 mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
728 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
743 static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
746 u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
750 ret = intel_vgpu_emulate_mmio_write(vgpu,
753 ret = intel_vgpu_emulate_mmio_read(vgpu,
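intel_vgpu_get_bar_addr (lines 716-728) decodes a BAR from the virtual config space: mask off the low flag bits, and if the memory-type field says 64-bit, fold in the following dword as the high half. intel_vgpu_bar_rw (lines 743-753) then uses the decoded base to route accesses to the MMIO read/write emulators. A simplified sketch of the decode (the real code switches over all three PCI memory types):

```c
#include <linux/pci_regs.h>
#include <linux/types.h>

static u64 example_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi = 0, mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
		PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
		PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	if (mem_type == PCI_BASE_ADDRESS_MEM_TYPE_64)
		start_hi = *(u32 *)(vgpu->cfg_space.virtual_cfg_space
				    + bar + 4);

	return ((u64)start_hi << 32) | start_lo;
}
```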
758 static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
760 return off >= vgpu_aperture_offset(vgpu) &&
761 off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
764 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
769 if (!intel_vgpu_in_aperture(vgpu, off) ||
770 !intel_vgpu_in_aperture(vgpu, off + count)) {
775 aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
791 static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf,
799 if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) {
807 ret = intel_vgpu_emulate_cfg_write(vgpu, pos,
810 ret = intel_vgpu_emulate_cfg_read(vgpu, pos,
814 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
818 ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
828 if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
832 return vgpu->region[index].ops->rw(vgpu, buf, count,
839 static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos)
842 struct intel_gvt *gvt = vgpu->gvt;
850 intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
860 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
869 gtt_entry(vgpu, ppos)) {
872 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
884 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
896 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
908 ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos,
935 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
944 gtt_entry(vgpu, ppos)) {
950 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
962 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
974 ret = intel_vgpu_rw(vgpu, (char *)&val,
986 ret = intel_vgpu_rw(vgpu, &val, sizeof(val),
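The vfio .read op (lines 860-908; the .write op at 935-986 mirrors it) never passes a large buffer through at once: it splits the transfer into naturally aligned 4-, 2-, and 1-byte accesses, bouncing each through intel_vgpu_rw(). A sketch of the read side (the 8-byte GTT-entry fast path guarded by gtt_entry() at line 869 is omitted):

```c
#include <linux/uaccess.h>

static ssize_t example_read(struct intel_vgpu *vgpu, char __user *ubuf,
			    size_t count, loff_t *ppos)
{
	unsigned int done = 0;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
					  ppos, false) <= 0)
				return -EFAULT;
			if (copy_to_user(ubuf, &val, sizeof(val)))
				return -EFAULT;
			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
					  ppos, false) <= 0)
				return -EFAULT;
			if (copy_to_user(ubuf, &val, sizeof(val)))
				return -EFAULT;
			filled = 2;
		} else {
			u8 val;

			if (intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
					  ppos, false) <= 0)
				return -EFAULT;
			if (copy_to_user(ubuf, &val, sizeof(val)))
				return -EFAULT;
			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		ubuf += filled;
	}
	return done;
}
```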
1008 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1032 if (!intel_vgpu_in_aperture(vgpu, req_start))
1035 vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1038 pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
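The mmap path (lines 1008-1038) validates that the request lies inside this vGPU's slice of the graphics aperture, then rebases the page offset onto the host aperture's physical base. A sketch, assuming vfio-pci's 40-bit region-offset encoding for vm_pgoff and a remap_pfn_range() finish (the finish is not in the matches):

```c
#include <linux/mm.h>

#define VFIO_PCI_OFFSET_SHIFT	40	/* assumed, as in the rw sketch above */

static int example_mmap(struct intel_vgpu *vgpu, struct vm_area_struct *vma)
{
	u64 pgoff = vma->vm_pgoff &
		((1ULL << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	u64 req_size = vma->vm_end - vma->vm_start;
	u64 req_start = pgoff << PAGE_SHIFT;

	/* lines 1032-1035: both ends must fall inside the aperture */
	if (!intel_vgpu_in_aperture(vgpu, req_start) ||
	    req_start + req_size >
	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
		return -EINVAL;

	/* line 1038: rebase onto the host aperture's physical pages */
	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
	return remap_pfn_range(vma, vma->vm_start, pgoff, req_size,
			       vma->vm_page_prot);
}
```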
1043 static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
1051 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
1059 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
1066 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
1073 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
1087 vgpu->msi_trigger = trigger;
1089 intel_vgpu_release_msi_eventfd_ctx(vgpu);
1094 static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
1098 int (*func)(struct intel_vgpu *vgpu, unsigned int index,
1132 return func(vgpu, index, start, count, flags, data);
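intel_vgpu_set_irqs (lines 1094-1132) maps the (irq index, action) pair from a VFIO_DEVICE_SET_IRQS request onto one of the intel_vgpu_set_intx_*/set_msi_trigger handlers (lines 1051-1089) and tail-calls it through a function pointer. The MSI trigger handler stashes an eventfd context in vgpu->msi_trigger (line 1087), which later interrupt injection signals. A sketch showing only the MSI case:

```c
#include <linux/vfio.h>

static int example_set_irqs(struct intel_vgpu *vgpu, u32 flags,
			    unsigned int index, unsigned int start,
			    unsigned int count, void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
		    unsigned int start, unsigned int count, u32 flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_MSI_IRQ_INDEX:
		if ((flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) ==
		    VFIO_IRQ_SET_ACTION_TRIGGER)
			func = intel_vgpu_set_msi_trigger;
		break;
	/* the real code also routes INTx mask/unmask/trigger here */
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}
```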
1138 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1141 gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1157 vgpu->num_regions;
1183 info.size = vgpu->gvt->device_info.cfg_space_size;
1189 info.size = vgpu->cfg_space.bar[info.index].size;
1209 info.size = gvt_aperture_sz(vgpu->gvt);
1221 PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1222 sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1248 vgpu->num_regions)
1253 vgpu->num_regions);
1259 info.size = vgpu->region[i].size;
1260 info.flags = vgpu->region[i].flags;
1262 cap_type.type = vgpu->region[i].type;
1263 cap_type.subtype = vgpu->region[i].subtype;
1335 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1357 int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1373 ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1379 intel_gvt_reset_vgpu(vgpu);
1392 ret = intel_vgpu_query_plane(vgpu, &dmabuf);
1403 return intel_vgpu_get_dmabuf(vgpu, dmabuf_id);
1413 struct intel_vgpu *vgpu = dev_get_drvdata(dev);
1415 return sprintf(buf, "%d\n", vgpu->id);
1438 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1443 vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt;
1444 ret = intel_gvt_create_vgpu(vgpu, type->conf);
1448 kvmgt_protect_table_init(vgpu);
1449 gvt_cache_init(vgpu);
1456 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1458 intel_gvt_destroy_vgpu(vgpu);
1479 struct intel_vgpu *vgpu;
1482 vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
1484 if (IS_ERR(vgpu)) {
1485 gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
1486 return PTR_ERR(vgpu);
1489 dev_set_drvdata(&mdev->dev, vgpu);
1490 ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
1499 vfio_put_device(&vgpu->vfio_device);
1505 struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
1507 vfio_unregister_group_dev(&vgpu->vfio_device);
1508 vfio_put_device(&vgpu->vfio_device);
1613 void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
1617 if (!vgpu->region)
1620 for (i = 0; i < vgpu->num_regions; i++)
1621 if (vgpu->region[i].ops->release)
1622 vgpu->region[i].ops->release(vgpu,
1623 &vgpu->region[i]);
1624 vgpu->num_regions = 0;
1625 kfree(vgpu->region);
1626 vgpu->region = NULL;
1629 int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
1635 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1638 mutex_lock(&vgpu->cache_lock);
1640 entry = __gvt_cache_find_gfn(vgpu, gfn);
1642 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1646 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1651 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1652 __gvt_cache_remove_entry(vgpu, entry);
1654 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1658 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1666 mutex_unlock(&vgpu->cache_lock);
1670 gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
1672 mutex_unlock(&vgpu->cache_lock);
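intel_gvt_dma_map_guest_page (lines 1629-1672) is a lookup-or-map: a gfn-cache hit with matching size just takes a reference and returns the cached dma_addr; a size mismatch tears down the stale entry and remaps; a miss pins+maps and inserts. A simplified reconstruction (the error unwinding at lines 1666-1672 is trimmed; the kref is the assumed refcount from the cache sketch above):

```c
int example_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
			       unsigned long size, dma_addr_t *dma_addr)
{
	struct gvt_dma *entry;
	int ret = 0;

	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
		return -EINVAL;

	mutex_lock(&vgpu->cache_lock);
	entry = __gvt_cache_find_gfn(vgpu, gfn);
	if (!entry) {
		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
		if (!ret)
			ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
	} else if (entry->size != size) {
		/* stale size: unmap, drop, and remap (lines 1651-1658) */
		gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
		__gvt_cache_remove_entry(vgpu, entry);
		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
		if (!ret)
			ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
	} else {
		kref_get(&entry->ref);
		*dma_addr = entry->dma_addr;
	}
	mutex_unlock(&vgpu->cache_lock);
	return ret;
}
```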
1676 int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
1681 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1684 mutex_lock(&vgpu->cache_lock);
1685 entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
1690 mutex_unlock(&vgpu->cache_lock);
1699 gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
1701 __gvt_cache_remove_entry(entry->vgpu, entry);
1704 void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
1709 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1712 mutex_lock(&vgpu->cache_lock);
1713 entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
1716 mutex_unlock(&vgpu->cache_lock);
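The unmap side (lines 1704-1716) looks the entry up by dma_addr and drops a reference; the release callback implied by lines 1699-1701 does the actual unmap and tree removal. A sketch under the same kref assumption:

```c
#include <linux/kref.h>
#include <linux/slab.h>

static void example_dma_release(struct kref *ref)
{
	struct gvt_dma *entry = container_of(ref, struct gvt_dma, ref);

	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
			   entry->size);
	__gvt_cache_remove_entry(entry->vgpu, entry);
	kfree(entry);
}

void example_dma_unmap_guest_page(struct intel_vgpu *vgpu,
				  dma_addr_t dma_addr)
{
	struct gvt_dma *entry;

	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
		return;

	mutex_lock(&vgpu->cache_lock);
	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
	if (entry)
		kref_put(&entry->ref, example_dma_release);
	mutex_unlock(&vgpu->cache_lock);
}
```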
1738 struct intel_vgpu *vgpu;
1742 idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
1745 if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
1746 intel_vgpu_emulate_vblank(vgpu);
1846 struct intel_vgpu *vgpu;
1905 vgpu = intel_gvt_create_idle_vgpu(gvt);
1906 if (IS_ERR(vgpu)) {
1907 ret = PTR_ERR(vgpu);
1908 gvt_err("failed to create idle vgpu\n");
1911 gvt->idle_vgpu = vgpu;