Lines matching refs: info (all references to the identifier info in drivers/gpu/drm/i915/gvt/kvmgt.c, the Intel GVT-g KVM/VFIO host backend)

149 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
381 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
383 hash_init(info->ptable);
386 static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
392 hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
399 __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
403 hash_for_each_possible(info->ptable, p, hnode, gfn) {
413 static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
418 p = __kvmgt_protect_table_find(info, gfn);
422 static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
426 if (kvmgt_gfn_is_write_protected(info, gfn))
434 hash_add(info->ptable, &p->hnode, gfn);
437 static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
442 p = __kvmgt_protect_table_find(info, gfn);
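Lines 381 through 442 above are the per-guest write-protect table: kvmgt remembers which guest frame numbers (GFNs) it has asked KVM to write-protect in a fixed-size hashtable embedded in struct kvmgt_guest_info, keyed directly by the gfn value. A minimal sketch of the pattern, assuming an entry type shaped like the file's struct kvmgt_pgfn (a gfn plus an hlist_node); GFP_ATOMIC because the real callers hold kvm->mmu_lock:

    #include <linux/hashtable.h>
    #include <linux/slab.h>

    struct kvmgt_pgfn {
            gfn_t gfn;
            struct hlist_node hnode;
    };

    /* Lookup walks only the bucket this gfn hashes to. */
    static struct kvmgt_pgfn *
    __protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
    {
            struct kvmgt_pgfn *p;

            hash_for_each_possible(info->ptable, p, hnode, gfn)
                    if (p->gfn == gfn)
                            return p;
            return NULL;
    }

    /* Idempotent insert: an already-protected gfn is left alone. */
    static void protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
    {
            struct kvmgt_pgfn *p;

            if (__protect_table_find(info, gfn))
                    return;

            p = kzalloc(sizeof(*p), GFP_ATOMIC);
            if (!p)
                    return;

            p->gfn = gfn;
            hash_add(info->ptable, &p->hnode, gfn);
    }

    /* Teardown needs the _safe iterator because entries are freed mid-walk. */
    static void protect_table_destroy(struct kvmgt_guest_info *info)
    {
            struct kvmgt_pgfn *p;
            struct hlist_node *tmp;
            int i;

            hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
                    hash_del(&p->hnode);
                    kfree(p);
            }
    }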
877 struct kvmgt_guest_info *info;
901 info = (struct kvmgt_guest_info *)vgpu->handle;
902 kvmgt_guest_exit(info);
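Lines 877 through 902 show the file's other recurring idiom: the opaque vgpu->handle is nothing but the kvmgt_guest_info pointer stored as an unsigned long, cast back at the top of every MPT callback. A hedged sketch of the round-trip; the handle_valid() check and the callback name are illustrative, not quoted:

    /* publish: done once at guest init */
    vgpu->handle = (unsigned long)info;

    /* assumption: a live handle is a kernel pointer, never a small token */
    static inline bool handle_valid(unsigned long handle)
    {
            return !!(handle & ~0xff);
    }

    /* recover: done at the top of each callback (hypothetical name) */
    static int example_mpt_callback(unsigned long handle)
    {
            struct kvmgt_guest_info *info;

            if (!handle_valid(handle))
                    return -ESRCH;

            info = (struct kvmgt_guest_info *)handle;
            /* ... use info->kvm and info->vgpu ... */
            return 0;
    }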
1355 struct vfio_device_info info;
1359 if (copy_from_user(&info, (void __user *)arg, minsz))
1362 if (info.argsz < minsz)
1365 info.flags = VFIO_DEVICE_FLAGS_PCI;
1366 info.flags |= VFIO_DEVICE_FLAGS_RESET;
1367 info.num_regions = VFIO_PCI_NUM_REGIONS +
1369 info.num_irqs = VFIO_PCI_NUM_IRQS;
1371 return copy_to_user((void __user *)arg, &info, minsz) ?
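Lines 1355 to 1371 are the VFIO_DEVICE_GET_INFO leg of the ioctl handler, in the standard VFIO shape: copy in only the fixed-size prefix (minsz), validate argsz, fill the reply, copy the same prefix back. A sketch, assuming minsz comes from offsetofend() as it does across VFIO drivers and that vdev holds the device-private region count:

    struct vfio_device_info info;
    unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);

    if (copy_from_user(&info, (void __user *)arg, minsz))
            return -EFAULT;

    if (info.argsz < minsz)
            return -EINVAL;

    info.flags = VFIO_DEVICE_FLAGS_PCI;
    info.flags |= VFIO_DEVICE_FLAGS_RESET;
    info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
    info.num_irqs = VFIO_PCI_NUM_IRQS;

    return copy_to_user((void __user *)arg, &info, minsz) ?
            -EFAULT : 0;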
1375 struct vfio_region_info info;
1385 if (copy_from_user(&info, (void __user *)arg, minsz))
1388 if (info.argsz < minsz)
1391 switch (info.index) {
1393 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1394 info.size = vgpu->gvt->device_info.cfg_space_size;
1395 info.flags = VFIO_REGION_INFO_FLAG_READ |
1399 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1400 info.size = vgpu->cfg_space.bar[info.index].size;
1401 if (!info.size) {
1402 info.flags = 0;
1406 info.flags = VFIO_REGION_INFO_FLAG_READ |
1410 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1411 info.size = 0;
1412 info.flags = 0;
1415 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1416 info.flags = VFIO_REGION_INFO_FLAG_CAPS |
1420 info.size = gvt_aperture_sz(vgpu->gvt);
1437 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1438 info.size = 0;
1439 info.flags = 0;
1441 gvt_dbg_core("get region info bar:%d\n", info.index);
1446 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1447 info.size = 0;
1448 info.flags = 0;
1450 gvt_dbg_core("get region info index:%d\n", info.index);
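The matches from 1375 to 1453 flatten what is a single switch (info.index) inside the VFIO_DEVICE_GET_REGION_INFO handler. Reassembled, the shape is: config space and BAR0 are real read/write regions, BAR2 is the aperture and additionally advertises mmap plus a sparse-mmap capability (hence FLAG_CAPS and gvt_aperture_sz() at lines 1416 to 1420), and everything else reports size 0, flags 0. A condensed sketch; the real handler spells out each remaining BAR and the ROM/VGA indices as separate cases:

    switch (info.index) {
    case VFIO_PCI_CONFIG_REGION_INDEX:
            info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
            info.size   = vgpu->gvt->device_info.cfg_space_size;
            info.flags  = VFIO_REGION_INFO_FLAG_READ |
                          VFIO_REGION_INFO_FLAG_WRITE;
            break;
    case VFIO_PCI_BAR0_REGION_INDEX:
            info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
            info.size   = vgpu->cfg_space.bar[info.index].size;
            if (!info.size) {               /* BAR not implemented on this vGPU */
                    info.flags = 0;
                    break;
            }
            info.flags = VFIO_REGION_INFO_FLAG_READ |
                         VFIO_REGION_INFO_FLAG_WRITE;
            break;
    case VFIO_PCI_BAR2_REGION_INDEX:        /* the mappable aperture */
            info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
            info.flags  = VFIO_REGION_INFO_FLAG_CAPS |
                          VFIO_REGION_INFO_FLAG_MMAP |
                          VFIO_REGION_INFO_FLAG_READ |
                          VFIO_REGION_INFO_FLAG_WRITE;
            info.size   = gvt_aperture_sz(vgpu->gvt);
            /* sparse-mmap areas are assembled here; see the caps sketch below */
            break;
    default:                                /* remaining BARs, ROM, VGA */
            info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
            info.size   = 0;
            info.flags  = 0;
            gvt_dbg_core("get region info index:%d\n", info.index);
            break;
    }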
1458 if (info.index >= VFIO_PCI_NUM_REGIONS +
1461 info.index =
1462 array_index_nospec(info.index,
1466 i = info.index - VFIO_PCI_NUM_REGIONS;
1468 info.offset =
1469 VFIO_PCI_INDEX_TO_OFFSET(info.index);
1470 info.size = vdev->region[i].size;
1471 info.flags = vdev->region[i].flags;
1484 if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
1503 info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
1504 if (info.argsz < sizeof(info) + caps.size) {
1505 info.argsz = sizeof(info) + caps.size;
1506 info.cap_offset = 0;
1508 vfio_info_cap_shift(&caps, sizeof(info));
1510 sizeof(info), caps.buf,
1516 info.cap_offset = sizeof(info);
1523 return copy_to_user((void __user *)arg, &info, minsz) ?
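Lines 1458 to 1516 finish the region-info handler: indices beyond the standard PCI set select a device-private region, and the optional capability chain (the sparse-mmap areas built for the aperture, line 1484) is copied out only if the caller's argsz leaves room for it. Two details worth reassembling: the index is re-clamped with array_index_nospec() after the bounds check, the usual Spectre-v1 hardening idiom, and an undersized argsz gets the required size reported back with cap_offset 0 instead of an error. A sketch using the kernel's vfio_info_cap helpers:

    struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
    int i;

    if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
            return -EINVAL;
    /* clamp under speculation after the bounds check */
    info.index = array_index_nospec(info.index,
                                    VFIO_PCI_NUM_REGIONS +
                                    vdev->num_regions);

    i = info.index - VFIO_PCI_NUM_REGIONS;
    info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
    info.size   = vdev->region[i].size;
    info.flags  = vdev->region[i].flags;

    /* ... the sparse-mmap capability is appended to caps via
     *     vfio_info_add_capability() when applicable ... */

    if (caps.size) {
            info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
            if (info.argsz < sizeof(info) + caps.size) {
                    /* no room: report the needed size, copy no chain */
                    info.argsz = sizeof(info) + caps.size;
                    info.cap_offset = 0;
            } else {
                    /* the chain lives right after the fixed struct */
                    vfio_info_cap_shift(&caps, sizeof(info));
                    if (copy_to_user((void __user *)arg + sizeof(info),
                                     caps.buf, caps.size)) {
                            kfree(caps.buf);
                            return -EFAULT;
                    }
                    info.cap_offset = sizeof(info);
            }
            kfree(caps.buf);
    }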
1526 struct vfio_irq_info info;
1530 if (copy_from_user(&info, (void __user *)arg, minsz))
1533 if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1536 switch (info.index) {
1544 info.flags = VFIO_IRQ_INFO_EVENTFD;
1546 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1548 if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1549 info.flags |= (VFIO_IRQ_INFO_MASKABLE |
1552 info.flags |= VFIO_IRQ_INFO_NORESIZE;
1554 return copy_to_user((void __user *)arg, &info, minsz) ?
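Lines 1526 to 1554 answer VFIO_DEVICE_GET_IRQ_INFO. Only INTX is maskable (and auto-masked); every other supported index reports a fixed-size, eventfd-capable vector set. A sketch; the switch at line 1536 filters out unsupported index types before any flags are set:

    struct vfio_irq_info info;
    unsigned long minsz = offsetofend(struct vfio_irq_info, count);

    if (copy_from_user(&info, (void __user *)arg, minsz))
            return -EFAULT;

    if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
            return -EINVAL;

    switch (info.index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
    case VFIO_PCI_MSI_IRQ_INDEX:
            break;
    default:
            return -EINVAL;
    }

    info.flags = VFIO_IRQ_INFO_EVENTFD;
    info.count = intel_vgpu_get_irq_count(vgpu, info.index);

    if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
            info.flags |= (VFIO_IRQ_INFO_MASKABLE |
                           VFIO_IRQ_INFO_AUTOMASKED);
    else
            info.flags |= VFIO_IRQ_INFO_NORESIZE;

    return copy_to_user((void __user *)arg, &info, minsz) ?
            -EFAULT : 0;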
1688 struct kvmgt_guest_info *info;
1696 info = (struct kvmgt_guest_info *)handle;
1697 kvm = info->kvm;
1708 if (kvmgt_gfn_is_write_protected(info, gfn))
1712 kvmgt_protect_table_add(info, gfn);
1722 struct kvmgt_guest_info *info;
1730 info = (struct kvmgt_guest_info *)handle;
1731 kvm = info->kvm;
1742 if (!kvmgt_gfn_is_write_protected(info, gfn))
1746 kvmgt_protect_table_del(info, gfn);
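Lines 1688 to 1746 are the write-protect entry points that GVT's shadow page-table code calls. The locking shape is the part worth spelling out: the memslot lookup needs the kvm->srcu read lock, the page-track update needs kvm->mmu_lock, and the local hashtable keeps repeated requests idempotent. A sketch of the add side, assuming the kvm_slot_page_track_add_page() API of this kernel generation; the remove side mirrors it with kvm_slot_page_track_remove_page() and kvmgt_protect_table_del():

    static int page_track_add(unsigned long handle, u64 gfn)
    {
            struct kvmgt_guest_info *info;
            struct kvm_memory_slot *slot;
            struct kvm *kvm;
            int idx;

            if (!handle_valid(handle))
                    return -ESRCH;

            info = (struct kvmgt_guest_info *)handle;
            kvm = info->kvm;

            idx = srcu_read_lock(&kvm->srcu);   /* memslots are SRCU-protected */
            slot = gfn_to_memslot(kvm, gfn);
            if (!slot) {
                    srcu_read_unlock(&kvm->srcu, idx);
                    return -EINVAL;
            }

            spin_lock(&kvm->mmu_lock);
            if (!kvmgt_gfn_is_write_protected(info, gfn)) {
                    kvm_slot_page_track_add_page(kvm, slot, gfn,
                                                 KVM_PAGE_TRACK_WRITE);
                    kvmgt_protect_table_add(info, gfn);
            }
            spin_unlock(&kvm->mmu_lock);

            srcu_read_unlock(&kvm->srcu, idx);
            return 0;
    }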
1758 struct kvmgt_guest_info *info = container_of(node,
1761 if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
1762 intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
1772 struct kvmgt_guest_info *info = container_of(node,
1778 if (kvmgt_gfn_is_write_protected(info, gfn)) {
1781 kvmgt_protect_table_del(info, gfn);
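Lines 1758 to 1781 are the two kvm_page_track notifier callbacks registered at guest init; both recover the owning guest with container_of() on the embedded track_node, since KVM hands back only the notifier node. Guest writes landing on a protected page are forwarded to GVT's write_protect_handler; the flush-slot callback (1772 to 1781) instead walks the slot's pages and drops tracking for each protected gfn. A sketch of the write path:

    static void page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                                 const u8 *val, int len,
                                 struct kvm_page_track_notifier_node *node)
    {
            struct kvmgt_guest_info *info =
                    container_of(node, struct kvmgt_guest_info, track_node);

            /* only forward writes that hit a page we asked to protect */
            if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
                    intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
                                                         (void *)val, len);
    }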
1790 struct kvmgt_guest_info *info;
1799 info = (struct kvmgt_guest_info *)itr->handle;
1800 if (kvm && kvm == info->kvm) {
1812 struct kvmgt_guest_info *info;
1831 info = vzalloc(sizeof(struct kvmgt_guest_info));
1832 if (!info)
1835 vgpu->handle = (unsigned long)info;
1836 info->vgpu = vgpu;
1837 info->kvm = kvm;
1838 kvm_get_kvm(info->kvm);
1840 kvmgt_protect_table_init(info);
1843 info->track_node.track_write = kvmgt_page_track_write;
1844 info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
1845 kvm_page_track_register_notifier(kvm, &info->track_node);
1847 info->debugfs_cache_entries = debugfs_create_ulong(
1854 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1856 debugfs_remove(info->debugfs_cache_entries);
1858 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1859 kvm_put_kvm(info->kvm);
1860 kvmgt_protect_table_destroy(info);
1861 gvt_cache_destroy(info->vgpu);
1862 vfree(info);
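Lines 1790 to 1862 are the guest lifecycle. The lookup at 1790 to 1800 walks the existing vGPUs and refuses a struct kvm that is already attached; init (1812 to 1847) then allocates the info block, publishes it through vgpu->handle, pins the VM with kvm_get_kvm(), and registers the page-track notifier (plus a debugfs counter, line 1847); exit (1854 to 1862, listed nearly in full above) undoes all of that in reverse, unregistering the notifier before dropping the kvm reference so no callback can race against the free. A condensed sketch of init; the parameter list is simplified relative to the real function:

    static int guest_init(struct intel_vgpu *vgpu, struct kvm *kvm)
    {
            struct kvmgt_guest_info *info;

            info = vzalloc(sizeof(struct kvmgt_guest_info));
            if (!info)
                    return -ENOMEM;

            vgpu->handle = (unsigned long)info;
            info->vgpu = vgpu;
            info->kvm = kvm;
            kvm_get_kvm(info->kvm);   /* pin the VM while its pages are tracked */

            kvmgt_protect_table_init(info);

            info->track_node.track_write = page_track_write;
            info->track_node.track_flush_slot = page_track_flush_slot;
            kvm_page_track_register_notifier(kvm, &info->track_node);

            /* debugfs_create_ulong() exposes the DMA-cache size (line 1847) */
            return 0;
    }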
1903 struct kvmgt_guest_info *info;
1910 info = (struct kvmgt_guest_info *)handle;
1911 vgpu = info->vgpu;
1934 struct kvmgt_guest_info *info;
1940 info = (struct kvmgt_guest_info *)handle;
1942 pfn = gfn_to_pfn(info->kvm, gfn);
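Lines 1934 to 1942 translate a guest frame number to a host page frame through KVM proper. The only subtlety is the error check: gfn_to_pfn() reports failure through special pfn values, not through a negative return. A sketch, with the invalid-address convention taken from the surrounding file:

    kvm_pfn_t pfn;

    pfn = gfn_to_pfn(info->kvm, gfn);
    if (is_error_noslot_pfn(pfn))
            return INTEL_GVT_INVALID_ADDR;

    return pfn;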
2003 struct kvmgt_guest_info *info;
2011 info = (struct kvmgt_guest_info *)handle;
2012 vdev = kvmgt_vdev(info->vgpu);
2015 entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
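Lines 2003 to 2015 unmap a previously mapped guest page: the per-device DMA cache is searched under its mutex, and the entry found is released there as well. A sketch, assuming the cache entries are kref-counted and that a release helper tears down the mapping when the count hits zero (the helper name here is an assumption):

    struct gvt_dma *entry;

    mutex_lock(&vdev->cache_lock);
    entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
    if (entry)
            kref_put(&entry->ref, __gvt_dma_release);  /* assumed helper name */
    mutex_unlock(&vdev->cache_lock);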
2056 struct kvmgt_guest_info *info;
2061 info = (struct kvmgt_guest_info *)handle;
2063 return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
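Lines 2056 to 2063 implement guest-physical reads and writes on top of vfio_dma_rw(), so accesses go through the vGPU's VFIO group (the same IOMMU-backed view the device uses) rather than through KVM's memslots. A sketch, with the function name condensed:

    static int rw_gpa(unsigned long handle, unsigned long gpa,
                      void *buf, unsigned long len, bool write)
    {
            struct kvmgt_guest_info *info;

            if (!handle_valid(handle))
                    return -ESRCH;

            info = (struct kvmgt_guest_info *)handle;

            /* one call covers both directions; 'write' selects it */
            return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
                               gpa, buf, len, write);
    }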
2086 struct kvmgt_guest_info *info;
2094 info = (struct kvmgt_guest_info *)handle;
2095 kvm = info->kvm;
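The final group (lines 2086 to 2095) checks whether a gfn is backed by a memslot at all. Like every other memslot read in this file, it must hold the SRCU read lock around the query; a sketch assuming kvm_is_visible_gfn() as the underlying test:

    bool ret;
    int idx;

    idx = srcu_read_lock(&kvm->srcu);   /* memslots are SRCU-protected */
    ret = kvm_is_visible_gfn(kvm, gfn);
    srcu_read_unlock(&kvm->srcu, idx);

    return ret;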