Lines Matching defs:gfn
673 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
702 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
758 if (spt->guest_page.gfn) {
762 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
808 /* Find a spt by guest gfn. */
810 struct intel_vgpu *vgpu, unsigned long gfn)
814 track = intel_vgpu_find_page_track(vgpu, gfn);
880 /* Allocate shadow page table associated with specific gfn. */
883 unsigned long gfn, bool guest_pde_ips)
895 ret = intel_vgpu_register_page_track(vgpu, gfn,
903 spt->guest_page.gfn = gfn;
906 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
1015 spt->guest_page.gfn, spt->shadow_page.type);
1051 spt->guest_page.gfn, spt->shadow_page.type);
1120 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1128 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1232 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
1274 unsigned long gfn, page_size = PAGE_SIZE;
1281 gfn = pte_ops->get_pfn(ge);
1312 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
1329 unsigned long gfn, i;
1333 spt->guest_page.gfn, spt->shadow_page.type);
1346 gfn = ops->get_pfn(&ge);
1347 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
1466 spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1514 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
1534 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1580 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
2256 unsigned long gma, gfn;
2324 gfn = ops->get_pfn(&e);
2329 * first write may not construct a valid gfn
2331 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
2336 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
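
The matches above (apparently from the i915 GVT-g shadow page table code, gtt.c) all follow one pattern: a guest frame number (gfn) identifies the guest page backing a shadow page table, "gfn << I915_GTT_PAGE_SHIFT" converts it to a guest physical address, and the gfn is validated, registered for write tracking, and DMA-mapped before its entries are shadowed. Below is a minimal standalone sketch of that flow; spt_stub, GTT_PAGE_SHIFT, gfn_is_valid, track_gfn, and map_gfn are hypothetical stand-ins, not the kernel's intel_gvt_hypervisor_is_valid_gfn / intel_vgpu_register_page_track / intel_gvt_hypervisor_dma_map_guest_page APIs.

    /*
     * Hedged sketch: mirrors the gfn handling pattern seen in the matches
     * above. Every type and helper here is an illustrative stand-in, not
     * the i915/GVT API.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GTT_PAGE_SHIFT 12        /* stand-in for I915_GTT_PAGE_SHIFT */

    struct spt_stub {                /* stand-in for a shadow page table */
            unsigned long gfn;       /* guest frame number of the tracked guest page */
            uint64_t dma_addr;       /* address the guest page was "mapped" to */
    };

    /* Hypothetical helpers mimicking validation, page tracking, and DMA mapping. */
    static bool gfn_is_valid(unsigned long gfn)
    {
            return gfn != 0;         /* cf. intel_gvt_hypervisor_is_valid_gfn */
    }

    static int track_gfn(unsigned long gfn)
    {
            (void)gfn;               /* cf. intel_vgpu_register_page_track */
            return 0;
    }

    static int map_gfn(unsigned long gfn, uint64_t *dma)
    {
            /* Fake mapping: just hand back the guest physical address. */
            *dma = (uint64_t)gfn << GTT_PAGE_SHIFT;
            return 0;                /* cf. intel_gvt_hypervisor_dma_map_guest_page */
    }

    /* Allocate-and-track flow, analogous to the matches around lines 880-906. */
    static int spt_alloc_stub(struct spt_stub *spt, unsigned long gfn)
    {
            uint64_t dma;

            if (!gfn_is_valid(gfn))
                    return -1;
            if (track_gfn(gfn))
                    return -1;
            if (map_gfn(gfn, &dma))
                    return -1;

            spt->gfn = gfn;
            spt->dma_addr = dma;
            /* Guest physical address, as in "gfn << I915_GTT_PAGE_SHIFT" above. */
            printf("gpa = 0x%llx\n", (unsigned long long)gfn << GTT_PAGE_SHIFT);
            return 0;
    }

    int main(void)
    {
            struct spt_stub spt;
            return spt_alloc_stub(&spt, 0x1234);
    }

The teardown path in the matches (lines 758-762 and 1580) is the mirror image: the page track registered for spt->guest_page.gfn is unregistered or disabled when the shadow page table is freed.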