Lines matching refs: vgpu

54 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
57 return vgpu_gmadr_is_valid(vgpu, addr);
59 if (vgpu_gmadr_is_aperture(vgpu, addr) &&
60 vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
62 else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
63 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
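
The fragments from lines 54-63 carry the whole range check: a zero-size probe only needs the start address to be a valid guest graphics memory address, while a real range must start and end inside the same region, either the aperture or the hidden GM window. A minimal reconstruction of that logic follows; the early return at line 57 is assumed to be the size == 0 case, and the trailing return false is likewise assumed:

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
        if (size == 0)
                return vgpu_gmadr_is_valid(vgpu, addr);

        /* the whole range must sit in exactly one region */
        if (vgpu_gmadr_is_aperture(vgpu, addr) &&
            vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
                return true;
        else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
                 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
                return true;

        return false;
}
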
72 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
74 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
76 if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
80 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
81 *h_addr = vgpu_aperture_gmadr_base(vgpu)
82 + (g_addr - vgpu_aperture_offset(vgpu));
84 *h_addr = vgpu_hidden_gmadr_base(vgpu)
85 + (g_addr - vgpu_hidden_offset(vgpu));
90 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
92 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
94 if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
98 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
99 *g_addr = vgpu_aperture_gmadr_base(vgpu)
100 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
102 *g_addr = vgpu_hidden_gmadr_base(vgpu)
103 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
107 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
113 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
122 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
128 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
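
Lines 72-103 show the address rebasing in both directions: intel_gvt_ggtt_gmadr_g2h() rebases a guest graphics memory address from the vGPU's aperture (or hidden) offset onto the vGPU's host-visible gmadr base, and the h2g path does the reverse against the GVT-wide aperture/hidden base. The index variants at lines 107-128 apply the same translation to GTT page indices. A condensed sketch; the helper names gmadr_g2h/gmadr_index_g2h are illustrative, and the final shift back down to a page index is an assumption:

/* g2h direction, reconstructed from lines 80-85; the h2g path at
 * lines 98-103 subtracts the GVT-wide aperture/hidden base instead. */
static u64 gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr)
{
        if (vgpu_gmadr_is_aperture(vgpu, g_addr))
                return vgpu_aperture_gmadr_base(vgpu) +
                       (g_addr - vgpu_aperture_offset(vgpu));

        return vgpu_hidden_gmadr_base(vgpu) +
               (g_addr - vgpu_hidden_offset(vgpu));
}

/* page-index wrapper (lines 107-113): scale the index up to an address,
 * translate it, then shift back down (only the call at line 113 is
 * quoted above; the shift back is assumed). */
static int gmadr_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
                           unsigned long *h_index)
{
        u64 h_addr;
        int ret;

        ret = intel_gvt_ggtt_gmadr_g2h(vgpu,
                        (u64)g_index << I915_GTT_PAGE_SHIFT, &h_addr);
        if (ret)
                return ret;

        *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
        return 0;
}
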
306 struct intel_vgpu *vgpu)
308 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
315 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
321 e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
331 struct intel_vgpu *vgpu)
333 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
340 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
346 write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
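
Lines 306-346 split GGTT entry access into two paths: a guest entry lives in guest system memory and is read or written through the hypervisor by guest physical address, while the shadow entry is accessed directly in the host GGTT via read_pte64()/write_pte64(). A hedged sketch of the read side; the name read_ggtt_entry and the offset arithmetic built on gtt_entry_size/gtt_entry_size_shift are assumptions based on the device_info fields that appear elsewhere in this listing:

static int read_ggtt_entry(struct intel_vgpu *vgpu, u64 gpa,
                           unsigned long index, u64 *val, bool guest_entry)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

        if (guest_entry)
                /* guest view: the PTE sits in guest memory at gpa + index * entry size */
                return intel_gvt_hypervisor_read_gpa(vgpu,
                                gpa + (index << info->gtt_entry_size_shift),
                                val, info->gtt_entry_size);

        /* shadow view: read the live PTE out of the host GGTT */
        *val = read_pte64(vgpu->gvt->gt->ggtt, index);
        return 0;
}
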
556 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
563 entry, index, false, 0, mm->vgpu);
583 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
587 entry, index, false, 0, mm->vgpu);
605 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
611 false, 0, mm->vgpu);
617 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
622 false, 0, mm->vgpu);
628 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
632 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
638 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
643 if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
644 offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
646 } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
647 offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
651 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
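
Lines 638-651 show the host-entry setter rebasing the GGTT index against the vGPU's aperture or hidden base before it touches anything. Taken together with the host_ggtt_aperture/host_ggtt_hidden buffers allocated at lines 1965 and 1972 and replayed at lines 2883-2906, this suggests each host write is also cached per vGPU so it can be restored later. A sketch of that path; the array stores are an inference from those fragments rather than quoted code, and the name set_host_ggtt_entry is illustrative:

static void set_host_ggtt_entry(struct intel_vgpu_mm *mm,
                                struct intel_gvt_gtt_entry *entry,
                                unsigned long index)
{
        unsigned long offset = index;

        if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
                offset -= vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT;
                mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;  /* inferred */
        } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
                offset -= vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT;
                mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;    /* inferred */
        }

        /* line 651: hand the entry to the PTE ops, which write the real GGTT slot */
        mm->vgpu->gvt->gtt.pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}
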
663 struct intel_gvt *gvt = spt->vgpu->gvt;
674 spt->vgpu);
692 struct intel_gvt *gvt = spt->vgpu->gvt;
703 spt->vgpu);
744 static int detach_oos_page(struct intel_vgpu *vgpu,
749 struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
751 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
756 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
760 detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
762 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
769 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
777 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
810 struct intel_vgpu *vgpu, unsigned long gfn)
814 track = intel_vgpu_find_page_track(vgpu, gfn);
823 struct intel_vgpu *vgpu, unsigned long mfn)
825 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
832 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
834 struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
842 if (reclaim_one_ppgtt_mm(vgpu->gvt))
849 spt->vgpu = vgpu;
867 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
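
Lines 756, 825 and 867 outline the lifetime of a shadow page table (spt) in the per-vGPU radix tree: it is inserted keyed by the shadow page's machine frame number when the spt is allocated, looked up by MFN whenever a shadow entry has to be resolved back to its spt, and deleted again when the spt is freed. The lookup helper is small enough to reconstruct directly from lines 823-825:

static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
                struct intel_vgpu *vgpu, unsigned long mfn)
{
        return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}
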
882 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
888 spt = ppgtt_alloc_spt(vgpu, type);
895 ret = intel_vgpu_register_page_track(vgpu, gfn,
906 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
912 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
921 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
927 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
938 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
946 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
952 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
955 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
956 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
977 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
980 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
992 struct intel_vgpu *vgpu = spt->vgpu;
993 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1001 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
1004 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
1009 struct intel_vgpu *vgpu = spt->vgpu;
1014 trace_spt_change(spt->vgpu->id, "die", spt,
1041 spt->vgpu, &e);
1050 trace_spt_change(spt->vgpu->id, "release", spt,
1060 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1062 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1065 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1079 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1081 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1089 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1091 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1114 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1120 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1128 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1145 struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1159 * @vgpu: target vgpu
1165 static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
1168 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1171 if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
1174 pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
1181 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1185 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1197 sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1202 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
1231 trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
1237 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1241 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1257 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
1268 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1272 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1294 return split_64KB_gtt_entry(vgpu, spt, index, &se);
1297 ret = is_2MB_gtt_possible(vgpu, ge);
1299 return split_2MB_gtt_entry(vgpu, spt, index, &se);
1312 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
1324 struct intel_vgpu *vgpu = spt->vgpu;
1325 struct intel_gvt *gvt = vgpu->gvt;
1332 trace_spt_change(spt->vgpu->id, "born", spt,
1337 s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1347 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
1353 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1368 struct intel_vgpu *vgpu = spt->vgpu;
1369 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1372 trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1382 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1387 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1413 struct intel_vgpu *vgpu = spt->vgpu;
1418 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1425 s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1434 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1445 static int sync_oos_page(struct intel_vgpu *vgpu,
1448 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1449 struct intel_gvt *gvt = vgpu->gvt;
1456 trace_oos_change(vgpu->id, "sync", oos_page->id,
1464 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1466 spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1472 trace_oos_sync(vgpu->id, oos_page->id,
1476 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1480 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1488 static int detach_oos_page(struct intel_vgpu *vgpu,
1491 struct intel_gvt *gvt = vgpu->gvt;
1494 trace_oos_change(vgpu->id, "detach", oos_page->id,
1510 struct intel_gvt *gvt = spt->vgpu->gvt;
1513 ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
1524 trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1534 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1538 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1542 return sync_oos_page(spt->vgpu, oos_page);
1547 struct intel_gvt *gvt = spt->vgpu->gvt;
1560 ret = detach_oos_page(spt->vgpu, oos_page);
1576 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1579 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1580 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
1585 * @vgpu: a vGPU
1593 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1602 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1619 struct intel_vgpu *vgpu = spt->vgpu;
1621 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1653 vgpu->gtt.scratch_pt[type].page_mfn);
1660 vgpu->gtt.scratch_pt[type].page_mfn);
1664 vgpu->gtt.scratch_pt[type].page_mfn);
1693 &spt->vgpu->gtt.post_shadow_list_head);
1698 * @vgpu: a vGPU
1706 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1714 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1737 struct intel_vgpu *vgpu = spt->vgpu;
1738 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1739 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1772 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1785 false, 0, vgpu);
1800 struct intel_vgpu *vgpu = mm->vgpu;
1801 struct intel_gvt *gvt = vgpu->gvt;
1816 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1820 trace_spt_guest_change(vgpu->id, "destroy root pointer",
1830 struct intel_vgpu *vgpu = mm->vgpu;
1831 struct intel_gvt *gvt = vgpu->gvt;
1849 trace_spt_guest_change(vgpu->id, __func__, NULL,
1852 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1861 trace_spt_guest_change(vgpu->id, "populate root pointer",
1871 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1879 mm->vgpu = vgpu;
1893 * @vgpu: a vGPU
1902 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1905 struct intel_gvt *gvt = vgpu->gvt;
1909 mm = vgpu_alloc_mm(vgpu);
1936 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1945 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1950 mm = vgpu_alloc_mm(vgpu);
1956 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1959 vgpu->gvt->device_info.gtt_entry_size));
1965 mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1972 mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1995 gvt_err("vgpu mm pin count bug detected\n");
2000 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2002 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2027 * @mm: target vgpu mm
2047 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2049 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2050 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2084 struct intel_vgpu *vgpu = mm->vgpu;
2085 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2088 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2112 struct intel_vgpu *vgpu = mm->vgpu;
2113 struct intel_gvt *gvt = vgpu->gvt;
2126 if (!vgpu_gmadr_is_valid(vgpu, gma))
2135 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2174 trace_gma_translate(vgpu->id, "ppgtt", 0,
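
Lines 2112-2174 are the gma-to-gpa translator: the GGTT branch validates the guest graphics memory address against the vGPU's ranges and resolves it to a guest physical address before the "ggtt" trace point at line 2135 fires, while the PPGTT branch walks the page-table levels down to the trace at line 2174. The middle of the GGTT branch (the PTE lookup and offset splicing) is not quoted above, so the sketch below is assumption-heavy and its helper usage is illustrative:

static unsigned long ggtt_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry e;
        unsigned long gpa;

        if (!vgpu_gmadr_is_valid(vgpu, gma))
                return INTEL_GVT_INVALID_ADDR;

        /* assumed: fetch the guest GGTT PTE covering this page ... */
        ggtt_get_guest_entry(mm, &e, gma >> I915_GTT_PAGE_SHIFT);
        /* ... and splice the in-page offset back onto the frame it maps */
        gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) |
              (gma & (I915_GTT_PAGE_SIZE - 1));

        trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
        return gpa;
}
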
2184 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2187 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2188 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2197 if (!intel_gvt_ggtt_validate_range(vgpu,
2212 * @vgpu: a vGPU
2222 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2225 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2232 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2236 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2239 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2243 if (pfn != vgpu->gvt->gtt.scratch_mfn)
2244 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
2248 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2251 struct intel_gvt *gvt = vgpu->gvt;
2253 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2270 if (!vgpu_gmadr_is_valid(vgpu, gma))
2331 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
2336 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
2356 ggtt_invalidate_pte(vgpu, &e);
2365 * @vgpu: a vGPU
2375 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2378 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2380 struct intel_vgpu_submission *s = &vgpu->submission;
2388 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2394 for_each_engine(engine, vgpu->gvt->gt, i) {
2404 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2407 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
2408 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2409 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2411 vgpu->gvt->device_info.gtt_entry_size_shift;
2414 struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
2437 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2438 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2463 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
2469 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2472 struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
2476 if (vgpu->gtt.scratch_pt[i].page != NULL) {
2477 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2480 __free_page(vgpu->gtt.scratch_pt[i].page);
2481 vgpu->gtt.scratch_pt[i].page = NULL;
2482 vgpu->gtt.scratch_pt[i].page_mfn = 0;
2489 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2494 ret = alloc_scratch_pages(vgpu, i);
2502 release_scratch_page_tree(vgpu);
2508 * @vgpu: a vGPU
2516 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2518 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2526 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2532 intel_vgpu_reset_ggtt(vgpu, false);
2536 return create_scratch_page_tree(vgpu);
2539 void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2544 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2549 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2550 gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2552 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2554 ppgtt_free_all_spt(vgpu);
2558 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2563 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2569 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2570 vgpu->gtt.ggtt_mm = NULL;
2575 * @vgpu: a vGPU
2583 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2585 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2586 intel_vgpu_destroy_ggtt_mm(vgpu);
2587 release_scratch_page_tree(vgpu);
2646 * @vgpu: a vGPU
2654 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2660 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2682 * @vgpu: a vGPU
2691 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2696 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2700 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2709 * @vgpu: a vGPU
2717 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2721 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2807 * @vgpu: a vGPU
2812 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2817 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2820 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2822 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2831 * @vgpu: a vGPU
2838 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2840 struct intel_gvt *gvt = vgpu->gvt;
2841 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2850 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2851 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2854 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2855 ggtt_invalidate_pte(vgpu, &old_entry);
2857 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2860 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2861 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2864 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2865 ggtt_invalidate_pte(vgpu, &old_entry);
2867 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
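
Lines 2838-2867 show the GGTT reset walking both halves of the vGPU's window and stamping every slot: when invalidate_old is set, the old host entry is fetched first and its DMA mapping torn down, then the slot is rewritten with a scratch-page PTE prepared before the loops (not quoted here). A sketch of the aperture loop; the hidden-range loop at lines 2860-2867 is identical apart from its base and size, and the while-style iteration is an assumption:

        index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
        num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
        while (num_entries--) {
                if (invalidate_old) {
                        /* drop whatever DMA mapping the old PTE held */
                        ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
                        ggtt_invalidate_pte(vgpu, &old_entry);
                }
                /* point the slot at the scratch page ("entry" prepared earlier) */
                ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
        }
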
2883 struct intel_vgpu *vgpu;
2890 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
2891 mm = vgpu->gtt.ggtt_mm;
2893 num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2894 offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2898 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2901 num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2902 offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2906 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
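
The closing fragments (lines 2883-2906) replay each vGPU's saved GGTT contents back into the hardware: for every registered vGPU the aperture and hidden ranges are walked and the cached PTE value is written at the corresponding host offset with write_pte64(). Where the cached value comes from is not quoted, so the array reads and the present-bit filter in the sketch below are assumptions tied to the host_ggtt_aperture/host_ggtt_hidden buffers seen at lines 1965 and 1972:

        idr_for_each_entry(&gvt->vgpu_idr, vgpu, id) {
                mm = vgpu->gtt.ggtt_mm;

                /* aperture (low) half */
                num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
                offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
                for (idx = 0; idx < num_low; idx++) {
                        pte = mm->ggtt_mm.host_ggtt_aperture[idx];      /* assumed source */
                        if (pte & GEN8_PAGE_PRESENT)                    /* assumed: skip empty slots */
                                write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
                }

                /* hidden (high) half, same pattern */
                num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
                offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
                for (idx = 0; idx < num_hi; idx++) {
                        pte = mm->ggtt_mm.host_ggtt_hidden[idx];        /* assumed source */
                        if (pte & GEN8_PAGE_PRESENT)
                                write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
                }
        }
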