Lines matching refs:vgpu (each match is prefixed with its source line number)

56 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
59 return vgpu_gmadr_is_valid(vgpu, addr);
61 if (vgpu_gmadr_is_aperture(vgpu, addr) &&
62 vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
64 else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
65 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
74 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
76 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
78 if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
82 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
83 *h_addr = vgpu_aperture_gmadr_base(vgpu)
84 + (g_addr - vgpu_aperture_offset(vgpu));
86 *h_addr = vgpu_hidden_gmadr_base(vgpu)
87 + (g_addr - vgpu_hidden_offset(vgpu));
92 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
94 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
96 if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
100 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
101 *g_addr = vgpu_aperture_gmadr_base(vgpu)
102 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
104 *g_addr = vgpu_hidden_gmadr_base(vgpu)
105 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
109 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
115 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
124 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
130 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
303 struct intel_vgpu *vgpu)
305 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
312 ret = intel_gvt_read_gpa(vgpu, gpa +
318 e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
328 struct intel_vgpu *vgpu)
330 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
337 ret = intel_gvt_write_gpa(vgpu, gpa +
343 write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
553 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
560 entry, index, false, 0, mm->vgpu);
580 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
584 entry, index, false, 0, mm->vgpu);
596 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
602 false, 0, mm->vgpu);
608 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
613 false, 0, mm->vgpu);
619 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
623 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
629 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
634 if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
635 offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
637 } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
638 offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
642 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
654 struct intel_gvt *gvt = spt->vgpu->gvt;
665 spt->vgpu);
683 struct intel_gvt *gvt = spt->vgpu->gvt;
694 spt->vgpu);
735 static int detach_oos_page(struct intel_vgpu *vgpu,
740 struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;
742 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
747 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
751 detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
753 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
760 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
768 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
801 struct intel_vgpu *vgpu, unsigned long gfn)
805 track = intel_vgpu_find_page_track(vgpu, gfn);
814 struct intel_vgpu *vgpu, unsigned long mfn)
816 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
823 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
825 struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
833 if (reclaim_one_ppgtt_mm(vgpu->gvt))
840 spt->vgpu = vgpu;
858 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
873 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
879 spt = ppgtt_alloc_spt(vgpu, type);
886 ret = intel_vgpu_register_page_track(vgpu, gfn,
897 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
903 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
912 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
918 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
929 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
937 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
943 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
946 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
947 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
968 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
971 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
983 struct intel_vgpu *vgpu = spt->vgpu;
984 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
992 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
995 intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
1000 struct intel_vgpu *vgpu = spt->vgpu;
1005 trace_spt_change(spt->vgpu->id, "die", spt,
1032 spt->vgpu, &e);
1041 trace_spt_change(spt->vgpu->id, "release", spt,
1051 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1053 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1056 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1070 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1072 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1080 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1082 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1105 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1111 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1119 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1136 const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1148 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1152 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1164 sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1169 ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
1198 trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
1204 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1208 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1224 ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
1235 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1239 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1253 ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
1264 return split_64KB_gtt_entry(vgpu, spt, index, &se);
1267 if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) ||
1268 intel_gvt_dma_map_guest_page(vgpu, gfn,
1270 return split_2MB_gtt_entry(vgpu, spt, index, &se);
1288 struct intel_vgpu *vgpu = spt->vgpu;
1294 trace_spt_change(spt->vgpu->id, "born", spt,
1299 s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1308 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1323 struct intel_vgpu *vgpu = spt->vgpu;
1324 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1327 trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1337 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1342 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1368 struct intel_vgpu *vgpu = spt->vgpu;
1373 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1380 s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1389 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1400 static int sync_oos_page(struct intel_vgpu *vgpu,
1403 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1404 struct intel_gvt *gvt = vgpu->gvt;
1411 trace_oos_change(vgpu->id, "sync", oos_page->id,
1419 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1421 spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1427 trace_oos_sync(vgpu->id, oos_page->id,
1431 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1435 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1443 static int detach_oos_page(struct intel_vgpu *vgpu,
1446 struct intel_gvt *gvt = vgpu->gvt;
1449 trace_oos_change(vgpu->id, "detach", oos_page->id,
1465 struct intel_gvt *gvt = spt->vgpu->gvt;
1468 ret = intel_gvt_read_gpa(spt->vgpu,
1479 trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1489 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1493 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1497 return sync_oos_page(spt->vgpu, oos_page);
1502 struct intel_gvt *gvt = spt->vgpu->gvt;
1515 ret = detach_oos_page(spt->vgpu, oos_page);
1531 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1534 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1535 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
1540 * @vgpu: a vGPU
1548 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1557 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1574 struct intel_vgpu *vgpu = spt->vgpu;
1576 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1608 vgpu->gtt.scratch_pt[type].page_mfn);
1615 vgpu->gtt.scratch_pt[type].page_mfn);
1619 vgpu->gtt.scratch_pt[type].page_mfn);
1648 &spt->vgpu->gtt.post_shadow_list_head);
1653 * @vgpu: a vGPU
1661 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1669 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1692 struct intel_vgpu *vgpu = spt->vgpu;
1693 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1694 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1727 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1740 false, 0, vgpu);
1755 struct intel_vgpu *vgpu = mm->vgpu;
1756 struct intel_gvt *gvt = vgpu->gvt;
1771 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1775 trace_spt_guest_change(vgpu->id, "destroy root pointer",
1785 struct intel_vgpu *vgpu = mm->vgpu;
1786 struct intel_gvt *gvt = vgpu->gvt;
1796 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1807 trace_spt_guest_change(vgpu->id, __func__, NULL,
1810 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1819 trace_spt_guest_change(vgpu->id, "populate root pointer",
1829 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1837 mm->vgpu = vgpu;
1851 * @vgpu: a vGPU
1860 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1863 struct intel_gvt *gvt = vgpu->gvt;
1867 mm = vgpu_alloc_mm(vgpu);
1894 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1903 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1908 mm = vgpu_alloc_mm(vgpu);
1914 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1917 vgpu->gvt->device_info.gtt_entry_size));
1923 mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1930 mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1953 gvt_err("vgpu mm pin count bug detected\n");
1958 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1960 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1985 * @mm: target vgpu mm
2005 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2007 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2008 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2042 struct intel_vgpu *vgpu = mm->vgpu;
2043 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2046 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2070 struct intel_vgpu *vgpu = mm->vgpu;
2071 struct intel_gvt *gvt = vgpu->gvt;
2084 if (!vgpu_gmadr_is_valid(vgpu, gma))
2093 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2132 trace_gma_translate(vgpu->id, "ppgtt", 0,
2142 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2145 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2146 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2155 if (!intel_gvt_ggtt_validate_range(vgpu,
2170 * @vgpu: a vGPU
2180 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2183 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2190 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2194 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2197 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2201 if (pfn != vgpu->gvt->gtt.scratch_mfn)
2202 intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
2205 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2208 struct intel_gvt *gvt = vgpu->gvt;
2210 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2227 if (!vgpu_gmadr_is_valid(vgpu, gma))
2285 ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
2304 ggtt_invalidate_pte(vgpu, &e);
2313 * @vgpu: a vGPU
2323 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2326 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2328 struct intel_vgpu_submission *s = &vgpu->submission;
2336 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2342 for_each_engine(engine, vgpu->gvt->gt, i) {
2352 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2355 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
2356 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2357 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2359 vgpu->gvt->device_info.gtt_entry_size_shift;
2362 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2384 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2385 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2410 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
2416 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2419 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2423 if (vgpu->gtt.scratch_pt[i].page != NULL) {
2424 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2427 __free_page(vgpu->gtt.scratch_pt[i].page);
2428 vgpu->gtt.scratch_pt[i].page = NULL;
2429 vgpu->gtt.scratch_pt[i].page_mfn = 0;
2436 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2441 ret = alloc_scratch_pages(vgpu, i);
2449 release_scratch_page_tree(vgpu);
2455 * @vgpu: a vGPU
2463 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2465 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2473 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2479 intel_vgpu_reset_ggtt(vgpu, false);
2483 return create_scratch_page_tree(vgpu);
2486 void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2491 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2496 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2497 gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2499 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2501 ppgtt_free_all_spt(vgpu);
2505 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2510 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2516 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2517 vgpu->gtt.ggtt_mm = NULL;
2522 * @vgpu: a vGPU
2530 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2532 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2533 intel_vgpu_destroy_ggtt_mm(vgpu);
2534 release_scratch_page_tree(vgpu);
2593 * @vgpu: a vGPU
2601 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2607 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2629 * @vgpu: a vGPU
2638 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2643 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2647 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2656 * @vgpu: a vGPU
2664 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2668 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2754 * @vgpu: a vGPU
2759 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2764 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2767 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2769 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2778 * @vgpu: a vGPU
2785 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2787 struct intel_gvt *gvt = vgpu->gvt;
2788 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2797 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2798 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2801 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2802 ggtt_invalidate_pte(vgpu, &old_entry);
2804 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2807 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2808 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2811 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2812 ggtt_invalidate_pte(vgpu, &old_entry);
2814 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2830 struct intel_vgpu *vgpu;
2837 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
2838 mm = vgpu->gtt.ggtt_mm;
2840 num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2841 offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2845 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2848 num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2849 offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2853 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
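The helpers at lines 56-130 of the listing split a vGPU's guest graphics memory space into an aperture (CPU-mappable) range and a hidden (high, non-mappable) range, and translate guest addresses by rebasing them onto the matching host range. The sketch below is a minimal, self-contained user-space C illustration of that logic; the struct fields, helper names, and layout values are assumptions made for the example, not the kernel's vgpu_aperture_* and vgpu_hidden_* macros.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Minimal user-space sketch (not the kernel code) of the guest-to-host
 * graphics memory address translation shown at lines 56-130 above.
 * Every field, helper name and numeric value here is a simplified
 * stand-in chosen for illustration.
 */
struct fake_vgpu_gm {
	/* guest-visible offsets/sizes of the two graphics memory ranges */
	uint64_t aperture_offset, aperture_sz;
	uint64_t hidden_offset, hidden_sz;
	/* host addresses backing those ranges */
	uint64_t aperture_gmadr_base, hidden_gmadr_base;
};

static bool gmadr_is_aperture(const struct fake_vgpu_gm *gm, uint64_t addr)
{
	return addr >= gm->aperture_offset &&
	       addr < gm->aperture_offset + gm->aperture_sz;
}

static bool gmadr_is_hidden(const struct fake_vgpu_gm *gm, uint64_t addr)
{
	return addr >= gm->hidden_offset &&
	       addr < gm->hidden_offset + gm->hidden_sz;
}

/*
 * Mirrors the shape of intel_gvt_ggtt_gmadr_g2h(): pick the range the
 * guest address falls in, then rebase it onto the matching host range.
 */
static int gmadr_g2h(const struct fake_vgpu_gm *gm, uint64_t g_addr,
		     uint64_t *h_addr)
{
	if (gmadr_is_aperture(gm, g_addr))
		*h_addr = gm->aperture_gmadr_base +
			  (g_addr - gm->aperture_offset);
	else if (gmadr_is_hidden(gm, g_addr))
		*h_addr = gm->hidden_gmadr_base +
			  (g_addr - gm->hidden_offset);
	else
		return -1;	/* outside both guest ranges */
	return 0;
}

/*
 * Mirrors intel_gvt_ggtt_index_g2h(): a GGTT entry index is the gmadr
 * shifted by the 4 KiB page shift, so shift up, translate, shift back.
 */
#define GTT_PAGE_SHIFT 12
static int ggtt_index_g2h(const struct fake_vgpu_gm *gm,
			  unsigned long g_index, unsigned long *h_index)
{
	uint64_t h_addr;
	int ret;

	ret = gmadr_g2h(gm, (uint64_t)g_index << GTT_PAGE_SHIFT, &h_addr);
	if (ret)
		return ret;
	*h_index = h_addr >> GTT_PAGE_SHIFT;
	return 0;
}

int main(void)
{
	/* toy layout: 64 MiB aperture at 0, 448 MiB hidden range at 1 GiB */
	struct fake_vgpu_gm gm = {
		.aperture_offset = 0,
		.aperture_sz = 64ull << 20,
		.hidden_offset = 1ull << 30,
		.hidden_sz = 448ull << 20,
		.aperture_gmadr_base = 0x10000000ull,
		.hidden_gmadr_base = 0x80000000ull,
	};
	uint64_t h_addr;
	unsigned long h_index;

	if (!gmadr_g2h(&gm, 0x1000, &h_addr))
		printf("aperture gmadr 0x1000 -> host 0x%llx\n",
		       (unsigned long long)h_addr);
	if (!ggtt_index_g2h(&gm, (1ul << 18) + 2, &h_index))
		printf("hidden index %lu -> host index %lu\n",
		       (1ul << 18) + 2, h_index);
	return 0;
}

Since intel_gvt_ggtt_index_g2h() and intel_gvt_ggtt_h2g_index() only shift by I915_GTT_PAGE_SHIFT around this same translation, checking both the first and last byte of a range against a single guest range (as intel_gvt_ggtt_validate_range() does at lines 56-66) is what keeps a multi-page access from silently straddling the aperture/hidden boundary.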