Lines matching refs:gvt (i915 GVT-g MMIO handler code)

41 #include "gvt.h"
63 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
65 struct drm_i915_private *i915 = gvt->gt->i915;
81 static bool intel_gvt_match_device(struct intel_gvt *gvt,
84 return intel_gvt_get_device_type(gvt) & device;
99 struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
104 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
111 static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
118 if (!intel_gvt_match_device(gvt, device))
128 p = intel_gvt_find_mmio_info(gvt, i);
135 gvt->mmio.mmio_attribute[i / 4] = flags;
146 * @gvt: a GVT device
153 intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
159 for_each_engine(engine, gvt->gt, id)
220 if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
256 struct intel_gvt *gvt = vgpu->gvt;
265 mmio_hw_access_pre(gvt->gt);
268 mmio_hw_access_post(gvt->gt);
286 if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
348 engine_mask &= vgpu->gvt->gt->info.engine_mask;
507 refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
538 int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
649 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
775 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1011 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1052 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1076 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1179 if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
1183 } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
1499 struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
1560 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1616 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1635 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1687 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1688 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1689 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1690 IS_COMETLAKE(vgpu->gvt->gt->i915)) {
1700 } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
1713 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1714 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1715 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1716 IS_COMETLAKE(vgpu->gvt->gt->i915))
1741 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1772 if (IS_BROXTON(vgpu->gvt->gt->i915))
1946 struct intel_gvt *gvt = vgpu->gvt;
1948 intel_gvt_render_mmio_to_engine(gvt, offset);
1958 vgpu == gvt->scheduler.engine_owner[engine->id] ||
1961 mmio_hw_access_pre(gvt->gt);
1963 intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
1964 mmio_hw_access_post(gvt->gt);
1973 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1974 const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
2017 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
2022 if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2023 IS_COMETLAKE(vgpu->gvt->gt->i915))
2032 if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2033 IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
2137 ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
2163 if (HAS_ENGINE(gvt->gt, VCS1)) \
2179 static int init_generic_mmio_info(struct intel_gvt *gvt)
2181 struct drm_i915_private *dev_priv = gvt->gt->i915;
2442 static int init_bdw_mmio_info(struct intel_gvt *gvt)
2577 static int init_skl_mmio_info(struct intel_gvt *gvt)
2579 struct drm_i915_private *dev_priv = gvt->gt->i915;
2749 static int init_bxt_mmio_info(struct intel_gvt *gvt)
2797 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
2800 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
2801 int num = gvt->mmio.num_mmio_block;
2814 * @gvt: GVT device
2820 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
2826 hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
2829 kfree(gvt->mmio.mmio_block);
2830 gvt->mmio.mmio_block = NULL;
2831 gvt->mmio.num_mmio_block = 0;
2833 vfree(gvt->mmio.mmio_attribute);
2834 gvt->mmio.mmio_attribute = NULL;
2840 struct intel_gvt *gvt = iter->data;
2851 p = intel_gvt_find_mmio_info(gvt, i);
2870 hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
2871 gvt->mmio.num_tracked_mmio++;
2879 struct intel_gvt *gvt = iter->data;
2880 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
2884 (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
2889 gvt->mmio.mmio_block = block = ret;
2891 block += gvt->mmio.num_mmio_block;
2898 gvt->mmio.num_mmio_block++;
2912 static int init_mmio_info(struct intel_gvt *gvt)
2915 .i915 = gvt->gt->i915,
2916 .data = gvt,
2923 static int init_mmio_block_handlers(struct intel_gvt *gvt)
2927 block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
2930 i915_mmio_reg_offset(gvt->mmio.mmio_block->offset));
2942 * @gvt: GVT device
2950 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
2952 struct intel_gvt_device_info *info = &gvt->device_info;
2953 struct drm_i915_private *i915 = gvt->gt->i915;
2954 int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
2957 gvt->mmio.mmio_attribute = vzalloc(size);
2958 if (!gvt->mmio.mmio_attribute)
2961 ret = init_mmio_info(gvt);
2965 ret = init_mmio_block_handlers(gvt);
2969 ret = init_generic_mmio_info(gvt);
2974 ret = init_bdw_mmio_info(gvt);
2981 ret = init_bdw_mmio_info(gvt);
2984 ret = init_skl_mmio_info(gvt);
2988 ret = init_bdw_mmio_info(gvt);
2991 ret = init_skl_mmio_info(gvt);
2994 ret = init_bxt_mmio_info(gvt);
3001 intel_gvt_clean_mmio_info(gvt);
3007 * @gvt: a GVT device
3014 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
3015 int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
3018 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3022 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
3023 ret = handler(gvt, e->offset, data);
3028 for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
3034 ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data);
3104 * @gvt: a GVT device
3111 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3131 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
3132 struct intel_gvt *gvt = vgpu->gvt;
3144 mmio_block = find_mmio_block(gvt, offset);
3155 mmio_info = intel_gvt_find_mmio_info(gvt, offset);
3168 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3186 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3202 void intel_gvt_restore_fence(struct intel_gvt *gvt)
3207 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3208 mmio_hw_access_pre(gvt->gt);
3211 mmio_hw_access_post(gvt->gt);
3215 static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
3218 struct drm_i915_private *dev_priv = gvt->gt->i915;
3220 if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
3226 void intel_gvt_restore_mmio(struct intel_gvt *gvt)
3231 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3232 mmio_hw_access_pre(gvt->gt);
3233 intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
3234 mmio_hw_access_post(gvt->gt);
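
The listing above traces the GVT-g tracked-MMIO bookkeeping: a hash table of register descriptors keyed by offset (intel_gvt_find_mmio_info walking gvt->mmio.mmio_info_table with hash_for_each_possible), a per-offset attribute array indexed by offset >> 2 (F_PM_SAVE and friends), and a callback-based iterator (intel_gvt_for_each_tracked_mmio). The standalone sketch below models only that shape; the names tracked_reg, mmio_table, find_reg, track_reg and for_each_tracked_reg are illustrative inventions, not the kernel's functions, and the bucketing and flag values are simplified assumptions.

/*
 * Standalone sketch of the tracked-MMIO pattern seen in the listing:
 * a hash table keyed by register offset, a flags word per register,
 * and a callback-based walk over every tracked entry.  All names here
 * are illustrative only and are not the GVT functions themselves.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_BITS 6
#define TABLE_SIZE (1u << TABLE_BITS)

struct tracked_reg {
	uint32_t offset;          /* register offset, 4-byte aligned */
	uint32_t flags;           /* attribute bits, e.g. a "save across PM" flag */
	struct tracked_reg *next; /* hash-chain link */
};

struct mmio_table {
	struct tracked_reg *buckets[TABLE_SIZE];
};

static unsigned int hash_offset(uint32_t offset)
{
	/* offsets are 4-byte aligned, so drop the low bits before bucketing */
	return (offset >> 2) & (TABLE_SIZE - 1);
}

static struct tracked_reg *find_reg(struct mmio_table *t, uint32_t offset)
{
	for (struct tracked_reg *e = t->buckets[hash_offset(offset)]; e; e = e->next)
		if (e->offset == offset)
			return e;
	return NULL;
}

static int track_reg(struct mmio_table *t, uint32_t offset, uint32_t flags)
{
	struct tracked_reg *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	e->offset = offset;
	e->flags = flags;
	e->next = t->buckets[hash_offset(offset)];
	t->buckets[hash_offset(offset)] = e;
	return 0;
}

/* Hand every tracked register to a caller-supplied handler, stopping on error. */
static int for_each_tracked_reg(struct mmio_table *t,
				int (*handler)(uint32_t offset, void *data),
				void *data)
{
	for (unsigned int i = 0; i < TABLE_SIZE; i++)
		for (struct tracked_reg *e = t->buckets[i]; e; e = e->next) {
			int ret = handler(e->offset, data);

			if (ret)
				return ret;
		}
	return 0;
}

static int print_reg(uint32_t offset, void *data)
{
	(void)data;
	printf("tracked MMIO at 0x%05x\n", (unsigned int)offset);
	return 0;
}

int main(void)
{
	struct mmio_table t = { 0 };

	track_reg(&t, 0x2030, 0);        /* hypothetical plain register */
	track_reg(&t, 0x7000c, 1u << 0); /* hypothetical register with a flag set */

	if (find_reg(&t, 0x2030))
		puts("0x2030 is tracked");

	return for_each_tracked_reg(&t, print_reg, NULL);
}

The kernel code additionally keeps contiguous register ranges in a separate array (gvt->mmio.mmio_block, see find_mmio_block and the second loop in intel_gvt_for_each_tracked_mmio around lines 3018-3034 of the listing); the sketch omits that second structure for brevity.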