Lines Matching defs:gvt (each excerpt below is prefixed with its line number in the source file)

40 #include "gvt.h"
50 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
52 struct drm_i915_private *i915 = gvt->gt->i915;
68 bool intel_gvt_match_device(struct intel_gvt *gvt,
71 return intel_gvt_get_device_type(gvt) & device;
86 static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
91 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
98 static int new_mmio_info(struct intel_gvt *gvt,
106 if (!intel_gvt_match_device(gvt, device))
121 p = find_mmio_info(gvt, info->offset);
138 gvt->mmio.mmio_attribute[info->offset / 4] = flags;
140 hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
141 gvt->mmio.num_tracked_mmio++;
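
The find_mmio_info()/new_mmio_info() pair above registers each tracked register in a hash table keyed by its offset and records its flags in a flat attribute array indexed by offset / 4, i.e. one slot per 4-byte register (lines 121, 138-141). The following is a minimal userspace sketch of that bookkeeping; the aperture size, bucket count and flag value are assumptions, and the kernel's hashtable helpers are replaced by a plain chained bucket array.

#include <stdio.h>
#include <stdlib.h>

#define MMIO_SIZE    (2 * 1024 * 1024)   /* assumed MMIO aperture size */
#define HASH_BUCKETS 256                 /* assumed bucket count */

struct mmio_info {
	unsigned int offset;
	struct mmio_info *next;          /* stand-in for the kernel's hash node */
};

static unsigned char mmio_attribute[MMIO_SIZE / 4]; /* one flags byte per 4-byte register */
static struct mmio_info *mmio_info_table[HASH_BUCKETS];
static unsigned int num_tracked_mmio;

static struct mmio_info *find_mmio_info(unsigned int offset)
{
	struct mmio_info *e = mmio_info_table[(offset / 4) % HASH_BUCKETS];

	for (; e; e = e->next)
		if (e->offset == offset)
			return e;
	return NULL;
}

static int new_mmio_info(unsigned int offset, unsigned char flags)
{
	unsigned int bucket = (offset / 4) % HASH_BUCKETS;
	struct mmio_info *info;

	if (find_mmio_info(offset))      /* refuse a duplicate registration */
		return -1;

	info = calloc(1, sizeof(*info));
	if (!info)
		return -1;

	info->offset = offset;
	mmio_attribute[offset / 4] = flags;  /* flat per-register attribute array */
	info->next = mmio_info_table[bucket];
	mmio_info_table[bucket] = info;
	num_tracked_mmio++;
	return 0;
}

int main(void)
{
	new_mmio_info(0x2030, 0x1);
	printf("0x2030 tracked: %s, total %u\n",
	       find_mmio_info(0x2030) ? "yes" : "no", num_tracked_mmio);
	return 0;
}

Looking up the offset before adding the new entry mirrors the duplicate check visible at line 121 above.
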
148 * @gvt: a GVT device
155 intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
161 for_each_engine(engine, gvt->gt, id)
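
intel_gvt_render_mmio_to_engine() resolves a register offset to the engine it belongs to by walking every engine of the GT (the for_each_engine loop at line 161) and comparing against each engine's MMIO base. A self-contained illustration of that mapping follows; the engine names and bases are placeholders, and masking off the low 12 bits before comparing is an assumption about how the offset is normalized, not something shown in the excerpt.

#include <stdio.h>
#include <stddef.h>

struct engine {
	const char *name;
	unsigned int mmio_base;   /* placeholder bases, for illustration only */
};

static const struct engine engines[] = {
	{ "rcs0", 0x02000 },
	{ "bcs0", 0x22000 },
	{ "vcs0", 0x1c000 },
};

/* Map an arbitrary register offset to the engine whose 4 KiB MMIO page
 * contains it, by masking the low 12 bits before comparing. */
static const struct engine *render_mmio_to_engine(unsigned int offset)
{
	size_t i;

	offset &= ~0xfffu;
	for (i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
		if (engines[i].mmio_base == offset)
			return &engines[i];
	return NULL;
}

int main(void)
{
	const struct engine *e = render_mmio_to_engine(0x2030);

	printf("0x2030 belongs to %s\n", e ? e->name : "no engine");
	return 0;
}
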
222 if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
258 struct intel_gvt *gvt = vgpu->gvt;
267 mmio_hw_access_pre(gvt->gt);
270 mmio_hw_access_post(gvt->gt);
288 if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
350 engine_mask &= vgpu->gvt->gt->info.engine_mask;
460 intel_gvt_check_vblank_emulation(vgpu->gvt);
524 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
760 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
801 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
825 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
928 if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
932 } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
1248 struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
1309 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1365 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1384 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1436 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1437 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1438 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1439 IS_COMETLAKE(vgpu->gvt->gt->i915)) {
1449 } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
1462 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1463 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1464 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1465 IS_COMETLAKE(vgpu->gvt->gt->i915))
1490 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1521 if (IS_BROXTON(vgpu->gvt->gt->i915))
1695 struct intel_gvt *gvt = vgpu->gvt;
1697 intel_gvt_render_mmio_to_engine(gvt, offset);
1707 vgpu == gvt->scheduler.engine_owner[engine->id] ||
1710 mmio_hw_access_pre(gvt->gt);
1712 intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
1713 mmio_hw_access_post(gvt->gt);
1722 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1723 const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1766 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1771 if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1772 IS_COMETLAKE(vgpu->gvt->gt->i915))
1781 if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1782 IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
1886 ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
1915 if (HAS_ENGINE(gvt->gt, VCS1)) \
1934 static int init_generic_mmio_info(struct intel_gvt *gvt)
1936 struct drm_i915_private *dev_priv = gvt->gt->i915;
2752 static int init_bdw_mmio_info(struct intel_gvt *gvt)
2754 struct drm_i915_private *dev_priv = gvt->gt->i915;
2941 static int init_skl_mmio_info(struct intel_gvt *gvt)
2943 struct drm_i915_private *dev_priv = gvt->gt->i915;
3192 static int init_bxt_mmio_info(struct intel_gvt *gvt)
3194 struct drm_i915_private *dev_priv = gvt->gt->i915;
3379 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
3382 unsigned long device = intel_gvt_get_device_type(gvt);
3383 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3384 int num = gvt->mmio.num_mmio_block;
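
find_mmio_block() walks gvt->mmio.mmio_block linearly, using the device type from line 3382 to skip entries that do not apply to the current hardware and returning the first entry whose range covers the offset. A standalone illustration of that lookup follows; the block table contents are invented, and the range/device checks are a sketch of the pattern rather than the kernel's exact code.

#include <stddef.h>
#include <stdio.h>

struct gvt_mmio_block {
	unsigned int offset;     /* start of the block */
	unsigned int size;       /* length in bytes */
	unsigned long device;    /* bitmask of device types this block applies to */
};

static const struct gvt_mmio_block mmio_blocks[] = {
	{ 0x180000, 0x1000, ~0UL },   /* hypothetical range, all device types */
	{ 0x1c0000, 0x0800, 0x1UL },  /* hypothetical range, one device type only */
};

static const struct gvt_mmio_block *
find_mmio_block(unsigned long device, unsigned int offset)
{
	size_t i;

	for (i = 0; i < sizeof(mmio_blocks) / sizeof(mmio_blocks[0]); i++) {
		if (!(device & mmio_blocks[i].device))
			continue;
		if (offset >= mmio_blocks[i].offset &&
		    offset < mmio_blocks[i].offset + mmio_blocks[i].size)
			return &mmio_blocks[i];
	}
	return NULL;
}

int main(void)
{
	printf("block found: %s\n",
	       find_mmio_block(~0UL, 0x180010) ? "yes" : "no");
	return 0;
}
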
3399 * @gvt: GVT device
3405 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
3411 hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
3414 vfree(gvt->mmio.mmio_attribute);
3415 gvt->mmio.mmio_attribute = NULL;
3434 * @gvt: GVT device
3442 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
3444 struct intel_gvt_device_info *info = &gvt->device_info;
3445 struct drm_i915_private *i915 = gvt->gt->i915;
3446 int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
3449 gvt->mmio.mmio_attribute = vzalloc(size);
3450 if (!gvt->mmio.mmio_attribute)
3453 ret = init_generic_mmio_info(gvt);
3458 ret = init_bdw_mmio_info(gvt);
3465 ret = init_bdw_mmio_info(gvt);
3468 ret = init_skl_mmio_info(gvt);
3472 ret = init_bdw_mmio_info(gvt);
3475 ret = init_skl_mmio_info(gvt);
3478 ret = init_bxt_mmio_info(gvt);
3483 gvt->mmio.mmio_block = mmio_blocks;
3484 gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
3488 intel_gvt_clean_mmio_info(gvt);
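
intel_gvt_setup_mmio_info() sizes and allocates the per-register attribute array (lines 3446-3450), installs the generic register table, then layers platform-specific tables on top (Broadwell gets the BDW table, Skylake-class parts get BDW plus SKL, and Broxton additionally gets its own, per lines 3458-3478) before publishing the static mmio_blocks array; any failure unwinds through intel_gvt_clean_mmio_info(). A hedged outline of that flow, with stub init helpers and a simplified platform enum standing in for the real device checks:

#include <stdio.h>
#include <stdlib.h>

/* Stubs standing in for init_generic/bdw/skl/bxt_mmio_info(). */
static int init_generic_mmio_info(void) { return 0; }
static int init_bdw_mmio_info(void)     { return 0; }
static int init_skl_mmio_info(void)     { return 0; }
static int init_bxt_mmio_info(void)     { return 0; }

enum platform { PLAT_BDW, PLAT_SKL, PLAT_BXT };   /* simplified device model */

static unsigned char *mmio_attribute;

static void clean_mmio_info(void)
{
	free(mmio_attribute);
	mmio_attribute = NULL;
}

static int setup_mmio_info(enum platform plat, size_t mmio_size)
{
	int ret = -1;

	mmio_attribute = calloc(mmio_size / 4, 1); /* one attribute byte per register */
	if (!mmio_attribute)
		return -1;

	ret = init_generic_mmio_info();
	if (ret)
		goto err;

	/* Later platforms stack their tables on top of the earlier ones. */
	if (plat == PLAT_BDW) {
		ret = init_bdw_mmio_info();
	} else if (plat == PLAT_SKL) {
		ret = init_bdw_mmio_info();
		if (!ret)
			ret = init_skl_mmio_info();
	} else if (plat == PLAT_BXT) {
		ret = init_bdw_mmio_info();
		if (!ret)
			ret = init_skl_mmio_info();
		if (!ret)
			ret = init_bxt_mmio_info();
	}
	if (ret)
		goto err;

	return 0;
err:
	clean_mmio_info();
	return ret;
}

int main(void)
{
	printf("setup: %d\n", setup_mmio_info(PLAT_SKL, 2 * 1024 * 1024));
	clean_mmio_info();
	return 0;
}
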
3494 * @gvt: a GVT device
3501 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
3502 int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
3505 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3509 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
3510 ret = handler(gvt, e->offset, data);
3515 for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
3521 ret = handler(gvt,
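
intel_gvt_for_each_tracked_mmio() applies a caller-supplied handler first to every entry in the mmio_info hash table (lines 3509-3510) and then to each 4-byte slot of every registered MMIO block (lines 3515-3521), stopping on the first non-zero return. A compact, self-contained sketch of that callback-driven walk; the sample offsets and block are invented:

#include <stdio.h>

struct mmio_block {
	unsigned int offset;
	unsigned int size;
};

/* Invented sample data: two individually tracked registers and one block. */
static const unsigned int tracked_offsets[] = { 0x2030, 0x20a0 };
static const struct mmio_block blocks[] = { { 0x180000, 16 } };

typedef int (*mmio_handler_t)(unsigned int offset, void *data);

static int for_each_tracked_mmio(mmio_handler_t handler, void *data)
{
	unsigned int i, off;
	int ret;

	/* Individually registered registers first... */
	for (i = 0; i < sizeof(tracked_offsets) / sizeof(tracked_offsets[0]); i++) {
		ret = handler(tracked_offsets[i], data);
		if (ret)
			return ret;
	}

	/* ...then every 4-byte slot inside each MMIO block. */
	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		for (off = blocks[i].offset;
		     off < blocks[i].offset + blocks[i].size; off += 4) {
			ret = handler(off, data);
			if (ret)
				return ret;
		}
	}
	return 0;
}

static int print_offset(unsigned int offset, void *data)
{
	(void)data;
	printf("tracked offset 0x%x\n", offset);
	return 0;
}

int main(void)
{
	return for_each_tracked_mmio(print_offset, NULL);
}
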
3593 * @gvt: a GVT device
3600 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3620 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
3621 struct intel_gvt *gvt = vgpu->gvt;
3633 mmio_block = find_mmio_block(gvt, offset);
3644 mmio_info = find_mmio_info(gvt, offset);
3657 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3675 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3691 void intel_gvt_restore_fence(struct intel_gvt *gvt)
3696 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3697 mmio_hw_access_pre(gvt->gt);
3700 mmio_hw_access_post(gvt->gt);
3704 static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
3708 struct drm_i915_private *dev_priv = gvt->gt->i915;
3710 if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
3716 void intel_gvt_restore_mmio(struct intel_gvt *gvt)
3721 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3722 mmio_hw_access_pre(gvt->gt);
3723 intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
3724 mmio_hw_access_post(gvt->gt);
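
intel_gvt_restore_fence() and intel_gvt_restore_mmio() both loop over every vGPU in the idr and bracket the work with mmio_hw_access_pre()/post(); the MMIO variant feeds mmio_pm_restore_handler() through the tracked-MMIO iterator, and that handler only touches registers whose attribute byte has F_PM_SAVE set (line 3710, where offset >> 2 is the same slot index as the offset / 4 used at registration time). A self-contained sketch of that filtering handler; the flag value, register contents and hw_write() helper are invented for illustration:

#include <stdio.h>

#define F_PM_SAVE 0x10   /* invented flag bit, standing in for the kernel's F_PM_SAVE */

/* Invented attribute array and vGPU register copy, for illustration only. */
static unsigned char mmio_attribute[0x10000 / 4];
static unsigned int vgpu_vreg[0x10000 / 4];

static void hw_write(unsigned int offset, unsigned int val)
{
	printf("restore 0x%x <- 0x%x\n", offset, val);
}

/* Handler in the shape expected by the tracked-MMIO iterator: rewrite a
 * register to hardware only if it was marked for save/restore across
 * power management transitions. */
static int mmio_pm_restore_handler(unsigned int offset, void *data)
{
	(void)data;

	if (mmio_attribute[offset >> 2] & F_PM_SAVE)   /* offset >> 2 == offset / 4 */
		hw_write(offset, vgpu_vreg[offset >> 2]);
	return 0;
}

int main(void)
{
	/* Mark one register as PM-saved and give it a virtual value. */
	mmio_attribute[0x2030 >> 2] = F_PM_SAVE;
	vgpu_vreg[0x2030 >> 2] = 0xdeadbeef;

	mmio_pm_restore_handler(0x2030, NULL);   /* restored */
	mmio_pm_restore_handler(0x2034, NULL);   /* skipped: flag not set */
	return 0;
}
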