Lines Matching refs:vgpu
Cross-reference hits from the Intel GVT-g (i915) MMIO handler code. Each hit below is prefixed with its line number in the source file.
87 static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
90 memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
93 static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
96 memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
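The two hits above are, between them, essentially the whole of these helpers: every virtual register lives in a per-vGPU shadow array, and read_vreg/write_vreg are plain memcpy operations against it. Reassembled from the matched lines (the trailing parameters are inferred from the handler signatures elsewhere in this listing):

    static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
                          void *p_data, unsigned int bytes)
    {
            /* Copy out of the vGPU's shadow register file, never real MMIO. */
            memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
    }

    static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
                           void *p_data, unsigned int bytes)
    {
            /* Update only the shadow copy; each handler decides what,
             * if anything, to propagate to hardware. */
            memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
    }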
173 void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
188 pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
189 vgpu->failsafe = true;
192 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
195 unsigned int max_fence = vgpu_fence_sz(vgpu);
203 * and we will let vgpu enter failsafe mode.
205 if (!vgpu->pv_notified)
206 enter_failsafe_mode(vgpu,
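sanitize_fence_mmio_access is the gatekeeper for the fence handlers below: a fence index past vgpu_fence_sz() from a guest that never signalled PV support is taken as evidence of a non-GVT-aware driver. A minimal sketch of that guard, reconstructed around the matched lines (the exact error reporting is elided):

    static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
                    unsigned int fence_num, void *p_data, unsigned int bytes)
    {
            unsigned int max_fence = vgpu_fence_sz(vgpu);

            if (fence_num >= max_fence) {
                    /* A guest touching out-of-bound fence registers without
                     * having read the PV info page first is treated as not
                     * supporting GVT, and we will let vgpu enter failsafe
                     * mode. */
                    if (!vgpu->pv_notified)
                            enter_failsafe_mode(vgpu,
                                            GVT_FAILSAFE_UNSUPPORTED_GUEST);
                    memset(p_data, 0, bytes);
                    return -EINVAL;
            }
            return 0;
    }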
215 static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
220 if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
222 gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
224 gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
236 write_vreg(vgpu, offset, p_data, bytes);
240 static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
245 ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
249 read_vreg(vgpu, off, p_data, bytes);
253 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
256 struct intel_gvt *gvt = vgpu->gvt;
260 ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
263 write_vreg(vgpu, off, p_data, bytes);
266 intel_vgpu_write_fence(vgpu, fence_num,
267 vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
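Fence writes are one of the few paths here that do reach real hardware: after validation, the shadow is updated and the full 64-bit vreg value is programmed into the physical fence slot assigned to this vGPU. A sketch of the flow implied by the hits above (the hardware-access bracketing the real function performs around the final call is elided):

    static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
                    void *p_data, unsigned int bytes)
    {
            unsigned int fence_num = offset_to_fence_num(off);
            int ret;

            ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
            if (ret)
                    return ret;

            /* Shadow first, then push the combined 64-bit value down. */
            write_vreg(vgpu, off, p_data, bytes);
            intel_vgpu_write_fence(vgpu, fence_num,
                            vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
            return 0;
    }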
277 static int mul_force_wake_write(struct intel_vgpu *vgpu,
283 old = vgpu_vreg(vgpu, offset);
286 if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
306 vgpu_vreg(vgpu, offset) = new;
307 vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
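mul_force_wake_write shows the standard trick for request/ack register pairs: no forcewake is actually taken on the guest's behalf, so the ack register is completed immediately and the guest's polling loop terminates at once. Sketch of the core (the per-domain ack_reg_offset selection is elided; CALC_MODE_MASK_REG, which applies the upper-16-bit write-enable mask, is assumed from context):

    u32 old = vgpu_vreg(vgpu, offset);
    u32 new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

    /* ... choose ack_reg_offset for this forcewake domain ... */

    vgpu_vreg(vgpu, offset) = new;
    /* Complete the handshake instantly: mirror the low 16 bits (the
     * value half of the mask/value word) into the ack register. */
    vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));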
311 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
317 write_vreg(vgpu, offset, p_data, bytes);
318 data = vgpu_vreg(vgpu, offset);
321 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
325 gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
329 gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
333 gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
337 gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
341 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
345 gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
346 vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
348 engine_mask &= vgpu->gvt->gt->info.engine_mask;
352 intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
355 vgpu_vreg(vgpu, offset) = 0;
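gdrst_mmio_write turns the guest's GDRST bits into an engine mask, clamps it to engines the host actually has, runs a vGPU-level reset, and finally clears the register so the guest's wait-for-ack poll succeeds. A condensed sketch (two domains spelled out; the rest decode the same way):

    intel_engine_mask_t engine_mask = 0;
    u32 data;

    write_vreg(vgpu, offset, p_data, bytes);
    data = vgpu_vreg(vgpu, offset);

    if (data & GEN6_GRDOM_FULL)
            engine_mask = ALL_ENGINES;
    else {
            if (data & GEN6_GRDOM_RENDER)
                    engine_mask |= BIT(RCS0);
            if (data & GEN6_GRDOM_MEDIA)
                    engine_mask |= BIT(VCS0);
            /* ... BCS/VECS/VCS2/GuC handled likewise ... */
            engine_mask &= vgpu->gvt->gt->info.engine_mask;
    }

    intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

    /* The guest polls for the reset bits to clear; ack at once. */
    vgpu_vreg(vgpu, offset) = 0;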
360 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
363 return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
366 static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
369 return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
372 static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
375 write_vreg(vgpu, offset, p_data, bytes);
377 if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
378 vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
379 vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
380 vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
381 vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
384 vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
390 static int transconf_mmio_write(struct intel_vgpu *vgpu,
393 write_vreg(vgpu, offset, p_data, bytes);
395 if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
396 vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
398 vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
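transconf_mmio_write is the simplest instance of the mirror-the-status-bit idiom used throughout this file: there is no real transcoder state machine to wait on, so the requested state is reported as already reached. Reassembled from the hits above:

    static int transconf_mmio_write(struct intel_vgpu *vgpu,
                    unsigned int offset, void *p_data, unsigned int bytes)
    {
            write_vreg(vgpu, offset, p_data, bytes);

            if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
                    vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
            else
                    vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
            return 0;
    }

lcpll_ctl_mmio_write and gen9_dbuf_ctl_mmio_write below follow the same shape with their LOCK/DONE and POWER_STATE bits.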
402 static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
405 write_vreg(vgpu, offset, p_data, bytes);
407 if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
408 vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
410 vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
412 if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
413 vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
415 vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
420 static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
428 vgpu_vreg(vgpu, offset) = 1 << 17;
431 vgpu_vreg(vgpu, offset) = 0x3;
434 vgpu_vreg(vgpu, offset) = 0x2f << 16;
440 read_vreg(vgpu, offset, p_data, bytes);
460 static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
463 u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));
477 switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
488 gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
489 vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
501 wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
503 wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));
507 refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
513 gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
514 vgpu->id, port_name(port), wrpll_ctl);
526 gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
527 vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
535 static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
538 int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
559 gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
563 temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
565 gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
566 vgpu->id, port_name(port), temp);
572 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
573 if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
575 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
577 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
579 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
581 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
586 gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
599 static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
605 if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
606 (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
607 dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
611 gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
612 vgpu->id, port_name(port));
617 switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
640 gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n",
641 vgpu->id, port_name(port), dpll_id);
647 static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
649 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
654 port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &
657 gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
663 dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
665 dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
667 dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);
670 link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A));
671 link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
674 htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
675 vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
680 u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);
692 gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
693 vgpu->id, pipe_name(PIPE_A), new_rate);
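vgpu_update_refresh_rate derives the rate from what the guest programmed: the DP link rate recovered by the per-platform helpers above, the PIPE_LINK M/N ratio, and the transcoder H/V totals. A sketch of the arithmetic only (the +1 reflects that the raw HTOTAL/VTOTAL fields encode total-minus-one; the exact scaling into vrefresh_k, which appears to hold millihertz, is an assumption):

    /* Pixel clock from the link clock and the M/N ratio:
     * pixel_clk = dp_br * M / N. */
    u64 pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);

    /* Refresh rate = pixel_clk / (htotal * vtotal), scaled by 1000. */
    u32 new_rate = DIV_ROUND_CLOSEST_ULL(pixel_clk * MSEC_PER_SEC,
                            mul_u32_u32(htotal + 1, vtotal + 1));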
697 static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
702 write_vreg(vgpu, offset, p_data, bytes);
703 data = vgpu_vreg(vgpu, offset);
706 vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE;
707 vgpu_update_refresh_rate(vgpu);
708 vgpu_update_vblank_emulation(vgpu, true);
710 vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE;
711 vgpu_update_vblank_emulation(vgpu, false);
770 static int force_nonpriv_write(struct intel_vgpu *vgpu,
775 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
778 gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
779 vgpu->id, offset, bytes);
785 gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
786 vgpu->id, reg_nonpriv, offset);
788 intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
793 static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
796 write_vreg(vgpu, offset, p_data, bytes);
798 if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
799 vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
801 vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
803 vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
809 static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
812 vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
819 static int fdi_auto_training_started(struct intel_vgpu *vgpu)
821 u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
822 u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
823 u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));
835 static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
864 if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
867 if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
869 && ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
900 static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
918 write_vreg(vgpu, offset, p_data, bytes);
922 ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
926 vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
928 ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
932 vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
935 if (fdi_auto_training_started(vgpu))
936 vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
944 static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
951 write_vreg(vgpu, offset, p_data, bytes);
954 data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
957 vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
962 static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
971 vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
972 (vgpu_vreg(vgpu, offset) & sticky_mask);
973 vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
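dp_tp_status_mmio_write implements write-1-to-clear ("sticky") status bits in software: ordinary bits take the written value, while a sticky bit ignores a written 0 and is cleared by a written 1. The two-step update from the hits above, annotated (the particular sticky_mask layout is recalled from the handler and should be treated as illustrative):

    u32 reg_val = *(u32 *)p_data;
    u32 sticky_mask = GENMASK(27, 26) | (1 << 24);

    /* Non-sticky bits follow the write; sticky bits keep the old value... */
    vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
                              (vgpu_vreg(vgpu, offset) & sticky_mask);
    /* ...then a written 1 clears the corresponding sticky bit. */
    vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);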
977 static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
982 write_vreg(vgpu, offset, p_data, bytes);
983 data = vgpu_vreg(vgpu, offset);
986 vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
990 static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
995 write_vreg(vgpu, offset, p_data, bytes);
996 data = vgpu_vreg(vgpu, offset);
999 vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
1001 vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
1008 static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1011 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1015 write_vreg(vgpu, offset, p_data, bytes);
1016 vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1018 vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
1020 if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
1021 intel_vgpu_trigger_virtual_event(vgpu, event);
1023 set_bit(event, vgpu->irq.flip_done_event[pipe]);
1031 static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1037 write_vreg(vgpu, offset, p_data, bytes);
1038 vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1040 if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
1041 intel_vgpu_trigger_virtual_event(vgpu, event);
1043 set_bit(event, vgpu->irq.flip_done_event[pipe]);
1048 static int reg50080_mmio_write(struct intel_vgpu *vgpu,
1052 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1057 write_vreg(vgpu, offset, p_data, bytes);
1059 vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1060 vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
1062 vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
1065 if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
1066 intel_vgpu_trigger_virtual_event(vgpu, event);
1068 set_bit(event, vgpu->irq.flip_done_event[pipe]);
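All three surface-flip handlers (pri_surf, spr_surf, reg50080) end the same way: an asynchronous flip signals its virtual flip-done event immediately, while a synchronous flip is parked on the pipe's flip_done_event bitmap to be raised at the next emulated vblank. The shared tail, as a sketch (async_flip_requested stands in for the per-plane control-register test):

    if (async_flip_requested)   /* e.g. PLANE_CTL_ASYNC_FLIP in the ctl reg */
            intel_vgpu_trigger_virtual_event(vgpu, event);
    else
            set_bit(event, vgpu->irq.flip_done_event[pipe]);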
1073 static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
1076 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1095 intel_vgpu_trigger_virtual_event(vgpu, event);
1099 static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
1115 vgpu_vreg(vgpu, reg) = value;
1118 return trigger_aux_channel_interrupt(vgpu, reg);
1161 static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
1164 struct intel_vgpu_display *display = &vgpu->display;
1176 write_vreg(vgpu, offset, p_data, bytes);
1177 data = vgpu_vreg(vgpu, offset);
1179 if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
1183 } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
1191 vgpu_vreg(vgpu, offset) = 0;
1199 msg = vgpu_vreg(vgpu, offset + 4);
1219 vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
1220 dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
1236 u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
1258 vgpu_vreg(vgpu, offset + 4) = 0;
1259 dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
1277 vgpu_vreg(vgpu, offset + 4) = 0;
1278 vgpu_vreg(vgpu, offset + 8) = 0;
1279 vgpu_vreg(vgpu, offset + 12) = 0;
1280 vgpu_vreg(vgpu, offset + 16) = 0;
1281 vgpu_vreg(vgpu, offset + 20) = 0;
1283 dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
1290 vgpu_vreg(vgpu, offset + 4 * idx) = 0;
1312 vgpu_vreg(vgpu, offset +
1318 dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
1324 intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
1327 trigger_aux_channel_interrupt(vgpu, offset);
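Inside dp_aux_ch_ctl_mmio_write, the first data register after the control register (offset + 4) is treated as the AUX message header, and the remaining data registers as payload. Decoding it the way the handler appears to (field layout per the DP AUX protocol; the local names are assumptions):

    msg  = vgpu_vreg(vgpu, offset + 4);     /* first AUX data word */
    addr = (msg >> 8) & 0xffff;             /* DPCD address, low 16 bits */
    ctrl = (msg >> 24) & 0xff;              /* command + address high nibble */
    len  = msg & 0xff;                      /* size field; note the len + 2
                                             * in the completions above */
    op   = ctrl >> 4;                       /* native/i2c, read/write */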
1331 static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
1335 write_vreg(vgpu, offset, p_data, bytes);
1339 static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1344 write_vreg(vgpu, offset, p_data, bytes);
1345 vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
1347 gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
1352 static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
1355 struct intel_vgpu_display *display = &vgpu->display;
1369 static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
1372 struct intel_vgpu_display *display = &vgpu->display;
1393 static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1396 if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
1398 unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
1400 vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
1403 read_vreg(vgpu, offset, p_data, bytes);
1407 static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1412 write_vreg(vgpu, offset, p_data, bytes);
1413 data = vgpu_vreg(vgpu, offset);
1421 vgpu_vreg(vgpu, offset) = data;
1423 if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
1425 unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
1428 write_virtual_sbi_register(vgpu, sbi_offset,
1429 vgpu_vreg_t(vgpu, SBI_DATA));
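SBI (sideband) traffic is emulated against a small per-vGPU cache of offset/value pairs instead of any real sideband transaction: sbi_ctl_mmio_write stores on the cross-register write opcode, sbi_data_mmio_read looks up on the read opcode. A sketch of the read side under that assumption (field names follow the display->sbi usage in this file):

    static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
                    unsigned int sbi_offset)
    {
            struct intel_vgpu_display *display = &vgpu->display;
            int num = display->sbi.number;
            int i;

            /* Linear scan of the cached (offset, value) pairs. */
            for (i = 0; i < num; ++i)
                    if (display->sbi.registers[i].offset == sbi_offset)
                            return display->sbi.registers[i].value;

            return 0;       /* unknown offsets read back as zero */
    }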
1437 static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1442 read_vreg(vgpu, offset, p_data, bytes);
1465 vgpu->pv_notified = true;
1469 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
1475 pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
1482 mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
1486 return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
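handle_g2v_notification is the PV path for guest page-table lifecycle: the guest deposits its root-table PDPs in the vgtif PV page, then writes a notification code to pvinfo. Create codes look up or build a shadow PPGTT mm for those PDPs; destroy codes drop it. A sketch of the dispatch (the VGT_G2V_* constants follow the PV interface naming and are assumptions here):

    enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
    struct intel_vgpu_mm *mm;
    u64 *pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));

    switch (notification) {
    case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
            root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
            fallthrough;
    case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
            mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
            return PTR_ERR_OR_ZERO(mm);
    case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
    case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
            return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
    default:
            return 0;
    }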
1497 static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
1499 struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
1507 snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
1513 static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1521 send_display_ready_uevent(vgpu, data ? 1 : 0);
1524 handle_g2v_notification(vgpu, data);
1542 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
1552 write_vreg(vgpu, offset, p_data, bytes);
1557 static int pf_write(struct intel_vgpu *vgpu,
1560 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1568 vgpu->id);
1572 return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
1575 static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
1578 write_vreg(vgpu, offset, p_data, bytes);
1580 if (vgpu_vreg(vgpu, offset) &
1582 vgpu_vreg(vgpu, offset) |=
1585 vgpu_vreg(vgpu, offset) &=
1590 static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
1593 write_vreg(vgpu, offset, p_data, bytes);
1595 if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
1596 vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
1598 vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
1603 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
1606 write_vreg(vgpu, offset, p_data, bytes);
1608 if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
1609 vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
1613 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
1616 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1619 write_vreg(vgpu, offset, p_data, bytes);
1620 mode = vgpu_vreg(vgpu, offset);
1625 vgpu->id);
1632 static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
1635 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1641 vgpu->id);
1644 write_vreg(vgpu, offset, p_data, bytes);
1649 static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
1652 write_vreg(vgpu, offset, p_data, bytes);
1656 static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
1661 if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
1664 if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
1667 if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
1670 if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
1673 vgpu_vreg(vgpu, offset) = v;
1675 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1678 static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
1683 u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
1687 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1688 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1689 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1690 IS_COMETLAKE(vgpu->gvt->gt->i915)) {
1700 } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
1713 if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1714 IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1715 IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1716 IS_COMETLAKE(vgpu->gvt->gt->i915))
1725 vgpu->id, value, *data0);
1733 return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1736 static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
1741 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1744 !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
1760 vgpu->hws_pga[engine->id] = value;
1762 vgpu->id, value, offset);
1764 return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1767 static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
1772 if (IS_BROXTON(vgpu->gvt->gt->i915))
1779 return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
1782 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1791 vgpu_vreg(vgpu, offset) = v;
1796 static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
1804 vgpu_vreg(vgpu, offset) = v;
1809 static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
1817 vgpu_vreg(vgpu, offset) = v;
1822 static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
1830 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
1833 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
1834 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
1838 vgpu_vreg(vgpu, offset) = v;
1843 static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
1846 u32 v = vgpu_vreg(vgpu, offset);
1850 vgpu_vreg(vgpu, offset) = v;
1852 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1855 static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
1861 vgpu_vreg(vgpu, offset - 0x600) = v;
1862 vgpu_vreg(vgpu, offset - 0x800) = v;
1864 vgpu_vreg(vgpu, offset - 0x400) = v;
1865 vgpu_vreg(vgpu, offset - 0x600) = v;
1868 vgpu_vreg(vgpu, offset) = v;
1873 static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
1879 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
1881 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
1886 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
1888 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
1893 vgpu_vreg(vgpu, offset) = v;
1898 static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
1901 vgpu_vreg(vgpu, offset) = 0;
1915 static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
1928 vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
1933 static int guc_status_read(struct intel_vgpu *vgpu,
1938 read_vreg(vgpu, offset, p_data, bytes);
1939 vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
1943 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1946 struct intel_gvt *gvt = vgpu->gvt;
1958 vgpu == gvt->scheduler.engine_owner[engine->id] ||
1962 vgpu_vreg(vgpu, offset) =
1967 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1970 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1973 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1974 const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1994 if (vgpu->d3_entered)
1995 vgpu->d3_entered = false;
1997 execlist = &vgpu->submission.execlist[engine->id];
2001 ret = intel_vgpu_submit_execlist(vgpu, engine);
2012 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2017 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
2022 if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2023 IS_COMETLAKE(vgpu->gvt->gt->i915))
2025 write_vreg(vgpu, offset, p_data, bytes);
2028 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2032 if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2033 IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
2035 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2045 !vgpu->pv_notified) {
2046 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2060 ret = intel_vgpu_select_submission_ops(vgpu,
2066 intel_vgpu_start_schedule(vgpu);
2071 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
2076 write_vreg(vgpu, offset, p_data, bytes);
2077 vgpu_vreg(vgpu, offset) = 0;
2098 set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
2103 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
2108 write_vreg(vgpu, offset, p_data, bytes);
2109 data = vgpu_vreg(vgpu, offset);
2116 vgpu_vreg(vgpu, offset) = data;
2120 static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
2127 write_vreg(vgpu, offset, p_data, bytes);
2131 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
3044 * @vgpu: a vGPU
3052 int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
3055 read_vreg(vgpu, offset, p_data, bytes);
3061 * @vgpu: a vGPU
3069 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3072 write_vreg(vgpu, offset, p_data, bytes);
3078 * @vgpu: a vGPU
3086 int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3091 old_vreg = vgpu_vreg(vgpu, offset);
3092 write_vreg(vgpu, offset, p_data, bytes);
3093 mask = vgpu_vreg(vgpu, offset) >> 16;
3094 vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
3095 (vgpu_vreg(vgpu, offset) & mask);
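intel_vgpu_mask_mmio_write implements the hardware convention for masked registers: bits 31:16 of the written value are a write-enable mask for bits 15:0, so unmasked bits keep their previous contents. Reassembled from the hits above:

    int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                    void *p_data, unsigned int bytes)
    {
            u32 mask, old_vreg;

            old_vreg = vgpu_vreg(vgpu, offset);
            write_vreg(vgpu, offset, p_data, bytes);

            /* Take the new value only where the mask enables it. */
            mask = vgpu_vreg(vgpu, offset) >> 16;
            vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
                                      (vgpu_vreg(vgpu, offset) & mask);
            return 0;
    }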
3119 * @vgpu: a vGPU
3128 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
3131 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
3132 struct intel_gvt *gvt = vgpu->gvt;
3148 return func(vgpu, offset, pdata, bytes);
3162 return mmio_info->read(vgpu, offset, pdata, bytes);
3169 old_vreg = vgpu_vreg(vgpu, offset);
3173 ret = mmio_info->write(vgpu, offset, pdata, bytes);
3181 data |= vgpu_vreg(vgpu, offset) & ro_mask;
3182 ret = mmio_info->write(vgpu, offset, &data, bytes);
3187 u32 mask = vgpu_vreg(vgpu, offset) >> 16;
3189 vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
3190 | (vgpu_vreg(vgpu, offset) & mask);
3198 intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
3199 intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
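On the write side, intel_vgpu_mmio_reg_rw enforces per-register read-only masks before calling the handler: bits covered by ro_mask are replaced with the current vreg contents so the guest cannot flip them. A sketch of that merge (pdata, bytes, ro_mask and mmio_info come from the surrounding function):

    u64 data = 0;

    /* Keep hardware-owned bits: guest value where writable,
     * existing vreg value under the read-only mask. */
    memcpy(&data, pdata, bytes);
    data &= ~ro_mask;
    data |= vgpu_vreg(vgpu, offset) & ro_mask;
    ret = mmio_info->write(vgpu, offset, &data, bytes);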
3204 struct intel_vgpu *vgpu;
3207 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3209 for (i = 0; i < vgpu_fence_sz(vgpu); i++)
3210 intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
3217 struct intel_vgpu *vgpu = data;
3221 intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
3228 struct intel_vgpu *vgpu;
3231 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3233 intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
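The final two clusters are the suspend/resume path: on restore, each vGPU's tracked registers are replayed from its shadow into real hardware, and the fences are reprogrammed from their 64-bit vreg values. The per-register callback, reassembled from the hits (the real function additionally filters on a PM-save attribute, elided here; the callback signature is assumed to match what intel_gvt_for_each_tracked_mmio expects):

    static int mmio_pm_restore_handler(struct intel_gvt *gvt,
                    u32 offset, void *data)
    {
            struct intel_vgpu *vgpu = data;
            struct drm_i915_private *dev_priv = gvt->gt->i915;

            /* Replay the shadow value into the physical register. */
            intel_uncore_write(&dev_priv->uncore, _MMIO(offset),
                            vgpu_vreg(vgpu, offset));
            return 0;
    }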