Lines matching refs: hba
31 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
94 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
96 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
101 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
103 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
108 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
110 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
115 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
117 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
122 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
127 ufshcd_dme_get(hba,
133 ufshcd_dme_set(hba,
136 ufshcd_dme_get(hba,
139 ufshcd_dme_set(hba,
142 ufshcd_dme_get(hba,
147 ufshcd_dme_set(hba,
150 ufshcd_dme_get(hba,
153 ufshcd_dme_set(hba,
158 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
164 dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
166 hba->caps &= ~UFSHCD_CAP_CRYPTO;
170 static void ufs_mtk_host_reset(struct ufs_hba *hba)
172 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
185 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
189 *rc = devm_reset_control_get(hba->dev, str);
191 dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
197 static void ufs_mtk_init_reset(struct ufs_hba *hba)
199 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
201 ufs_mtk_init_reset_control(hba, &host->hci_reset,
203 ufs_mtk_init_reset_control(hba, &host->unipro_reset,
205 ufs_mtk_init_reset_control(hba, &host->crypto_reset,
209 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
212 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
216 hba->vps->hba_enable_delay_us = 0;
218 hba->vps->hba_enable_delay_us = 600;
219 ufs_mtk_host_reset(hba);
222 if (hba->caps & UFSHCD_CAP_CRYPTO)
223 ufs_mtk_crypto_enable(hba);
226 ufshcd_writel(hba, 0,
228 hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
229 hba->ahit = 0;
236 ufshcd_writel(hba,
237 ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
244 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
246 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
247 struct device *dev = hba->dev;
282 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
284 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
295 ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
298 ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
305 value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
314 dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
330 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
333 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
335 if (hba->dev_info.clk_gating_wait_us) {
337 hba->dev_info.clk_gating_wait_us;
345 static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
347 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
350 ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
351 ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
352 ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
353 ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
354 ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
356 ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
360 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
376 ufs_mtk_dbg_sel(hba);
377 val = ufshcd_readl(hba, REG_UFS_PROBE);
397 dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
400 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
409 ufs_mtk_dbg_sel(hba);
410 val = ufshcd_readl(hba, REG_UFS_PROBE);
423 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
425 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
434 if (ufs_mtk_is_va09_supported(hba)) {
445 if (ufs_mtk_is_va09_supported(hba)) {
452 dev_info(hba->dev,
478 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
480 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
485 if (!ufs_mtk_is_boost_crypt_enabled(hba))
494 dev_info(hba->dev, "clk_prepare_enable(): %d\n",
502 dev_info(hba->dev,
510 dev_info(hba->dev,
519 dev_info(hba->dev,
526 dev_info(hba->dev,
534 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
539 ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
541 dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
548 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
550 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
552 struct device *dev = hba->dev;
575 if (ufs_mtk_init_host_clk(hba, "crypt_mux",
579 if (ufs_mtk_init_host_clk(hba, "crypt_lp",
583 if (ufs_mtk_init_host_clk(hba, "crypt_perf",
595 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
597 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
599 host->reg_va09 = regulator_get(hba->dev, "va09");
601 dev_info(hba->dev, "failed to get va09");
606 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
608 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
609 struct device_node *np = hba->dev->of_node;
612 ufs_mtk_init_boost_crypt(hba);
615 ufs_mtk_init_va09_pwr_ctrl(hba);
626 dev_info(hba->dev, "caps: 0x%x", host->caps);
629 static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
631 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
640 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
642 ufs_mtk_boost_crypt(hba, scale_up);
643 ufs_mtk_boost_pm_qos(hba, scale_up);
646 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
648 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
652 ufs_mtk_setup_ref_clk(hba, on);
653 if (!ufshcd_is_clkscaling_supported(hba))
654 ufs_mtk_scale_perf(hba, on);
656 if (!ufshcd_is_clkscaling_supported(hba))
657 ufs_mtk_scale_perf(hba, on);
658 ufs_mtk_setup_ref_clk(hba, on);
665 * @hba: host controller instance
671 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
674 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
687 if (ufshcd_is_link_off(hba)) {
689 } else if (ufshcd_is_link_hibern8(hba) ||
690 (!ufshcd_can_hibern8_during_gating(hba) &&
691 ufshcd_is_auto_hibern8_enabled(hba))) {
697 ret = ufs_mtk_wait_link_state(hba,
705 ufs_mtk_pwr_ctrl(hba, false);
707 ufs_mtk_pwr_ctrl(hba, true);
713 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
715 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
724 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
732 if (hba->ufs_version < ufshci_version(3, 0))
733 hba->ufs_version = ufshci_version(3, 0);
738 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
740 return hba->ufs_version;
746 * @hba: per adapter instance
748 static void ufs_mtk_init_clocks(struct ufs_hba *hba)
750 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
751 struct list_head *head = &hba->clk_list_head;
776 hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
777 dev_info(hba->dev,
784 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
786 struct ufs_vreg_info *info = &hba->vreg_info;
787 struct device_node *np = hba->dev->of_node;
788 struct device *dev = hba->dev;
793 if (hba->vreg_info.vcc)
803 ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
826 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
828 struct ufs_vreg_info *info = &hba->vreg_info;
831 if (hba->dev_info.wspecversion >= 0x0300) {
844 devm_kfree(hba->dev, (*vreg_off)->name);
845 devm_kfree(hba->dev, *vreg_off);
850 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
852 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
858 pdev = container_of(hba->dev, struct platform_device, dev);
867 host->mcq_intr_info[i].hba = hba;
869 dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
883 * @hba: host controller instance
891 static int ufs_mtk_init(struct ufs_hba *hba)
894 struct device *dev = hba->dev;
905 host->hba = hba;
906 ufshcd_set_variant(hba, host);
915 ufs_mtk_init_host_caps(hba);
917 ufs_mtk_init_mcq_irq(hba);
919 err = ufs_mtk_bind_mphy(hba);
923 ufs_mtk_init_reset(hba);
926 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
929 hba->caps |= UFSHCD_CAP_CLK_GATING;
932 hba->caps |= UFSHCD_CAP_CRYPTO;
935 hba->caps |= UFSHCD_CAP_WB_EN;
938 hba->caps |= UFSHCD_CAP_CLK_SCALING;
940 hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
941 hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
942 hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
943 hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
946 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
948 ufs_mtk_init_clocks(hba);
957 ufs_mtk_mphy_power_on(hba, true);
958 ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
960 host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
969 ufshcd_set_variant(hba, NULL);
974 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
977 if (!ufs_mtk_is_pmc_via_fastauto(hba))
980 if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
994 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
998 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1014 if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
1015 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
1016 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
1018 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
1019 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
1021 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
1023 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
1025 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
1028 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
1031 ret = ufshcd_uic_change_pwr_mode(hba,
1035 dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
1041 ret = ufshcd_dme_configure_adapt(hba,
1049 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
1058 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
1071 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
1074 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1076 ret = ufshcd_dme_set(hba,
1091 static int ufs_mtk_pre_link(struct ufs_hba *hba)
1096 ufs_mtk_get_controller_version(hba);
1098 ret = ufs_mtk_unipro_set_lpm(hba, false);
1107 ret = ufshcd_disable_host_tx_lcc(hba);
1112 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
1118 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
1123 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
1127 if (ufshcd_is_clkgating_allowed(hba)) {
1128 if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
1130 hba->ahit);
1133 ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
1137 static void ufs_mtk_post_link(struct ufs_hba *hba)
1140 ufs_mtk_cfg_unipro_cg(hba, true);
1142 /* will be configured during probe hba */
1143 if (ufshcd_is_auto_hibern8_supported(hba))
1144 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
1147 ufs_mtk_setup_clk_gating(hba);
1150 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
1157 ret = ufs_mtk_pre_link(hba);
1160 ufs_mtk_post_link(hba);
1170 static int ufs_mtk_device_reset(struct ufs_hba *hba)
1174 /* disable hba before device reset */
1175 ufshcd_hba_stop(hba);
1193 dev_info(hba->dev, "device reset done\n");
1198 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
1202 err = ufshcd_hba_enable(hba);
1206 err = ufs_mtk_unipro_set_lpm(hba, false);
1210 err = ufshcd_uic_hibern8_exit(hba);
1212 ufshcd_set_link_active(hba);
1216 if (!hba->mcq_enabled) {
1217 err = ufshcd_make_hba_operational(hba);
1219 ufs_mtk_config_mcq(hba, false);
1220 ufshcd_mcq_make_queues_operational(hba);
1221 ufshcd_mcq_config_mac(hba, hba->nutrs);
1223 ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
1233 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
1238 ufshcd_writel(hba,
1239 (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
1242 err = ufs_mtk_unipro_set_lpm(hba, true);
1245 ufs_mtk_unipro_set_lpm(hba, false);
1252 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
1256 if (hba->vreg_info.vccq)
1257 vccqx = hba->vreg_info.vccq;
1259 vccqx = hba->vreg_info.vccq2;
1265 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
1270 (unsigned long)hba->dev_info.wspecversion,
1274 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
1276 if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1280 if (!hba->vreg_info.vcc)
1284 if (lpm && ufshcd_is_ufs_dev_active(hba))
1288 if (lpm && hba->vreg_info.vcc->enabled)
1292 ufs_mtk_vccqx_set_lpm(hba, lpm);
1293 ufs_mtk_vsx_set_lpm(hba, lpm);
1295 ufs_mtk_vsx_set_lpm(hba, lpm);
1296 ufs_mtk_vccqx_set_lpm(hba, lpm);
1300 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
1305 ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
1308 ufs_mtk_wait_idle_state(hba, 5);
1310 ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1312 dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1315 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
1322 if (ufshcd_is_auto_hibern8_supported(hba))
1323 ufs_mtk_auto_hibern8_disable(hba);
1327 if (ufshcd_is_link_hibern8(hba)) {
1328 err = ufs_mtk_link_set_lpm(hba);
1333 if (!ufshcd_is_link_active(hba)) {
1339 err = ufs_mtk_mphy_power_on(hba, false);
1344 if (ufshcd_is_link_off(hba))
1356 ufshcd_set_link_off(hba);
1360 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1365 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1366 ufs_mtk_dev_vreg_set_lpm(hba, false);
1370 err = ufs_mtk_mphy_power_on(hba, true);
1374 if (ufshcd_is_link_hibern8(hba)) {
1375 err = ufs_mtk_link_set_hpm(hba);
1382 return ufshcd_link_recovery(hba);
1385 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
1388 ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
1391 ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
1394 ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
1399 ufs_mtk_dbg_sel(hba);
1400 ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
1403 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1405 struct ufs_dev_info *dev_info = &hba->dev_info;
1409 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1410 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
1419 ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1421 ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1423 ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1425 ufs_mtk_setup_ref_clk_wait_us(hba,
1430 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1432 ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1434 if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1435 (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1436 hba->vreg_info.vcc->always_on = true;
1441 hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1445 ufs_mtk_vreg_fix_vcc(hba);
1446 ufs_mtk_vreg_fix_vccqx(hba);
1449 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1460 dev_info(hba->dev,
1468 dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
1473 dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
1477 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
1482 hba->clk_scaling.min_gear = UFS_HS_G4;
1484 hba->vps->devfreq_profile.polling_ms = 200;
1485 hba->vps->ondemand_data.upthreshold = 50;
1486 hba->vps->ondemand_data.downdifferential = 20;
1498 * @hba: per adapter instance
1501 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
1503 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1510 dev_info(hba->dev,
1524 dev_info(hba->dev,
1533 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
1536 if (!ufshcd_is_clkscaling_supported(hba))
1541 ufs_mtk_clk_scale(hba, scale_up);
1544 ufs_mtk_scale_perf(hba, scale_up);
1550 static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
1555 static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
1560 hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
1561 hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
1562 hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
1563 hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
1566 opr = &hba->mcq_opr[i];
1568 opr->base = hba->mmio_base + opr->offset;
1574 static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
1576 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1580 dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
1584 hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
1591 struct ufs_hba *hba = mcq_intr_info->hba;
1596 hwq = &hba->uhq[qid];
1598 events = ufshcd_mcq_read_cqis(hba, qid);
1600 ufshcd_mcq_write_cqis(hba, events, qid);
1603 ufshcd_mcq_poll_cqe_lock(hba, hwq);
1608 static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
1610 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1617 dev_err(hba->dev, "invalid irq. %d\n", i);
1622 ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
1625 dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
1628 dev_err(hba->dev, "Cannot request irq %d\n", ret);
1636 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
1638 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1643 ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
1646 ret = ufs_mtk_config_mcq_irq(hba);
1654 ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
1655 ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
1660 static int ufs_mtk_config_esi(struct ufs_hba *hba)
1662 return ufs_mtk_config_mcq(hba, true);
1753 struct ufs_hba *hba = platform_get_drvdata(pdev);
1756 ufshcd_remove(hba);
1763 struct ufs_hba *hba = dev_get_drvdata(dev);
1770 ufs_mtk_dev_vreg_set_lpm(hba, true);
1777 struct ufs_hba *hba = dev_get_drvdata(dev);
1779 ufs_mtk_dev_vreg_set_lpm(hba, false);
1788 struct ufs_hba *hba = dev_get_drvdata(dev);
1795 ufs_mtk_dev_vreg_set_lpm(hba, true);
1802 struct ufs_hba *hba = dev_get_drvdata(dev);
1804 ufs_mtk_dev_vreg_set_lpm(hba, false);
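
Nearly every hit above shares one pattern: ufs_mtk_init() stores a driver-private struct behind the hba with ufshcd_set_variant() (line 906), and each helper recovers it with ufshcd_get_variant() before testing a capability bit (lines 96, 103, 110, 117, ...). Below is a minimal sketch of that pattern, assuming kernel headers for drivers/ufs and flag/field names that are not visible in the listing (the caps layout and UFS_MTK_CAP_VA09_PWR_CTRL are assumed).

    #include <linux/bits.h>
    #include <ufs/ufshcd.h>              /* struct ufs_hba, ufshcd_{get,set}_variant() */

    /* assumed name; the real definition lives in ufs-mediatek.h */
    #define UFS_MTK_CAP_VA09_PWR_CTRL    BIT(1)

    struct ufs_mtk_host {
            struct ufs_hba *hba;         /* back-pointer, set in ufs_mtk_init() (line 905) */
            u32 caps;                    /* UFS_MTK_CAP_* feature bits */
            /* ... clocks, regulators, reset controls, MCQ IRQ table ... */
    };

    /* shape of the helper referenced at lines 101-103 of the listing */
    static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
    {
            struct ufs_mtk_host *host = ufshcd_get_variant(hba);

            return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
    }

The back-pointer is what lets contexts that do not start from the hba itself (for example the per-queue MCQ interrupt info dereferenced at line 1591) get back to the controller instance.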