// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *      Stanley Chu <stanley.chu@mediatek.com>
 *      Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include "ufshcd.h"
#include "ufshcd-crypto.h"
#include "ufshcd-pltfrm.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"

/*
 * SMC wrappers for MediaTek SiP (Silicon Provider) services: each call
 * traps into the secure world with function ID MTK_SIP_UFS_CONTROL, and
 * the service status comes back in res.a0, which callers below treat as
 * non-zero on failure.
 */
#define ufs_mtk_smc(cmd, val, res) \
        arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
                      cmd, val, 0, 0, 0, 0, 0, &(res))

#define ufs_mtk_crypto_ctrl(res, enable) \
        ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)

#define ufs_mtk_ref_clk_notify(on, res) \
        ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

#define ufs_mtk_device_reset_ctrl(high, res) \
        ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)

static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
        UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
        UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
                UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
        END_FIX
};

static const struct ufs_mtk_host_cfg ufs_mtk_mt8192_cfg = {
        .caps = UFS_MTK_CAP_BOOST_CRYPT_ENGINE,
};

static const struct of_device_id ufs_mtk_of_match[] = {
        {
                .compatible = "mediatek,mt8183-ufshci",
        },
        {
                .compatible = "mediatek,mt8192-ufshci",
                .data = &ufs_mtk_mt8192_cfg
        },
        {},
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        return (host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
        u32 tmp;

        if (enable) {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp |
                      (1 << RX_SYMBOL_CLK_GATE_EN) |
                      (1 << SYS_CLK_GATE_EN) |
                      (1 << TX_CLK_GATE_EN);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        } else {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
                              (1 << SYS_CLK_GATE_EN) |
                              (1 << TX_CLK_GATE_EN));
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        }
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        ufs_mtk_crypto_ctrl(res, 1);
        if (res.a0) {
                dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
                         __func__, res.a0);
                hba->caps &= ~UFSHCD_CAP_CRYPTO;
        }
}

static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        reset_control_assert(host->hci_reset);
        reset_control_assert(host->crypto_reset);
        reset_control_assert(host->unipro_reset);

        usleep_range(100, 110);

        reset_control_deassert(host->unipro_reset);
        reset_control_deassert(host->crypto_reset);
        reset_control_deassert(host->hci_reset);
}
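/*
 * Reset lines are optional: if a control cannot be obtained below, the
 * handle is cleared to NULL, and since the reset framework treats NULL
 * handles as no-ops, ufs_mtk_host_reset() stays safe on platforms that
 * lack some of these lines.
 */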
static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
                                       struct reset_control **rc,
                                       char *str)
{
        *rc = devm_reset_control_get(hba->dev, str);
        if (IS_ERR(*rc)) {
                dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
                         str, PTR_ERR(*rc));
                *rc = NULL;
        }
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ufs_mtk_init_reset_control(hba, &host->hci_reset,
                                   "hci_rst");
        ufs_mtk_init_reset_control(hba, &host->unipro_reset,
                                   "unipro_rst");
        ufs_mtk_init_reset_control(hba, &host->crypto_reset,
                                   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (status == PRE_CHANGE) {
                if (host->unipro_lpm) {
                        hba->vps->hba_enable_delay_us = 0;
                } else {
                        hba->vps->hba_enable_delay_us = 600;
                        ufs_mtk_host_reset(hba);
                }

                if (hba->caps & UFSHCD_CAP_CRYPTO)
                        ufs_mtk_crypto_enable(hba);
        }

        return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct device *dev = hba->dev;
        struct device_node *np = dev->of_node;
        int err = 0;

        host->mphy = devm_of_phy_get_by_index(dev, np, 0);

        if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * The UFS driver might be probed before the phy driver is.
                 * In that case, return -EPROBE_DEFER so that probing is
                 * retried later.
                 */
                err = -EPROBE_DEFER;
                dev_info(dev,
                         "%s: required phy hasn't probed yet. err = %d\n",
                         __func__, err);
        } else if (IS_ERR(host->mphy)) {
                err = PTR_ERR(host->mphy);
                if (err != -ENODEV) {
                        dev_info(dev, "%s: PHY get failed %d\n", __func__,
                                 err);
                }
        }

        if (err)
                host->mphy = NULL;
        /*
         * Allow an unbound mphy because not every platform needs specific
         * mphy control.
         */
        if (err == -ENODEV)
                err = 0;

        return err;
}
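/*
 * Reference-clock handshake (bit layout inferred from the masks used
 * below): software toggles the request bit in REG_UFS_REFCLK_CTRL and
 * hardware mirrors it in the ack bit one position above, so the wait
 * loop compares (value & REFCLK_ACK) >> 1 with (value & REFCLK_REQUEST).
 */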
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct arm_smccc_res res;
        ktime_t timeout, time_checked;
        u32 value;

        if (host->ref_clk_enabled == on)
                return 0;

        if (on) {
                ufs_mtk_ref_clk_notify(on, res);
                ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
                ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
        } else {
                ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
        }

        /* Wait for ack */
        timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
        do {
                time_checked = ktime_get();
                value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

                /* Wait until the ack bit equals the req bit */
                if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
                        goto out;

                usleep_range(100, 200);
        } while (ktime_before(time_checked, timeout));

        dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

        ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

        return -ETIMEDOUT;

out:
        host->ref_clk_enabled = on;
        if (!on) {
                ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
                ufs_mtk_ref_clk_notify(on, res);
        }

        return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
                                          u16 gating_us, u16 ungating_us)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (hba->dev_info.clk_gating_wait_us) {
                host->ref_clk_gating_wait_us =
                        hba->dev_info.clk_gating_wait_us;
        } else {
                host->ref_clk_gating_wait_us = gating_us;
        }

        host->ref_clk_ungating_wait_us = ungating_us;
}
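/*
 * The link state is sampled through the vendor debug probe: writing 0x20
 * to REG_UFS_DEBUG_SEL routes the link state onto REG_UFS_PROBE, whose
 * top nibble (bits 31:28) is then compared against the expected VS_LINK_*
 * value.
 */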
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
                                   unsigned long max_wait_ms)
{
        ktime_t timeout, time_checked;
        u32 val;

        timeout = ktime_add_ms(ktime_get(), max_wait_ms);
        do {
                time_checked = ktime_get();
                ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
                val = ufshcd_readl(hba, REG_UFS_PROBE);
                val = val >> 28;

                if (val == state)
                        return 0;

                /* Sleep for max. 200us */
                usleep_range(100, 200);
        } while (ktime_before(time_checked, timeout));

        if (val == state)
                return 0;

        return -ETIMEDOUT;
}

static void ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct phy *mphy = host->mphy;

        if (!mphy)
                return;

        if (on && !host->mphy_powered_on)
                phy_power_on(mphy);
        else if (!on && host->mphy_powered_on)
                phy_power_off(mphy);
        else
                return;
        host->mphy_powered_on = on;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
                                struct clk **clk_out)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk))
                err = PTR_ERR(clk);
        else
                *clk_out = clk;

        return err;
}

/*
 * Note the ordering: when boosting, Vcore is raised before the crypt
 * clock is reparented to the performance source; when unboosting, the
 * clock is moved back to the low-power parent before Vcore is lowered.
 * This keeps the supply sufficient for whichever clock rate is selected.
 */
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_mtk_crypt_cfg *cfg;
        struct regulator *reg;
        int volt, ret;

        if (!ufs_mtk_is_boost_crypt_enabled(hba))
                return;

        cfg = host->crypt;
        volt = cfg->vcore_volt;
        reg = cfg->reg_vcore;

        ret = clk_prepare_enable(cfg->clk_crypt_mux);
        if (ret) {
                dev_info(hba->dev, "clk_prepare_enable(): %d\n",
                         ret);
                return;
        }

        if (boost) {
                ret = regulator_set_voltage(reg, volt, INT_MAX);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set vcore to %d\n", volt);
                        goto out;
                }

                ret = clk_set_parent(cfg->clk_crypt_mux,
                                     cfg->clk_crypt_perf);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set clk_crypt_perf\n");
                        regulator_set_voltage(reg, 0, INT_MAX);
                        goto out;
                }
        } else {
                ret = clk_set_parent(cfg->clk_crypt_mux,
                                     cfg->clk_crypt_lp);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set clk_crypt_lp\n");
                        goto out;
                }

                ret = regulator_set_voltage(reg, 0, INT_MAX);
                if (ret) {
                        dev_info(hba->dev,
                                 "failed to set vcore to MIN\n");
                }
        }
out:
        clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
                                 struct clk **clk)
{
        int ret;

        ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
        if (ret) {
                dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
                         name, ret);
        }

        return ret;
}
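/*
 * Boost-crypt resources are all-or-nothing: the dvfsrc-vcore regulator,
 * the boost-crypt-vcore-min DT property and all three crypt clocks must
 * be available, otherwise UFS_MTK_CAP_BOOST_CRYPT_ENGINE is cleared and
 * the feature is silently disabled.
 */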
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct ufs_mtk_crypt_cfg *cfg;
        struct device *dev = hba->dev;
        struct regulator *reg;
        u32 volt;

        host->caps = host->cfg->caps;

        if (!ufs_mtk_is_boost_crypt_enabled(hba))
                return;

        host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
                                   GFP_KERNEL);
        if (!host->crypt)
                goto disable_caps;

        reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
        if (IS_ERR(reg)) {
                dev_info(dev, "failed to get dvfsrc-vcore: %ld",
                         PTR_ERR(reg));
                goto disable_caps;
        }

        if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
                                 &volt)) {
                dev_info(dev, "failed to get boost-crypt-vcore-min");
                goto disable_caps;
        }

        cfg = host->crypt;
        if (ufs_mtk_init_host_clk(hba, "crypt_mux",
                                  &cfg->clk_crypt_mux))
                goto disable_caps;

        if (ufs_mtk_init_host_clk(hba, "crypt_lp",
                                  &cfg->clk_crypt_lp))
                goto disable_caps;

        if (ufs_mtk_init_host_clk(hba, "crypt_perf",
                                  &cfg->clk_crypt_perf))
                goto disable_caps;

        cfg->reg_vcore = reg;
        cfg->vcore_volt = volt;
        dev_info(dev, "caps: boost-crypt");
        return;

disable_caps:
        host->caps &= ~UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
}

/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks; otherwise, disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
                                enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        int ret = 0;
        bool clk_pwr_off = false;

        /*
         * If ufs_mtk_init() has not finished yet, simply ignore this
         * call: ufs_mtk_setup_clocks() is invoked again from
         * ufs_mtk_init() once initialization is done.
         */
        if (!host)
                return 0;

        if (!on && status == PRE_CHANGE) {
                if (ufshcd_is_link_off(hba)) {
                        clk_pwr_off = true;
                } else if (ufshcd_is_link_hibern8(hba) ||
                           (!ufshcd_can_hibern8_during_gating(hba) &&
                            ufshcd_is_auto_hibern8_enabled(hba))) {
                        /*
                         * Gate ref-clk and power off the mphy if the link
                         * state is OFF, or is Hibern8 entered by either
                         * Auto-Hibern8 or ufshcd_link_state_transition().
                         */
                        ret = ufs_mtk_wait_link_state(hba,
                                                      VS_LINK_HIBERN8,
                                                      15);
                        if (!ret)
                                clk_pwr_off = true;
                }

                if (clk_pwr_off) {
                        ufs_mtk_boost_crypt(hba, on);
                        ufs_mtk_setup_ref_clk(hba, on);
                        ufs_mtk_mphy_power_on(hba, on);
                }
        } else if (on && status == POST_CHANGE) {
                ufs_mtk_mphy_power_on(hba, on);
                ufs_mtk_setup_ref_clk(hba, on);
                ufs_mtk_boost_crypt(hba, on);
        }

        return ret;
}

/**
 * ufs_mtk_init - initialize the MediaTek UFS host variant
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
        const struct of_device_id *id;
        struct device *dev = hba->dev;
        struct ufs_mtk_host *host;
        int err = 0;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
                goto out;
        }

        host->hba = hba;
        ufshcd_set_variant(hba, host);

        /* Get host capability and platform data */
        id = of_match_device(ufs_mtk_of_match, dev);
        if (!id) {
                err = -EINVAL;
                goto out;
        }

        if (id->data) {
                host->cfg = (struct ufs_mtk_host_cfg *)id->data;
                ufs_mtk_init_host_caps(hba);
        }

        err = ufs_mtk_bind_mphy(hba);
        if (err)
                goto out_variant_clear;

        ufs_mtk_init_reset(hba);

        /* Enable runtime autosuspend */
        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        /* Enable clock-gating */
        hba->caps |= UFSHCD_CAP_CLK_GATING;

        /* Enable inline encryption */
        hba->caps |= UFSHCD_CAP_CRYPTO;

        /* Enable WriteBooster */
        hba->caps |= UFSHCD_CAP_WB_EN;
        hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
        hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

        /*
         * ufshcd_vops_init() is invoked after ufshcd_setup_clock(true)
         * in ufshcd_hba_init(), so the phy clock setup was skipped there.
         *
         * Enable the phy clocks specifically here.
         */
        ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

        goto out;

out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}
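/*
 * Fill in the host-side capability limits (lanes, gears, power modes,
 * HS rate) and let ufshcd_get_pwr_dev_param() negotiate them against the
 * device's capabilities; the agreed parameters come back in
 * dev_req_params.
 */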
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
                                  struct ufs_pa_layer_attr *dev_max_params,
                                  struct ufs_pa_layer_attr *dev_req_params)
{
        struct ufs_dev_params host_cap;
        int ret;

        host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
        host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
        host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
        host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
        host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
        host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
        host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
        host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
        host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
        host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
        host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
        host_cap.desired_working_mode =
                                UFS_MTK_LIMIT_DESIRED_MODE;

        ret = ufshcd_get_pwr_dev_param(&host_cap,
                                       dev_max_params,
                                       dev_req_params);
        if (ret) {
                pr_info("%s: failed to determine capabilities\n",
                        __func__);
        }

        return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status stage,
                                     struct ufs_pa_layer_attr *dev_max_params,
                                     struct ufs_pa_layer_attr *dev_req_params)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
                                             dev_req_params);
                break;
        case POST_CHANGE:
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, bool lpm)
{
        int ret;
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ret = ufshcd_dme_set(hba,
                             UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
                             lpm);
        if (!ret || !lpm) {
                /*
                 * If the UIC command failed while entering LPM, leave
                 * unipro_lpm unset (non-LPM mode) so that the default
                 * hba_enable_delay_us value is used when re-enabling
                 * the host.
                 */
                host->unipro_lpm = lpm;
        }

        return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
        int ret;
        u32 tmp;

        ret = ufs_mtk_unipro_set_pm(hba, false);
        if (ret)
                return ret;

        /*
         * Set PA_Local_TX_LCC_Enable to 0 before link startup
         * to make sure that both host and device TX LCC are disabled
         * once link startup is completed.
         */
        ret = ufshcd_disable_host_tx_lcc(hba);
        if (ret)
                return ret;

        /* disable deep stall */
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
        if (ret)
                return ret;

        tmp &= ~(1 << 6);

        ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

        return ret;
}
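/*
 * The clock-gating delay is derived from the auto-hibern8 idle timer:
 * a 5 ms margin is added on top of the timer value (10 ms when no timer
 * is programmed), presumably so that the link can settle into hibern8
 * before the clocks are gated.
 */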
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
        unsigned long flags;
        u32 ah_ms;

        if (ufshcd_is_clkgating_allowed(hba)) {
                if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
                        ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
                                          hba->ahit);
                else
                        ah_ms = 10;
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.delay_ms = ah_ms + 5;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
        }
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
        /* enable unipro clock gating feature */
        ufs_mtk_cfg_unipro_cg(hba, true);

        /* configure auto-hibern8 timer to 10ms */
        if (ufshcd_is_auto_hibern8_supported(hba)) {
                ufshcd_auto_hibern8_update(hba,
                        FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
                        FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
        }

        ufs_mtk_setup_clk_gating(hba);

        return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status stage)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_link(hba);
                break;
        case POST_CHANGE:
                ret = ufs_mtk_post_link(hba);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        ufs_mtk_device_reset_ctrl(0, res);

        /*
         * The reset signal is active low. UFS devices shall detect a
         * positive or negative RST_n pulse width of 1 us or more.
         *
         * To be on the safe side, keep the reset low for at least 10 us.
         */
        usleep_range(10, 15);

        ufs_mtk_device_reset_ctrl(1, res);

        /* Some devices may need time to respond to rst_n */
        usleep_range(10000, 15000);

        dev_info(hba->dev, "device reset done\n");

        return 0;
}
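/*
 * Resume path for a link left in hibern8: re-enable the host controller,
 * leave UniPro low-power mode, exit hibern8 (marking the link active on
 * success), then make the host operational again.
 */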
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
        int err;

        err = ufshcd_hba_enable(hba);
        if (err)
                return err;

        err = ufs_mtk_unipro_set_pm(hba, false);
        if (err)
                return err;

        err = ufshcd_uic_hibern8_exit(hba);
        if (!err)
                ufshcd_set_link_active(hba);
        else
                return err;

        err = ufshcd_make_hba_operational(hba);
        if (err)
                return err;

        return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
        int err;

        err = ufs_mtk_unipro_set_pm(hba, true);
        if (err) {
                /* Resume UniPro state for the following error recovery */
                ufs_mtk_unipro_set_pm(hba, false);
                return err;
        }

        return 0;
}

/*
 * VCCQ2 is switched to idle mode only while VCC is already off, i.e.
 * when the device is fully in its low-power state; on leaving LPM it is
 * unconditionally restored to normal mode.
 */
static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
        if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
                return;

        if (lpm && !hba->vreg_info.vcc->enabled)
                regulator_set_mode(hba->vreg_info.vccq2->reg,
                                   REGULATOR_MODE_IDLE);
        else if (!lpm)
                regulator_set_mode(hba->vreg_info.vccq2->reg,
                                   REGULATOR_MODE_NORMAL);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        int err;
        struct arm_smccc_res res;

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_lpm(hba);
                if (err) {
                        /*
                         * Forcibly set the link to OFF state to trigger
                         * ufshcd_host_reset_and_restore() in
                         * ufshcd_suspend() for a complete host reset.
                         */
                        ufshcd_set_link_off(hba);
                        return -EAGAIN;
                }
                /*
                 * Make sure no error will be returned from here on, to
                 * prevent ufshcd_suspend() from re-enabling regulators
                 * while vreg is still in low-power mode.
                 */
                ufs_mtk_vreg_set_lpm(hba, true);
        }

        if (ufshcd_is_link_off(hba))
                ufs_mtk_device_reset_ctrl(0, res);

        return 0;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        int err;

        if (ufshcd_is_link_hibern8(hba)) {
                ufs_mtk_vreg_set_lpm(hba, false);
                err = ufs_mtk_link_set_hpm(hba);
                if (err) {
                        err = ufshcd_link_recovery(hba);
                        return err;
                }
        }

        return 0;
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

        ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

        ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
                         REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
                         "MPHY Ctrl ");

        /* Direct debugging information to REG_UFS_PROBE */
        ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
        ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
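/*
 * Per-vendor tuning applied once the device manufacturer ID is known:
 * PA_TACTIVATE is raised for Samsung parts, and the reference-clock
 * gating/ungating waits are chosen per vendor.
 */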
static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
        struct ufs_dev_info *dev_info = &hba->dev_info;
        u16 mid = dev_info->wmanufacturerid;

        if (mid == UFS_VENDOR_SAMSUNG)
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);

        /*
         * Decide the waiting time before gating the reference clock and
         * after ungating it according to each vendor's requirements.
         */
        if (mid == UFS_VENDOR_SAMSUNG)
                ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
        else if (mid == UFS_VENDOR_SKHYNIX)
                ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
        else if (mid == UFS_VENDOR_TOSHIBA)
                ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);

        return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
        ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .name = "mediatek.ufshci",
        .init = ufs_mtk_init,
        .setup_clocks = ufs_mtk_setup_clocks,
        .hce_enable_notify = ufs_mtk_hce_enable_notify,
        .link_startup_notify = ufs_mtk_link_startup_notify,
        .pwr_change_notify = ufs_mtk_pwr_change_notify,
        .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
        .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
        .suspend = ufs_mtk_suspend,
        .resume = ufs_mtk_resume,
        .dbg_register_dump = ufs_mtk_dbg_register_dump,
        .device_reset = ufs_mtk_device_reset,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Returns zero for success and non-zero for failure.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;

        /* Perform the generic probe */
        err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
        if (err)
                dev_info(dev, "probe failed %d\n", err);

        return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0.
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&pdev->dev);
        ufshcd_remove(hba);
        return 0;
}

static const struct dev_pm_ops ufs_mtk_pm_ops = {
        .suspend = ufshcd_pltfrm_suspend,
        .resume = ufshcd_pltfrm_resume,
        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
        .runtime_resume = ufshcd_pltfrm_runtime_resume,
        .runtime_idle = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_mtk_pltform = {
        .probe = ufs_mtk_probe,
        .remove = ufs_mtk_remove,
        .shutdown = ufshcd_pltfrm_shutdown,
        .driver = {
                .name = "ufshcd-mtk",
                .pm = &ufs_mtk_pm_ops,
                .of_match_table = ufs_mtk_of_match,
        },
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);