// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/gpio/consumer.h>
#include <linux/reset-controller.h>
#include <linux/devfreq.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);

static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
	return container_of(rcd, struct ufs_qcom_host, rcdev);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
				       const char *prefix, void *priv)
{
	ufshcd_dump_regs(hba, offset, len * 4, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out, bool optional)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (!IS_ERR(clk)) {
		*clk_out = clk;
		return 0;
	}

	err = PTR_ERR(clk);

	if (optional && err == -ENOENT) {
		*clk_out = NULL;
		return 0;
	}

	if (err != -EPROBE_DEFER)
		dev_err(dev, "failed to get %s err %d\n", name, err);

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
			host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (has_acpi_companion(dev))
		return 0;

	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
					&host->rx_l0_sync_clk, false);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
					&host->tx_l0_sync_clk, false);
	if (err)
		goto out;

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk, false);
		if (err)
			goto out;

		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
			&host->tx_l1_sync_clk, true);
	}
out:
	return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	u32 tx_lanes;

	return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
}

static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have scheduled out for a long time during polling, so
	 * check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		    REG_UFS_CFG1);
	/* make sure the above configuration is applied before we return */
	mb();
}

/*
 * ufs_qcom_host_reset - reset host controller and PHY
 */
static int ufs_qcom_host_reset(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	bool reenable_intr = false;

	if (!host->core_reset) {
		dev_warn(hba->dev, "%s: reset control not set\n", __func__);
		goto out;
	}

	reenable_intr = hba->is_irq_enabled;
	disable_irq(hba->irq);
	hba->is_irq_enabled = false;

	ret = reset_control_assert(host->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
				__func__, ret);
		goto out;
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
	 * ~125us (4/32768). To be on the safe side, add a 200us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(host->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
				__func__, ret);

	usleep_range(1000, 1100);

	if (reenable_intr) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}

out:
	return ret;
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Reset UFS Host Controller and PHY */
	ret = ufs_qcom_host_reset(hba);
	if (ret)
		dev_warn(hba->dev, "%s: host reset returned %d\n",
				__func__, ret);

	if (is_rate_B)
		phy_set_mode(phy, PHY_MODE_UFS_HS_B);

	/* phy initialization - calibrate the phy */
	ret = phy_init(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* power on phy - start serdes and phy's power and clocks */
	ret = phy_power_on(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
			__func__, ret);
		goto out_disable_phy;
	}

	ufs_qcom_select_unipro_mode(host);

	return 0;

out_disable_phy:
	phy_exit(phy);
out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default, and
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after the PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		ufs_qcom_ice_enable(host);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/*
 * Returns zero for success and non-zero in case of a failure
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for
	 * interrupt aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure the above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* these two register fields shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure the above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			err = -EINVAL;
			goto out;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		/*
		 * Some UFS devices (and maybe the host) have issues if LCC is
		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
		 * before link startup, which will make sure that both host
		 * and device TX LCC are disabled once link startup is
		 * completed.
		 */
		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
			err = ufshcd_disable_host_tx_lcc(hba);

		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

out:
	return err;
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before the PHY is
		 * powered down, as the PLL source should be disabled
		 * after its downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

	} else if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
	}

	return 0;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	if (ufs_qcom_is_link_off(hba)) {
		err = phy_power_on(phy);
		if (err) {
			dev_err(hba->dev, "%s: failed PHY power on: %d\n",
				__func__, err);
			return err;
		}

		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;

	} else if (!ufs_qcom_is_link_active(hba)) {
		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;
	}

	return ufs_qcom_ice_resume(host);
}

static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock, it might be
		 * immediately after entering hibern8, in which case we need
		 * to make sure that the device ref_clk stays active for a
		 * specific time after hibern8 entry.
		 */
		if (!enable) {
			unsigned long gating_wait;

			gating_wait = host->hba->dev_info.clk_gating_wait_us;
			if (!gating_wait) {
				udelay(1);
			} else {
				/*
				 * bRefClkGatingWaitTime defines the minimum
				 * time for which the reference clock is
				 * required by the device during the
				 * transition from HS-MODE to LS-MODE or
				 * HIBERN8 state. Give it some more delay to
				 * be on the safe side.
				 */
				gating_wait += 10;
				usleep_range(gating_wait, gating_wait + 10);
			}
		}

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/*
		 * Make sure the write to ref_clk reaches the destination and
		 * is not stored in a Write Buffer (WB).
		 */
		readl(host->dev_ref_clk_ctrl_mmio);

		/*
		 * If we call hibern8 exit after this, we need to make sure
		 * that the device ref_clk is stable for at least 1us before
		 * the hibern8 exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params ufs_qcom_cap;
	int ret = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy
			 * QCOM UFS host controller hardware even though the
			 * capability exchange during link startup may end up
			 * negotiating the maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
					       dev_max_params,
					       dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
			ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);

		if (host->hw_ver.major >= 0x4) {
			if (dev_req_params->gear_tx == UFS_HS_G4) {
				/* INITIAL ADAPT */
				ufshcd_dme_set(hba,
					       UIC_ARG_MIB(PA_TXHSADAPTTYPE),
					       PA_INITIAL_ADAPT);
			} else {
				/* NO ADAPT */
				ufshcd_dme_set(hba,
					       UIC_ARG_MIB(PA_TXHSADAPTTYPE),
					       PA_NO_ADAPT);
			}
		}
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return the error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
		       dev_req_params, sizeof(*dev_req_params));

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
			!ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		goto out;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			    (pa_vs_config_reg1 | (1 << 12)));

out:
	return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
		hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;

	return err;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller might have some non-standard behaviours
 * (quirks) compared to what the UFSHCI specification requires. Advertise
 * all such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->caps |= UFSHCD_CAP_CRYPTO;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}

/**
 * ufs_qcom_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	switch (status) {
	case PRE_CHANGE:
		if (!on) {
			if (!ufs_qcom_is_link_active(hba)) {
				/* disable device ref_clk */
				ufs_qcom_dev_ref_clk_ctrl(host, false);
			}
		}
		break;
	case POST_CHANGE:
		if (on) {
			/* enable the device ref clock for HS mode */
			if (ufshcd_is_hs_mode(&hba->pwr_info))
				ufs_qcom_dev_ref_clk_ctrl(host, true);
		}
		break;
	}

	return err;
}

static int
ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	/* Currently this code only knows about a single reset. */
	WARN_ON(id);
	ufs_qcom_assert_reset(host->hba);
	/* provide 1ms delay to let the reset pulse propagate. */
	usleep_range(1000, 1100);
	return 0;
}

static int
ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	/* Currently this code only knows about a single reset. */
	WARN_ON(id);
	ufs_qcom_deassert_reset(host->hba);

	/*
	 * After reset deassertion, the PHY will need all ref clocks,
	 * voltage and current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	return 0;
}

static const struct reset_control_ops ufs_qcom_reset_ops = {
	.assert = ufs_qcom_reset_assert,
	.deassert = ufs_qcom_reset_deassert,
};

#define ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power up failure and zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/* Setup the reset control of HCI */
	host->core_reset = devm_reset_control_get(hba->dev, "rst");
	if (IS_ERR(host->core_reset)) {
		err = PTR_ERR(host->core_reset);
		dev_warn(dev, "Failed to get reset control %d\n", err);
		host->core_reset = NULL;
		err = 0;
	}

	/* Fire up the reset controller. Failure here is non-fatal. */
	host->rcdev.of_node = dev->of_node;
	host->rcdev.ops = &ufs_qcom_reset_ops;
	host->rcdev.owner = dev->driver->owner;
	host->rcdev.nr_resets = 1;
	err = devm_reset_controller_register(dev, &host->rcdev);
	if (err) {
		dev_warn(dev, "Failed to register reset controller\n");
		err = 0;
	}

	/*
	 * Voting/devoting the device ref_clk source is time consuming,
	 * hence skip devoting it during aggressive clock gating. This
	 * clock will still be gated off during runtime suspend.
	 */
	host->generic_phy = devm_phy_get(dev, "ufsphy");

	if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case, return -EPROBE_DEFER.
		 */
		err = -EPROBE_DEFER;
		dev_warn(dev, "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
		goto out_variant_clear;
	} else if (IS_ERR(host->generic_phy)) {
		if (has_acpi_companion(dev)) {
			host->generic_phy = NULL;
		} else {
			err = PTR_ERR(host->generic_phy);
			dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
			goto out_variant_clear;
		}
	}

	host->device_reset = devm_gpiod_get_optional(dev, "reset",
						     GPIOD_OUT_HIGH);
	if (IS_ERR(host->device_reset)) {
		err = PTR_ERR(host->device_reset);
		if (err != -EPROBE_DEFER)
			dev_err(dev, "failed to acquire reset gpio: %d\n", err);
		goto out_variant_clear;
	}

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/*
	 * For newer controllers, the device reference clock control bit has
	 * moved inside the UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is optional resource */
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "dev_ref_clk_ctrl_mem");
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
				dev_warn(dev,
					"%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
					__func__,
					PTR_ERR(host->dev_ref_clk_ctrl_mmio));
				host->dev_ref_clk_ctrl_mmio = NULL;
			}
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	err = ufs_qcom_ice_init(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err) {
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
				__func__, err);
		err = 0;
	}

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
	phy_exit(host->generic_phy);
}
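
/*
 * ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div - program the vendor-specific
 * DME_VS_CORE_CLK_CTRL attribute: write @clk_cycles into the
 * MAX_CORE_CLK_1US_CYCLES field and clear the CORE_CLK_DIV_EN bit.
 * Returns zero on success or a negative error code on failure.
 */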
static int
ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
					u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);
	if (err)
		goto out;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    core_clk_ctrl_reg);
out:
	return err;
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* nothing to do as of now */
	return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 150 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	u32 core_clk_ctrl_reg;

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);

	/* make sure CORE_CLK_DIV_EN is cleared */
	if (!err &&
	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
		err = ufshcd_dme_set(hba,
				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
				    core_clk_ctrl_reg);
	}

	return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 75 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err || !dev_req_params)
			goto out;

		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
	}

out:
	return err;
}

static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
		void *priv, void (*print_fn)(struct ufs_hba *hba,
		int offset, int num_regs, const char *str, void *priv))
{
	u32 reg;
	struct ufs_qcom_host *host;

	if (unlikely(!hba)) {
		pr_err("%s: hba is NULL\n", __func__);
		return;
	}
	if (unlikely(!print_fn)) {
		dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
		return;
	}

	host = ufshcd_get_variant(hba);
	if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
		return;

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UTP_DBG_RAMS_EN;
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}

static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
				UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
	} else {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
	}
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
	if (host->testbus.select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, host->testbus.select_major);
		return false;
	}

	return true;
}
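
/*
 * ufs_qcom_testbus_config - route the selected test bus to the debug output
 *
 * Programs the TEST_BUS_SEL field of REG_UFS_CFG1 with the major selection
 * and the per-module sub-select field with the minor selection, then enables
 * the test bus. Returns zero on success, -EINVAL if @host is NULL and -EPERM
 * if the current test bus configuration is not valid.
 */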
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 20;
		mask = 0xFFF;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufs_qcom_enable_test_bus(host);
	/*
	 * Make sure the test bus configuration is
	 * committed before returning.
	 */
	mb();

	return 0;
}

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
			 "HCI Vendor Specific Registers ");

	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
}

/**
 * ufs_qcom_device_reset() - toggle the (optional) device reset line
 * @hba: per-adapter instance
 *
 * Toggles the (optional) reset line to reset the attached device.
 */
static int ufs_qcom_device_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/* reset gpio is optional */
	if (!host->device_reset)
		return -EOPNOTSUPP;

	/*
	 * The UFS device shall detect reset pulses of 1us; sleep for 10us to
	 * be on the safe side.
	 */
	gpiod_set_value_cansleep(host->device_reset, 1);
	usleep_range(10, 15);

	gpiod_set_value_cansleep(host->device_reset, 0);
	usleep_range(10, 15);

	return 0;
}

#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
					  struct devfreq_dev_profile *p,
					  void *data)
{
	static struct devfreq_simple_ondemand_data *d;

	if (!data)
		return;

	d = (struct devfreq_simple_ondemand_data *)data;
	p->polling_ms = 60;
	d->upthreshold = 70;
	d->downdifferential = 5;
}
#else
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
					  struct devfreq_dev_profile *p,
					  void *data)
{
}
#endif

/*
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
	.device_reset		= ufs_qcom_device_reset,
	.config_scaling_param	= ufs_qcom_config_scaling_param,
	.program_key		= ufs_qcom_ice_program_key,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct of_device_id ufs_qcom_of_match[] = {
	{ .compatible = "qcom,ufshc"},
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
	{ "QCOM24A5" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif

static const struct dev_pm_ops ufs_qcom_pm_ops = {
	.suspend	= ufshcd_pltfrm_suspend,
	.resume		= ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume	 = ufshcd_pltfrm_runtime_resume,
	.runtime_idle	 = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove	= ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
		.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");