// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Google, Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/ktime.h>

#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"

/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)

#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)

#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8

#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200

#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)

#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)

#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
#define TRIES_128					2
#define TRIES_256					4
#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7

#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
#define TUNING_WORD_BIT_SIZE				32

#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
#define SDHCI_AUTO_CAL_START				BIT(31)
#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff

#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000

#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)

#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
#define NVQUIRK_ENABLE_SDR50				BIT(3)
#define NVQUIRK_ENABLE_SDR104				BIT(4)
#define NVQUIRK_ENABLE_DDR50				BIT(5)
/*
 * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads
 * drive strength.
 */
#define NVQUIRK_HAS_PADCALIB				BIT(6)
/*
 * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads.
 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 * on the signaling mode.
 */
#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)

/*
 * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
 * SDMMC hardware data timeout.
 */
#define NVQUIRK_HAS_TMCLK				BIT(10)

/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000

#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
					 SDHCI_TRNS_BLK_CNT_EN | \
					 SDHCI_TRNS_DMA)

/* Per-SoC static configuration: platform data, DMA mask and quirk set. */
struct sdhci_tegra_soc_data {
	const struct sdhci_pltfm_data *pdata;
	u64 dma_mask;
	u32 nvquirks;
	u8 min_tap_delay;	/* minimum tap delay in ps, 0 if not applicable */
	u8 max_tap_delay;	/* maximum tap delay in ps, 0 if not applicable */
};

/* Magic pull up and pull down pad calibration offsets */
struct sdhci_tegra_autocal_offsets {
	u32 pull_up_3v3;
	u32 pull_down_3v3;
	u32 pull_up_3v3_timeout;	/* fail-safe value used on autocal timeout */
	u32 pull_down_3v3_timeout;	/* fail-safe value used on autocal timeout */
	u32 pull_up_1v8;
	u32 pull_down_1v8;
	u32 pull_up_1v8_timeout;	/* fail-safe value used on autocal timeout */
	u32 pull_down_1v8_timeout;	/* fail-safe value used on autocal timeout */
	u32 pull_up_sdr104;
	u32 pull_down_sdr104;
	u32 pull_up_hs400;
	u32 pull_down_hs400;
};

/* Per-instance driver state, stored in the sdhci_pltfm private area. */
struct sdhci_tegra {
	const struct sdhci_tegra_soc_data *soc_data;
	struct gpio_desc *power_gpio;
	struct clk *tmclk;		/* separate timeout clock (NVQUIRK_HAS_TMCLK) */
	bool ddr_signaling;		/* true while a DDR50/52 timing is selected */
	bool pad_calib_required;	/* re-run pad autocal on next clock change */
	bool pad_control_available;	/* valid pinctrl states for 3V3/1V8 pads */

	struct reset_control *rst;
	struct pinctrl *pinctrl_sdmmc;
	struct pinctrl_state *pinctrl_state_3v3;
	struct pinctrl_state *pinctrl_state_1v8;
	struct pinctrl_state *pinctrl_state_3v3_drv;
	struct pinctrl_state *pinctrl_state_1v8_drv;

	struct sdhci_tegra_autocal_offsets autocal_offsets;
	ktime_t last_calib;		/* timestamp of the last pad autocal run */

	u32 default_tap;
	u32 default_trim;
	u32 dqs_trim;
	bool enable_hwcq;
	unsigned long curr_clk_rate;	/* actual CAR clock rate from clk_get_rate() */
	u8 tuned_tap_delay;		/* tap chosen by HW tuning + correction */
};

/*
 * Register read accessor; works around an erratum where the HW version
 * register is invalid on SoCs with NVQUIRK_FORCE_SDHCI_SPEC_200.
 */
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
			(reg == SDHCI_HOST_VERSION))) {
		/* Erratum: Version register is invalid in HW. */
		return SDHCI_SPEC_200;
	}

	return readw(host->ioaddr + reg);
}

/*
 * Word write accessor: the transfer-mode word is buffered and written as one
 * 32-bit access together with the command word.
 */
static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	switch (reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write, we must do it together with a
		 * command write that is down below.
		 */
		pltfm_host->xfer_mode_shadow = val;
		return;
	case SDHCI_COMMAND:
		writel((val << 16) | pltfm_host->xfer_mode_shadow,
			host->ioaddr + SDHCI_TRANSFER_MODE);
		return;
	}

	writew(val, host->ioaddr + reg);
}

/*
 * Long write accessor: masks spurious timeout/CRC signalling and applies the
 * block-gap interrupt erratum workaround where required.
 */
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	/* Seems like we're getting spurious timeout and crc errors, so
	 * disable signalling of them. In case of real errors software
	 * timers should take care of eventually detecting them.
	 */
	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);

	writel(val, host->ioaddr + reg);

	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
			(reg == SDHCI_INT_ENABLE))) {
		/* Erratum: Must enable block gap interrupt detection */
		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
		if (val & SDHCI_INT_CARD_INT)
			gap_ctrl |= 0x8;
		else
			gap_ctrl &= ~0x8;
		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
	}
}

/*
 * Enable or disable the card clock and return the previous enable state so
 * callers can restore it afterwards. No write is issued when the requested
 * state already matches the current one.
 */
static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
{
	bool status;
	u32 reg;

	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	status = !!(reg & SDHCI_CLOCK_CARD_EN);

	if (status == enable)
		return status;

	if (enable)
		reg |= SDHCI_CLOCK_CARD_EN;
	else
		reg &= ~SDHCI_CLOCK_CARD_EN;

	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);

	return status;
}

/*
 * Tegra210 word write accessor: tuning commands are issued with the card
 * clock gated, followed by a CMD/DATA reset, then the clock is restored.
 */
static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	bool is_tuning_cmd = 0;
	bool clk_enabled;
	u8 cmd;

	if (reg == SDHCI_COMMAND) {
		cmd = SDHCI_GET_CMD(val);
		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
				cmd == MMC_SEND_TUNING_BLOCK_HS200;
	}

	if (is_tuning_cmd)
		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);

	writew(val, host->ioaddr + reg);

	if (is_tuning_cmd) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, clk_enabled);
	}
}

static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
	/*
	 * Write-enable shall be assumed if GPIO is missing in a board's
	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
	 * Tegra.
	 */
	return mmc_gpio_get_ro(host->mmc);
}

/*
 * Return true when either no pad control is needed, or the vqmmc regulator
 * indicates a fixed voltage; return pad_control_available when the supply
 * supports both 1.8 V and 3.3 V ranges (voltage switching possible).
 */
static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int has_1v8, has_3v3;

	/*
	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
	 * voltage configuration in order to perform voltage switching. This
	 * means that valid pinctrl info is required on SDHCI instances capable
	 * of performing voltage switching. Whether or not an SDHCI instance is
	 * capable of voltage switching is determined based on the regulator.
	 */

	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return true;

	if (IS_ERR(host->mmc->supply.vqmmc))
		return false;

	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 1700000, 1950000);

	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 2700000, 3600000);

	if (has_1v8 == 1 && has_3v3 == 1)
		return tegra_host->pad_control_available;

	/* Fixed voltage, no pad control required. */
	return true;
}

/*
 * Program a new tap value into the vendor clock control register, gating the
 * card clock around the update on SoCs with the glitch quirk.
 */
static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	bool card_clk_enabled = false;
	u32 reg;

	/*
	 * Touching the tap values is a bit tricky on some SoC generations.
	 * The quirk enables a workaround for a glitch that sometimes occurs if
	 * the tap values are changed.
	 */

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
	    card_clk_enabled) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
	}
}

/*
 * Controller reset hook. After a full reset, restore the default tap/trim,
 * re-advertise the supported UHS modes and re-arm pad calibration.
 */
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 misc_ctrl, clk_ctrl, pad_ctrl;

	sdhci_and_cqhci_reset(host, mask);

	if (!(mask & SDHCI_RESET_ALL))
		return;

	tegra_sdhci_set_tap(host, tegra_host->default_tap);

	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
		       SDHCI_MISC_CTRL_ENABLE_SDR104);

	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);

	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
		/* Erratum: Enable SDHCI spec v3.00 support */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
		/* Advertise UHS modes as supported by host */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
	}

	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;

	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

		tegra_host->pad_calib_required = true;
	}

	tegra_host->ddr_signaling = false;
}

static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
{
	u32 val;

	/*
	 * Enable or disable the additional I/O pad used by the drive strength
	 * calibration process.
	 */
	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
	else
		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;

	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		usleep_range(1, 2);
}

/*
 * Program the auto-cal start offsets. @pdpu packs the pull-down offset in
 * the upper byte and the pull-up offset in the lower byte (see callers).
 */
static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
					       u16 pdpu)
{
	u32 reg;

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
	reg |= pdpu;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
}

/*
 * Configure the SDMMC pads for @voltage. With @state_drvupdn set, select the
 * 1.8V/3.3V pinctrl voltage state; otherwise apply fixed drive strengths,
 * either via the *_drv pinctrl states or via the timeout offsets written
 * directly into the COMP_PADCTRL register.
 */
static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
				   bool state_drvupdn)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *offsets =
			&tegra_host->autocal_offsets;
	struct pinctrl_state *pinctrl_drvupdn = NULL;
	int ret = 0;
	u8 drvup = 0, drvdn = 0;
	u32 reg;

	if (!state_drvupdn) {
		/* PADS Drive Strength */
		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			if (tegra_host->pinctrl_state_1v8_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_1v8_drv;
			} else {
				drvup = offsets->pull_up_1v8_timeout;
				drvdn = offsets->pull_down_1v8_timeout;
			}
		} else {
			if (tegra_host->pinctrl_state_3v3_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_3v3_drv;
			} else {
				drvup = offsets->pull_up_3v3_timeout;
				drvdn = offsets->pull_down_3v3_timeout;
			}
		}

		if (pinctrl_drvupdn != NULL) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
							pinctrl_drvupdn);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"failed pads drvupdn, ret: %d\n", ret);
		} else if ((drvup) || (drvdn)) {
			reg = sdhci_readl(host,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
			reg |= (drvup << 20) | (drvdn << 12);
			sdhci_writel(host, reg,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		}

	} else {
		/* Dual Voltage PADS Voltage selection */
		if (!tegra_host->pad_control_available)
			return 0;

		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   tegra_host->pinctrl_state_1v8);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 1.8V failed, ret: %d\n", ret);
		} else {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   tegra_host->pinctrl_state_3v3);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 3.3V failed, ret: %d\n", ret);
		}
	}

	return ret;
}

/*
 * Run one pad drive-strength auto-calibration cycle: program the start
 * offsets for the current timing/voltage, gate the card clock, trigger the
 * calibration and poll for completion (10 ms timeout). On timeout, disable
 * auto-cal and fall back to fixed drive strengths.
 */
static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets offsets =
			tegra_host->autocal_offsets;
	struct mmc_ios *ios = &host->mmc->ios;
	bool card_clk_enabled;
	u16 pdpu;
	u32 reg;
	int ret;

	switch (ios->timing) {
	case MMC_TIMING_UHS_SDR104:
		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
		break;
	case MMC_TIMING_MMC_HS400:
		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
		break;
	default:
		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
		else
			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
	}

	/* Set initial offset before auto-calibration */
	tegra_sdhci_set_pad_autocal_offset(host, pdpu);

	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	tegra_sdhci_configure_cal_pad(host, true);

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

	usleep_range(1, 2);
	/* 10 ms timeout */
	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
				 1000, 10000);

	tegra_sdhci_configure_cal_pad(host, false);

	tegra_sdhci_configure_card_clk(host, card_clk_enabled);

	if (ret) {
		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");

		/* Disable automatic cal and use fixed Drive Strengths */
		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
		reg &= ~SDHCI_AUTO_CAL_ENABLE;
		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Setting drive strengths failed: %d\n", ret);
	}
}

/*
 * Read the optional auto-cal offset properties from the device tree.
 * Missing 3V3/1V8 values default to 0; missing SDR104/HS400 values fall
 * back to the 1V8 values. The *-timeout fail-safe values are only read on
 * SoCs with NVQUIRK_NEEDS_PAD_CONTROL.
 */
static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *autocal =
			&tegra_host->autocal_offsets;
	int err;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-3v3",
			&autocal->pull_up_3v3);
	if (err)
		autocal->pull_up_3v3 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-3v3",
			&autocal->pull_down_3v3);
	if (err)
		autocal->pull_down_3v3 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-1v8",
			&autocal->pull_up_1v8);
	if (err)
		autocal->pull_up_1v8 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-1v8",
			&autocal->pull_down_1v8);
	if (err)
		autocal->pull_down_1v8 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-sdr104",
			&autocal->pull_up_sdr104);
	if (err)
		autocal->pull_up_sdr104 = autocal->pull_up_1v8;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-sdr104",
			&autocal->pull_down_sdr104);
	if (err)
		autocal->pull_down_sdr104 = autocal->pull_down_1v8;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-hs400",
			&autocal->pull_up_hs400);
	if (err)
		autocal->pull_up_hs400 = autocal->pull_up_1v8;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-hs400",
			&autocal->pull_down_hs400);
	if (err)
		autocal->pull_down_hs400 = autocal->pull_down_1v8;

	/*
	 * Different fail-safe drive strength values based on the signaling
	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
	 * So, avoid reading below device tree properties for SoCs that don't
	 * have NVQUIRK_NEEDS_PAD_CONTROL.
	 */
	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
			&autocal->pull_up_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_3v3_timeout = 0;
	}

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
			&autocal->pull_down_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_3v3_timeout = 0;
	}

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
			&autocal->pull_up_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_1v8_timeout = 0;
	}

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
			&autocal->pull_down_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_1v8_timeout = 0;
	}
}

/*
 * Request hook: re-run pad auto-calibration when the last run is older than
 * the 100 ms interval mandated by the TRM, then hand off to sdhci_request().
 */
static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);

	/* 100 ms calibration interval is specified in the TRM */
	if (ktime_to_ms(since_calib) > 100) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->last_calib = ktime_get();
	}

	sdhci_request(mmc, mrq);
}

/*
 * Read the default tap/trim and DQS trim values from the device tree.
 * Defaults: tap 0, trim 0, DQS trim 0x11.
 */
static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int err;

	err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
				       &tegra_host->default_tap);
	if (err)
		tegra_host->default_tap = 0;

	err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
				       &tegra_host->default_trim);
	if (err)
		tegra_host->default_trim = 0;

	err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
				       &tegra_host->dqs_trim);
	if (err)
		tegra_host->dqs_trim = 0x11;
}

/* Gather all Tegra-specific device-tree configuration for this instance. */
static void tegra_sdhci_parse_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
		tegra_host->enable_hwcq = true;
	else
		tegra_host->enable_hwcq = false;

	tegra_sdhci_parse_pad_autocal_dt(host);
	tegra_sdhci_parse_tap_and_trim(host);
}

/*
 * Clock hook: set the CAR clock to the requested rate (doubled for DDR
 * signaling, see comment below), refresh host->max_clk accordingly, and run
 * a pending pad calibration if one was flagged.
 */
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	unsigned long host_clk;

	if (!clock)
		return sdhci_set_clock(host, clock);

	/*
	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
	 * divider to be configured to divided the host clock by two. The SDHCI
	 * clock divider is calculated as part of sdhci_set_clock() by
	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
	 * the requested clock rate.
	 *
	 * By setting the host->max_clk to clock * 2 the divider calculation
	 * will always result in the correct value for DDR50/52 modes,
	 * regardless of clock rate rounding, which may happen if the value
	 * from clk_get_rate() is used.
	 */
	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
	clk_set_rate(pltfm_host->clk, host_clk);
	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
	if (tegra_host->ddr_signaling)
		host->max_clk = host_clk;
	else
		host->max_clk = clk_get_rate(pltfm_host->clk);

	sdhci_set_clock(host, clock);

	if (tegra_host->pad_calib_required) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->pad_calib_required = false;
	}
}

/* Toggle the vendor enhanced-strobe bit for HS400ES operation. */
static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
					      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);

	if (ios->enhanced_strobe) {
		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
		/*
		 * When CMD13 is sent from mmc_select_hs400es() after
		 * switching to HS400ES mode, the bus is operating at
		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
		 * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
		 * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
		 * controller CAR clock and the interface clock are rate matched.
		 */
		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
	} else {
		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
	}

	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
}

/* Report the highest rate the CAR clock can be rounded to. */
static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return clk_round_rate(pltfm_host->clk, UINT_MAX);
}

/* Program the DQS trim value into the vendor capability-overrides register. */
static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
{
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
}

/* Kick off HS400 DLL calibration and poll until it completes (5 ms max). */
static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
{
	u32 reg;
	int err;

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);

	/* 1 ms sleep, 5 ms timeout */
	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
				 1000, 5000);
	if (err)
		dev_err(mmc_dev(host->mmc),
			"HS400 delay line calibration timed out\n");
}

/*
 * Scan the per-tap pass/fail bitmap produced by HW tuning, discard merged
 * (>= thd_up) and bubble (< thd_low) windows, and set tuned_tap_delay to the
 * middle of the first valid passing window. If only a single edge was seen,
 * place the tap at fixed_tap away from that edge; if no edge was detected,
 * keep the HW-tuned tap.
 */
static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
				       u8 thd_low, u8 fixed_tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	u32 val, tun_status;
	u8 word, bit, edge1, tap, window;
	bool tap_result;
	bool start_fail = false;
	bool start_pass = false;
	bool end_pass = false;
	bool first_fail = false;
	bool first_pass = false;
	u8 start_pass_tap = 0;
	u8 end_pass_tap = 0;
	u8 first_fail_tap = 0;
	u8 first_pass_tap = 0;
	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;

	/*
	 * Read auto-tuned results and extract good valid passing window by
	 * filtering out un-wanted bubble/partial/merged windows.
	 */
	for (word = 0; word < total_tuning_words; word++) {
		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
		val |= word;
		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
		bit = 0;
		while (bit < TUNING_WORD_BIT_SIZE) {
			tap = word * TUNING_WORD_BIT_SIZE + bit;
			tap_result = tun_status & (1 << bit);
			if (!tap_result && !start_fail) {
				start_fail = true;
				if (!first_fail) {
					first_fail_tap = tap;
					first_fail = true;
				}

			} else if (tap_result && start_fail && !start_pass) {
				start_pass_tap = tap;
				start_pass = true;
				if (!first_pass) {
					first_pass_tap = tap;
					first_pass = true;
				}

			} else if (!tap_result && start_fail && start_pass &&
				   !end_pass) {
				end_pass_tap = tap - 1;
				end_pass = true;
			} else if (tap_result && start_pass && start_fail &&
				   end_pass) {
				window = end_pass_tap - start_pass_tap;
				/* discard merged window and bubble window */
				if (window >= thd_up || window < thd_low) {
					start_pass_tap = tap;
					end_pass = false;
				} else {
					/* set tap at middle of valid window */
					tap = start_pass_tap + window / 2;
					tegra_host->tuned_tap_delay = tap;
					return;
				}
			}

			bit++;
		}
	}

	if (!first_fail) {
		WARN(1, "no edge detected, continue with hw tuned delay.\n");
	} else if (first_pass) {
		/* set tap location at fixed tap relative to the first edge */
		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
		if (edge1 - 1 > fixed_tap)
			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
		else
			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
	}
}

/*
 * After HW tuning completes: keep the HW-tuned tap unless the detected
 * window is partial or merged, in which case run tap correction with
 * thresholds derived from the SoC min/max tap delays and the current clock
 * rate. Finally program the chosen tap.
 */
static void tegra_sdhci_post_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
	u8 fixed_tap, start_tap, end_tap, window_width;
	u8 thdupper, thdlower;
	u8 num_iter;
	u32 clk_rate_mhz, period_ps, bestcase, worstcase;

	/* retain HW tuned tap to use incase if no correction is needed */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
		min_tap_dly = soc_data->min_tap_delay;
		max_tap_dly = soc_data->max_tap_delay;
		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
		period_ps = USEC_PER_SEC / clk_rate_mhz;
		bestcase = period_ps / min_tap_dly;
		worstcase = period_ps / max_tap_dly;
		/*
		 * Upper and Lower bound thresholds used to detect merged and
		 * bubble windows
		 */
		thdupper = (2 * worstcase + bestcase) / 2;
		thdlower = worstcase / 4;
		/*
		 * fixed tap is used when HW tuning result contains single edge
		 * and tap is set at fixed tap delay relative to the first edge
		 */
		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
		fixed_tap = avg_tap_dly / 2;

		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		window_width = end_tap - start_tap;
		num_iter = host->tuning_loop_count;
		/*
		 * partial window includes edges of the tuning range.
		 * merged window includes more taps so window width is higher
		 * than upper threshold.
		 */
		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
		    (end_tap == num_iter - 2) || window_width >= thdupper) {
			pr_debug("%s: Apply tuning correction\n",
				 mmc_hostname(host->mmc));
			tegra_sdhci_tap_correction(host, thdupper, thdlower,
						   fixed_tap);
		}
	}

	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
}

/* Run standard SDHCI tuning, then apply Tegra-specific tap post-processing. */
static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	err = sdhci_execute_tuning(mmc, opcode);
	if (!err && !host->tuning_err)
		tegra_sdhci_post_tuning(host);

	return err;
}

static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
					  unsigned timing)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	bool set_default_tap = false;
	bool set_dqs_trim = false;
	bool do_hs400_dll_cal = false;
	u8 iter = TRIES_256;
	u32 val;

	tegra_host->ddr_signaling = false;
	switch (timing) {
	case MMC_TIMING_UHS_SDR50:
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		/* Don't set default tap on tunable modes.
*/ 1010 iter = TRIES_128; 1011 break; 1012 case MMC_TIMING_MMC_HS400: 1013 set_dqs_trim = true; 1014 do_hs400_dll_cal = true; 1015 iter = TRIES_128; 1016 break; 1017 case MMC_TIMING_MMC_DDR52: 1018 case MMC_TIMING_UHS_DDR50: 1019 tegra_host->ddr_signaling = true; 1020 set_default_tap = true; 1021 break; 1022 default: 1023 set_default_tap = true; 1024 break; 1025 } 1026 1027 val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0); 1028 val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK | 1029 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK | 1030 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK); 1031 val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT | 1032 0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT | 1033 1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT); 1034 sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0); 1035 sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0); 1036 1037 host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256; 1038 1039 sdhci_set_uhs_signaling(host, timing); 1040 1041 tegra_sdhci_pad_autocalib(host); 1042 1043 if (tegra_host->tuned_tap_delay && !set_default_tap) 1044 tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay); 1045 else 1046 tegra_sdhci_set_tap(host, tegra_host->default_tap); 1047 1048 if (set_dqs_trim) 1049 tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim); 1050 1051 if (do_hs400_dll_cal) 1052 tegra_sdhci_hs400_dll_cal(host); 1053} 1054 1055static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 1056{ 1057 unsigned int min, max; 1058 1059 /* 1060 * Start search for minimum tap value at 10, as smaller values are 1061 * may wrongly be reported as working but fail at higher speeds, 1062 * according to the TRM. 1063 */ 1064 min = 10; 1065 while (min < 255) { 1066 tegra_sdhci_set_tap(host, min); 1067 if (!mmc_send_tuning(host->mmc, opcode, NULL)) 1068 break; 1069 min++; 1070 } 1071 1072 /* Find the maximum tap value that still passes. 
	 */
	max = min + 1;
	while (max < 255) {
		tegra_sdhci_set_tap(host, max);
		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			max--;
			break;
		}
		max++;
	}

	/* The TRM states the ideal tap value is at 75% in the passing range. */
	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));

	return mmc_send_tuning(host->mmc, opcode, NULL);
}

/*
 * Voltage switch with pad reconfiguration: for 3.3V switch the pads before
 * the controller, for 1.8V switch the controller first — so the pads always
 * match or exceed the signaling voltage during the transition.
 */
static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
						   struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int ret = 0;

	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
		if (ret < 0)
			return ret;
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
		if (ret < 0)
			return ret;
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
	}

	if (tegra_host->pad_calib_required)
		tegra_sdhci_pad_autocalib(host);

	return ret;
}

/*
 * Look up the pinctrl states used for pad voltage/drive-strength switching.
 * The "-drv" states are optional; the 3v3/1v8 states are required for pad
 * control to be advertised. Returns 0 on success, -1 when pad control is
 * unavailable.
 */
static int tegra_sdhci_init_pinctrl_info(struct device *dev,
					 struct sdhci_tegra *tegra_host)
{
	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
		dev_dbg(dev, "No pinctrl info, err: %ld\n",
			PTR_ERR(tegra_host->pinctrl_sdmmc));
		return -1;
	}

	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
			tegra_host->pinctrl_state_1v8_drv = NULL;
	}

	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
			tegra_host->pinctrl_state_3v3_drv = NULL;
	}

	tegra_host->pinctrl_state_3v3 =
		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
			 PTR_ERR(tegra_host->pinctrl_state_3v3));
		return -1;
	}

	tegra_host->pinctrl_state_1v8 =
		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
			 PTR_ERR(tegra_host->pinctrl_state_1v8));
		return -1;
	}

	tegra_host->pad_control_available = true;

	return 0;
}

/* Flag that pad auto-calibration must be re-run after a voltage switch. */
static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		tegra_host->pad_calib_required = true;
}

static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
{
	struct mmc_host *mmc = cq_host->mmc;
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;
	ktime_t timeout;
	bool timed_out;

	/*
	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
	 * to be re-configured.
	 * Tegra CQHCI/SDHCI prevents write access to block size register when
	 * CQE is unhalted. So handling CQE resume sequence here to configure
	 * SDHCI block registers prior to exiting CQE halt state.
	 */
	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		writel(val, cq_host->mmio + reg);
		/* poll up to 50us for the controller to leave the halt state */
		timeout = ktime_add_us(ktime_get(), 50);
		while (1) {
			timed_out = ktime_compare(ktime_get(), timeout) > 0;
			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
			if (!(ctrl & CQHCI_HALT) || timed_out)
				break;
		}
		/*
		 * CQE usually resumes very quick, but incase if Tegra CQE
		 * doesn't resume retry unhalt.
		 */
		if (timed_out)
			writel(val, cq_host->mmio + reg);
	} else {
		writel(val, cq_host->mmio + reg);
	}
}

/*
 * Apply the R1B command-timing quirk to DCMD task descriptors on SoCs that
 * need it (NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING).
 */
static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
					 struct mmc_request *mrq, u64 *data)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
	    mrq->cmd->flags & MMC_RSP_R1B)
		*data |= CQHCI_CMD_TIMING(1);
}

static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	/*
	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
	 * register when CQE is enabled and unhalted.
	 * CQHCI driver enables CQE prior to activation, so disable CQE before
	 * programming block size in sdhci controller and enable it back.
	 */
	if (!cq_host->activated) {
		val = cqhci_readl(cq_host, CQHCI_CFG);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
				     CQHCI_CFG);
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, val, CQHCI_CFG);
	}

	/*
	 * CMD CRC errors are seen sometimes with some eMMC devices when status
	 * command is sent during transfer of last data block which is the
	 * default case as send status command block counter (CBC) is 1.
	 * Recommended fix to set CBC to 0 allowing send status command only
	 * when data lines are idle.
	 */
	val = cqhci_readl(cq_host, CQHCI_SSC1);
	val &= ~CQHCI_SSC1_CBC_MASK;
	cqhci_writel(cq_host, val, CQHCI_SSC1);
}

/* CQHCI debug hook: dump the SDHCI register set. */
static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
{
	sdhci_dumpregs(mmc_priv(mmc));
}

/* Route CQE interrupts to the CQHCI core; returns 0 when handled. */
static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);

	return 0;
}

static void tegra_sdhci_set_timeout(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u32 val;

	/*
	 * HW busy detection timeout is based on programmed data timeout
	 * counter and maximum supported timeout is 11s which may not be
	 * enough for long operations like cache flush, sleep awake, erase.
	 *
	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
	 * host controller to wait for busy state until the card is busy
	 * without HW timeout.
	 *
	 * So, use infinite busy wait mode for operations that may take
	 * more than maximum HW busy timeout of 11s otherwise use finite
	 * busy wait mode.
	 */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
	else
		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);

	__sdhci_set_timeout(host, cmd);
}

/* Set CQHCI_ENABLE before CQE activation (see sdhci_tegra_cqe_enable). */
static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;

	reg = cqhci_readl(cq_host, CQHCI_CFG);
	reg |= CQHCI_ENABLE;
	cqhci_writel(cq_host, reg, CQHCI_CFG);
}

/* Clear CQHCI_ENABLE and restore the default SDHCI transfer mode. */
static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct sdhci_host *host = mmc_priv(mmc);
	u32 reg;

	reg = cqhci_readl(cq_host, CQHCI_CFG);
	reg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, reg, CQHCI_CFG);
	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
}

static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
	.write_l    = tegra_cqhci_writel,
	.enable	= sdhci_tegra_cqe_enable,
	.disable = sdhci_cqe_disable,
	.dumpregs = sdhci_tegra_dumpregs,
	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
	.pre_enable = sdhci_tegra_cqe_pre_enable,
	.post_disable = sdhci_tegra_cqe_post_disable,
};

/* Apply the per-SoC DMA addressing mask, if one is specified. */
static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *platform = sdhci_priv(host);
	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
	struct device *dev = mmc_dev(host->mmc);

	if (soc->dma_mask)
		return dma_set_mask_and_coherent(dev, soc->dma_mask);

	return 0;
}

/* SW-tuning ops used by Tegra20/Tegra30 class controllers. */
static const struct sdhci_ops tegra_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.ops  = &tegra_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
	.pdata = &sdhci_tegra20_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
};

static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_BROKEN_HS200 |
		   /*
		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
		    * though no command operation was in progress."
		    *
		    * The exact reason is unknown, as the same hardware seems
		    * to support Auto CMD23 on a downstream 3.1 kernel.
		    */
		   SDHCI_QUIRK2_ACMD23_BROKEN,
	.ops  = &tegra_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
	.pdata = &sdhci_tegra30_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_PADCALIB,
};

/* Adds 16-bit register write fixup (tegra_sdhci_writew) over tegra_sdhci_ops. */
static const struct sdhci_ops tegra114_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_w    = tegra_sdhci_writew,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
	.pdata = &sdhci_tegra114_pdata,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
	.pdata = &sdhci_tegra124_pdata,
	.dma_mask = DMA_BIT_MASK(34),
};

/* Tegra210 uses HW tuning (no platform_execute_tuning) and writew fixup. */
static const struct sdhci_ops tegra210_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_w    = tegra210_sdhci_writew,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra210_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
	.pdata = &sdhci_tegra210_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};

/* Tegra186/194 add CQHCI interrupt routing (.irq hook). */
static const struct sdhci_ops tegra186_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.irq = sdhci_tegra_cqhci_irq,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra186_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(40),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK |
		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
	.min_tap_delay = 84,
	.max_tap_delay = 136,
};

static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(39),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 96,
	.max_tap_delay = 139,
};

/* Newest SoCs first so the most specific compatible matches take priority. */
static const struct of_device_id sdhci_tegra_dt_match[] = {
	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
	{}
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);

/*
 * Register the SDHCI host; when HW command queueing is enabled via DT,
 * set up the CQHCI engine (v4 mode, CQE capabilities) before registration.
 */
static int sdhci_tegra_add_host(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct
sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	/* without HW command queueing, the plain registration path suffices */
	if (!tegra_host->enable_hwcq)
		return sdhci_add_host(host);

	sdhci_enable_v4_mode(host);

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(host->mmc->parent,
			       sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
	cq_host->ops = &sdhci_tegra_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}

static int sdhci_tegra_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct sdhci_tegra_soc_data *soc_data;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_tegra *tegra_host;
	struct clk *clk;
	int rc;

	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
	if (!match)
		return -EINVAL;
	soc_data = match->data;

	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
	if (IS_ERR(host))
		return PTR_ERR(host);
	pltfm_host = sdhci_priv(host);

	tegra_host = sdhci_pltfm_priv(pltfm_host);
	tegra_host->ddr_signaling = false;
	tegra_host->pad_calib_required = false;
	tegra_host->pad_control_available = false;
	tegra_host->soc_data = soc_data;

	/* pad voltage control is only hooked up when pinctrl info is present */
	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
		if (rc == 0)
			host->mmc_host_ops.start_signal_voltage_switch =
				sdhci_tegra_start_signal_voltage_switch;
	}

	/* Hook to periodically rerun pad calibration */
	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		host->mmc_host_ops.request = tegra_sdhci_request;

	host->mmc_host_ops.hs400_enhanced_strobe =
			tegra_sdhci_hs400_enhanced_strobe;

	/* SoCs without SW tuning use the HW tuning + correction path */
	if (!host->ops->platform_execute_tuning)
		host->mmc_host_ops.execute_tuning =
				tegra_sdhci_execute_hw_tuning;

	rc = mmc_of_parse(host->mmc);
	if (rc)
		goto err_parse_dt;

	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
		host->mmc->caps |= MMC_CAP_1_8V_DDR;

	/* HW busy detection is supported, but R1B responses are required. */
	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	tegra_sdhci_parse_dt(host);

	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(tegra_host->power_gpio)) {
		rc = PTR_ERR(tegra_host->power_gpio);
		goto err_power_req;
	}

	/*
	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host
	 * timeout clock and SW can choose TMCLK or SDCLK for hardware
	 * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of
	 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
	 *
	 * USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses
	 * 12Mhz TMCLK which is advertised in host capability register.
	 * With TMCLK of 12Mhz provides maximum data timeout period that can
	 * be achieved is 11s better than using SDCLK for data timeout.
	 *
	 * So, TMCLK is set to 12Mhz and kept enabled all the time on SoC's
	 * supporting separate TMCLK.
1679 */ 1680 1681 if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) { 1682 clk = devm_clk_get(&pdev->dev, "tmclk"); 1683 if (IS_ERR(clk)) { 1684 rc = PTR_ERR(clk); 1685 if (rc == -EPROBE_DEFER) 1686 goto err_power_req; 1687 1688 dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc); 1689 clk = NULL; 1690 } 1691 1692 clk_set_rate(clk, 12000000); 1693 rc = clk_prepare_enable(clk); 1694 if (rc) { 1695 dev_err(&pdev->dev, 1696 "failed to enable tmclk: %d\n", rc); 1697 goto err_power_req; 1698 } 1699 1700 tegra_host->tmclk = clk; 1701 } 1702 1703 clk = devm_clk_get(mmc_dev(host->mmc), NULL); 1704 if (IS_ERR(clk)) { 1705 rc = dev_err_probe(&pdev->dev, PTR_ERR(clk), 1706 "failed to get clock\n"); 1707 goto err_clk_get; 1708 } 1709 clk_prepare_enable(clk); 1710 pltfm_host->clk = clk; 1711 1712 tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev, 1713 "sdhci"); 1714 if (IS_ERR(tegra_host->rst)) { 1715 rc = PTR_ERR(tegra_host->rst); 1716 dev_err(&pdev->dev, "failed to get reset control: %d\n", rc); 1717 goto err_rst_get; 1718 } 1719 1720 rc = reset_control_assert(tegra_host->rst); 1721 if (rc) 1722 goto err_rst_get; 1723 1724 usleep_range(2000, 4000); 1725 1726 rc = reset_control_deassert(tegra_host->rst); 1727 if (rc) 1728 goto err_rst_get; 1729 1730 usleep_range(2000, 4000); 1731 1732 rc = sdhci_tegra_add_host(host); 1733 if (rc) 1734 goto err_add_host; 1735 1736 return 0; 1737 1738err_add_host: 1739 reset_control_assert(tegra_host->rst); 1740err_rst_get: 1741 clk_disable_unprepare(pltfm_host->clk); 1742err_clk_get: 1743 clk_disable_unprepare(tegra_host->tmclk); 1744err_power_req: 1745err_parse_dt: 1746 sdhci_pltfm_free(pdev); 1747 return rc; 1748} 1749 1750static int sdhci_tegra_remove(struct platform_device *pdev) 1751{ 1752 struct sdhci_host *host = platform_get_drvdata(pdev); 1753 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1754 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 1755 1756 sdhci_remove_host(host, 0); 1757 1758 
	/* hold the controller in reset, then drop clocks */
	reset_control_assert(tegra_host->rst);
	usleep_range(2000, 4000);
	clk_disable_unprepare(pltfm_host->clk);
	clk_disable_unprepare(tegra_host->tmclk);

	sdhci_pltfm_free(pdev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* Suspend CQE (if present) and the host, then gate the module clock. */
static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_suspend_host(host);
	if (ret) {
		/* roll back the CQE suspend so the host stays usable */
		cqhci_resume(host->mmc);
		return ret;
	}

	clk_disable_unprepare(pltfm_host->clk);
	return 0;
}

/* Re-enable the module clock, resume the host, then resume CQE. */
static int __maybe_unused sdhci_tegra_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	ret = clk_prepare_enable(pltfm_host->clk);
	if (ret)
		return ret;

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_resume(host->mmc);
		if (ret)
			goto suspend_host;
	}

	return 0;

suspend_host:
	sdhci_suspend_host(host);
disable_clk:
	clk_disable_unprepare(pltfm_host->clk);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
			 sdhci_tegra_resume);

static struct platform_driver sdhci_tegra_driver = {
	.driver		= {
		.name	= "sdhci-tegra",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_tegra_dt_match,
		.pm	= &sdhci_tegra_dev_pm_ops,
	},
	.probe		= sdhci_tegra_probe,
	.remove		= sdhci_tegra_remove,
};

module_platform_driver(sdhci_tegra_driver);

MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL v2");