// SPDX-License-Identifier: GPL-2.0-only
/*
 * UFS Host Controller driver for Exynos specific extensions
 *
 * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
 * Author: Seungwon Jeon <essuuj@gmail.com>
 * Author: Alim Akhtar <alim.akhtar@samsung.com>
 *
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "ufshci.h"
#include "unipro.h"

#include "ufs-exynos.h"

/*
 * Exynos's Vendor specific registers for UFSHCI
 */
#define HCI_TXPRDT_ENTRY_SIZE	0x00
#define PRDT_PREFECT_EN		BIT(31)
#define PRDT_SET_SIZE(x)	((x) & 0x1F)
#define HCI_RXPRDT_ENTRY_SIZE	0x04
#define HCI_1US_TO_CNT_VAL	0x0C
#define CNT_VAL_1US_MASK	0x3FF
#define HCI_UTRL_NEXUS_TYPE	0x40
#define HCI_UTMRL_NEXUS_TYPE	0x44
#define HCI_SW_RST		0x50
#define UFS_LINK_SW_RST		BIT(0)
#define UFS_UNIPRO_SW_RST	BIT(1)
#define UFS_SW_RST_MASK		(UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST)
#define HCI_DATA_REORDER	0x60
#define HCI_UNIPRO_APB_CLK_CTRL	0x68
#define UNIPRO_APB_CLK(v, x)	(((v) & ~0xF) | ((x) & 0xF))
#define HCI_AXIDMA_RWDATA_BURST_LEN	0x6C
#define HCI_GPIO_OUT		0x70
#define HCI_ERR_EN_PA_LAYER	0x78
#define HCI_ERR_EN_DL_LAYER	0x7C
#define HCI_ERR_EN_N_LAYER	0x80
#define HCI_ERR_EN_T_LAYER	0x84
#define HCI_ERR_EN_DME_LAYER	0x88
#define HCI_CLKSTOP_CTRL	0xB0
#define REFCLK_STOP		BIT(2)
#define UNIPRO_MCLK_STOP	BIT(1)
#define UNIPRO_PCLK_STOP	BIT(0)
#define CLK_STOP_MASK		(REFCLK_STOP |\
				 UNIPRO_MCLK_STOP |\
				 UNIPRO_PCLK_STOP)
#define HCI_MISC		0xB4
#define REFCLK_CTRL_EN		BIT(7)
#define UNIPRO_PCLK_CTRL_EN	BIT(6)
#define UNIPRO_MCLK_CTRL_EN	BIT(5)
#define HCI_CORECLK_CTRL_EN	BIT(4)
#define CLK_CTRL_EN_MASK	(REFCLK_CTRL_EN |\
				 UNIPRO_PCLK_CTRL_EN |\
				 UNIPRO_MCLK_CTRL_EN)
/* Device fatal error */
#define DFES_ERR_EN		BIT(31)
#define DFES_DEF_L2_ERRS	(UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\
				 UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
#define DFES_DEF_L3_ERRS	(UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\
				 UIC_NETWORK_BAD_DEVICEID_ENC |\
				 UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING)
#define DFES_DEF_L4_ERRS	(UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\
				 UIC_TRANSPORT_UNKNOWN_CPORTID |\
				 UIC_TRANSPORT_NO_CONNECTION_RX |\
				 UIC_TRANSPORT_BAD_TC)

enum {
	UNIPRO_L1_5 = 0,/* PHY Adapter */
	UNIPRO_L2,	/* Data Link */
	UNIPRO_L3,	/* Network */
	UNIPRO_L4,	/* Transport */
	UNIPRO_DME,	/* DME */
};

/*
 * UNIPRO registers
 */
#define UNIPRO_COMP_VERSION			0x000
#define UNIPRO_DME_PWR_REQ			0x090
#define UNIPRO_DME_PWR_REQ_POWERMODE		0x094
#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0	0x098
#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1	0x09C
#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2	0x0A0
#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0	0x0A4
#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1	0x0A8
#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2	0x0AC

/*
 * UFS Protector registers
 */
#define UFSPRSECURITY	0x010
#define NSSMU		BIT(14)
#define UFSPSBEGIN0	0x200
#define UFSPSEND0	0x204
#define UFSPSLUN0	0x208
#define UFSPSCTRL0	0x20C

#define CNTR_DIV_VAL	40

static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);

static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
	exynos_ufs_auto_ctrl_hcc(ufs, true);
}

static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
	exynos_ufs_auto_ctrl_hcc(ufs, false);
}

static inline void exynos_ufs_disable_auto_ctrl_hcc_save(
					struct exynos_ufs *ufs, u32 *val)
{
	*val = hci_readl(ufs, HCI_MISC);
	exynos_ufs_auto_ctrl_hcc(ufs, false);
}

static inline void exynos_ufs_auto_ctrl_hcc_restore(
					struct exynos_ufs *ufs, u32 *val)
{
	hci_writel(ufs, *val, HCI_MISC);
}

static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs)
{
	exynos_ufs_ctrl_clkstop(ufs, true);
}

static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
{
	exynos_ufs_ctrl_clkstop(ufs, false);
}

static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
{
	return 0;
}

static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite;
	int i;

	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17);
	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00);
	}
	exynos_ufs_disable_ov_tm(hba);

	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1);
	udelay(1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12));
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1);
	udelay(1600);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val);

	return 0;
}

static int exynos7_ufs_post_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	int i;

	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i),
			TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000)));
	}
	exynos_ufs_disable_ov_tm(hba);

	exynos_ufs_enable_dbg_mode(hba);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8);
	exynos_ufs_disable_dbg_mode(hba);

	return 0;
}

static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
						struct ufs_pa_layer_attr *pwr)
{
	unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE);

	return 0;
}

static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
						struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1);

	if (lanes == 1) {
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1);
		exynos_ufs_disable_dbg_mode(hba);
	}

	return 0;
}

/*
 * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
 * Control should be disabled in the below cases
 *	- Before host controller S/W reset
 *	- Access to UFS protector's register
 */
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en)
{
	u32 misc = hci_readl(ufs, HCI_MISC);

	if (en)
		hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC);
	else
		hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC);
}

static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en)
{
	u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL);
	u32 misc = hci_readl(ufs, HCI_MISC);

	if (en) {
		hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC);
		hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
	} else {
		hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
		hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC);
	}
}

static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct list_head *head = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long pclk_rate;
	u32 f_min, f_max;
	u8 div = 0;
	int ret = 0;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR(clki->clk)) {
			if (!strcmp(clki->name, "core_clk"))
				ufs->clk_hci_core = clki->clk;
			else if (!strcmp(clki->name, "sclk_unipro_main"))
				ufs->clk_unipro_main = clki->clk;
		}
	}

	if (!ufs->clk_hci_core || !ufs->clk_unipro_main) {
		dev_err(hba->dev, "failed to get clk info\n");
		ret = -EINVAL;
		goto out;
	}

	ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main);
	pclk_rate = clk_get_rate(ufs->clk_hci_core);
	f_min = ufs->pclk_avail_min;
	f_max = ufs->pclk_avail_max;

	if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
		do {
			pclk_rate /= (div + 1);

			if (pclk_rate <= f_max)
				break;
			div++;
		} while (pclk_rate >= f_min);
	}

	if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
		dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
		ret = -EINVAL;
		goto out;
	}

	ufs->pclk_rate = pclk_rate;
	ufs->pclk_div = div;

out:
	return ret;
}

static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs)
{
	if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
		u32 val;

		val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL);
		hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div),
			   HCI_UNIPRO_APB_CLK_CTRL);
	}
}

static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	ufshcd_dme_set(hba,
		UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl);
}

static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	const unsigned int div = 30, mult = 20;
	const unsigned long pwm_min = 3 * 1000 * 1000;
	const unsigned long pwm_max = 9 * 1000 * 1000;
	const int divs[] = {32, 16, 8, 4};
	unsigned long clk = 0, _clk, clk_period;
	int i = 0, clk_idx = -1;

	clk_period = UNIPRO_PCLK_PERIOD(ufs);
	for (i = 0; i < ARRAY_SIZE(divs); i++) {
		_clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div);
		if (_clk >= pwm_min && _clk <= pwm_max) {
			if (_clk > clk) {
				clk_idx = i;
				clk = _clk;
			}
		}
	}

	if (clk_idx == -1) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx);
		dev_err(hba->dev,
			"failed to decide pwm clock divider, will not change\n");
	}

	attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK;
}

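/*
 * exynos_ufs_calc_time_cntr - convert a period given in nanoseconds into the
 * equivalent number of UNIPRO pclk cycles. The remainder of the integer pclk
 * period is carried via 'precise' to limit the truncation error.
 */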
long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period)
{
	const int precise = 10;
	long pclk_rate = ufs->pclk_rate;
	long clk_period, fraction;

	clk_period = UNIPRO_PCLK_PERIOD(ufs);
	fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;

	return (period * precise) / ((clk_period * precise) + fraction);
}

static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs)
{
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;

	t_cfg->tx_linereset_p =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
	t_cfg->tx_linereset_n =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec);
	t_cfg->tx_high_z_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec);
	t_cfg->tx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec);
	t_cfg->tx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec);
	t_cfg->tx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt);

	t_cfg->rx_linereset =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec);
	t_cfg->rx_hibern8_wait =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec);
	t_cfg->rx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec);
	t_cfg->rx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec);
	t_cfg->rx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt);
	t_cfg->rx_stall_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt);
}

static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
	int i;

	exynos_ufs_set_pwm_clk_div(ufs);

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i),
				ufs->drv_data->uic_attr->rx_filler_enable);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i),
				RX_LINERESET(t_cfg->rx_linereset));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i),
				RX_BASE_NVAL_L(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i),
				RX_BASE_NVAL_H(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i),
				RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i),
				RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i),
				RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i),
				RX_OV_STALL_CNT(t_cfg->rx_stall_cnt));
	}

	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i),
				TX_LINERESET_P(t_cfg->tx_linereset_p));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i),
				TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i),
				TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i),
				TX_BASE_NVAL_L(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i),
				TX_BASE_NVAL_H(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i),
				TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i),
				TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i),
				TX_OV_H8_ENTER_EN |
				TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i),
				ufs->drv_data->uic_attr->tx_min_activatetime);
	}

	exynos_ufs_disable_ov_tm(hba);
}

static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	int i;

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g1_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g2_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g3_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i),
				attr->rx_hs_g1_prep_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i),
				attr->rx_hs_g2_prep_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i),
				attr->rx_hs_g3_prep_sync_len_cap);
	}

	if (attr->rx_adv_fine_gran_sup_en == 0) {
		for_each_ufs_rx_lane(ufs, i) {
			ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0);

			if (attr->rx_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP,
						i), attr->rx_min_actv_time_cap);

			if (attr->rx_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i),
						attr->rx_hibern8_time_cap);
		}
	} else if (attr->rx_adv_fine_gran_sup_en == 1) {
		for_each_ufs_rx_lane(ufs, i) {
			if (attr->rx_adv_fine_gran_step)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP,
						i), RX_ADV_FINE_GRAN_STEP(
						attr->rx_adv_fine_gran_step));

			if (attr->rx_adv_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(
						RX_ADV_MIN_ACTIVATETIME_CAP, i),
						attr->rx_adv_min_actv_time_cap);

			if (attr->rx_adv_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP,
						i),
						attr->rx_adv_hibern8_time_cap);
		}
	}

	exynos_ufs_disable_ov_tm(hba);
}

static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	enum {
		DEV_ID		= 0x00,
		PEER_DEV_ID	= 0x01,
		PEER_CPORT_ID	= 0x00,
		TRAFFIC_CLASS	= 0x00,
	};

	/* allow cport attributes to be set */
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE);

	/* local unipro attributes */
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), TRUE);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED);
}

static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
{
	u32 reg, val;

	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);

	/* make encryption disabled by default */
	reg = ufsp_readl(ufs, UFSPRSECURITY);
	ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY);
	ufsp_writel(ufs, 0x0, UFSPSBEGIN0);
	ufsp_writel(ufs, 0xffffffff, UFSPSEND0);
	ufsp_writel(ufs, 0xff, UFSPSLUN0);
	ufsp_writel(ufs, 0xf1, UFSPSCTRL0);

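	/* restore the HCI_MISC value saved before the protector access */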
	exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
}

static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
					struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
	u32 mask, sync_len;
	enum {
		SYNC_LEN_G1 = 80 * 1000, /* 80us */
		SYNC_LEN_G2 = 40 * 1000, /* 40us */
		SYNC_LEN_G3 = 20 * 1000, /* 20us */
	};
	int i;

	if (g == 1)
		sync_len = SYNC_LEN_G1;
	else if (g == 2)
		sync_len = SYNC_LEN_G2;
	else if (g == 3)
		sync_len = SYNC_LEN_G3;
	else
		return;

	mask = exynos_ufs_calc_time_cntr(ufs, sync_len);
	mask = (mask >> 8) & 0xff;

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask);

	exynos_ufs_disable_ov_tm(hba);
}

static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	struct ufs_dev_params ufs_exynos_cap;
	int ret;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	ufs_exynos_cap.tx_lanes = UFS_EXYNOS_LIMIT_NUM_LANES_TX;
	ufs_exynos_cap.rx_lanes = UFS_EXYNOS_LIMIT_NUM_LANES_RX;
	ufs_exynos_cap.hs_rx_gear = UFS_EXYNOS_LIMIT_HSGEAR_RX;
	ufs_exynos_cap.hs_tx_gear = UFS_EXYNOS_LIMIT_HSGEAR_TX;
	ufs_exynos_cap.pwm_rx_gear = UFS_EXYNOS_LIMIT_PWMGEAR_RX;
	ufs_exynos_cap.pwm_tx_gear = UFS_EXYNOS_LIMIT_PWMGEAR_TX;
	ufs_exynos_cap.rx_pwr_pwm = UFS_EXYNOS_LIMIT_RX_PWR_PWM;
	ufs_exynos_cap.tx_pwr_pwm = UFS_EXYNOS_LIMIT_TX_PWR_PWM;
	ufs_exynos_cap.rx_pwr_hs = UFS_EXYNOS_LIMIT_RX_PWR_HS;
	ufs_exynos_cap.tx_pwr_hs = UFS_EXYNOS_LIMIT_TX_PWR_HS;
	ufs_exynos_cap.hs_rate = UFS_EXYNOS_LIMIT_HS_RATE;
	ufs_exynos_cap.desired_working_mode =
				UFS_EXYNOS_LIMIT_DESIRED_MODE;

	ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
				       dev_max_params, dev_req_params);
	if (ret) {
		pr_err("%s: failed to determine capabilities\n", __func__);
		goto out;
	}

	if (ufs->drv_data->pre_pwr_change)
		ufs->drv_data->pre_pwr_change(ufs, dev_req_params);

	if (ufshcd_is_hs_mode(dev_req_params)) {
		exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params);

		switch (dev_req_params->hs_rate) {
		case PA_HS_MODE_A:
		case PA_HS_MODE_B:
			phy_calibrate(generic_phy);
			break;
		}
	}

	/* setting for three timeout values for traffic class #0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);

	return 0;
out:
	return ret;
}

#define PWR_MODE_STR_LEN	64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
				struct ufs_pa_layer_attr *pwr_max,
				struct ufs_pa_layer_attr *pwr_req)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
	int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
	char pwr_str[PWR_MODE_STR_LEN] = "";

	/* let default be PWM Gear 1, Lane 1 */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (ufs->drv_data->post_pwr_change)
		ufs->drv_data->post_pwr_change(ufs, pwr_req);

	if (ufshcd_is_hs_mode(pwr_req)) {
		switch (pwr_req->hs_rate) {
		case PA_HS_MODE_A:
		case PA_HS_MODE_B:
			phy_calibrate(generic_phy);
			break;
		}

		snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d",
			"FAST", pwr_req->hs_rate == PA_HS_MODE_A ? "A" : "B",
			gear, lanes);
	} else {
		snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d",
			"SLOW", gear, lanes);
	}

	dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str);

	return 0;
}

static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
						int tag, bool op)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);

	if (op)
		hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
	else
		hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);
}

static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba,
						int tag, u8 func)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE);

	switch (func) {
	case UFS_ABORT_TASK:
	case UFS_QUERY_TASK:
		hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE);
		break;
	case UFS_ABORT_TASK_SET:
	case UFS_CLEAR_TASK_SET:
	case UFS_LOGICAL_RESET:
	case UFS_QUERY_TASK_SET:
		hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE);
		break;
	}
}

static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct phy *generic_phy = ufs->phy;
	int ret = 0;

	if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
			&ufs->avail_ln_rx);
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
			&ufs->avail_ln_tx);
		WARN(ufs->avail_ln_rx != ufs->avail_ln_tx,
			"available data lane is not equal(rx:%d, tx:%d)\n",
			ufs->avail_ln_rx, ufs->avail_ln_tx);
	}

	phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
	ret = phy_init(generic_phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		goto out_exit_phy;
	}

	return 0;

out_exit_phy:
	phy_exit(generic_phy);

	return ret;
}

static void exynos_ufs_config_unipro(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
		DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS),
			ufs->drv_data->uic_attr->tx_trailingclks);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE),
			ufs->drv_data->uic_attr->pa_dbg_option_suite);
}

static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
{
	switch (index) {
	case UNIPRO_L1_5:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER);
		break;
	case UNIPRO_L2:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER);
		break;
	case UNIPRO_L3:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER);
		break;
	case UNIPRO_L4:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER);
		break;
	case UNIPRO_DME:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER);
		break;
	}
}

static int exynos_ufs_pre_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	/* hci */
	exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2);
	exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3);
	exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
	exynos_ufs_set_unipro_pclk_div(ufs);

	/* unipro */
	exynos_ufs_config_unipro(ufs);

	/* m-phy */
	exynos_ufs_phy_init(ufs);
	exynos_ufs_config_phy_time_attr(ufs);
	exynos_ufs_config_phy_cap_attr(ufs);

	if (ufs->drv_data->pre_link)
		ufs->drv_data->pre_link(ufs);

	return 0;
}

static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
{
	u32 val;

	val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL);
	hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
}

static int exynos_ufs_post_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	exynos_ufs_establish_connt(ufs);
	exynos_ufs_fit_aggr_timeout(ufs);

	hci_writel(ufs, 0xa, HCI_DATA_REORDER);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
	hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
	hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
	hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);

	if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), TRUE);

	if (attr->pa_granularity) {
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY),
				attr->pa_granularity);
		exynos_ufs_disable_dbg_mode(hba);

		if (attr->pa_tactivate)
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					attr->pa_tactivate);
		if (attr->pa_hibern8time &&
		    !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER))
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					attr->pa_hibern8time);
	}

	if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
		if (!attr->pa_granularity)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
					&attr->pa_granularity);
		if (!attr->pa_hibern8time)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					&attr->pa_hibern8time);
		/*
		 * do not wait for the HIBERN8 time to exit hibernation
		 */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0);

		if (attr->pa_granularity < 1 || attr->pa_granularity > 6) {
			/* Valid range for granularity: 1 ~ 6 */
			dev_warn(hba->dev,
				"%s: pa_granularity %d is invalid, assuming backwards compatibility\n",
				__func__,
				attr->pa_granularity);
			attr->pa_granularity = 6;
		}
	}

	phy_calibrate(generic_phy);

	if (ufs->drv_data->post_link)
		ufs->drv_data->post_link(ufs);

	return 0;
}

static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
{
	struct device_node *np = dev->of_node;
	struct exynos_ufs_drv_data *drv_data = &exynos_ufs_drvs;
	struct exynos_ufs_uic_attr *attr;
	int ret = 0;

	while (drv_data->compatible) {
		if (of_device_is_compatible(np, drv_data->compatible)) {
			ufs->drv_data = drv_data;
			break;
		}
		drv_data++;
	}

	if (ufs->drv_data && ufs->drv_data->uic_attr) {
		attr = ufs->drv_data->uic_attr;
	} else {
		dev_err(dev, "failed to get uic attributes\n");
		ret = -EINVAL;
		goto out;
	}

	ufs->pclk_avail_min = PCLK_AVAIL_MIN;
	ufs->pclk_avail_max = PCLK_AVAIL_MAX;

	attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN;
	attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL;
	attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP;
	attr->pa_granularity = PA_GRANULARITY_VAL;
	attr->pa_tactivate = PA_TACTIVATE_VAL;
	attr->pa_hibern8time = PA_HIBERN8TIME_VAL;

out:
	return ret;
}

static int exynos_ufs_init(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct exynos_ufs *ufs;
	int ret;

	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
	if (!ufs)
		return -ENOMEM;

	/* exynos-specific hci */
	ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
	if (IS_ERR(ufs->reg_hci)) {
		dev_err(dev, "cannot ioremap for hci vendor register\n");
		return PTR_ERR(ufs->reg_hci);
	}

	/* unipro */
	ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
	if (IS_ERR(ufs->reg_unipro)) {
		dev_err(dev, "cannot ioremap for unipro register\n");
		return PTR_ERR(ufs->reg_unipro);
	}

	/* ufs protector */
	ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
	if (IS_ERR(ufs->reg_ufsp)) {
		dev_err(dev, "cannot ioremap for ufs protector register\n");
		return PTR_ERR(ufs->reg_ufsp);
	}

	ret = exynos_ufs_parse_dt(dev, ufs);
	if (ret) {
		dev_err(dev, "failed to get dt info.\n");
		goto out;
	}

	ufs->phy = devm_phy_get(dev, "ufs-phy");
	if (IS_ERR(ufs->phy)) {
		ret = PTR_ERR(ufs->phy);
		dev_err(dev, "failed to get ufs-phy\n");
		goto out;
	}

	ret = phy_power_on(ufs->phy);
	if (ret)
		goto phy_off;

	ufs->hba = hba;
	ufs->opts = ufs->drv_data->opts;
	ufs->rx_sel_idx = PA_MAXDATALANES;
	if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
		ufs->rx_sel_idx = 0;
	hba->priv = (void *)ufs;
	hba->quirks = ufs->drv_data->quirks;
	if (ufs->drv_data->drv_init) {
		ret = ufs->drv_data->drv_init(dev, ufs);
		if (ret) {
			dev_err(dev, "failed to init drv-data\n");
			goto out;
		}
	}

	ret = exynos_ufs_get_clk_info(ufs);
	if (ret)
		goto out;
	exynos_ufs_specify_phy_time_attr(ufs);
	exynos_ufs_config_smu(ufs);
	return 0;

phy_off:
	phy_power_off(ufs->phy);
out:
	hba->priv = NULL;
	return ret;
}

static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);
	u32 val;
	int ret = 0;

	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);

	hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST);

	do {
		if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK))
			goto out;
	} while (time_before(jiffies, timeout));

	dev_err(hba->dev, "timeout host sw-reset\n");
	ret = -ETIMEDOUT;

out:
	exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
	return ret;
}

static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
	udelay(5);
	hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
}

static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	if (!enter) {
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_disable_auto_ctrl_hcc(ufs);
		exynos_ufs_ungate_clks(ufs);

		if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
			const unsigned int granularity_tbl[] = {
				1, 4, 8, 16, 32, 100
			};
			int h8_time = attr->pa_hibern8time *
					granularity_tbl[attr->pa_granularity - 1];
			unsigned long us;
			s64 delta;

			do {
				delta = h8_time - ktime_us_delta(ktime_get(),
							ufs->entry_hibern8_t);
				if (delta <= 0)
					break;

				us = min_t(s64, delta, USEC_PER_MSEC);
				if (us >= 10)
					usleep_range(us, us + 10);
			} while (1);
		}
	}
}

static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!enter) {
		u32 cur_mode = 0;
		u32 pwrmode;

		if (ufshcd_is_hs_mode(&ufs->dev_req_params))
			pwrmode = FAST_MODE;
		else
			pwrmode = SLOW_MODE;

		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode);
		if (cur_mode != (pwrmode << 4 | pwrmode)) {
			dev_warn(hba->dev, "%s: power mode change\n", __func__);
			hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf;
			hba->pwr_info.pwr_tx = cur_mode & 0xf;
			ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		}

		if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
			exynos_ufs_establish_connt(ufs);
	} else {
		ufs->entry_hibern8_t = ktime_get();
		exynos_ufs_gate_clks(ufs);
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_enable_auto_ctrl_hcc(ufs);
	}
}

static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_host_reset(hba);
		if (ret)
			return ret;
		exynos_ufs_dev_hw_reset(hba);
		break;
	case POST_CHANGE:
		exynos_ufs_calc_pwm_clk_div(ufs);
		if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
			exynos_ufs_enable_auto_ctrl_hcc(ufs);
		break;
	}

	return ret;
}

static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
					  enum ufs_notify_change_status status)
{
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = exynos_ufs_post_link(hba);
		break;
	}

	return ret;
}

static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
					      dev_req_params);
		break;
	case POST_CHANGE:
		ret = exynos_ufs_post_pwr_mode(hba, NULL, dev_req_params);
		break;
	}

	return ret;
}

static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
				     enum uic_cmd_dme enter,
				     enum ufs_notify_change_status notify)
{
	switch ((u8)notify) {
	case PRE_CHANGE:
		exynos_ufs_pre_hibern8(hba, enter);
		break;
	case POST_CHANGE:
		exynos_ufs_post_hibern8(hba, enter);
		break;
	}
}

static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!ufshcd_is_link_active(hba))
		phy_power_off(ufs->phy);

	return 0;
}

static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!ufshcd_is_link_active(hba))
		phy_power_on(ufs->phy);

	exynos_ufs_config_smu(ufs);

	return 0;
}

static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
	.name				= "exynos_ufs",
	.init				= exynos_ufs_init,
	.hce_enable_notify		= exynos_ufs_hce_enable_notify,
	.link_startup_notify		= exynos_ufs_link_startup_notify,
	.pwr_change_notify		= exynos_ufs_pwr_change_notify,
	.setup_xfer_req			= exynos_ufs_specify_nexus_t_xfer_req,
	.setup_task_mgmt		= exynos_ufs_specify_nexus_t_tm_req,
	.hibern8_notify			= exynos_ufs_hibern8_notify,
	.suspend			= exynos_ufs_suspend,
	.resume				= exynos_ufs_resume,
};

static int exynos_ufs_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	err = ufshcd_pltfrm_init(pdev, &ufs_hba_exynos_ops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}

static int exynos_ufs_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);
	return 0;
}

struct exynos_ufs_drv_data exynos_ufs_drvs = {
	.compatible		= "samsung,exynos7-ufs",
	.uic_attr		= &exynos7_uic_attr,
	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
				  UFSHCI_QUIRK_BROKEN_HCE |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
				  UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
	.opts			= EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
				  EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
				  EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER,
	.drv_init		= exynos7_ufs_drv_init,
	.pre_link		= exynos7_ufs_pre_link,
	.post_link		= exynos7_ufs_post_link,
	.pre_pwr_change		= exynos7_ufs_pre_pwr_change,
	.post_pwr_change	= exynos7_ufs_post_pwr_change,
};

static const struct of_device_id exynos_ufs_of_match[] = {
	{ .compatible = "samsung,exynos7-ufs",
	  .data	      = &exynos_ufs_drvs },
	{},
};

static const struct dev_pm_ops exynos_ufs_pm_ops = {
	.suspend	= ufshcd_pltfrm_suspend,
	.resume		= ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume	 = ufshcd_pltfrm_runtime_resume,
	.runtime_idle	 = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver exynos_ufs_pltform = {
	.probe	= exynos_ufs_probe,
	.remove	= exynos_ufs_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "exynos-ufshc",
		.pm	= &exynos_ufs_pm_ops,
		.of_match_table = of_match_ptr(exynos_ufs_of_match),
	},
};
module_platform_driver(exynos_ufs_pltform);

MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>");
MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>");
MODULE_DESCRIPTION("Exynos UFS HCI Driver");
MODULE_LICENSE("GPL v2");