1/* SPDX-License-Identifier: GPL-2.0-or-later */ 2/* 3 * Universal Flash Storage Host controller driver 4 * Copyright (C) 2011-2013 Samsung India Software Operations 5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 6 * 7 * Authors: 8 * Santosh Yaraganavi <santosh.sy@samsung.com> 9 * Vinayak Holikatti <h.vinayak@samsung.com> 10 */ 11 12#ifndef _UFSHCD_H 13#define _UFSHCD_H 14 15#include <linux/module.h> 16#include <linux/kernel.h> 17#include <linux/init.h> 18#include <linux/interrupt.h> 19#include <linux/io.h> 20#include <linux/delay.h> 21#include <linux/slab.h> 22#include <linux/spinlock.h> 23#include <linux/rwsem.h> 24#include <linux/workqueue.h> 25#include <linux/errno.h> 26#include <linux/types.h> 27#include <linux/wait.h> 28#include <linux/bitops.h> 29#include <linux/pm_runtime.h> 30#include <linux/clk.h> 31#include <linux/completion.h> 32#include <linux/regulator/consumer.h> 33#include <linux/bitfield.h> 34#include <linux/devfreq.h> 35#include <linux/keyslot-manager.h> 36#include "unipro.h" 37 38#include <asm/irq.h> 39#include <asm/byteorder.h> 40#include <scsi/scsi.h> 41#include <scsi/scsi_cmnd.h> 42#include <scsi/scsi_host.h> 43#include <scsi/scsi_tcq.h> 44#include <scsi/scsi_dbg.h> 45#include <scsi/scsi_eh.h> 46 47#include "ufs.h" 48#include "ufs_quirks.h" 49#include "ufshci.h" 50 51#define UFSHCD "ufshcd" 52#define UFSHCD_DRIVER_VERSION "0.2" 53 54struct ufs_hba; 55 56enum dev_cmd_type { 57 DEV_CMD_TYPE_NOP = 0x0, 58 DEV_CMD_TYPE_QUERY = 0x1, 59}; 60 61/** 62 * struct uic_command - UIC command structure 63 * @command: UIC command 64 * @argument1: UIC command argument 1 65 * @argument2: UIC command argument 2 66 * @argument3: UIC command argument 3 67 * @cmd_active: Indicate if UIC command is outstanding 68 * @done: UIC command completion 69 */ 70struct uic_command { 71 u32 command; 72 u32 argument1; 73 u32 argument2; 74 u32 argument3; 75 int cmd_active; 76 struct completion done; 77}; 78 79/* Used to differentiate the power 
management options */ 80enum ufs_pm_op { 81 UFS_RUNTIME_PM, 82 UFS_SYSTEM_PM, 83 UFS_SHUTDOWN_PM, 84}; 85 86#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM) 87#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM) 88#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM) 89 90/* Host <-> Device UniPro Link state */ 91enum uic_link_state { 92 UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */ 93 UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */ 94 UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */ 95 UIC_LINK_BROKEN_STATE = 3, /* Link is in broken state */ 96}; 97 98#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE) 99#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \ 100 UIC_LINK_ACTIVE_STATE) 101#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \ 102 UIC_LINK_HIBERN8_STATE) 103#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \ 104 UIC_LINK_BROKEN_STATE) 105#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE) 106#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \ 107 UIC_LINK_ACTIVE_STATE) 108#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \ 109 UIC_LINK_HIBERN8_STATE) 110#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \ 111 UIC_LINK_BROKEN_STATE) 112 113#define ufshcd_set_ufs_dev_active(h) \ 114 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE) 115#define ufshcd_set_ufs_dev_sleep(h) \ 116 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE) 117#define ufshcd_set_ufs_dev_poweroff(h) \ 118 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE) 119#define ufshcd_is_ufs_dev_active(h) \ 120 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE) 121#define ufshcd_is_ufs_dev_sleep(h) \ 122 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE) 123#define ufshcd_is_ufs_dev_poweroff(h) \ 124 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE) 125 126/* 127 * UFS Power management levels. 
128 * Each level is in increasing order of power savings. 129 */ 130enum ufs_pm_level { 131 UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */ 132 UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */ 133 UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */ 134 UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */ 135 UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */ 136 UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */ 137 UFS_PM_LVL_MAX 138}; 139 140struct ufs_pm_lvl_states { 141 enum ufs_dev_pwr_mode dev_state; 142 enum uic_link_state link_state; 143}; 144 145/** 146 * struct ufshcd_lrb - local reference block 147 * @utr_descriptor_ptr: UTRD address of the command 148 * @ucd_req_ptr: UCD address of the command 149 * @ucd_rsp_ptr: Response UPIU address for this command 150 * @ucd_prdt_ptr: PRDT address of the command 151 * @utrd_dma_addr: UTRD dma address for debug 152 * @ucd_prdt_dma_addr: PRDT dma address for debug 153 * @ucd_rsp_dma_addr: UPIU response dma address for debug 154 * @ucd_req_dma_addr: UPIU request dma address for debug 155 * @cmd: pointer to SCSI command 156 * @sense_buffer: pointer to sense buffer address of the SCSI command 157 * @sense_bufflen: Length of the sense buffer 158 * @scsi_status: SCSI status of the command 159 * @command_type: SCSI, UFS, Query. 
160 * @task_tag: Task tag of the command 161 * @lun: LUN of the command 162 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) 163 * @issue_time_stamp: time stamp for debug purposes 164 * @compl_time_stamp: time stamp for statistics 165 * @crypto_key_slot: the key slot to use for inline crypto (-1 if none) 166 * @data_unit_num: the data unit number for the first block for inline crypto 167 * @req_abort_skip: skip request abort task flag 168 */ 169struct ufshcd_lrb { 170 struct utp_transfer_req_desc *utr_descriptor_ptr; 171 struct utp_upiu_req *ucd_req_ptr; 172 struct utp_upiu_rsp *ucd_rsp_ptr; 173 struct ufshcd_sg_entry *ucd_prdt_ptr; 174 175 dma_addr_t utrd_dma_addr; 176 dma_addr_t ucd_req_dma_addr; 177 dma_addr_t ucd_rsp_dma_addr; 178 dma_addr_t ucd_prdt_dma_addr; 179 180 struct scsi_cmnd *cmd; 181 u8 *sense_buffer; 182 unsigned int sense_bufflen; 183 int scsi_status; 184 185 int command_type; 186 int task_tag; 187 u8 lun; /* UPIU LUN id field is only 8-bit wide */ 188 bool intr_cmd; 189 ktime_t issue_time_stamp; 190 ktime_t compl_time_stamp; 191#ifdef CONFIG_SCSI_UFS_CRYPTO 192 int crypto_key_slot; 193 u64 data_unit_num; 194#endif 195 196 bool req_abort_skip; 197}; 198 199/** 200 * struct ufs_query - holds relevant data structures for query request 201 * @request: request upiu and function 202 * @descriptor: buffer for sending/receiving descriptor 203 * @response: response upiu and response 204 */ 205struct ufs_query { 206 struct ufs_query_req request; 207 u8 *descriptor; 208 struct ufs_query_res response; 209}; 210 211/** 212 * struct ufs_dev_cmd - all assosiated fields with device management commands 213 * @type: device management command type - Query, NOP OUT 214 * @lock: lock to allow one command at a time 215 * @complete: internal commands completion 216 */ 217struct ufs_dev_cmd { 218 enum dev_cmd_type type; 219 struct mutex lock; 220 struct completion *complete; 221 struct ufs_query query; 222}; 223 224/** 225 * struct 
ufs_clk_info - UFS clock related info 226 * @list: list headed by hba->clk_list_head 227 * @clk: clock node 228 * @name: clock name 229 * @max_freq: maximum frequency supported by the clock 230 * @min_freq: min frequency that can be used for clock scaling 231 * @curr_freq: indicates the current frequency that it is set to 232 * @keep_link_active: indicates that the clk should not be disabled if 233 link is active 234 * @enabled: variable to check against multiple enable/disable 235 */ 236struct ufs_clk_info { 237 struct list_head list; 238 struct clk *clk; 239 const char *name; 240 u32 max_freq; 241 u32 min_freq; 242 u32 curr_freq; 243 bool keep_link_active; 244 bool enabled; 245}; 246 247enum ufs_notify_change_status { 248 PRE_CHANGE, 249 POST_CHANGE, 250}; 251 252struct ufs_pa_layer_attr { 253 u32 gear_rx; 254 u32 gear_tx; 255 u32 lane_rx; 256 u32 lane_tx; 257 u32 pwr_rx; 258 u32 pwr_tx; 259 u32 hs_rate; 260}; 261 262struct ufs_pwr_mode_info { 263 bool is_valid; 264 struct ufs_pa_layer_attr info; 265}; 266 267/** 268 * struct ufs_hba_variant_ops - variant specific callbacks 269 * @name: variant name 270 * @init: called when the driver is initialized 271 * @exit: called to cleanup everything done in init 272 * @get_ufs_hci_version: called to get UFS HCI version 273 * @clk_scale_notify: notifies that clks are scaled up/down 274 * @setup_clocks: called before touching any of the controller registers 275 * @setup_regulators: called before accessing the host controller 276 * @hce_enable_notify: called before and after HCE enable bit is set to allow 277 * variant specific Uni-Pro initialization. 278 * @link_startup_notify: called before and after Link startup is carried out 279 * to allow variant specific Uni-Pro initialization. 280 * @pwr_change_notify: called before and after a power mode change 281 * is carried out to allow vendor spesific capabilities 282 * to be set. 
283 * @setup_xfer_req: called before any transfer request is issued 284 * to set some things 285 * @setup_task_mgmt: called before any task management request is issued 286 * to set some things 287 * @hibern8_notify: called around hibern8 enter/exit 288 * @apply_dev_quirks: called to apply device specific quirks 289 * @suspend: called during host controller PM callback 290 * @resume: called during host controller PM callback 291 * @dbg_register_dump: used to dump controller debug information 292 * @phy_initialization: used to initialize phys 293 * @device_reset: called to issue a reset pulse on the UFS device 294 * @program_key: program or evict an inline encryption key 295 */ 296struct ufs_hba_variant_ops { 297 const char *name; 298 int (*init)(struct ufs_hba *); 299 void (*exit)(struct ufs_hba *); 300 u32 (*get_ufs_hci_version)(struct ufs_hba *); 301 int (*clk_scale_notify)(struct ufs_hba *, bool, 302 enum ufs_notify_change_status); 303 int (*setup_clocks)(struct ufs_hba *, bool, 304 enum ufs_notify_change_status); 305 int (*setup_regulators)(struct ufs_hba *, bool); 306 int (*hce_enable_notify)(struct ufs_hba *, 307 enum ufs_notify_change_status); 308 int (*link_startup_notify)(struct ufs_hba *, 309 enum ufs_notify_change_status); 310 int (*pwr_change_notify)(struct ufs_hba *, 311 enum ufs_notify_change_status status, 312 struct ufs_pa_layer_attr *, 313 struct ufs_pa_layer_attr *); 314 void (*setup_xfer_req)(struct ufs_hba *, int, bool); 315 void (*setup_task_mgmt)(struct ufs_hba *, int, u8); 316 void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme, 317 enum ufs_notify_change_status); 318 int (*apply_dev_quirks)(struct ufs_hba *hba); 319 void (*fixup_dev_quirks)(struct ufs_hba *hba); 320 int (*suspend)(struct ufs_hba *, enum ufs_pm_op); 321 int (*resume)(struct ufs_hba *, enum ufs_pm_op); 322 void (*dbg_register_dump)(struct ufs_hba *hba); 323 int (*phy_initialization)(struct ufs_hba *); 324 int (*device_reset)(struct ufs_hba *hba); 325 void 
(*config_scaling_param)(struct ufs_hba *hba, 326 struct devfreq_dev_profile *profile, 327 void *data); 328 int (*program_key)(struct ufs_hba *hba, 329 const union ufs_crypto_cfg_entry *cfg, int slot); 330}; 331 332/* clock gating state */ 333enum clk_gating_state { 334 CLKS_OFF, 335 CLKS_ON, 336 REQ_CLKS_OFF, 337 REQ_CLKS_ON, 338}; 339 340/** 341 * struct ufs_clk_gating - UFS clock gating related info 342 * @gate_work: worker to turn off clocks after some delay as specified in 343 * delay_ms 344 * @ungate_work: worker to turn on clocks that will be used in case of 345 * interrupt context 346 * @state: the current clocks state 347 * @delay_ms: gating delay in ms 348 * @is_suspended: clk gating is suspended when set to 1 which can be used 349 * during suspend/resume 350 * @delay_attr: sysfs attribute to control delay_attr 351 * @enable_attr: sysfs attribute to enable/disable clock gating 352 * @is_enabled: Indicates the current status of clock gating 353 * @active_reqs: number of requests that are pending and should be waited for 354 * completion before gating clocks. 355 */ 356struct ufs_clk_gating { 357 struct delayed_work gate_work; 358 struct work_struct ungate_work; 359 enum clk_gating_state state; 360 unsigned long delay_ms; 361 bool is_suspended; 362 struct device_attribute delay_attr; 363 struct device_attribute enable_attr; 364 bool is_enabled; 365 int active_reqs; 366 struct workqueue_struct *clk_gating_workq; 367}; 368 369struct ufs_saved_pwr_info { 370 struct ufs_pa_layer_attr info; 371 bool is_valid; 372}; 373 374/** 375 * struct ufs_clk_scaling - UFS clock scaling related data 376 * @active_reqs: number of requests that are pending. If this is zero when 377 * devfreq ->target() function is called then schedule "suspend_work" to 378 * suspend devfreq. 
379 * @tot_busy_t: Total busy time in current polling window 380 * @window_start_t: Start time (in jiffies) of the current polling window 381 * @busy_start_t: Start time of current busy period 382 * @enable_attr: sysfs attribute to enable/disable clock scaling 383 * @saved_pwr_info: UFS power mode may also be changed during scaling and this 384 * one keeps track of previous power mode. 385 * @workq: workqueue to schedule devfreq suspend/resume work 386 * @suspend_work: worker to suspend devfreq 387 * @resume_work: worker to resume devfreq 388 * @is_allowed: tracks if scaling is currently allowed or not 389 * @is_busy_started: tracks if busy period has started or not 390 * @is_suspended: tracks if devfreq is suspended or not 391 */ 392struct ufs_clk_scaling { 393 int active_reqs; 394 unsigned long tot_busy_t; 395 ktime_t window_start_t; 396 ktime_t busy_start_t; 397 struct device_attribute enable_attr; 398 struct ufs_saved_pwr_info saved_pwr_info; 399 struct workqueue_struct *workq; 400 struct work_struct suspend_work; 401 struct work_struct resume_work; 402 bool is_allowed; 403 bool is_busy_started; 404 bool is_suspended; 405}; 406 407#define UFS_ERR_REG_HIST_LENGTH 8 408/** 409 * struct ufs_err_reg_hist - keeps history of errors 410 * @pos: index to indicate cyclic buffer position 411 * @reg: cyclic buffer for registers value 412 * @tstamp: cyclic buffer for time stamp 413 */ 414struct ufs_err_reg_hist { 415 int pos; 416 u32 reg[UFS_ERR_REG_HIST_LENGTH]; 417 ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH]; 418}; 419 420/** 421 * struct ufs_stats - keeps usage/err statistics 422 * @last_intr_status: record the last interrupt status. 423 * @last_intr_ts: record the last interrupt timestamp. 424 * @hibern8_exit_cnt: Counter to keep track of number of exits, 425 * reset this after link-startup. 426 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit. 427 * Clear after the first successful command completion. 
428 * @pa_err: tracks pa-uic errors 429 * @dl_err: tracks dl-uic errors 430 * @nl_err: tracks nl-uic errors 431 * @tl_err: tracks tl-uic errors 432 * @dme_err: tracks dme errors 433 * @auto_hibern8_err: tracks auto-hibernate errors 434 * @fatal_err: tracks fatal errors 435 * @linkup_err: tracks link-startup errors 436 * @resume_err: tracks resume errors 437 * @suspend_err: tracks suspend errors 438 * @dev_reset: tracks device reset events 439 * @host_reset: tracks host reset events 440 * @tsk_abort: tracks task abort events 441 */ 442struct ufs_stats { 443 u32 last_intr_status; 444 ktime_t last_intr_ts; 445 446 u32 hibern8_exit_cnt; 447 ktime_t last_hibern8_exit_tstamp; 448 449 /* uic specific errors */ 450 struct ufs_err_reg_hist pa_err; 451 struct ufs_err_reg_hist dl_err; 452 struct ufs_err_reg_hist nl_err; 453 struct ufs_err_reg_hist tl_err; 454 struct ufs_err_reg_hist dme_err; 455 456 /* fatal errors */ 457 struct ufs_err_reg_hist auto_hibern8_err; 458 struct ufs_err_reg_hist fatal_err; 459 struct ufs_err_reg_hist link_startup_err; 460 struct ufs_err_reg_hist resume_err; 461 struct ufs_err_reg_hist suspend_err; 462 463 /* abnormal events */ 464 struct ufs_err_reg_hist dev_reset; 465 struct ufs_err_reg_hist host_reset; 466 struct ufs_err_reg_hist task_abort; 467}; 468 469enum ufshcd_quirks { 470 /* Interrupt aggregation support is broken */ 471 UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0, 472 473 /* 474 * delay before each dme command is required as the unipro 475 * layer has shown instabilities 476 */ 477 UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1, 478 479 /* 480 * If UFS host controller is having issue in processing LCC (Line 481 * Control Command) coming from device then enable this quirk. 482 * When this quirk is enabled, host controller driver should disable 483 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE 484 * attribute of device to 0). 
485 */ 486 UFSHCD_QUIRK_BROKEN_LCC = 1 << 2, 487 488 /* 489 * The attribute PA_RXHSUNTERMCAP specifies whether or not the 490 * inbound Link supports unterminated line in HS mode. Setting this 491 * attribute to 1 fixes moving to HS gear. 492 */ 493 UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3, 494 495 /* 496 * This quirk needs to be enabled if the host controller only allows 497 * accessing the peer dme attributes in AUTO mode (FAST AUTO or 498 * SLOW AUTO). 499 */ 500 UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4, 501 502 /* 503 * This quirk needs to be enabled if the host controller doesn't 504 * advertise the correct version in UFS_VER register. If this quirk 505 * is enabled, standard UFS host driver will call the vendor specific 506 * ops (get_ufs_hci_version) to get the correct version. 507 */ 508 UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5, 509 510 /* 511 * Clear handling for transfer/task request list is just opposite. 512 */ 513 UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6, 514 515 /* 516 * This quirk needs to be enabled if host controller doesn't allow 517 * that the interrupt aggregation timer and counter are reset by s/w. 518 */ 519 UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7, 520 521 /* 522 * This quirks needs to be enabled if host controller cannot be 523 * enabled via HCE register. 524 */ 525 UFSHCI_QUIRK_BROKEN_HCE = 1 << 8, 526 527 /* 528 * This quirk needs to be enabled if the host controller regards 529 * resolution of the values of PRDTO and PRDTL in UTRD as byte. 530 */ 531 UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9, 532 533 /* 534 * This quirk needs to be enabled if the host controller reports 535 * OCS FATAL ERROR with device error through sense data 536 */ 537 UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10, 538 539 /* 540 * This quirk needs to be enabled if the host controller has 541 * auto-hibernate capability but it doesn't work. 
542 */ 543 UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11, 544 545 /* 546 * This quirk needs to disable manual flush for write booster 547 */ 548 UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12, 549 550 /* 551 * This quirk needs to disable unipro timeout values 552 * before power mode change 553 */ 554 UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13, 555 556 /* 557 * This quirk allows only sg entries aligned with page size. 558 */ 559 UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14, 560}; 561 562enum ufshcd_caps { 563 /* Allow dynamic clk gating */ 564 UFSHCD_CAP_CLK_GATING = 1 << 0, 565 566 /* Allow hiberb8 with clk gating */ 567 UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1, 568 569 /* Allow dynamic clk scaling */ 570 UFSHCD_CAP_CLK_SCALING = 1 << 2, 571 572 /* Allow auto bkops to enabled during runtime suspend */ 573 UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3, 574 575 /* 576 * This capability allows host controller driver to use the UFS HCI's 577 * interrupt aggregation capability. 578 * CAUTION: Enabling this might reduce overall UFS throughput. 579 */ 580 UFSHCD_CAP_INTR_AGGR = 1 << 4, 581 582 /* 583 * This capability allows the device auto-bkops to be always enabled 584 * except during suspend (both runtime and suspend). 585 * Enabling this capability means that device will always be allowed 586 * to do background operation when it's active but it might degrade 587 * the performance of ongoing read/write operations. 588 */ 589 UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5, 590 591 /* 592 * This capability allows host controller driver to automatically 593 * enable runtime power management by itself instead of waiting 594 * for userspace to control the power management. 595 */ 596 UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6, 597 598 /* 599 * This capability allows the host controller driver to turn-on 600 * WriteBooster, if the underlying device supports it and is 601 * provisioned to be used. This would increase the write performance. 
602 */ 603 UFSHCD_CAP_WB_EN = 1 << 7, 604 605 /* 606 * This capability allows the host controller driver to use the 607 * inline crypto engine, if it is present 608 */ 609 UFSHCD_CAP_CRYPTO = 1 << 8, 610}; 611 612struct ufs_hba_variant_params { 613 struct devfreq_dev_profile devfreq_profile; 614 struct devfreq_simple_ondemand_data ondemand_data; 615 u16 hba_enable_delay_us; 616 u32 wb_flush_threshold; 617}; 618 619/** 620 * struct ufs_hba - per adapter private structure 621 * @mmio_base: UFSHCI base register address 622 * @ucdl_base_addr: UFS Command Descriptor base address 623 * @utrdl_base_addr: UTP Transfer Request Descriptor base address 624 * @utmrdl_base_addr: UTP Task Management Descriptor base address 625 * @ucdl_dma_addr: UFS Command Descriptor DMA address 626 * @utrdl_dma_addr: UTRDL DMA address 627 * @utmrdl_dma_addr: UTMRDL DMA address 628 * @host: Scsi_Host instance of the driver 629 * @dev: device handle 630 * @lrb: local reference block 631 * @cmd_queue: Used to allocate command tags from hba->host->tag_set. 632 * @outstanding_tasks: Bits representing outstanding task requests 633 * @outstanding_reqs: Bits representing outstanding transfer requests 634 * @capabilities: UFS Controller Capabilities 635 * @nutrs: Transfer Request Queue depth supported by controller 636 * @nutmrs: Task Management Queue depth supported by controller 637 * @ufs_version: UFS Version to which controller complies 638 * @vops: pointer to variant specific operations 639 * @priv: pointer to variant specific private data 640 * @irq: Irq number of the controller 641 * @active_uic_cmd: handle of active UIC command 642 * @uic_cmd_mutex: mutex for uic command 643 * @tmf_tag_set: TMF tag set. 644 * @tmf_queue: Used to allocate TMF tags. 
645 * @pwr_done: completion for power mode change 646 * @ufshcd_state: UFSHCD states 647 * @eh_flags: Error handling flags 648 * @intr_mask: Interrupt Mask Bits 649 * @ee_ctrl_mask: Exception event control mask 650 * @is_powered: flag to check if HBA is powered 651 * @eh_wq: Workqueue that eh_work works on 652 * @eh_work: Worker to handle UFS errors that require s/w attention 653 * @eeh_work: Worker to handle exception events 654 * @errors: HBA errors 655 * @uic_error: UFS interconnect layer error status 656 * @saved_err: sticky error mask 657 * @saved_uic_err: sticky UIC error mask 658 * @force_reset: flag to force eh_work perform a full reset 659 * @force_pmc: flag to force a power mode change 660 * @silence_err_logs: flag to silence error logs 661 * @dev_cmd: ufs device management command information 662 * @last_dme_cmd_tstamp: time stamp of the last completed DME command 663 * @auto_bkops_enabled: to track whether bkops is enabled in device 664 * @vreg_info: UFS device voltage regulator information 665 * @clk_list_head: UFS host controller clocks list node head 666 * @pwr_info: holds current power mode 667 * @max_pwr_info: keeps the device max valid pwm 668 * @desc_size: descriptor sizes reported by device 669 * @urgent_bkops_lvl: keeps track of urgent bkops level for device 670 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for 671 * device is known or not. 
672 * @scsi_block_reqs_cnt: reference counting for scsi block requests 673 * @crypto_capabilities: Content of crypto capabilities register (0x100) 674 * @crypto_cap_array: Array of crypto capabilities 675 * @crypto_cfg_register: Start of the crypto cfg array 676 * @ksm: the keyslot manager tied to this hba 677 */ 678struct ufs_hba { 679 void __iomem *mmio_base; 680 681 /* Virtual memory reference */ 682 struct utp_transfer_cmd_desc *ucdl_base_addr; 683 struct utp_transfer_req_desc *utrdl_base_addr; 684 struct utp_task_req_desc *utmrdl_base_addr; 685 686 /* DMA memory reference */ 687 dma_addr_t ucdl_dma_addr; 688 dma_addr_t utrdl_dma_addr; 689 dma_addr_t utmrdl_dma_addr; 690 691 struct Scsi_Host *host; 692 struct device *dev; 693 struct request_queue *cmd_queue; 694 /* 695 * This field is to keep a reference to "scsi_device" corresponding to 696 * "UFS device" W-LU. 697 */ 698 struct scsi_device *sdev_ufs_device; 699 struct scsi_device *sdev_rpmb; 700 701 enum ufs_dev_pwr_mode curr_dev_pwr_mode; 702 enum uic_link_state uic_link_state; 703 /* Desired UFS power management level during runtime PM */ 704 enum ufs_pm_level rpm_lvl; 705 /* Desired UFS power management level during system PM */ 706 enum ufs_pm_level spm_lvl; 707 struct device_attribute rpm_lvl_attr; 708 struct device_attribute spm_lvl_attr; 709 int pm_op_in_progress; 710 711 /* Auto-Hibernate Idle Timer register value */ 712 u32 ahit; 713 714 struct ufshcd_lrb *lrb; 715 716 unsigned long outstanding_tasks; 717 unsigned long outstanding_reqs; 718 719 u32 capabilities; 720 int nutrs; 721 int nutmrs; 722 u32 ufs_version; 723 const struct ufs_hba_variant_ops *vops; 724 struct ufs_hba_variant_params *vps; 725 void *priv; 726 unsigned int irq; 727 bool is_irq_enabled; 728 enum ufs_ref_clk_freq dev_ref_clk_freq; 729 730 unsigned int quirks; /* Deviations from standard UFSHCI spec. */ 731 732 /* Device deviations from standard UFS device spec. 
*/ 733 unsigned int dev_quirks; 734 735 struct blk_mq_tag_set tmf_tag_set; 736 struct request_queue *tmf_queue; 737 struct request **tmf_rqs; 738 739 struct uic_command *active_uic_cmd; 740 struct mutex uic_cmd_mutex; 741 struct completion *uic_async_done; 742 743 u32 ufshcd_state; 744 u32 eh_flags; 745 u32 intr_mask; 746 u16 ee_ctrl_mask; 747 bool is_powered; 748 749 /* Work Queues */ 750 struct workqueue_struct *eh_wq; 751 struct work_struct eh_work; 752 struct work_struct eeh_work; 753 754 /* HBA Errors */ 755 u32 errors; 756 u32 uic_error; 757 u32 saved_err; 758 u32 saved_uic_err; 759 struct ufs_stats ufs_stats; 760 bool force_reset; 761 bool force_pmc; 762 bool silence_err_logs; 763 764 /* Device management request data */ 765 struct ufs_dev_cmd dev_cmd; 766 ktime_t last_dme_cmd_tstamp; 767 768 /* Keeps information of the UFS device connected to this host */ 769 struct ufs_dev_info dev_info; 770 bool auto_bkops_enabled; 771 struct ufs_vreg_info vreg_info; 772 struct list_head clk_list_head; 773 774 bool wlun_dev_clr_ua; 775 776 /* Number of requests aborts */ 777 int req_abort_count; 778 779 /* Number of lanes available (1 or 2) for Rx/Tx */ 780 u32 lanes_per_direction; 781 struct ufs_pa_layer_attr pwr_info; 782 struct ufs_pwr_mode_info max_pwr_info; 783 784 struct ufs_clk_gating clk_gating; 785 /* Control to enable/disable host capabilities */ 786 u32 caps; 787 788 struct devfreq *devfreq; 789 struct ufs_clk_scaling clk_scaling; 790 bool is_sys_suspended; 791 792 enum bkops_status urgent_bkops_lvl; 793 bool is_urgent_bkops_lvl_checked; 794 795 struct rw_semaphore clk_scaling_lock; 796 unsigned char desc_size[QUERY_DESC_IDN_MAX]; 797 atomic_t scsi_block_reqs_cnt; 798 799 struct device bsg_dev; 800 struct request_queue *bsg_queue; 801 bool wb_buf_flush_enabled; 802 bool wb_enabled; 803 struct delayed_work rpm_dev_flush_recheck_work; 804 805#ifdef CONFIG_SCSI_UFS_CRYPTO 806 union ufs_crypto_capabilities crypto_capabilities; 807 union ufs_crypto_cap_entry 
*crypto_cap_array; 808 u32 crypto_cfg_register; 809 struct blk_keyslot_manager ksm; 810#endif 811}; 812 813/* Returns true if clocks can be gated. Otherwise false */ 814static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba) 815{ 816 return hba->caps & UFSHCD_CAP_CLK_GATING; 817} 818static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba) 819{ 820 return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; 821} 822static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba) 823{ 824 return hba->caps & UFSHCD_CAP_CLK_SCALING; 825} 826static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba) 827{ 828 return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND; 829} 830static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba) 831{ 832 return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND; 833} 834 835static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba) 836{ 837/* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/ 838#ifndef CONFIG_SCSI_UFS_DWC 839 if ((hba->caps & UFSHCD_CAP_INTR_AGGR) && 840 !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR)) 841 return true; 842 else 843 return false; 844#else 845return true; 846#endif 847} 848 849static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba) 850{ 851 return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) && 852 !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8); 853} 854 855static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba) 856{ 857 return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? 
true : false; 858} 859 860static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba) 861{ 862 return hba->caps & UFSHCD_CAP_WB_EN; 863} 864 865#define ufshcd_writel(hba, val, reg) \ 866 writel((val), (hba)->mmio_base + (reg)) 867#define ufshcd_readl(hba, reg) \ 868 readl((hba)->mmio_base + (reg)) 869 870/** 871 * ufshcd_rmwl - read modify write into a register 872 * @hba - per adapter instance 873 * @mask - mask to apply on read value 874 * @val - actual value to write 875 * @reg - register address 876 */ 877static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg) 878{ 879 u32 tmp; 880 881 tmp = ufshcd_readl(hba, reg); 882 tmp &= ~mask; 883 tmp |= (val & mask); 884 ufshcd_writel(hba, tmp, reg); 885} 886 887int ufshcd_alloc_host(struct device *, struct ufs_hba **); 888void ufshcd_dealloc_host(struct ufs_hba *); 889int ufshcd_hba_enable(struct ufs_hba *hba); 890int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); 891int ufshcd_link_recovery(struct ufs_hba *hba); 892int ufshcd_make_hba_operational(struct ufs_hba *hba); 893void ufshcd_remove(struct ufs_hba *); 894int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); 895void ufshcd_delay_us(unsigned long us, unsigned long tolerance); 896int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, 897 u32 val, unsigned long interval_us, 898 unsigned long timeout_ms); 899void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk); 900void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, 901 u32 reg); 902 903static inline void check_upiu_size(void) 904{ 905 BUILD_BUG_ON(ALIGNED_UPIU_SIZE < 906 GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); 907} 908 909/** 910 * ufshcd_set_variant - set variant specific data to the hba 911 * @hba - per adapter instance 912 * @variant - pointer to variant specific data 913 */ 914static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant) 915{ 916 BUG_ON(!hba); 917 hba->priv = variant; 918} 919 920/** 921 * 
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}

/* Returns true if auto-bkops may stay enabled in all states except suspend */
static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
							struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}

/*
 * Returns the LUN to address in WriteBooster queries: the dedicated WB LU
 * when the device uses LU-dedicated buffer mode, otherwise 0.
 */
static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
{
	if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
		return hba->dev_info.wb_dedicated_lu;
	return 0;
}

/* Host controller PM entry points and DME attribute accessors */
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */

/* Set a local DME attribute (NORMAL set type) */
static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

/* Set a local DME attribute (STATIC set type) */
static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

/* Set a peer (device-side) DME attribute (NORMAL set type) */
static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

/* Set a DME attribute on the peer (device) side, STATIC mode */
static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

/* Read a DME attribute on the local (host) side */
static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

/* Read a DME attribute on the peer (device) side */
static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}

/* true when both RX and TX are configured for a Fast or FastAuto power mode */
static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
	return (pwr_info->pwr_rx == FAST_MODE ||
		pwr_info->pwr_rx == FASTAUTO_MODE) &&
		(pwr_info->pwr_tx == FAST_MODE ||
		pwr_info->pwr_tx == FASTAUTO_MODE);
}

/* Clear PA_LOCAL_TX_LCC_ENABLE on the host side of the link */
static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}

/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
	enum flag_idn idn, u8 index, bool *flag_res);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);

/* Modes for ufshcd_read_string_desc(): ASCII conversion vs. raw descriptor */
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);

int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
	int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);

/*
 * Wrapper functions for safely calling variant operations: each checks
 * that the variant driver registered the hook and otherwise falls back
 * to a benign default, so callers need not test hba->vops themselves.
 */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
	if (hba->vops)
		return hba->vops->name;
	return "";
}

static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}

static inline void ufshcd_vops_exit(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->exit)
		return hba->vops->exit(hba);
}

/* Default: read the controller version straight from REG_UFS_VERSION */
static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_ufs_hci_version)
		return hba->vops->get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
			bool up, enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->clk_scale_notify)
		return hba->vops->clk_scale_notify(hba, up, status);
	return 0;
}

static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on, status);
	return 0;
}

static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
{
	if (hba->vops && hba->vops->setup_regulators)
		return hba->vops->setup_regulators(hba, status);

	return 0;
}

static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
						bool status)
{
	if (hba->vops && hba->vops->hce_enable_notify)
		return hba->vops->hce_enable_notify(hba, status);

	return 0;
}

static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
						  bool status)
{
	if (hba->vops && hba->vops->link_startup_notify)
		return hba->vops->link_startup_notify(hba, status);

	return 0;
}

/* Returns -ENOTSUPP when the variant driver registered no hook */
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
				  bool status,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	if (hba->vops && hba->vops->pwr_change_notify)
		return hba->vops->pwr_change_notify(hba, status,
					dev_max_params, dev_req_params);

	return -ENOTSUPP;
}

static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
					bool is_scsi_cmd)
{
	if (hba->vops && hba->vops->setup_xfer_req)
		return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
}

static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
					int tag, u8 tm_function)
{
	if (hba->vops && hba->vops->setup_task_mgmt)
		return hba->vops->setup_task_mgmt(hba, tag, tm_function);
}

static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
					enum uic_cmd_dme cmd,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->hibern8_notify)
		return hba->vops->hibern8_notify(hba, cmd, status);
}

static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->apply_dev_quirks)
		return hba->vops->apply_dev_quirks(hba);
	return 0;
}

static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->fixup_dev_quirks)
		hba->vops->fixup_dev_quirks(hba);
}

static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->suspend)
		return hba->vops->suspend(hba, op);

	return 0;
}

static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->resume)
		return hba->vops->resume(hba, op);

	return 0;
}

static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}

/*
 * Reset the UFS device through the variant hook.  On success the device
 * is marked active again and the cached WriteBooster flags are cleared
 * (device-side WB state is presumably lost across a reset — confirm
 * against the WB enable paths in ufshcd.c).  The outcome is recorded in
 * the dev_reset error history unless the hook returned -EOPNOTSUPP.
 */
static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->device_reset) {
		int err = hba->vops->device_reset(hba);

		if (!err) {
			ufshcd_set_ufs_dev_active(hba);
			if (ufshcd_is_wb_allowed(hba)) {
				hba->wb_enabled = false;
				hba->wb_buf_flush_enabled = false;
			}
		}
		if (err != -EOPNOTSUPP)
			ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, err);
	}
}

static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
						    struct devfreq_dev_profile
						    *profile, void *data)
{
	if (hba->vops && hba->vops->config_scaling_param)
		hba->vops->config_scaling_param(hba, profile, data);
}

extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];

/*
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Well-known LUNs additionally get the UPIU W-LUN bit set.
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);

#endif /* End of Header */