Lines matching references to "hba" in the UFS host controller driver (ufshcd). The leading number on each entry is the source line of the match; only the matching line is shown, so function bodies appear here as fragments.
97 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
115 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
222 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
224 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
226 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
227 static void ufshcd_hba_exit(struct ufs_hba *hba);
228 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
229 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
230 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
231 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
232 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
233 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
234 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
235 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
236 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
238 static int ufshcd_change_power_mode(struct ufs_hba *hba,
240 static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
241 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
242 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
243 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
245 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
246 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
247 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
248 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
249 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
250 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
252 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
254 return tag >= 0 && tag < hba->nutrs;
257 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
259 if (!hba->is_irq_enabled) {
260 enable_irq(hba->irq);
261 hba->is_irq_enabled = true;
265 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
267 if (hba->is_irq_enabled) {
268 disable_irq(hba->irq);
269 hba->is_irq_enabled = false;
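The ufshcd_enable_irq()/ufshcd_disable_irq() fragments above guard enable_irq()/disable_irq() behind hba->is_irq_enabled so the calls stay balanced when clocks are gated and ungated. A minimal sketch of that pattern, reassembled from the fragments (closing braces and context added; not guaranteed to match the source byte for byte):

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
        if (!hba->is_irq_enabled) {
                enable_irq(hba->irq);
                hba->is_irq_enabled = true;
        }
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
        if (hba->is_irq_enabled) {
                disable_irq(hba->irq);
                hba->is_irq_enabled = false;
        }
}

Keeping the flag in struct ufs_hba lets the gate/ungate paths toggle the interrupt line without tracking nesting themselves.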
273 static inline void ufshcd_wb_config(struct ufs_hba *hba)
277 if (!ufshcd_is_wb_allowed(hba))
280 ret = ufshcd_wb_ctrl(hba, true);
282 dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
284 dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
285 ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
287 dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
289 if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
290 ufshcd_wb_toggle_flush(hba, true);
293 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
295 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
296 scsi_unblock_requests(hba->host);
299 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
301 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
302 scsi_block_requests(hba->host);
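ufshcd_scsi_block_requests()/ufshcd_scsi_unblock_requests() wrap the SCSI midlayer calls in an atomic reference count so nested blockers do not unblock the host early: only the first blocker calls scsi_block_requests() and only the last release calls scsi_unblock_requests(). A sketch assembled from the fragments, assuming an atomic_t scsi_block_reqs_cnt field as shown:

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
        /* first blocker (count 0 -> 1) actually blocks the host */
        if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
                scsi_block_requests(hba->host);
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
        /* last release (count 1 -> 0) unblocks the host again */
        if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
                scsi_unblock_requests(hba->host);
}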
305 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
308 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
310 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
313 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
316 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
318 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
321 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
324 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
326 trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
330 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
342 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
344 trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
345 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
346 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
347 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
350 static void ufshcd_add_command_trace(struct ufs_hba *hba,
356 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
363 ufshcd_add_cmd_upiu_trace(hba, tag, str);
369 ufshcd_add_cmd_upiu_trace(hba, tag, str);
383 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
384 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
385 trace_ufshcd_command(dev_name(hba->dev), str, tag,
389 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
392 struct list_head *head = &hba->clk_list_head;
400 dev_err(hba->dev, "clk: %s, rate: %u\n",
405 static void ufshcd_print_err_hist(struct ufs_hba *hba,
417 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
423 dev_err(hba->dev, "No record of %s\n", err_name);
426 static void ufshcd_print_host_regs(struct ufs_hba *hba)
428 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
430 ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
431 ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
432 ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
433 ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
434 ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
435 ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
437 ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
438 ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
440 ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
441 ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
443 ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
444 ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
445 ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
447 ufshcd_vops_dbg_register_dump(hba);
451 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
457 for_each_set_bit(tag, &bitmap, hba->nutrs) {
458 lrbp = &hba->lrb[tag];
460 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
462 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
464 dev_err(hba->dev,
470 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
474 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
481 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
484 dev_err(hba->dev,
495 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
499 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
500 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
502 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
507 static void ufshcd_print_host_state(struct ufs_hba *hba)
509 struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
511 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
512 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
513 hba->outstanding_reqs, hba->outstanding_tasks);
514 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
515 hba->saved_err, hba->saved_uic_err);
516 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
517 hba->curr_dev_pwr_mode, hba->uic_link_state);
518 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
519 hba->pm_op_in_progress, hba->is_sys_suspended);
520 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
521 hba->auto_bkops_enabled, hba->host->host_self_blocked);
522 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
523 dev_err(hba->dev,
525 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
526 hba->ufs_stats.hibern8_exit_cnt);
527 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
528 ktime_to_us(hba->ufs_stats.last_intr_ts),
529 hba->ufs_stats.last_intr_status);
530 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
531 hba->eh_flags, hba->req_abort_count);
532 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
533 hba->ufs_version, hba->capabilities, hba->caps);
534 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
535 hba->dev_quirks);
537 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
540 ufshcd_print_clk_freqs(hba);
544 * ufshcd_print_pwr_info - print power params as saved in hba
546 * @hba: per-adapter instance
548 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
560 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
562 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
563 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
564 names[hba->pwr_info.pwr_rx],
565 names[hba->pwr_info.pwr_tx],
566 hba->pwr_info.hs_rate);
583 * @hba: per-adapter interface
593 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
603 while ((ufshcd_readl(hba, reg) & mask) != val) {
606 if ((ufshcd_readl(hba, reg) & mask) != val)
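ufshcd_wait_for_register() polls a host register until the masked value matches, sleeping between reads and re-checking once after the deadline so a match that lands just as the timeout expires is not reported as a failure. A simplified sketch of that loop (the deadline handling is inferred from the two reads shown above and may differ in detail from the source):

int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
                             u32 val, unsigned long interval_us,
                             unsigned long timeout_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
        int err = 0;

        /* ignore bits we are not waiting on */
        val &= mask;

        while ((ufshcd_readl(hba, reg) & mask) != val) {
                usleep_range(interval_us, interval_us + 50);
                if (time_after(jiffies, deadline)) {
                        /* one final read: late success is still success */
                        if ((ufshcd_readl(hba, reg) & mask) != val)
                                err = -ETIMEDOUT;
                        break;
                }
        }

        return err;
}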
617 * @hba: Pointer to adapter instance
621 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
625 switch (hba->ufs_version) {
644 * @hba: Pointer to adapter instance
648 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
650 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
651 return ufshcd_vops_get_ufs_hci_version(hba);
653 return ufshcd_readl(hba, REG_UFS_VERSION);
659 * @hba: pointer to adapter instance
663 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
665 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
683 * @hba: per adapter instance
686 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
688 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
689 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
691 ufshcd_writel(hba, ~(1 << pos),
697 * @hba: per adapter instance
700 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
702 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
703 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
705 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
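Both ufshcd_utrl_clear() and ufshcd_utmrl_clear() encode the same quirk: UFSHCI clears a request-list slot when 0 is written to its bit (hence the ~(1 << pos)), while controllers flagged UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR expect a 1 in the slot being cleared. The transfer-request variant, reassembled from the fragments:

static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
                /* quirky host: write '1' to the slot to clear it */
                ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
        else
                /* spec behaviour: write '0' to the slot, '1' elsewhere */
                ufshcd_writel(hba, ~(1 << pos),
                              REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}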
710 * @hba: per adapter instance
713 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
715 __clear_bit(tag, &hba->outstanding_reqs);
731 * @hba: Pointer to adapter instance
736 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
738 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
744 * @hba: Pointer to adapter instance
749 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
751 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
808 * @hba: per adapter instance
811 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
813 ufshcd_writel(hba, INT_AGGR_ENABLE |
820 * @hba: per adapter instance
825 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
827 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
835 * @hba: per adapter instance
837 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
839 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
846 * @hba: per adapter instance
848 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
850 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
852 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
858 * @hba: per adapter instance
860 static inline void ufshcd_hba_start(struct ufs_hba *hba)
864 if (ufshcd_crypto_enable(hba))
867 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
872 * @hba: per adapter instance
876 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
878 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
882 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
885 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
886 (hba->ufs_version == UFSHCI_VERSION_11))
893 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
904 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
912 * @hba: per adapter instance
918 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
922 struct list_head *head = &hba->clk_list_head;
935 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
940 trace_ufshcd_clk_scaling(dev_name(hba->dev),
953 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
958 trace_ufshcd_clk_scaling(dev_name(hba->dev),
965 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
975 * @hba: per adapter instance
981 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
986 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
990 ret = ufshcd_set_clk_freq(hba, scale_up);
994 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
996 ufshcd_set_clk_freq(hba, !scale_up);
999 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1007 * @hba: per adapter instance
1012 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1016 struct list_head *head = &hba->clk_list_head;
1038 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1048 ufshcd_hold(hba, false);
1049 spin_lock_irqsave(hba->host->host_lock, flags);
1056 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1061 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1062 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1070 spin_unlock_irqrestore(hba->host->host_lock, flags);
1082 spin_lock_irqsave(hba->host->host_lock, flags);
1086 dev_err(hba->dev,
1092 spin_unlock_irqrestore(hba->host->host_lock, flags);
1093 ufshcd_release(hba);
1099 * @hba: per adapter instance
1106 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1113 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1116 memcpy(&new_pwr_info, &hba->pwr_info,
1119 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1120 || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1122 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1123 &hba->pwr_info,
1133 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1135 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1137 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1143 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1151 ufshcd_scsi_block_requests(hba);
1152 down_write(&hba->clk_scaling_lock);
1153 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1155 up_write(&hba->clk_scaling_lock);
1156 ufshcd_scsi_unblock_requests(hba);
1162 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1164 up_write(&hba->clk_scaling_lock);
1165 ufshcd_scsi_unblock_requests(hba);
1170 * @hba: per adapter instance
1177 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1182 ufshcd_hold(hba, false);
1184 ret = ufshcd_clock_scaling_prepare(hba);
1190 ret = ufshcd_scale_gear(hba, false);
1195 ret = ufshcd_scale_clks(hba, scale_up);
1198 ufshcd_scale_gear(hba, true);
1204 ret = ufshcd_scale_gear(hba, true);
1206 ufshcd_scale_clks(hba, false);
1212 up_write(&hba->clk_scaling_lock);
1213 ufshcd_wb_ctrl(hba, scale_up);
1214 down_write(&hba->clk_scaling_lock);
1217 ufshcd_clock_scaling_unprepare(hba);
1219 ufshcd_release(hba);
1225 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1229 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1230 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1231 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1234 hba->clk_scaling.is_suspended = true;
1235 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1237 __ufshcd_suspend_clkscaling(hba);
1242 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1246 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1247 if (!hba->clk_scaling.is_suspended) {
1248 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1251 hba->clk_scaling.is_suspended = false;
1252 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1254 devfreq_resume_device(hba->devfreq);
1261 struct ufs_hba *hba = dev_get_drvdata(dev);
1264 struct list_head *clk_list = &hba->clk_list_head;
1268 if (!ufshcd_is_clkscaling_supported(hba))
1271 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1274 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1275 if (ufshcd_eh_in_progress(hba)) {
1276 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1280 if (!hba->clk_scaling.active_reqs)
1284 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1293 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1294 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1298 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1300 pm_runtime_get_noresume(hba->dev);
1301 if (!pm_runtime_active(hba->dev)) {
1302 pm_runtime_put_noidle(hba->dev);
1307 ret = ufshcd_devfreq_scale(hba, scale_up);
1308 pm_runtime_put(hba->dev);
1310 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1316 queue_work(hba->clk_scaling.workq,
1317 &hba->clk_scaling.suspend_work);
1332 static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1334 struct request_queue *q = hba->cmd_queue;
1344 struct ufs_hba *hba = dev_get_drvdata(dev);
1345 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1347 struct list_head *clk_list = &hba->clk_list_head;
1351 if (!ufshcd_is_clkscaling_supported(hba))
1356 spin_lock_irqsave(hba->host->host_lock, flags);
1378 if (hba->outstanding_reqs) {
1385 spin_unlock_irqrestore(hba->host->host_lock, flags);
1389 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1391 struct list_head *clk_list = &hba->clk_list_head;
1401 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1402 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1404 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1405 &hba->vps->ondemand_data);
1406 devfreq = devfreq_add_device(hba->dev,
1407 &hba->vps->devfreq_profile,
1409 &hba->vps->ondemand_data);
1412 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1414 dev_pm_opp_remove(hba->dev, clki->min_freq);
1415 dev_pm_opp_remove(hba->dev, clki->max_freq);
1419 hba->devfreq = devfreq;
1424 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1426 struct list_head *clk_list = &hba->clk_list_head;
1429 if (!hba->devfreq)
1432 devfreq_remove_device(hba->devfreq);
1433 hba->devfreq = NULL;
1436 dev_pm_opp_remove(hba->dev, clki->min_freq);
1437 dev_pm_opp_remove(hba->dev, clki->max_freq);
1440 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1444 devfreq_suspend_device(hba->devfreq);
1445 spin_lock_irqsave(hba->host->host_lock, flags);
1446 hba->clk_scaling.window_start_t = 0;
1447 spin_unlock_irqrestore(hba->host->host_lock, flags);
1450 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1455 if (!ufshcd_is_clkscaling_supported(hba))
1458 spin_lock_irqsave(hba->host->host_lock, flags);
1459 if (!hba->clk_scaling.is_suspended) {
1461 hba->clk_scaling.is_suspended = true;
1463 spin_unlock_irqrestore(hba->host->host_lock, flags);
1466 __ufshcd_suspend_clkscaling(hba);
1469 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1474 if (!ufshcd_is_clkscaling_supported(hba))
1477 spin_lock_irqsave(hba->host->host_lock, flags);
1478 if (hba->clk_scaling.is_suspended) {
1480 hba->clk_scaling.is_suspended = false;
1482 spin_unlock_irqrestore(hba->host->host_lock, flags);
1485 devfreq_resume_device(hba->devfreq);
1491 struct ufs_hba *hba = dev_get_drvdata(dev);
1493 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1499 struct ufs_hba *hba = dev_get_drvdata(dev);
1507 if (value == hba->clk_scaling.is_allowed)
1510 pm_runtime_get_sync(hba->dev);
1511 ufshcd_hold(hba, false);
1513 cancel_work_sync(&hba->clk_scaling.suspend_work);
1514 cancel_work_sync(&hba->clk_scaling.resume_work);
1516 hba->clk_scaling.is_allowed = value;
1519 ufshcd_resume_clkscaling(hba);
1521 ufshcd_suspend_clkscaling(hba);
1522 err = ufshcd_devfreq_scale(hba, true);
1524 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1528 ufshcd_release(hba);
1529 pm_runtime_put_sync(hba->dev);
1534 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1536 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1537 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1538 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1539 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1540 hba->clk_scaling.enable_attr.attr.mode = 0644;
1541 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1542 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1549 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1552 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1554 spin_lock_irqsave(hba->host->host_lock, flags);
1555 if (hba->clk_gating.state == CLKS_ON) {
1556 spin_unlock_irqrestore(hba->host->host_lock, flags);
1560 spin_unlock_irqrestore(hba->host->host_lock, flags);
1561 ufshcd_setup_clocks(hba, true);
1563 ufshcd_enable_irq(hba);
1566 if (ufshcd_can_hibern8_during_gating(hba)) {
1568 hba->clk_gating.is_suspended = true;
1569 if (ufshcd_is_link_hibern8(hba)) {
1570 ret = ufshcd_uic_hibern8_exit(hba);
1572 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1575 ufshcd_set_link_active(hba);
1577 hba->clk_gating.is_suspended = false;
1580 ufshcd_scsi_unblock_requests(hba);
1586 * @hba: per adapter instance
1589 int ufshcd_hold(struct ufs_hba *hba, bool async)
1595 if (!ufshcd_is_clkgating_allowed(hba))
1597 spin_lock_irqsave(hba->host->host_lock, flags);
1598 hba->clk_gating.active_reqs++;
1601 switch (hba->clk_gating.state) {
1611 if (ufshcd_can_hibern8_during_gating(hba) &&
1612 ufshcd_is_link_hibern8(hba)) {
1615 hba->clk_gating.active_reqs--;
1618 spin_unlock_irqrestore(hba->host->host_lock, flags);
1619 flush_result = flush_work(&hba->clk_gating.ungate_work);
1620 if (hba->clk_gating.is_suspended && !flush_result)
1622 spin_lock_irqsave(hba->host->host_lock, flags);
1627 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1628 hba->clk_gating.state = CLKS_ON;
1629 trace_ufshcd_clk_gating(dev_name(hba->dev),
1630 hba->clk_gating.state);
1640 hba->clk_gating.state = REQ_CLKS_ON;
1641 trace_ufshcd_clk_gating(dev_name(hba->dev),
1642 hba->clk_gating.state);
1643 if (queue_work(hba->clk_gating.clk_gating_workq,
1644 &hba->clk_gating.ungate_work))
1645 ufshcd_scsi_block_requests(hba);
1654 hba->clk_gating.active_reqs--;
1658 spin_unlock_irqrestore(hba->host->host_lock, flags);
1659 flush_work(&hba->clk_gating.ungate_work);
1661 spin_lock_irqsave(hba->host->host_lock, flags);
1664 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1665 __func__, hba->clk_gating.state);
1668 spin_unlock_irqrestore(hba->host->host_lock, flags);
1676 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1681 spin_lock_irqsave(hba->host->host_lock, flags);
1688 if (hba->clk_gating.is_suspended ||
1689 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1690 hba->clk_gating.state = CLKS_ON;
1691 trace_ufshcd_clk_gating(dev_name(hba->dev),
1692 hba->clk_gating.state);
1696 if (hba->clk_gating.active_reqs
1697 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1698 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1699 || hba->active_uic_cmd || hba->uic_async_done)
1702 spin_unlock_irqrestore(hba->host->host_lock, flags);
1705 if (ufshcd_can_hibern8_during_gating(hba)) {
1706 ret = ufshcd_uic_hibern8_enter(hba);
1708 hba->clk_gating.state = CLKS_ON;
1709 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1711 trace_ufshcd_clk_gating(dev_name(hba->dev),
1712 hba->clk_gating.state);
1715 ufshcd_set_link_hibern8(hba);
1718 ufshcd_disable_irq(hba);
1720 ufshcd_setup_clocks(hba, false);
1731 spin_lock_irqsave(hba->host->host_lock, flags);
1732 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1733 hba->clk_gating.state = CLKS_OFF;
1734 trace_ufshcd_clk_gating(dev_name(hba->dev),
1735 hba->clk_gating.state);
1738 spin_unlock_irqrestore(hba->host->host_lock, flags);
1744 static void __ufshcd_release(struct ufs_hba *hba)
1746 if (!ufshcd_is_clkgating_allowed(hba))
1749 hba->clk_gating.active_reqs--;
1751 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1752 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1753 hba->outstanding_tasks ||
1754 hba->active_uic_cmd || hba->uic_async_done ||
1755 hba->clk_gating.state == CLKS_OFF)
1758 hba->clk_gating.state = REQ_CLKS_OFF;
1759 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1760 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1761 &hba->clk_gating.gate_work,
1762 msecs_to_jiffies(hba->clk_gating.delay_ms));
1765 void ufshcd_release(struct ufs_hba *hba)
1769 spin_lock_irqsave(hba->host->host_lock, flags);
1770 __ufshcd_release(hba);
1771 spin_unlock_irqrestore(hba->host->host_lock, flags);
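__ufshcd_release()/ufshcd_release() form the put side of the clock-gating reference count taken by ufshcd_hold(): the hold count is dropped under host_lock, and only when nothing is outstanding does the state move to REQ_CLKS_OFF and the delayed gate_work get queued after clk_gating.delay_ms. A sketch reassembled from the fragments above (the tracepoint is omitted):

static void __ufshcd_release(struct ufs_hba *hba)
{
        if (!ufshcd_is_clkgating_allowed(hba))
                return;

        hba->clk_gating.active_reqs--;

        /* stay ungated while anything can still touch the host */
        if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
            hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
            hba->outstanding_tasks ||
            hba->active_uic_cmd || hba->uic_async_done ||
            hba->clk_gating.state == CLKS_OFF)
                return;

        hba->clk_gating.state = REQ_CLKS_OFF;
        queue_delayed_work(hba->clk_gating.clk_gating_workq,
                           &hba->clk_gating.gate_work,
                           msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        __ufshcd_release(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
}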
1778 struct ufs_hba *hba = dev_get_drvdata(dev);
1780 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1786 struct ufs_hba *hba = dev_get_drvdata(dev);
1792 spin_lock_irqsave(hba->host->host_lock, flags);
1793 hba->clk_gating.delay_ms = value;
1794 spin_unlock_irqrestore(hba->host->host_lock, flags);
1801 struct ufs_hba *hba = dev_get_drvdata(dev);
1803 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1809 struct ufs_hba *hba = dev_get_drvdata(dev);
1817 if (value == hba->clk_gating.is_enabled)
1821 ufshcd_release(hba);
1823 spin_lock_irqsave(hba->host->host_lock, flags);
1824 hba->clk_gating.active_reqs++;
1825 spin_unlock_irqrestore(hba->host->host_lock, flags);
1828 hba->clk_gating.is_enabled = value;
1833 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1837 if (!ufshcd_is_clkscaling_supported(hba))
1840 INIT_WORK(&hba->clk_scaling.suspend_work,
1842 INIT_WORK(&hba->clk_scaling.resume_work,
1846 hba->host->host_no);
1847 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1849 ufshcd_clkscaling_init_sysfs(hba);
1852 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1854 if (!ufshcd_is_clkscaling_supported(hba))
1857 destroy_workqueue(hba->clk_scaling.workq);
1858 ufshcd_devfreq_remove(hba);
1861 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1865 if (!ufshcd_is_clkgating_allowed(hba))
1868 hba->clk_gating.state = CLKS_ON;
1870 hba->clk_gating.delay_ms = 150;
1871 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1872 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1875 hba->host->host_no);
1876 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1879 hba->clk_gating.is_enabled = true;
1881 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1882 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1883 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1884 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1885 hba->clk_gating.delay_attr.attr.mode = 0644;
1886 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1887 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1889 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1890 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1891 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1892 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1893 hba->clk_gating.enable_attr.attr.mode = 0644;
1894 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1895 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1898 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1900 if (!ufshcd_is_clkgating_allowed(hba))
1902 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1903 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1904 cancel_work_sync(&hba->clk_gating.ungate_work);
1905 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1906 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1910 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1915 if (!ufshcd_is_clkscaling_supported(hba))
1918 if (!hba->clk_scaling.active_reqs++)
1921 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1925 queue_work(hba->clk_scaling.workq,
1926 &hba->clk_scaling.resume_work);
1928 if (!hba->clk_scaling.window_start_t) {
1929 hba->clk_scaling.window_start_t = curr_t;
1930 hba->clk_scaling.tot_busy_t = 0;
1931 hba->clk_scaling.is_busy_started = false;
1934 if (!hba->clk_scaling.is_busy_started) {
1935 hba->clk_scaling.busy_start_t = curr_t;
1936 hba->clk_scaling.is_busy_started = true;
1940 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1942 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1944 if (!ufshcd_is_clkscaling_supported(hba))
1947 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1956 * @hba: per adapter instance
1960 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1962 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
1966 ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
1967 ufshcd_add_command_trace(hba, task_tag, "send");
1968 ufshcd_clk_scaling_start_busy(hba);
1969 __set_bit(task_tag, &hba->outstanding_reqs);
1970 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
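ufshcd_send_command() issues a prepared request by marking its tag in hba->outstanding_reqs and writing that tag's bit to the transfer-request doorbell; the controller clears the doorbell bit when the slot completes. A stripped-down sketch (the timestamp, vops, tracing and clk-scaling calls from the fragments are left out):

void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
        /* record the slot as in flight, then ring its doorbell bit */
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}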
1997 * @hba: per adapter instance
2001 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2003 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2008 if (hba->dev_cmd.query.descriptor &&
2019 hba->dev_cmd.query.request.upiu_req.length);
2021 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2023 dev_warn(hba->dev,
2035 * @hba: per adapter instance
2039 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2043 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2046 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2047 hba->nutmrs =
2048 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2051 err = ufshcd_hba_init_crypto_capabilities(hba);
2053 dev_err(hba->dev, "crypto setup failed\n");
2061 * @hba: per adapter instance
2064 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2066 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2074 * @hba: Pointer to adapter instance
2079 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2081 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2086 * @hba: per adapter instance
2092 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2094 WARN_ON(hba->active_uic_cmd);
2096 hba->active_uic_cmd = uic_cmd;
2099 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2100 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2101 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2103 ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
2106 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2112 * @hba: per adapter instance
2119 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2129 dev_err(hba->dev,
2134 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2140 spin_lock_irqsave(hba->host->host_lock, flags);
2141 hba->active_uic_cmd = NULL;
2142 spin_unlock_irqrestore(hba->host->host_lock, flags);
2149 * @hba: per adapter instance
2158 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2161 if (!ufshcd_ready_for_uic_cmd(hba)) {
2162 dev_err(hba->dev,
2171 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2178 * @hba: per adapter instance
2183 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2188 ufshcd_hold(hba, false);
2189 mutex_lock(&hba->uic_cmd_mutex);
2190 ufshcd_add_delay_before_dme_cmd(hba);
2192 spin_lock_irqsave(hba->host->host_lock, flags);
2193 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2194 spin_unlock_irqrestore(hba->host->host_lock, flags);
2196 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2198 mutex_unlock(&hba->uic_cmd_mutex);
2200 ufshcd_release(hba);
2206 * @hba: per adapter instance
2211 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2226 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2254 * @hba: per adapter instance
2257 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2259 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2261 if (hba->ufs_version == UFSHCI_VERSION_10) {
2269 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2274 * @hba: per adapter instance
2277 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2279 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2281 if (hba->ufs_version == UFSHCI_VERSION_10) {
2291 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
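ufshcd_enable_intr()/ufshcd_disable_intr() are read-modify-write updates of REG_INTERRUPT_ENABLE, with a separate branch for UFSHCI 1.0 controllers whose enable bits have a different read/write layout. A sketch of the common (post-1.0) path only; the 1.0 branch is intentionally omitted rather than guessed at:

static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

        set |= intrs;   /* post-1.0 hosts: just OR in the new sources */
        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

        set &= ~intrs;  /* post-1.0 hosts: clear the requested sources */
        ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}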
2379 * @hba: UFS hba
2383 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2387 struct ufs_query *query = &hba->dev_cmd.query;
2435 * @hba: per adapter instance
2438 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2444 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2445 (hba->ufs_version == UFSHCI_VERSION_11))
2451 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2452 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2453 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2464 * @hba: per adapter instance
2467 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2472 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2473 (hba->ufs_version == UFSHCI_VERSION_11))
2500 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2502 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2503 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2504 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2511 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2531 struct ufs_hba *hba;
2536 hba = shost_priv(host);
2539 if (!ufshcd_valid_tag(hba, tag)) {
2540 dev_err(hba->dev,
2546 if (!down_read_trylock(&hba->clk_scaling_lock))
2549 hba->req_abort_count = 0;
2551 err = ufshcd_hold(hba, true);
2556 WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2557 (hba->clk_gating.state != CLKS_ON));
2559 lrbp = &hba->lrb[tag];
2567 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2573 ufshcd_comp_scsi_upiu(hba, lrbp);
2575 err = ufshcd_map_sg(hba, lrbp);
2578 ufshcd_release(hba);
2584 spin_lock_irqsave(hba->host->host_lock, flags);
2585 switch (hba->ufshcd_state) {
2592 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2600 if (hba->pm_op_in_progress) {
2601 hba->force_reset = true;
2613 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2614 __func__, hba->ufshcd_state);
2618 ufshcd_send_command(hba, tag);
2619 spin_unlock_irqrestore(hba->host->host_lock, flags);
2625 spin_unlock_irqrestore(hba->host->host_lock, flags);
2626 ufshcd_release(hba);
2630 up_read(&hba->clk_scaling_lock);
2634 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2644 hba->dev_cmd.type = cmd_type;
2646 return ufshcd_compose_devman_upiu(hba, lrbp);
2650 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2657 spin_lock_irqsave(hba->host->host_lock, flags);
2658 ufshcd_utrl_clear(hba, tag);
2659 spin_unlock_irqrestore(hba->host->host_lock, flags);
2665 err = ufshcd_wait_for_register(hba,
2673 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2675 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2685 * @hba: per adapter instance
2689 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2694 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2699 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2701 dev_err(hba->dev, "%s: unexpected response %x\n",
2706 err = ufshcd_check_query_response(hba, lrbp);
2708 err = ufshcd_copy_query_response(hba, lrbp);
2713 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2718 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2726 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2733 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2738 spin_lock_irqsave(hba->host->host_lock, flags);
2739 hba->dev_cmd.complete = NULL;
2743 err = ufshcd_dev_cmd_completion(hba, lrbp);
2745 spin_unlock_irqrestore(hba->host->host_lock, flags);
2749 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2751 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2757 * field in hba
2759 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2767 * @hba: UFS hba
2772 * it is expected you hold the hba->dev_cmd.lock mutex.
2774 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2777 struct request_queue *q = hba->cmd_queue;
2785 down_read(&hba->clk_scaling_lock);
2798 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2804 lrbp = &hba->lrb[tag];
2806 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2810 hba->dev_cmd.complete = &wait;
2812 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2815 spin_lock_irqsave(hba->host->host_lock, flags);
2816 ufshcd_send_command(hba, tag);
2817 spin_unlock_irqrestore(hba->host->host_lock, flags);
2819 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2821 ufshcd_add_query_upiu_trace(hba, tag,
2827 up_read(&hba->clk_scaling_lock);
2833 * @hba: per-adapter instance
2841 static inline void ufshcd_init_query(struct ufs_hba *hba,
2845 *request = &hba->dev_cmd.query.request;
2846 *response = &hba->dev_cmd.query.response;
2855 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2862 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
2864 dev_dbg(hba->dev,
2872 dev_err(hba->dev,
2880 * @hba: per-adapter instance
2888 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2896 BUG_ON(!hba);
2898 ufshcd_hold(hba, false);
2899 mutex_lock(&hba->dev_cmd.lock);
2900 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2913 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2920 dev_err(hba->dev,
2927 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2930 dev_err(hba->dev,
2941 mutex_unlock(&hba->dev_cmd.lock);
2942 ufshcd_release(hba);
2948 * @hba: per-adapter instance
2957 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2964 BUG_ON(!hba);
2966 ufshcd_hold(hba, false);
2968 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2974 mutex_lock(&hba->dev_cmd.lock);
2975 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2987 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2993 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2996 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3004 mutex_unlock(&hba->dev_cmd.lock);
3006 ufshcd_release(hba);
3013 * @hba: per-adapter instance
3023 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3031 ret = ufshcd_query_attr(hba, opcode, idn, index,
3034 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3041 dev_err(hba->dev,
3047 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3055 BUG_ON(!hba);
3057 ufshcd_hold(hba, false);
3059 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3066 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3072 mutex_lock(&hba->dev_cmd.lock);
3073 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3075 hba->dev_cmd.query.descriptor = desc_buf;
3086 dev_err(hba->dev,
3093 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3096 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3104 hba->dev_cmd.query.descriptor = NULL;
3105 mutex_unlock(&hba->dev_cmd.lock);
3107 ufshcd_release(hba);
3113 * @hba: per-adapter instance
3125 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3135 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3146 * @hba: Pointer to adapter instance
3150 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3157 *desc_len = hba->desc_size[desc_id];
3161 static void ufshcd_update_desc_length(struct ufs_hba *hba,
3165 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3172 hba->desc_size[desc_id] = desc_len;
3177 * @hba: Pointer to adapter instance
3186 int ufshcd_read_desc_param(struct ufs_hba *hba,
3203 ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3205 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3210 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3226 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3231 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3238 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3246 ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
3283 * @hba: pointer to adapter instance
3295 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3309 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3312 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3319 dev_dbg(hba->dev, "String Desc is of zero length\n");
3366 * @hba: Pointer to adapter instance
3374 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3384 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
3387 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3391 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3396 if (hba->dev_info.wspecversion >= 0x300) {
3397 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3401 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3406 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3410 hba->dev_info.clk_gating_wait_us = gating_wait;
3418 * @hba: per adapter instance
3429 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3434 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3435 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3437 &hba->ucdl_dma_addr,
3442 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3443 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3446 if (!hba->ucdl_base_addr ||
3447 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3448 dev_err(hba->dev,
3457 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3458 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3460 &hba->utrdl_dma_addr,
3462 if (!hba->utrdl_base_addr ||
3463 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3464 dev_err(hba->dev,
3473 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3474 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3476 &hba->utmrdl_dma_addr,
3478 if (!hba->utmrdl_base_addr ||
3479 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3480 dev_err(hba->dev,
3486 hba->lrb = devm_kcalloc(hba->dev,
3487 hba->nutrs, sizeof(struct ufshcd_lrb),
3489 if (!hba->lrb) {
3490 dev_err(hba->dev, "LRB Memory allocation failed\n");
3501 * @hba: per adapter instance
3511 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3521 utrdlp = hba->utrdl_base_addr;
3529 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3531 for (i = 0; i < hba->nutrs; i++) {
3541 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3557 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3563 * @hba: per adapter instance
3572 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3579 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3581 dev_dbg(hba->dev,
3587 * @hba: per adapter instance
3594 static int ufshcd_dme_reset(struct ufs_hba *hba)
3601 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3603 dev_err(hba->dev,
3611 * @hba: per adapter instance
3617 static int ufshcd_dme_enable(struct ufs_hba *hba)
3624 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3626 dev_err(hba->dev,
3632 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3637 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3644 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3650 hba->last_dme_cmd_tstamp));
3665 * @hba: per adapter instance
3673 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3693 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3695 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3700 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3710 * @hba: per adapter instance
3717 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3732 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3733 orig_pwr_info = hba->pwr_info;
3748 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3760 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3762 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3767 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3774 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3776 ufshcd_change_power_mode(hba, &orig_pwr_info);
3786 * @hba: per adapter instance
3798 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3806 mutex_lock(&hba->uic_cmd_mutex);
3808 ufshcd_add_delay_before_dme_cmd(hba);
3810 spin_lock_irqsave(hba->host->host_lock, flags);
3811 if (ufshcd_is_link_broken(hba)) {
3815 hba->uic_async_done = &uic_async_done;
3816 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3817 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3825 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3826 spin_unlock_irqrestore(hba->host->host_lock, flags);
3828 dev_err(hba->dev,
3834 if (!wait_for_completion_timeout(hba->uic_async_done,
3836 dev_err(hba->dev,
3841 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
3851 status = ufshcd_get_upmcrs(hba);
3853 dev_err(hba->dev,
3860 ufshcd_print_host_state(hba);
3861 ufshcd_print_pwr_info(hba);
3862 ufshcd_print_host_regs(hba);
3865 spin_lock_irqsave(hba->host->host_lock, flags);
3866 hba->active_uic_cmd = NULL;
3867 hba->uic_async_done = NULL;
3869 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3871 ufshcd_set_link_broken(hba);
3872 ufshcd_schedule_eh_work(hba);
3875 spin_unlock_irqrestore(hba->host->host_lock, flags);
3876 mutex_unlock(&hba->uic_cmd_mutex);
3884 * @hba: per adapter instance
3889 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3894 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3895 ret = ufshcd_dme_set(hba,
3898 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3907 ufshcd_hold(hba, false);
3908 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3909 ufshcd_release(hba);
3915 int ufshcd_link_recovery(struct ufs_hba *hba)
3920 spin_lock_irqsave(hba->host->host_lock, flags);
3921 hba->ufshcd_state = UFSHCD_STATE_RESET;
3922 ufshcd_set_eh_in_progress(hba);
3923 spin_unlock_irqrestore(hba->host->host_lock, flags);
3926 ufshcd_vops_device_reset(hba);
3928 ret = ufshcd_host_reset_and_restore(hba);
3930 spin_lock_irqsave(hba->host->host_lock, flags);
3932 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3933 ufshcd_clear_eh_in_progress(hba);
3934 spin_unlock_irqrestore(hba->host->host_lock, flags);
3937 dev_err(hba->dev, "%s: link recovery failed, err %d",
3944 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3950 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3953 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3954 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3958 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3961 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3967 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3973 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3976 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3977 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3981 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3984 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3986 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3987 hba->ufs_stats.hibern8_exit_cnt++;
3994 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
3999 if (!ufshcd_is_auto_hibern8_supported(hba))
4002 spin_lock_irqsave(hba->host->host_lock, flags);
4003 if (hba->ahit != ahit) {
4004 hba->ahit = ahit;
4007 spin_unlock_irqrestore(hba->host->host_lock, flags);
4009 if (update && !pm_runtime_suspended(hba->dev)) {
4010 pm_runtime_get_sync(hba->dev);
4011 ufshcd_hold(hba, false);
4012 ufshcd_auto_hibern8_enable(hba);
4013 ufshcd_release(hba);
4014 pm_runtime_put(hba->dev);
4019 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4023 if (!ufshcd_is_auto_hibern8_supported(hba))
4026 spin_lock_irqsave(hba->host->host_lock, flags);
4027 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4028 spin_unlock_irqrestore(hba->host->host_lock, flags);
4033 * values in hba power info
4034 * @hba: per-adapter instance
4036 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4038 hba->pwr_info.gear_rx = UFS_PWM_G1;
4039 hba->pwr_info.gear_tx = UFS_PWM_G1;
4040 hba->pwr_info.lane_rx = 1;
4041 hba->pwr_info.lane_tx = 1;
4042 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4043 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4044 hba->pwr_info.hs_rate = 0;
4049 * @hba: per-adapter instance
4051 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4053 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4055 if (hba->max_pwr_info.is_valid)
4063 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4065 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4069 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4081 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4083 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4086 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4093 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4096 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4099 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4106 hba->max_pwr_info.is_valid = true;
4110 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4116 if (!hba->force_pmc &&
4117 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4118 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4119 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4120 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4121 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4122 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4123 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4124 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4134 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4135 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4139 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4141 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4143 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4144 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4148 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4150 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4156 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4159 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4160 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4162 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4164 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4166 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4168 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4170 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4173 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4175 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4177 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4181 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4185 dev_err(hba->dev,
4188 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4191 memcpy(&hba->pwr_info, pwr_mode,
4200 * @hba: per-adapter instance
4203 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4209 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4215 ret = ufshcd_change_power_mode(hba, &final_params);
4223 * @hba: per-adapter instance
4227 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4233 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4236 dev_err(hba->dev,
4245 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4253 dev_err(hba->dev,
4257 dev_err(hba->dev,
4268 * @hba: per adapter instance
4278 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4284 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4287 if (ufshcd_is_intr_aggr_allowed(hba))
4288 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4290 ufshcd_disable_intr_aggr(hba);
4293 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4295 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4297 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4299 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4311 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4313 ufshcd_enable_run_stop_reg(hba);
4315 dev_err(hba->dev,
4326 * @hba: per adapter instance
4328 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
4337 spin_lock_irqsave(hba->host->host_lock, flags);
4338 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4339 spin_unlock_irqrestore(hba->host->host_lock, flags);
4341 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4345 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4350 * @hba: per adapter instance
4358 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4362 if (!ufshcd_is_hba_active(hba))
4364 ufshcd_hba_stop(hba);
4367 ufshcd_set_link_off(hba);
4369 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4372 ufshcd_hba_start(hba);
4384 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4388 while (ufshcd_is_hba_active(hba)) {
4392 dev_err(hba->dev,
4400 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4402 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4407 int ufshcd_hba_enable(struct ufs_hba *hba)
4411 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4412 ufshcd_set_link_off(hba);
4413 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4416 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4417 ret = ufshcd_dme_reset(hba);
4419 ret = ufshcd_dme_enable(hba);
4421 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4423 dev_err(hba->dev,
4427 ret = ufshcd_hba_execute_hce(hba);
4434 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4439 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4442 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4446 err = ufshcd_dme_set(hba,
4451 err = ufshcd_dme_peer_set(hba,
4456 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4465 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4467 return ufshcd_disable_tx_lcc(hba, true);
4481 * @hba: per adapter instance
4485 static int ufshcd_link_startup(struct ufs_hba *hba)
4495 if (!ufshcd_is_ufs_dev_active(hba))
4500 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4502 ret = ufshcd_dme_link_startup(hba);
4505 if (!ret && !ufshcd_is_device_present(hba)) {
4506 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4508 dev_err(hba->dev, "%s: Device not present\n", __func__);
4518 if (ret && ufshcd_hba_enable(hba)) {
4519 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4527 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4539 ufshcd_init_pwr_info(hba);
4540 ufshcd_print_pwr_info(hba);
4542 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4543 ret = ufshcd_disable_device_tx_lcc(hba);
4549 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4554 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4555 ret = ufshcd_make_hba_operational(hba);
4558 dev_err(hba->dev, "link startup failed %d\n", ret);
4559 ufshcd_print_host_state(hba);
4560 ufshcd_print_pwr_info(hba);
4561 ufshcd_print_host_regs(hba);
4568 * @hba: per-adapter instance
4576 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4581 ufshcd_hold(hba, false);
4582 mutex_lock(&hba->dev_cmd.lock);
4584 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4590 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4592 mutex_unlock(&hba->dev_cmd.lock);
4593 ufshcd_release(hba);
4596 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4613 struct ufs_hba *hba;
4615 hba = shost_priv(sdev->host);
4617 lun_qdepth = hba->nutrs;
4618 ret = ufshcd_read_unit_desc_param(hba,
4629 lun_qdepth = hba->nutrs;
4631 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4633 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4640 * @hba: per-adapter instance
4649 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4662 else if (lun >= hba->dev_info.max_lu_supported)
4665 ret = ufshcd_read_unit_desc_param(hba,
4676 * @hba: per-adapter instance
4680 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4683 if (hba->dev_info.f_power_on_wp_en &&
4684 !hba->dev_info.is_lu_power_on_wp) {
4687 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4690 hba->dev_info.is_lu_power_on_wp = true;
4702 struct ufs_hba *hba;
4704 hba = shost_priv(sdev->host);
4723 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4737 struct ufs_hba *hba = shost_priv(sdev->host);
4739 if (depth > hba->nutrs)
4740 depth = hba->nutrs;
4750 struct ufs_hba *hba = shost_priv(sdev->host);
4754 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
4757 if (ufshcd_is_rpm_autosuspend_allowed(hba))
4760 ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
4771 struct ufs_hba *hba;
4773 hba = shost_priv(sdev->host);
4778 spin_lock_irqsave(hba->host->host_lock, flags);
4779 hba->sdev_ufs_device = NULL;
4780 spin_unlock_irqrestore(hba->host->host_lock, flags);
4821 * @hba: per adapter instance
4827 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4836 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
4845 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4873 if (!hba->pm_op_in_progress &&
4875 schedule_work(&hba->eeh_work)) {
4881 pm_runtime_get_noresume(hba->dev);
4887 dev_err(hba->dev,
4891 dev_err(hba->dev,
4915 dev_err(hba->dev,
4918 ufshcd_print_host_regs(hba);
4919 ufshcd_print_host_state(hba);
4924 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
4925 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4931 * @hba: per adapter instance
4938 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4942 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4943 hba->active_uic_cmd->argument2 |=
4944 ufshcd_get_uic_cmd_result(hba);
4945 hba->active_uic_cmd->argument3 =
4946 ufshcd_get_dme_attr_val(hba);
4947 if (!hba->uic_async_done)
4948 hba->active_uic_cmd->cmd_active = 0;
4949 complete(&hba->active_uic_cmd->done);
4953 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
4954 hba->active_uic_cmd->cmd_active = 0;
4955 complete(hba->uic_async_done);
4960 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
4967 * @hba: per adapter instance
4970 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4978 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4979 lrbp = &hba->lrb[index];
4983 ufshcd_add_command_trace(hba, index, "complete");
4984 result = ufshcd_transfer_rsp_status(hba, lrbp);
4991 __ufshcd_release(hba);
4994 if (hba->dev_cmd.complete) {
4995 ufshcd_add_command_trace(hba, index,
4997 complete(hba->dev_cmd.complete);
5000 if (ufshcd_is_clkscaling_supported(hba))
5001 hba->clk_scaling.active_reqs--;
5005 hba->outstanding_reqs ^= completed_reqs;
5007 ufshcd_clk_scaling_update_busy(hba);
5012 * @hba: per adapter instance
5018 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5030 if (ufshcd_is_intr_aggr_allowed(hba) &&
5031 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5032 ufshcd_reset_intr_aggr(hba);
5034 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5035 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5038 __ufshcd_transfer_req_compl(hba, completed_reqs);
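The completion math above can be modeled in isolation: a userspace sketch (illustrative, not kernel code) of how XOR-ing the hardware doorbell against the driver's outstanding mask yields exactly the finished requests.

#include <stdint.h>
#include <stdio.h>

/* Invariant in the driver: the doorbell bits are a subset of the outstanding
 * mask, so the XOR below is exactly the set of requests that completed. */
static uint32_t reap_completions(uint32_t doorbell, uint32_t *outstanding)
{
    uint32_t completed = doorbell ^ *outstanding;

    *outstanding ^= completed;  /* same as clearing the completed bits */
    return completed;
}

int main(void)
{
    uint32_t outstanding = 0x0000000f;  /* tags 0-3 were issued       */
    uint32_t doorbell    = 0x00000005;  /* tags 0 and 2 still pending */

    printf("completed:   0x%x\n", reap_completions(doorbell, &outstanding));
    printf("outstanding: 0x%x\n", outstanding);     /* 0xa, then 0x5 */
    return 0;
}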
5047 * @hba: per-adapter instance
5055 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5060 if (!(hba->ee_ctrl_mask & mask))
5063 val = hba->ee_ctrl_mask & ~mask;
5065 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5068 hba->ee_ctrl_mask &= ~mask;
5075 * @hba: per-adapter instance
5083 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5088 if (hba->ee_ctrl_mask & mask)
5091 val = hba->ee_ctrl_mask | mask;
5093 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5096 hba->ee_ctrl_mask |= mask;
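A compilable sketch of the cached exception-event mask handling seen in the two helpers above; the struct and the write_attr callback are stand-ins, not the driver's query API.

#include <stdint.h>
#include <stdbool.h>

struct ee_ctrl {
    uint16_t cached_mask;               /* driver copy of the control mask */
    int (*write_attr)(uint16_t val);    /* stand-in for the query path     */
};

/* Compute the new mask, skip the write when nothing changes, and update the
 * cached copy only after the device accepted the new value. */
static int ee_update(struct ee_ctrl *ee, uint16_t mask, bool enable)
{
    uint16_t val = enable ? (uint16_t)(ee->cached_mask | mask)
                          : (uint16_t)(ee->cached_mask & ~mask);
    int err;

    if (val == ee->cached_mask)
        return 0;
    err = ee->write_attr(val);
    if (!err)
        ee->cached_mask = val;
    return err;
}

static int fake_write_attr(uint16_t val) { (void)val; return 0; }

int main(void)
{
    struct ee_ctrl ee = { .cached_mask = 0, .write_attr = fake_write_attr };

    ee_update(&ee, 1 << 2, true);   /* enable one event source */
    ee_update(&ee, 1 << 2, false);  /* and disable it again    */
    return 0;
}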
5103 * @hba: per-adapter instance
5112 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5116 if (hba->auto_bkops_enabled)
5119 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5122 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5127 hba->auto_bkops_enabled = true;
5128 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5131 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5133 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5141 * @hba: per-adapter instance
5151 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5155 if (!hba->auto_bkops_enabled)
5162 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5164 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5169 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5172 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5174 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5178 hba->auto_bkops_enabled = false;
5179 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5180 hba->is_urgent_bkops_lvl_checked = false;
5187 * @hba: per adapter instance
5194 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5196 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5197 hba->auto_bkops_enabled = false;
5198 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5199 ufshcd_enable_auto_bkops(hba);
5201 hba->auto_bkops_enabled = true;
5202 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5203 ufshcd_disable_auto_bkops(hba);
5205 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5206 hba->is_urgent_bkops_lvl_checked = false;
5209 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5211 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5217 * @hba: per-adapter instance
5227 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5231 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5237 err = ufshcd_get_bkops_status(hba, &curr_status);
5239 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5243 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5250 err = ufshcd_enable_auto_bkops(hba);
5252 err = ufshcd_disable_auto_bkops(hba);
5259 * @hba: per-adapter instance
5267 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5269 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
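A tiny model of the decision made by the BKOPS control helper above; the enum follows the bBackgroundOpStatus urgency levels, but the helper itself is illustrative.

#include <stdbool.h>

enum bkops_status {         /* device-reported urgency, lowest to highest */
    BKOPS_STATUS_NO_OP = 0,
    BKOPS_STATUS_NON_CRITICAL,
    BKOPS_STATUS_PERF_IMPACT,
    BKOPS_STATUS_CRITICAL,
};

/* Auto-BKOPS is turned on once the device's status reaches the requested
 * threshold; below it the host keeps background operations disabled. */
bool bkops_should_enable(enum bkops_status curr, enum bkops_status threshold)
{
    return curr >= threshold;
}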
5272 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5274 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5278 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5283 if (hba->is_urgent_bkops_lvl_checked)
5286 err = ufshcd_get_bkops_status(hba, &curr_status);
5288 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5300 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5303 hba->urgent_bkops_lvl = curr_status;
5304 hba->is_urgent_bkops_lvl_checked = true;
5308 err = ufshcd_enable_auto_bkops(hba);
5311 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5315 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5321 if (!ufshcd_is_wb_allowed(hba))
5324 if (!(enable ^ hba->wb_enabled))
5331 index = ufshcd_wb_get_query_index(hba);
5332 ret = ufshcd_query_flag_retry(hba, opcode,
5335 dev_err(hba->dev, "%s write booster %s failed %d\n",
5340 hba->wb_enabled = enable;
5341 dev_dbg(hba->dev, "%s write booster %s %d\n",
5347 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5357 index = ufshcd_wb_get_query_index(hba);
5358 return ufshcd_query_flag_retry(hba, val,
5363 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5366 ufshcd_wb_buf_flush_enable(hba);
5368 ufshcd_wb_buf_flush_disable(hba);
5372 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5377 if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
5380 index = ufshcd_wb_get_query_index(hba);
5381 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5385 dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5388 hba->wb_buf_flush_enabled = true;
5390 dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5394 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5399 if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
5402 index = ufshcd_wb_get_query_index(hba);
5403 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5407 dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5410 hba->wb_buf_flush_enabled = false;
5411 dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5417 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5424 index = ufshcd_wb_get_query_index(hba);
5425 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5429 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5435 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5440 if (avail_buf < hba->vps->wb_flush_threshold)
5446 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5452 if (!ufshcd_is_wb_allowed(hba))
5465 index = ufshcd_wb_get_query_index(hba);
5466 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5470 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5475 if (!hba->dev_info.b_presrv_uspc_en) {
5481 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
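A standalone sketch of the flush decision above; the 10%-unit convention comes from the WriteBooster buffer-size attributes, while the function name and the threshold used in main() are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* The device reports the available WB buffer in units of 10% of the total;
 * flushing is considered necessary once it drops below the chosen threshold. */
static bool wb_need_flush(unsigned int avail_tenths, unsigned int threshold_tenths)
{
    return avail_tenths < threshold_tenths;
}

int main(void)
{
    printf("%d\n", wb_need_flush(1, 4));    /* ~10% left -> flush (1)    */
    printf("%d\n", wb_need_flush(7, 4));    /* ~70% left -> no flush (0) */
    return 0;
}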
5486 struct ufs_hba *hba = container_of(to_delayed_work(work),
5495 pm_runtime_get_sync(hba->dev);
5496 pm_runtime_put_sync(hba->dev);
5508 struct ufs_hba *hba;
5511 hba = container_of(work, struct ufs_hba, eeh_work);
5513 pm_runtime_get_sync(hba->dev);
5514 ufshcd_scsi_block_requests(hba);
5515 err = ufshcd_get_ee_status(hba, &status);
5517 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5522 status &= hba->ee_ctrl_mask;
5525 ufshcd_bkops_exception_event_handler(hba);
5528 ufshcd_scsi_unblock_requests(hba);
5535 pm_runtime_put_noidle(hba->dev);
5536 pm_runtime_put(hba->dev);
5541 static void ufshcd_complete_requests(struct ufs_hba *hba)
5543 ufshcd_transfer_req_compl(hba);
5544 ufshcd_tmc_handler(hba);
5550 * @hba: per-adapter instance
5554 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5559 spin_lock_irqsave(hba->host->host_lock, flags);
5564 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5567 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5568 ((hba->saved_err & UIC_ERROR) &&
5569 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5572 if ((hba->saved_err & UIC_ERROR) &&
5573 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5578 spin_unlock_irqrestore(hba->host->host_lock, flags);
5580 spin_lock_irqsave(hba->host->host_lock, flags);
5586 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5587 ((hba->saved_err & UIC_ERROR) &&
5588 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5598 spin_unlock_irqrestore(hba->host->host_lock, flags);
5599 err = ufshcd_verify_dev_init(hba);
5600 spin_lock_irqsave(hba->host->host_lock, flags);
5606 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5607 hba->saved_err &= ~UIC_ERROR;
5609 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5610 if (!hba->saved_uic_err)
5614 spin_unlock_irqrestore(hba->host->host_lock, flags);
5619 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5621 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5622 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5626 static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
5629 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5630 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5631 ufshcd_is_saved_err_fatal(hba))
5632 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5634 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5635 queue_work(hba->eh_wq, &hba->eh_work);
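A minimal sketch of the scheduling decision made just above: forced reset, a broken link, or fatal saved errors select the fatal EH state, anything else the non-fatal one. The enum names are illustrative stand-ins.

#include <stdbool.h>

enum host_state {
    STATE_OPERATIONAL,
    STATE_EH_SCHEDULED_NON_FATAL,
    STATE_EH_SCHEDULED_FATAL,
    STATE_ERROR,
};

enum host_state schedule_eh_state(enum host_state cur, bool force_reset,
                                  bool link_broken, bool saved_err_fatal)
{
    if (cur == STATE_ERROR)
        return cur;     /* already beyond recovery, nothing to queue */
    if (force_reset || link_broken || saved_err_fatal)
        return STATE_EH_SCHEDULED_FATAL;
    return STATE_EH_SCHEDULED_NON_FATAL;
}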
5639 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5641 pm_runtime_get_sync(hba->dev);
5642 if (pm_runtime_suspended(hba->dev)) {
5648 ufshcd_setup_hba_vreg(hba, true);
5649 ufshcd_enable_irq(hba);
5650 ufshcd_setup_vreg(hba, true);
5651 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5652 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5653 ufshcd_hold(hba, false);
5654 if (!ufshcd_is_clkgating_allowed(hba))
5655 ufshcd_setup_clocks(hba, true);
5656 ufshcd_release(hba);
5657 ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
5659 ufshcd_hold(hba, false);
5660 if (hba->clk_scaling.is_allowed) {
5661 cancel_work_sync(&hba->clk_scaling.suspend_work);
5662 cancel_work_sync(&hba->clk_scaling.resume_work);
5663 ufshcd_suspend_clkscaling(hba);
5668 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5670 ufshcd_release(hba);
5671 if (hba->clk_scaling.is_allowed)
5672 ufshcd_resume_clkscaling(hba);
5673 pm_runtime_put(hba->dev);
5676 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
5678 return (hba->ufshcd_state == UFSHCD_STATE_ERROR ||
5679 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
5680 ufshcd_is_link_broken(hba))));
5684 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
5686 struct Scsi_Host *shost = hba->host;
5692 * Set RPM status of hba device to RPM_ACTIVE,
5695 ret = pm_runtime_set_active(hba->dev);
5697 * If hba device had runtime error, we also need to resume those
5698 * scsi devices under hba in case any of them has failed to be
5699 * resumed due to hba runtime resume failure. This is to unblock
5712 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
5717 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
5719 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
5722 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
5739 struct ufs_hba *hba;
5747 hba = container_of(work, struct ufs_hba, eh_work);
5749 spin_lock_irqsave(hba->host->host_lock, flags);
5750 if (ufshcd_err_handling_should_stop(hba)) {
5751 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
5752 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5753 spin_unlock_irqrestore(hba->host->host_lock, flags);
5756 ufshcd_set_eh_in_progress(hba);
5757 spin_unlock_irqrestore(hba->host->host_lock, flags);
5758 ufshcd_err_handling_prepare(hba);
5759 spin_lock_irqsave(hba->host->host_lock, flags);
5760 ufshcd_scsi_block_requests(hba);
5765 if (ufshcd_err_handling_should_stop(hba)) {
5766 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
5767 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5770 hba->ufshcd_state = UFSHCD_STATE_RESET;
5773 ufshcd_complete_requests(hba);
5775 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5778 spin_unlock_irqrestore(hba->host->host_lock, flags);
5780 ret = ufshcd_quirk_dl_nac_errors(hba);
5781 spin_lock_irqsave(hba->host->host_lock, flags);
5782 if (!ret && !hba->force_reset && ufshcd_is_link_active(hba))
5786 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5787 ufshcd_is_saved_err_fatal(hba) ||
5788 ((hba->saved_err & UIC_ERROR) &&
5789 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5793 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
5794 (hba->saved_uic_err &&
5795 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
5796 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
5798 spin_unlock_irqrestore(hba->host->host_lock, flags);
5799 ufshcd_print_host_state(hba);
5800 ufshcd_print_pwr_info(hba);
5801 ufshcd_print_host_regs(hba);
5802 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5803 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
5804 spin_lock_irqsave(hba->host->host_lock, flags);
5819 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
5820 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
5821 if (!hba->saved_uic_err)
5822 hba->saved_err &= ~UIC_ERROR;
5823 spin_unlock_irqrestore(hba->host->host_lock, flags);
5824 if (ufshcd_is_pwr_mode_restore_needed(hba))
5826 spin_lock_irqsave(hba->host->host_lock, flags);
5827 if (!hba->saved_err && !needs_restore)
5831 hba->silence_err_logs = true;
5833 spin_unlock_irqrestore(hba->host->host_lock, flags);
5835 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5836 if (ufshcd_try_to_abort_task(hba, tag)) {
5843 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5844 if (ufshcd_clear_tm_cmd(hba, tag)) {
5851 spin_lock_irqsave(hba->host->host_lock, flags);
5854 ufshcd_complete_requests(hba);
5855 hba->silence_err_logs = false;
5867 spin_unlock_irqrestore(hba->host->host_lock, flags);
5872 down_write(&hba->clk_scaling_lock);
5873 hba->force_pmc = true;
5874 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
5877 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
5880 hba->force_pmc = false;
5881 ufshcd_print_pwr_info(hba);
5882 up_write(&hba->clk_scaling_lock);
5883 spin_lock_irqsave(hba->host->host_lock, flags);
5889 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5898 if (hba->outstanding_reqs == max_doorbells)
5899 __ufshcd_transfer_req_compl(hba,
5900 (1UL << (hba->nutrs - 1)));
5902 hba->force_reset = false;
5903 spin_unlock_irqrestore(hba->host->host_lock, flags);
5904 err = ufshcd_reset_and_restore(hba);
5906 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
5909 ufshcd_recover_pm_error(hba);
5910 spin_lock_irqsave(hba->host->host_lock, flags);
5915 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5916 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5917 if (hba->saved_err || hba->saved_uic_err)
5918 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x\n",
5919 __func__, hba->saved_err, hba->saved_uic_err);
5923 ufshcd_clear_eh_in_progress(hba);
5924 spin_unlock_irqrestore(hba->host->host_lock, flags);
5925 ufshcd_scsi_unblock_requests(hba);
5926 ufshcd_err_handling_unprepare(hba);
5931 * @hba: per-adapter instance
5937 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
5943 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5946 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
5952 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
5959 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
5960 if (hba->uic_async_done && hba->active_uic_cmd)
5961 cmd = hba->active_uic_cmd;
5967 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
5973 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5976 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
5979 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5980 else if (hba->dev_quirks &
5983 hba->uic_error |=
5986 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5992 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5995 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
5996 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6000 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6003 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
6004 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6008 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6011 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
6012 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6016 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6017 __func__, hba->uic_error);
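A compilable model of the aggregation done above: each UniPro layer has its own error-code register whose top bit flags a pending error, and those flags are folded into one summary mask. The struct and bit positions here are simplified stand-ins, not the register map.

#include <stdint.h>

#define UIC_ERR_PA   (1u << 0)  /* PHY adapter layer */
#define UIC_ERR_DL   (1u << 1)  /* data link layer   */
#define UIC_ERR_NL   (1u << 2)  /* network layer     */
#define UIC_ERR_TL   (1u << 3)  /* transport layer   */
#define UIC_ERR_DME  (1u << 4)  /* DME               */

struct uic_err_regs {           /* stand-ins for REG_UIC_ERROR_CODE_* */
    uint32_t pa, dl, nl, tl, dme;
};

uint32_t collect_uic_errors(const struct uic_err_regs *r)
{
    const uint32_t ERR_VALID = 1u << 31;    /* "error reported" flag */
    uint32_t summary = 0;

    if (r->pa & ERR_VALID)  summary |= UIC_ERR_PA;
    if (r->dl & ERR_VALID)  summary |= UIC_ERR_DL;
    if (r->nl & ERR_VALID)  summary |= UIC_ERR_NL;
    if (r->tl & ERR_VALID)  summary |= UIC_ERR_TL;
    if (r->dme & ERR_VALID) summary |= UIC_ERR_DME;
    return summary;
}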
6021 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
6024 if (!ufshcd_is_auto_hibern8_supported(hba) ||
6025 !ufshcd_is_auto_hibern8_enabled(hba))
6031 if (hba->active_uic_cmd &&
6032 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
6033 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
6041 * @hba: per-adapter instance
6047 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
6052 if (hba->errors & INT_FATAL_ERRORS) {
6053 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
6057 if (hba->errors & UIC_ERROR) {
6058 hba->uic_error = 0;
6059 retval = ufshcd_update_uic_error(hba);
6060 if (hba->uic_error)
6064 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6065 dev_err(hba->dev,
6067 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6069 hba->errors, ufshcd_get_upmcrs(hba));
6070 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
6071 hba->errors);
6072 ufshcd_set_link_broken(hba);
6081 hba->saved_err |= hba->errors;
6082 hba->saved_uic_err |= hba->uic_error;
6085 if ((hba->saved_err & (INT_FATAL_ERRORS)) ||
6086 (hba->saved_uic_err &&
6087 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6088 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6089 __func__, hba->saved_err,
6090 hba->saved_uic_err);
6091 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6093 ufshcd_print_pwr_info(hba);
6095 ufshcd_schedule_eh_work(hba);
6109 * @hba: per adapter instance
6115 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6121 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6123 issued = hba->outstanding_tasks & ~pending;
6124 for_each_set_bit(tag, &issued, hba->nutmrs) {
6125 struct request *req = hba->tmf_rqs[tag];
6137 * @hba: per adapter instance
6144 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6148 hba->errors = UFSHCD_ERROR_MASK & intr_status;
6150 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
6151 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
6153 if (hba->errors)
6154 retval |= ufshcd_check_errors(hba);
6157 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6160 retval |= ufshcd_tmc_handler(hba);
6163 retval |= ufshcd_transfer_req_compl(hba);
6181 struct ufs_hba *hba = __hba;
6182 int retries = hba->nutrs;
6184 spin_lock(hba->host->host_lock);
6185 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6186 hba->ufs_stats.last_intr_status = intr_status;
6187 hba->ufs_stats.last_intr_ts = ktime_get();
6190 * There could be max of hba->nutrs reqs in flight and in worst case
6197 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6199 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6201 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6203 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6207 !ufshcd_eh_in_progress(hba)) {
6208 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6211 hba->ufs_stats.last_intr_status,
6213 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6216 spin_unlock(hba->host->host_lock);
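A userspace sketch of the bounded service loop above; the fake register helpers model read-status and write-1-to-clear behaviour and are not the driver's accessors.

#include <stdint.h>
#include <stdbool.h>

struct fake_regs { uint32_t status, enable; };

static uint32_t read_status(struct fake_regs *r) { return r->status; }
static void ack_status(struct fake_regs *r, uint32_t bits) { r->status &= ~bits; }

/* Read, acknowledge and dispatch pending events, re-reading the status in
 * case new work arrived, but never looping more than 'retries' times. */
bool service_interrupts(struct fake_regs *r, int retries,
                        bool (*dispatch)(uint32_t enabled))
{
    bool handled = false;
    uint32_t status = read_status(r);

    while (status && retries--) {
        uint32_t enabled = status & r->enable;

        ack_status(r, status);      /* clear before handling */
        if (enabled)
            handled |= dispatch(enabled);
        status = read_status(r);
    }
    return handled;
}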
6220 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6226 if (!test_bit(tag, &hba->outstanding_tasks))
6229 spin_lock_irqsave(hba->host->host_lock, flags);
6230 ufshcd_utmrl_clear(hba, tag);
6231 spin_unlock_irqrestore(hba->host->host_lock, flags);
6234 err = ufshcd_wait_for_register(hba,
6241 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6244 struct request_queue *q = hba->tmf_queue;
6245 struct Scsi_Host *host = hba->host;
6259 ufshcd_hold(hba, false);
6264 hba->tmf_rqs[req->tag] = req;
6267 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6268 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
6271 __set_bit(task_tag, &hba->outstanding_tasks);
6276 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
6282 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6288 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
6289 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6291 if (ufshcd_clear_tm_cmd(hba, task_tag))
6292 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6297 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
6299 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6302 spin_lock_irqsave(hba->host->host_lock, flags);
6303 hba->tmf_rqs[req->tag] = NULL;
6304 __clear_bit(task_tag, &hba->outstanding_tasks);
6305 spin_unlock_irqrestore(hba->host->host_lock, flags);
6307 ufshcd_release(hba);
6315 * @hba: per adapter instance
6323 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6345 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6351 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6361 * @hba: per-adapter instance
6374 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6376 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6383 struct request_queue *q = hba->cmd_queue;
6392 down_read(&hba->clk_scaling_lock);
6400 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
6403 lrbp = &hba->lrb[tag];
6413 hba->dev_cmd.type = cmd_type;
6415 switch (hba->ufs_version) {
6443 hba->dev_cmd.complete = &wait;
6447 spin_lock_irqsave(hba->host->host_lock, flags);
6448 ufshcd_send_command(hba, tag);
6449 spin_unlock_irqrestore(hba->host->host_lock, flags);
6456 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6469 dev_warn(hba->dev,
6479 up_read(&hba->clk_scaling_lock);
6485 * @hba: per-adapter instance
6498 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6516 ufshcd_hold(hba, false);
6517 mutex_lock(&hba->dev_cmd.lock);
6518 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6521 mutex_unlock(&hba->dev_cmd.lock);
6522 ufshcd_release(hba);
6531 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6537 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6564 struct ufs_hba *hba;
6571 hba = shost_priv(host);
6574 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
6582 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6583 if (hba->lrb[pos].lun == lun) {
6584 err = ufshcd_clear_cmd(hba, pos);
6590 ufshcd_transfer_req_compl(hba);
6594 hba->req_abort_count = 0;
6595 ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
6599 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6605 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6610 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6611 lrbp = &hba->lrb[tag];
6628 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
6630 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6637 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6641 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6649 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6651 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6658 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6662 dev_err(hba->dev,
6676 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6681 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6687 err = ufshcd_clear_cmd(hba, tag);
6689 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6705 struct ufs_hba *hba;
6713 hba = shost_priv(host);
6715 lrbp = &hba->lrb[tag];
6716 if (!ufshcd_valid_tag(hba, tag)) {
6717 dev_err(hba->dev,
6733 ufshcd_hold(hba, false);
6734 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6736 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6737 dev_err(hba->dev,
6739 __func__, tag, hba->outstanding_reqs, reg);
6744 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6753 scsi_print_command(hba->lrb[tag].cmd);
6754 if (!hba->req_abort_count) {
6755 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
6756 ufshcd_print_host_regs(hba);
6757 ufshcd_print_host_state(hba);
6758 ufshcd_print_pwr_info(hba);
6759 ufshcd_print_trs(hba, 1 << tag, true);
6761 ufshcd_print_trs(hba, 1 << tag, false);
6763 hba->req_abort_count++;
6766 dev_err(hba->dev,
6776 err = ufshcd_try_to_abort_task(hba, tag);
6781 __ufshcd_transfer_req_compl(hba, (1UL << tag));
6786 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6787 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6795 ufshcd_release(hba);
6801 * @hba: per-adapter instance
6809 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6818 ufshcd_hba_stop(hba);
6820 spin_lock_irqsave(hba->host->host_lock, flags);
6821 hba->silence_err_logs = true;
6822 ufshcd_complete_requests(hba);
6823 hba->silence_err_logs = false;
6824 spin_unlock_irqrestore(hba->host->host_lock, flags);
6827 ufshcd_set_clk_freq(hba, true);
6829 err = ufshcd_hba_enable(hba);
6834 err = ufshcd_probe_hba(hba, false);
6838 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6839 ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
6845 * @hba: per-adapter instance
6852 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6864 spin_lock_irqsave(hba->host->host_lock, flags);
6865 saved_err = hba->saved_err;
6866 saved_uic_err = hba->saved_uic_err;
6867 hba->saved_err = 0;
6868 hba->saved_uic_err = 0;
6869 spin_unlock_irqrestore(hba->host->host_lock, flags);
6873 ufshcd_vops_device_reset(hba);
6875 err = ufshcd_host_reset_and_restore(hba);
6878 spin_lock_irqsave(hba->host->host_lock, flags);
6883 scsi_report_bus_reset(hba->host, 0);
6885 hba->saved_err |= saved_err;
6886 hba->saved_uic_err |= saved_uic_err;
6888 spin_unlock_irqrestore(hba->host->host_lock, flags);
6903 struct ufs_hba *hba;
6905 hba = shost_priv(cmd->device->host);
6907 spin_lock_irqsave(hba->host->host_lock, flags);
6908 hba->force_reset = true;
6909 ufshcd_schedule_eh_work(hba);
6910 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
6911 spin_unlock_irqrestore(hba->host->host_lock, flags);
6913 flush_work(&hba->eh_work);
6915 spin_lock_irqsave(hba->host->host_lock, flags);
6916 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
6918 spin_unlock_irqrestore(hba->host->host_lock, flags);
6971 * @hba: per-adapter instance
6977 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6982 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6983 !hba->vreg_info.vccq2) {
6984 dev_err(hba->dev,
6990 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
6992 hba->vreg_info.vcc->max_uA,
6996 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
6998 hba->vreg_info.vccq->max_uA,
7002 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
7004 hba->vreg_info.vccq2->max_uA,
7011 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7014 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
7022 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7025 dev_err(hba->dev,
7031 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7033 dev_dbg(hba->dev, "%s: setting icc_level 0x%x\n", __func__, icc_level);
7035 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7039 dev_err(hba->dev,
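A sketch of the selection logic hinted at above: for each supply rail, find the highest ICC level whose current draw fits the regulator's max_uA budget, then program the most conservative of the per-rail answers. The function names and the per-level current table are illustrative simplifications.

#include <stdint.h>

/* Highest level (index) in 'draw_ua' that stays within the rail's budget;
 * level 0 is always considered safe. */
uint32_t max_level_for_rail(const uint32_t *draw_ua, int nlevels,
                            uint32_t rail_max_ua)
{
    int lvl;

    for (lvl = nlevels - 1; lvl > 0; lvl--)
        if (draw_ua[lvl] <= rail_max_ua)
            break;
    return (uint32_t)lvl;
}

/* The device gets the minimum of the levels the individual rails can carry. */
uint32_t pick_active_icc_level(uint32_t vcc_lvl, uint32_t vccq_lvl, uint32_t vccq2_lvl)
{
    uint32_t lvl = vcc_lvl;

    if (vccq_lvl < lvl)
        lvl = vccq_lvl;
    if (vccq2_lvl < lvl)
        lvl = vccq2_lvl;
    return lvl;
}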
7059 * @hba: per-adapter instance
7083 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7088 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7090 if (IS_ERR(hba->sdev_ufs_device)) {
7091 ret = PTR_ERR(hba->sdev_ufs_device);
7092 hba->sdev_ufs_device = NULL;
7095 ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
7096 scsi_device_put(hba->sdev_ufs_device);
7098 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7100 if (IS_ERR(hba->sdev_rpmb)) {
7101 ret = PTR_ERR(hba->sdev_rpmb);
7104 ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
7105 scsi_device_put(hba->sdev_rpmb);
7107 sdev_boot = __scsi_add_device(hba->host, 0, 0,
7110 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7118 scsi_remove_device(hba->sdev_ufs_device);
7123 static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7125 struct ufs_dev_info *dev_info = &hba->dev_info;
7129 if (!ufshcd_is_wb_allowed(hba))
7138 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7141 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7173 ufshcd_read_unit_desc_param(hba,
7190 hba->caps &= ~UFSHCD_CAP_WB_EN;
7193 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
7196 struct ufs_dev_info *dev_info = &hba->dev_info;
7207 hba->dev_quirks |= f->quirk;
7212 static void ufs_fixup_device_setup(struct ufs_hba *hba)
7215 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7218 ufshcd_vops_fixup_dev_quirks(hba);
7221 static int ufs_get_device_desc(struct ufs_hba *hba)
7226 struct ufs_dev_info *dev_info = &hba->dev_info;
7234 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7235 hba->desc_size[QUERY_DESC_IDN_DEVICE]);
7237 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7255 err = ufshcd_read_string_desc(hba, model_index,
7258 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7263 ufs_fixup_device_setup(hba);
7265 ufshcd_wb_probe(hba, desc_buf);
7278 static void ufs_put_device_desc(struct ufs_hba *hba)
7280 struct ufs_dev_info *dev_info = &hba->dev_info;
7288 * @hba: per-adapter instance
7297 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7302 ret = ufshcd_dme_peer_get(hba,
7314 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7323 * @hba: per-adapter instance
7332 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7338 ret = ufshcd_dme_get(hba,
7345 ret = ufshcd_dme_peer_get(hba,
7357 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7366 * @hba: per-adapter instance
7374 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7382 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7387 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7394 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7401 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7406 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7410 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7425 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7433 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7435 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7436 ufshcd_tune_pa_tactivate(hba);
7437 ufshcd_tune_pa_hibern8time(hba);
7440 ufshcd_vops_apply_dev_quirks(hba);
7442 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7444 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7446 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7447 ufshcd_quirk_tune_host_pa_tactivate(hba);
7450 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7452 hba->ufs_stats.hibern8_exit_cnt = 0;
7453 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7454 hba->req_abort_count = 0;
7457 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7463 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7470 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7473 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7479 hba->dev_info.max_lu_supported = 32;
7481 hba->dev_info.max_lu_supported = 8;
7508 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7514 hba->dev_ref_clk_freq =
7517 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7518 dev_err(hba->dev,
7522 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7526 u32 freq = hba->dev_ref_clk_freq;
7528 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7532 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7540 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7544 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7549 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
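A compact sketch of the read-compare-write pattern above; the refclk_ops callbacks stand in for the attribute query helpers and are not real driver entry points.

#include <stdint.h>

struct refclk_ops {
    int (*read_attr)(uint32_t *val);    /* read the current bRefClkFreq */
    int (*write_attr)(uint32_t val);    /* program a new value          */
};

/* Only write the attribute when the device's current setting differs from
 * the frequency the host was configured with. */
int sync_ref_clk(const struct refclk_ops *ops, uint32_t wanted)
{
    uint32_t cur;
    int err = ops->read_attr(&cur);

    if (err)
        return err;
    if (cur == wanted)
        return 0;
    return ops->write_attr(wanted);
}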
7556 static int ufshcd_device_params_init(struct ufs_hba *hba)
7563 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
7566 ret = ufshcd_device_geo_params_init(hba);
7571 ret = ufs_get_device_desc(hba);
7573 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7578 ufshcd_get_ref_clk_gating_wait(hba);
7580 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7582 hba->dev_info.f_power_on_wp_en = flag;
7585 if (ufshcd_get_max_pwr_mode(hba))
7586 dev_err(hba->dev,
7595 * @hba: per-adapter instance
7597 static int ufshcd_add_lus(struct ufs_hba *hba)
7602 ret = ufshcd_scsi_add_wlus(hba);
7607 if (ufshcd_is_clkscaling_supported(hba)) {
7608 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7609 &hba->pwr_info,
7611 hba->clk_scaling.saved_pwr_info.is_valid = true;
7612 if (!hba->devfreq) {
7613 ret = ufshcd_devfreq_init(hba);
7618 hba->clk_scaling.is_allowed = true;
7621 ufs_bsg_probe(hba);
7622 scsi_scan_host(hba->host);
7623 pm_runtime_put_sync(hba->dev);
7630 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
7632 static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
7638 spin_lock_irqsave(hba->host->host_lock, flags);
7640 sdp = hba->sdev_ufs_device;
7642 sdp = hba->sdev_rpmb;
7654 spin_unlock_irqrestore(hba->host->host_lock, flags);
7658 ret = ufshcd_send_request_sense(hba, sdp);
7662 dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
7667 static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
7671 if (!hba->wlun_dev_clr_ua)
7674 ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
7676 ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
7678 hba->wlun_dev_clr_ua = false;
7681 dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
7687 * ufshcd_probe_hba - probe hba to detect device and initialize it
7688 * @hba: per-adapter instance
7693 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
7699 ret = ufshcd_link_startup(hba);
7704 ufshcd_clear_dbg_ufs_stats(hba);
7707 ufshcd_set_link_active(hba);
7710 ret = ufshcd_verify_dev_init(hba);
7715 ret = ufshcd_complete_dev_init(hba);
7724 ret = ufshcd_device_params_init(hba);
7729 ufshcd_tune_unipro_params(hba);
7732 ufshcd_set_ufs_dev_active(hba);
7733 ufshcd_force_reset_auto_bkops(hba);
7734 hba->wlun_dev_clr_ua = true;
7737 if (hba->max_pwr_info.is_valid) {
7742 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7743 ufshcd_set_dev_ref_clk(hba);
7744 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
7746 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7750 ufshcd_print_pwr_info(hba);
7759 ufshcd_set_active_icc_lvl(hba);
7761 ufshcd_wb_config(hba);
7763 ufshcd_auto_hibern8_enable(hba);
7766 spin_lock_irqsave(hba->host->host_lock, flags);
7768 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7769 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
7770 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7771 spin_unlock_irqrestore(hba->host->host_lock, flags);
7773 trace_ufshcd_init(dev_name(hba->dev), ret,
7775 hba->curr_dev_pwr_mode, hba->uic_link_state);
7780 * ufshcd_async_scan - asynchronous execution for probing hba
7786 struct ufs_hba *hba = (struct ufs_hba *)data;
7789 /* Initialize hba, detect and initialize UFS device */
7790 ret = ufshcd_probe_hba(hba, true);
7795 ret = ufshcd_add_lus(hba);
7802 pm_runtime_put_sync(hba->dev);
7803 ufshcd_exit_clk_scaling(hba);
7804 ufshcd_hba_exit(hba);
7806 ufshcd_clear_ua_wluns(hba);
7876 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7879 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
7882 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7888 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7964 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7967 struct device *dev = hba->dev;
7968 struct ufs_vreg_info *info = &hba->vreg_info;
7989 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7991 struct ufs_vreg_info *info = &hba->vreg_info;
7993 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8013 static int ufshcd_init_vreg(struct ufs_hba *hba)
8016 struct device *dev = hba->dev;
8017 struct ufs_vreg_info *info = &hba->vreg_info;
8030 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8032 struct ufs_vreg_info *info = &hba->vreg_info;
8035 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8040 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
8044 struct list_head *head = &hba->clk_list_head;
8052 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8062 if (ufshcd_is_link_active(hba) &&
8070 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8078 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8083 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8094 spin_lock_irqsave(hba->host->host_lock, flags);
8095 hba->clk_gating.state = CLKS_ON;
8096 trace_ufshcd_clk_gating(dev_name(hba->dev),
8097 hba->clk_gating.state);
8098 spin_unlock_irqrestore(hba->host->host_lock, flags);
8102 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8108 static int ufshcd_init_clocks(struct ufs_hba *hba)
8112 struct device *dev = hba->dev;
8113 struct list_head *head = &hba->clk_list_head;
8136 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8141 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8155 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8159 if (!hba->vops)
8162 err = ufshcd_vops_init(hba);
8166 err = ufshcd_vops_setup_regulators(hba, true);
8168 ufshcd_vops_exit(hba);
8171 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8172 __func__, ufshcd_get_var_name(hba), err);
8176 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8178 if (!hba->vops)
8181 ufshcd_vops_setup_regulators(hba, false);
8183 ufshcd_vops_exit(hba);
8186 static int ufshcd_hba_init(struct ufs_hba *hba)
8197 err = ufshcd_init_hba_vreg(hba);
8201 err = ufshcd_setup_hba_vreg(hba, true);
8205 err = ufshcd_init_clocks(hba);
8209 err = ufshcd_setup_clocks(hba, true);
8213 err = ufshcd_init_vreg(hba);
8217 err = ufshcd_setup_vreg(hba, true);
8221 err = ufshcd_variant_hba_init(hba);
8225 hba->is_powered = true;
8229 ufshcd_setup_vreg(hba, false);
8231 ufshcd_setup_clocks(hba, false);
8233 ufshcd_setup_hba_vreg(hba, false);
8238 static void ufshcd_hba_exit(struct ufs_hba *hba)
8240 if (hba->is_powered) {
8241 ufshcd_variant_hba_exit(hba);
8242 ufshcd_setup_vreg(hba, false);
8243 ufshcd_suspend_clkscaling(hba);
8244 if (ufshcd_is_clkscaling_supported(hba))
8245 if (hba->devfreq)
8246 ufshcd_suspend_clkscaling(hba);
8247 ufshcd_setup_clocks(hba, false);
8248 ufshcd_setup_hba_vreg(hba, false);
8249 hba->is_powered = false;
8250 ufs_put_device_desc(hba);
8255 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
8286 * @hba: per adapter instance
8292 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8301 spin_lock_irqsave(hba->host->host_lock, flags);
8302 sdp = hba->sdev_ufs_device;
8312 spin_unlock_irqrestore(hba->host->host_lock, flags);
8323 hba->host->eh_noresume = 1;
8324 if (hba->wlun_dev_clr_ua) {
8325 ret = ufshcd_send_request_sense(hba, sdp);
8329 hba->wlun_dev_clr_ua = false;
8350 hba->curr_dev_pwr_mode = pwr_mode;
8353 hba->host->eh_noresume = 0;
8357 static int ufshcd_link_state_transition(struct ufs_hba *hba,
8363 if (req_link_state == hba->uic_link_state)
8367 ret = ufshcd_uic_hibern8_enter(hba);
8369 ufshcd_set_link_hibern8(hba);
8371 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8381 (!check_for_bkops || !hba->auto_bkops_enabled)) {
8389 ret = ufshcd_uic_hibern8_enter(hba);
8391 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8399 ufshcd_hba_stop(hba);
8404 ufshcd_set_link_off(hba);
8411 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8421 if (!ufshcd_is_link_active(hba) &&
8422 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8440 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8441 !hba->dev_info.is_lu_power_on_wp) {
8442 ufshcd_setup_vreg(hba, false);
8444 } else if (!ufshcd_is_ufs_dev_active(hba)) {
8445 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8447 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
8448 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8449 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8456 if (vcc_off && hba->vreg_info.vcc &&
8457 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8461 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8465 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8466 !hba->dev_info.is_lu_power_on_wp) {
8467 ret = ufshcd_setup_vreg(hba, true);
8468 } else if (!ufshcd_is_ufs_dev_active(hba)) {
8469 if (!ufshcd_is_link_active(hba)) {
8470 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8473 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8477 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8482 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8484 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8489 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8491 if (ufshcd_is_link_off(hba))
8492 ufshcd_setup_hba_vreg(hba, false);
8495 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8497 if (ufshcd_is_link_off(hba))
8498 ufshcd_setup_hba_vreg(hba, true);
8503 * @hba: per adapter instance
8517 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8524 hba->pm_op_in_progress = 1;
8527 hba->rpm_lvl : hba->spm_lvl;
8539 ufshcd_hold(hba, false);
8540 hba->clk_gating.is_suspended = true;
8542 if (hba->clk_scaling.is_allowed) {
8543 cancel_work_sync(&hba->clk_scaling.suspend_work);
8544 cancel_work_sync(&hba->clk_scaling.resume_work);
8545 ufshcd_suspend_clkscaling(hba);
8553 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8554 (req_link_state == hba->uic_link_state))
8558 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8564 if (ufshcd_can_autobkops_during_suspend(hba)) {
8570 ret = ufshcd_urgent_bkops(hba);
8575 ufshcd_disable_auto_bkops(hba);
8582 hba->dev_info.b_rpm_dev_flush_capable =
8583 hba->auto_bkops_enabled ||
8586 ufshcd_is_auto_hibern8_enabled(hba))) &&
8587 ufshcd_wb_need_flush(hba));
8590 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8591 if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8594 ufshcd_disable_auto_bkops(hba);
8597 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8598 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8604 flush_work(&hba->eeh_work);
8605 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8609 ufshcd_vreg_set_lpm(hba);
8617 ret = ufshcd_vops_suspend(hba, pm_op);
8624 ufshcd_disable_irq(hba);
8626 ufshcd_setup_clocks(hba, false);
8628 if (ufshcd_is_clkgating_allowed(hba)) {
8629 hba->clk_gating.state = CLKS_OFF;
8630 trace_ufshcd_clk_gating(dev_name(hba->dev),
8631 hba->clk_gating.state);
8635 ufshcd_hba_vreg_set_lpm(hba);
8639 if (hba->clk_scaling.is_allowed)
8640 ufshcd_resume_clkscaling(hba);
8641 ufshcd_vreg_set_hpm(hba);
8642 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8643 ufshcd_set_link_active(hba);
8644 else if (ufshcd_is_link_off(hba))
8645 ufshcd_host_reset_and_restore(hba);
8647 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8648 ufshcd_disable_auto_bkops(hba);
8650 if (hba->clk_scaling.is_allowed)
8651 ufshcd_resume_clkscaling(hba);
8652 hba->clk_gating.is_suspended = false;
8653 hba->dev_info.b_rpm_dev_flush_capable = false;
8654 ufshcd_release(hba);
8656 if (hba->dev_info.b_rpm_dev_flush_capable) {
8657 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8661 hba->pm_op_in_progress = 0;
8664 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
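The suspend path above derives its targets from a PM level; a self-contained sketch of that lookup, where the enum values and the exact table contents are illustrative rather than the driver's definitions.

enum dev_pwr_mode { DEV_ACTIVE, DEV_SLEEP, DEV_POWERDOWN };
enum link_state   { LINK_ACTIVE, LINK_HIBERN8, LINK_OFF };

struct pm_lvl_state { enum dev_pwr_mode dev; enum link_state link; };

/* One row per PM level: higher levels power down more of the stack. */
static const struct pm_lvl_state pm_lvl_states[] = {
    { DEV_ACTIVE,    LINK_ACTIVE  },    /* lvl 0 */
    { DEV_ACTIVE,    LINK_HIBERN8 },    /* lvl 1 */
    { DEV_SLEEP,     LINK_ACTIVE  },    /* lvl 2 */
    { DEV_SLEEP,     LINK_HIBERN8 },    /* lvl 3 */
    { DEV_POWERDOWN, LINK_HIBERN8 },    /* lvl 4 */
    { DEV_POWERDOWN, LINK_OFF     },    /* lvl 5 */
};

/* Caller guarantees lvl is within the table. */
struct pm_lvl_state pm_lvl_to_state(unsigned int lvl)
{
    return pm_lvl_states[lvl];
}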
8670 * @hba: per adapter instance
8678 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8683 hba->pm_op_in_progress = 1;
8684 old_link_state = hba->uic_link_state;
8686 ufshcd_hba_vreg_set_hpm(hba);
8688 ret = ufshcd_setup_clocks(hba, true);
8693 ufshcd_enable_irq(hba);
8695 ret = ufshcd_vreg_set_hpm(hba);
8704 ret = ufshcd_vops_resume(hba, pm_op);
8708 if (ufshcd_is_link_hibern8(hba)) {
8709 ret = ufshcd_uic_hibern8_exit(hba);
8711 ufshcd_set_link_active(hba);
8713 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
8717 } else if (ufshcd_is_link_off(hba)) {
8722 ret = ufshcd_reset_and_restore(hba);
8727 if (ret || !ufshcd_is_link_active(hba))
8731 if (!ufshcd_is_ufs_dev_active(hba)) {
8732 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8737 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8738 ufshcd_enable_auto_bkops(hba);
8744 ufshcd_urgent_bkops(hba);
8746 hba->clk_gating.is_suspended = false;
8748 if (hba->clk_scaling.is_allowed)
8749 ufshcd_resume_clkscaling(hba);
8752 ufshcd_auto_hibern8_enable(hba);
8754 if (hba->dev_info.b_rpm_dev_flush_capable) {
8755 hba->dev_info.b_rpm_dev_flush_capable = false;
8756 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8760 ufshcd_release(hba);
8765 ufshcd_link_state_transition(hba, old_link_state, 0);
8767 ufshcd_vops_suspend(hba, pm_op);
8769 ufshcd_vreg_set_lpm(hba);
8771 ufshcd_disable_irq(hba);
8772 if (hba->clk_scaling.is_allowed)
8773 ufshcd_suspend_clkscaling(hba);
8774 ufshcd_setup_clocks(hba, false);
8775 if (ufshcd_is_clkgating_allowed(hba)) {
8776 hba->clk_gating.state = CLKS_OFF;
8777 trace_ufshcd_clk_gating(dev_name(hba->dev),
8778 hba->clk_gating.state);
8781 hba->pm_op_in_progress = 0;
8783 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
8789 * @hba: per adapter instance
8795 int ufshcd_system_suspend(struct ufs_hba *hba)
8800 if (!hba || !hba->is_powered)
8803 cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
8805 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8806 hba->curr_dev_pwr_mode) &&
8807 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8808 hba->uic_link_state) &&
8809 pm_runtime_suspended(hba->dev) &&
8810 !hba->dev_info.b_rpm_dev_flush_capable)
8813 if (pm_runtime_suspended(hba->dev)) {
8822 ret = ufshcd_runtime_resume(hba);
8827 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8829 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8831 hba->curr_dev_pwr_mode, hba->uic_link_state);
8833 hba->is_sys_suspended = true;
8840 * @hba: per adapter instance
8845 int ufshcd_system_resume(struct ufs_hba *hba)
8850 if (!hba)
8853 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8860 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8862 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8864 hba->curr_dev_pwr_mode, hba->uic_link_state);
8866 hba->is_sys_suspended = false;
8873 * @hba: per adapter instance
8879 int ufshcd_runtime_suspend(struct ufs_hba *hba)
8884 if (!hba)
8887 if (!hba->is_powered)
8890 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8892 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8894 hba->curr_dev_pwr_mode, hba->uic_link_state);
8901 * @hba: per adapter instance
8920 int ufshcd_runtime_resume(struct ufs_hba *hba)
8925 if (!hba)
8928 if (!hba->is_powered)
8931 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8933 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8935 hba->curr_dev_pwr_mode, hba->uic_link_state);
8940 int ufshcd_runtime_idle(struct ufs_hba *hba)
8948 * @hba: per adapter instance
8954 int ufshcd_shutdown(struct ufs_hba *hba)
8958 if (!hba->is_powered)
8961 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8964 pm_runtime_get_sync(hba->dev);
8966 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8969 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8978 * @hba: per adapter instance
8980 void ufshcd_remove(struct ufs_hba *hba)
8982 ufs_bsg_remove(hba);
8983 ufs_sysfs_remove_nodes(hba->dev);
8984 blk_cleanup_queue(hba->tmf_queue);
8985 blk_mq_free_tag_set(&hba->tmf_tag_set);
8986 blk_cleanup_queue(hba->cmd_queue);
8987 scsi_remove_host(hba->host);
8988 destroy_workqueue(hba->eh_wq);
8990 ufshcd_disable_intr(hba, hba->intr_mask);
8991 ufshcd_hba_stop(hba);
8993 ufshcd_exit_clk_scaling(hba);
8994 ufshcd_exit_clk_gating(hba);
8995 if (ufshcd_is_clkscaling_supported(hba))
8996 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8997 ufshcd_hba_exit(hba);
9003 * @hba: pointer to Host Bus Adapter (HBA)
9005 void ufshcd_dealloc_host(struct ufs_hba *hba)
9007 ufshcd_crypto_destroy_keyslot_manager(hba);
9008 scsi_host_put(hba->host);
9015 * @hba: per adapter instance
9019 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9021 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9022 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9025 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
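A small model of the fallback above; CAP_64BIT is a stand-in for the controller capability bit and set_mask for dma_set_mask_and_coherent().

#include <stdint.h>

#define CAP_64BIT (1u << 24)    /* stand-in for the 64-bit addressing capability */

/* Prefer a 64-bit DMA mask when the controller advertises it, otherwise
 * (or if setting it fails) fall back to 32-bit. */
int choose_dma_mask(uint32_t capabilities, int (*set_mask)(int bits))
{
    if ((capabilities & CAP_64BIT) && set_mask(64) == 0)
        return 0;
    return set_mask(32);
}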
9037 struct ufs_hba *hba;
9054 hba = shost_priv(host);
9055 hba->host = host;
9056 hba->dev = dev;
9057 *hba_handle = hba;
9058 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
9060 INIT_LIST_HEAD(&hba->clk_list_head);
9081 * @hba: per-adapter instance
9086 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9089 struct Scsi_Host *host = hba->host;
9090 struct device *dev = hba->dev;
9098 dev_set_drvdata(dev, hba);
9101 dev_err(hba->dev,
9107 hba->mmio_base = mmio_base;
9108 hba->irq = irq;
9109 hba->vps = &ufs_hba_vps;
9111 err = ufshcd_hba_init(hba);
9116 err = ufshcd_hba_capabilities(hba);
9121 hba->ufs_version = ufshcd_get_ufs_version(hba);
9123 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
9124 (hba->ufs_version != UFSHCI_VERSION_11) &&
9125 (hba->ufs_version != UFSHCI_VERSION_20) &&
9126 (hba->ufs_version != UFSHCI_VERSION_21))
9127 dev_err(hba->dev, "invalid UFS version 0x%x\n",
9128 hba->ufs_version);
9131 hba->intr_mask = ufshcd_get_intr_mask(hba);
9133 err = ufshcd_set_dma_mask(hba);
9135 dev_err(hba->dev, "set dma mask failed\n");
9140 err = ufshcd_memory_alloc(hba);
9142 dev_err(hba->dev, "Memory allocation failed\n");
9147 ufshcd_host_memory_configure(hba);
9149 host->can_queue = hba->nutrs;
9150 host->cmd_per_lun = hba->nutrs;
9157 hba->max_pwr_info.is_valid = false;
9161 hba->host->host_no);
9162 hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9163 if (!hba->eh_wq) {
9164 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9169 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
9170 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
9173 mutex_init(&hba->uic_cmd_mutex);
9176 mutex_init(&hba->dev_cmd.lock);
9178 init_rwsem(&hba->clk_scaling_lock);
9180 ufshcd_init_clk_gating(hba);
9182 ufshcd_init_clk_scaling(hba);
9189 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
9191 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
9199 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
9201 dev_err(hba->dev, "request irq failed\n");
9204 hba->is_irq_enabled = true;
9207 err = scsi_add_host(host, hba->dev);
9209 dev_err(hba->dev, "scsi_add_host failed\n");
9213 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
9214 if (IS_ERR(hba->cmd_queue)) {
9215 err = PTR_ERR(hba->cmd_queue);
9219 hba->tmf_tag_set = (struct blk_mq_tag_set) {
9221 .queue_depth = hba->nutmrs,
9225 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9228 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9229 if (IS_ERR(hba->tmf_queue)) {
9230 err = PTR_ERR(hba->tmf_queue);
9233 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
9234 sizeof(*hba->tmf_rqs), GFP_KERNEL);
9235 if (!hba->tmf_rqs) {
9241 ufshcd_vops_device_reset(hba);
9243 ufshcd_init_crypto(hba);
9246 err = ufshcd_hba_enable(hba);
9248 dev_err(hba->dev, "Host controller enable failed\n");
9249 ufshcd_print_host_regs(hba);
9250 ufshcd_print_host_state(hba);
9259 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9262 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9266 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9270 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
9271 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
9277 atomic_set(&hba->scsi_block_reqs_cnt, 0);
9284 ufshcd_set_ufs_dev_active(hba);
9286 async_schedule(ufshcd_async_scan, hba);
9287 ufs_sysfs_add_nodes(hba->dev);
9292 blk_cleanup_queue(hba->tmf_queue);
9294 blk_mq_free_tag_set(&hba->tmf_tag_set);
9296 blk_cleanup_queue(hba->cmd_queue);
9298 scsi_remove_host(hba->host);
9300 ufshcd_exit_clk_scaling(hba);
9301 ufshcd_exit_clk_gating(hba);
9302 destroy_workqueue(hba->eh_wq);
9304 hba->is_irq_enabled = false;
9305 ufshcd_hba_exit(hba);