1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Universal Flash Storage Host controller driver Core
4 * Copyright (C) 2011-2013 Samsung India Software Operations
5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6 *
7 * Authors:
8 *	Santosh Yaraganavi <santosh.sy@samsung.com>
9 *	Vinayak Holikatti <h.vinayak@samsung.com>
10 */
11
12#include <linux/async.h>
13#include <linux/devfreq.h>
14#include <linux/nls.h>
15#include <linux/of.h>
16#include <linux/bitfield.h>
17#include <linux/blk-pm.h>
18#include <linux/blkdev.h>
19#include "ufshcd.h"
20#include "ufs_quirks.h"
21#include "unipro.h"
22#include "ufs-sysfs.h"
23#include "ufs_bsg.h"
24#include "ufshcd-crypto.h"
25#include <asm/unaligned.h>
27
28#define CREATE_TRACE_POINTS
29#include <trace/events/ufs.h>
30
31#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
32				 UTP_TASK_REQ_COMPL |\
33				 UFSHCD_ERROR_MASK)
34/* UIC command timeout, unit: ms */
35#define UIC_CMD_TIMEOUT	500
36
37/* NOP OUT retries waiting for NOP IN response */
38#define NOP_OUT_RETRIES    10
39/* Timeout after 50 msecs if NOP OUT hangs without response */
40#define NOP_OUT_TIMEOUT    50 /* msecs */
41
42/* Query request retries */
43#define QUERY_REQ_RETRIES 3
44/* Query request timeout */
45#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
46
47/* Task management command timeout */
48#define TM_CMD_TIMEOUT	100 /* msecs */
49
50/* maximum number of retries for a general UIC command  */
51#define UFS_UIC_COMMAND_RETRIES 3
52
53/* maximum number of link-startup retries */
54#define DME_LINKSTARTUP_RETRIES 3
55
56/* Maximum retries for Hibern8 enter */
57#define UIC_HIBERN8_ENTER_RETRIES 3
58
59/* maximum number of reset retries before giving up */
60#define MAX_HOST_RESET_RETRIES 5
61
62/* Expose the flag value from utp_upiu_query.value */
63#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
64
65/* Interrupt aggregation default timeout, unit: 40us */
66#define INT_AGGR_DEF_TO	0x02
67
68/* default delay of autosuspend: 2000 ms */
69#define RPM_AUTOSUSPEND_DELAY_MS 2000
70
71/* Default delay of RPM device flush delayed work */
72#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
73
74/* Default value of wait time before gating device ref clock */
75#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
76
77/* Polling time to wait for fDeviceInit */
78#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
79
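/* Enable or disable @_vreg depending on @_on; evaluates to the resulting error code */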
80#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
81	({                                                              \
82		int _ret;                                               \
83		if (_on)                                                \
84			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
85		else                                                    \
86			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
87		_ret;                                                   \
88	})
89
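/* Hex-dump helper: buffers longer than 4 bytes are printed with byte offsets */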
90#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
91	size_t __len = (len);                                            \
92	print_hex_dump(KERN_ERR, prefix_str,                             \
93		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
94		       16, 4, buf, __len, false);                        \
95} while (0)
96
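/**
 * ufshcd_dump_regs - dump a range of UFSHCI registers to the kernel log
 * @hba: per adapter instance
 * @offset: byte offset of the first register to dump
 * @len: number of bytes to dump, must be a multiple of 4
 * @prefix: prefix string printed in front of every dumped line
 *
 * Returns 0 on success, -EINVAL for unaligned arguments or -ENOMEM if the
 * temporary buffer cannot be allocated.
 */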
97int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
98		     const char *prefix)
99{
100	u32 *regs;
101	size_t pos;
102
103	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
104		return -EINVAL;
105
106	regs = kzalloc(len, GFP_ATOMIC);
107	if (!regs)
108		return -ENOMEM;
109
110	for (pos = 0; pos < len; pos += 4) {
111		if (offset == 0 &&
112		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
113		    pos <= REG_UIC_ERROR_CODE_DME)
114			continue;
115		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
116	}
117
118	ufshcd_hex_dump(prefix, regs, len);
119	kfree(regs);
120
121	return 0;
122}
123EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
124
125enum {
126	UFSHCD_MAX_CHANNEL	= 0,
127	UFSHCD_MAX_ID		= 1,
128	UFSHCD_CMD_PER_LUN	= 32,
129	UFSHCD_CAN_QUEUE	= 32,
130};
131
132/* UFSHCD states */
133enum {
134	UFSHCD_STATE_RESET,
135	UFSHCD_STATE_ERROR,
136	UFSHCD_STATE_OPERATIONAL,
137	UFSHCD_STATE_EH_SCHEDULED_FATAL,
138	UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
139};
140
141/* UFSHCD error handling flags */
142enum {
143	UFSHCD_EH_IN_PROGRESS = (1 << 0),
144};
145
146/* UFSHCD UIC layer error flags */
147enum {
148	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
149	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
150	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
151	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
152	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
153	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
154	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
155};
156
157#define ufshcd_set_eh_in_progress(h) \
158	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
159#define ufshcd_eh_in_progress(h) \
160	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
161#define ufshcd_clear_eh_in_progress(h) \
162	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
163
164struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
165	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
166	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
167	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
168	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
169	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
170	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
171};
172
173static inline enum ufs_dev_pwr_mode
174ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
175{
176	return ufs_pm_lvl_states[lvl].dev_state;
177}
178
179static inline enum uic_link_state
180ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
181{
182	return ufs_pm_lvl_states[lvl].link_state;
183}
184
185static inline enum ufs_pm_level
186ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
187					enum uic_link_state link_state)
188{
189	enum ufs_pm_level lvl;
190
191	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
192		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
193			(ufs_pm_lvl_states[lvl].link_state == link_state))
194			return lvl;
195	}
196
	/* if no match is found, return level 0 */
198	return UFS_PM_LVL_0;
199}
200
201static struct ufs_dev_fix ufs_fixups[] = {
202	/* UFS cards deviations table */
203	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
204		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
205	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
206		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
207		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
208		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
209	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
210		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
211	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
212		UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
213	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
214		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
215	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
216		UFS_DEVICE_QUIRK_PA_TACTIVATE),
217	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
218		UFS_DEVICE_QUIRK_PA_TACTIVATE),
219	END_FIX
220};
221
222static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
223static void ufshcd_async_scan(void *data, async_cookie_t cookie);
224static int ufshcd_reset_and_restore(struct ufs_hba *hba);
225static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
226static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
227static void ufshcd_hba_exit(struct ufs_hba *hba);
228static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
229static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
230static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
231static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
232static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
233static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
234static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
235static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
236static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
237static irqreturn_t ufshcd_intr(int irq, void *__hba);
238static int ufshcd_change_power_mode(struct ufs_hba *hba,
239			     struct ufs_pa_layer_attr *pwr_mode);
240static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
241static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
242static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
243static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
244					 struct ufs_vreg *vreg);
245static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
246static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
247static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
248static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
249static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
250static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
251
252static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
253{
254	return tag >= 0 && tag < hba->nutrs;
255}
256
257static inline void ufshcd_enable_irq(struct ufs_hba *hba)
258{
259	if (!hba->is_irq_enabled) {
260		enable_irq(hba->irq);
261		hba->is_irq_enabled = true;
262	}
263}
264
265static inline void ufshcd_disable_irq(struct ufs_hba *hba)
266{
267	if (hba->is_irq_enabled) {
268		disable_irq(hba->irq);
269		hba->is_irq_enabled = false;
270	}
271}
272
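/*
 * Enable WriteBooster on the device (when supported) together with flush
 * during hibern8 and, unless quirked out, manual flush control.
 */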
273static inline void ufshcd_wb_config(struct ufs_hba *hba)
274{
275	int ret;
276
277	if (!ufshcd_is_wb_allowed(hba))
278		return;
279
280	ret = ufshcd_wb_ctrl(hba, true);
281	if (ret)
282		dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
283	else
284		dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
285	ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
286	if (ret)
287		dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
288			__func__, ret);
289	if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
290		ufshcd_wb_toggle_flush(hba, true);
291}
292
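/*
 * SCSI request blocking is reference counted: requests are blocked when
 * scsi_block_reqs_cnt first becomes non-zero and unblocked again only when
 * the matching ufshcd_scsi_unblock_requests() call drops it back to zero.
 */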
293static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
294{
295	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
296		scsi_unblock_requests(hba->host);
297}
298
299static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
300{
301	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
302		scsi_block_requests(hba->host);
303}
304
305static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
306		const char *str)
307{
308	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
309
310	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
311}
312
313static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
314		const char *str)
315{
316	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
317
318	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
319}
320
321static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
322		const char *str)
323{
324	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
325
326	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
327			&descp->input_param1);
328}
329
330static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
331					 struct uic_command *ucmd,
332					 const char *str)
333{
334	u32 cmd;
335
336	if (!trace_ufshcd_uic_command_enabled())
337		return;
338
339	if (!strcmp(str, "send"))
340		cmd = ucmd->command;
341	else
342		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
343
344	trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
345				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
346				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
347				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
348}
349
350static void ufshcd_add_command_trace(struct ufs_hba *hba,
351		unsigned int tag, const char *str)
352{
353	sector_t lba = -1;
354	u8 opcode = 0;
355	u32 intr, doorbell;
356	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
357	struct scsi_cmnd *cmd = lrbp->cmd;
358	int transfer_len = -1;
359
360	if (!trace_ufshcd_command_enabled()) {
361		/* trace UPIU W/O tracing command */
362		if (cmd)
363			ufshcd_add_cmd_upiu_trace(hba, tag, str);
364		return;
365	}
366
367	if (cmd) { /* data phase exists */
368		/* trace UPIU also */
369		ufshcd_add_cmd_upiu_trace(hba, tag, str);
370		opcode = cmd->cmnd[0];
371		if ((opcode == READ_10) || (opcode == WRITE_10)) {
372			/*
373			 * Currently we only fully trace read(10) and write(10)
374			 * commands
375			 */
376			if (cmd->request && cmd->request->bio)
377				lba = cmd->request->bio->bi_iter.bi_sector;
378			transfer_len = be32_to_cpu(
379				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
380		}
381	}
382
383	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
384	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
385	trace_ufshcd_command(dev_name(hba->dev), str, tag,
386				doorbell, transfer_len, intr, lba, opcode);
387}
388
389static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
390{
391	struct ufs_clk_info *clki;
392	struct list_head *head = &hba->clk_list_head;
393
394	if (list_empty(head))
395		return;
396
397	list_for_each_entry(clki, head, list) {
398		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
399				clki->max_freq)
400			dev_err(hba->dev, "clk: %s, rate: %u\n",
401					clki->name, clki->curr_freq);
402	}
403}
404
405static void ufshcd_print_err_hist(struct ufs_hba *hba,
406				  struct ufs_err_reg_hist *err_hist,
407				  char *err_name)
408{
409	int i;
410	bool found = false;
411
412	for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
413		int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
414
415		if (err_hist->tstamp[p] == 0)
416			continue;
417		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
418			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
419		found = true;
420	}
421
422	if (!found)
423		dev_err(hba->dev, "No record of %s\n", err_name);
424}
425
426static void ufshcd_print_host_regs(struct ufs_hba *hba)
427{
428	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
429
430	ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
431	ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
432	ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
433	ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
434	ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
435	ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
436			      "auto_hibern8_err");
437	ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
438	ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
439			      "link_startup_fail");
440	ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
441	ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
442			      "suspend_fail");
443	ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
444	ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
445	ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
446
447	ufshcd_vops_dbg_register_dump(hba);
448}
449
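/**
 * ufshcd_print_trs - dump the UTP transfer request descriptors in @bitmap
 * @hba: per adapter instance
 * @bitmap: bitmask of transfer request slots to dump
 * @pr_prdt: if true, also dump the PRDT entries of each request
 */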
450static
451void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
452{
453	struct ufshcd_lrb *lrbp;
454	int prdt_length;
455	int tag;
456
457	for_each_set_bit(tag, &bitmap, hba->nutrs) {
458		lrbp = &hba->lrb[tag];
459
460		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
461				tag, ktime_to_us(lrbp->issue_time_stamp));
462		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
463				tag, ktime_to_us(lrbp->compl_time_stamp));
464		dev_err(hba->dev,
465			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
466			tag, (u64)lrbp->utrd_dma_addr);
467
468		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
469				sizeof(struct utp_transfer_req_desc));
470		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
471			(u64)lrbp->ucd_req_dma_addr);
472		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
473				sizeof(struct utp_upiu_req));
474		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
475			(u64)lrbp->ucd_rsp_dma_addr);
476		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
477				sizeof(struct utp_upiu_rsp));
478
479		prdt_length = le16_to_cpu(
480			lrbp->utr_descriptor_ptr->prd_table_length);
481		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
482			prdt_length /= sizeof(struct ufshcd_sg_entry);
483
484		dev_err(hba->dev,
485			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
486			tag, prdt_length,
487			(u64)lrbp->ucd_prdt_dma_addr);
488
489		if (pr_prdt)
490			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
491				sizeof(struct ufshcd_sg_entry) * prdt_length);
492	}
493}
494
495static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
496{
497	int tag;
498
499	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
500		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
501
502		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
503		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
504	}
505}
506
507static void ufshcd_print_host_state(struct ufs_hba *hba)
508{
509	struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
510
511	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
512	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
513		hba->outstanding_reqs, hba->outstanding_tasks);
514	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
515		hba->saved_err, hba->saved_uic_err);
516	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
517		hba->curr_dev_pwr_mode, hba->uic_link_state);
518	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
519		hba->pm_op_in_progress, hba->is_sys_suspended);
520	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
521		hba->auto_bkops_enabled, hba->host->host_self_blocked);
522	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
523	dev_err(hba->dev,
524		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
525		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
526		hba->ufs_stats.hibern8_exit_cnt);
527	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
528		ktime_to_us(hba->ufs_stats.last_intr_ts),
529		hba->ufs_stats.last_intr_status);
530	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
531		hba->eh_flags, hba->req_abort_count);
532	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
533		hba->ufs_version, hba->capabilities, hba->caps);
534	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
535		hba->dev_quirks);
536	if (sdev_ufs)
537		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
538			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
539
540	ufshcd_print_clk_freqs(hba);
541}
542
543/**
 * ufshcd_print_pwr_info - print power params as saved in hba power info
546 * @hba: per-adapter instance
547 */
548static void ufshcd_print_pwr_info(struct ufs_hba *hba)
549{
550	static const char * const names[] = {
551		"INVALID MODE",
552		"FAST MODE",
553		"SLOW_MODE",
554		"INVALID MODE",
555		"FASTAUTO_MODE",
556		"SLOWAUTO_MODE",
557		"INVALID MODE",
558	};
559
560	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
561		 __func__,
562		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
563		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
564		 names[hba->pwr_info.pwr_rx],
565		 names[hba->pwr_info.pwr_tx],
566		 hba->pwr_info.hs_rate);
567}
568
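/**
 * ufshcd_delay_us - delay for the requested number of microseconds
 * @us: minimum delay in microseconds, no-op when zero
 * @tolerance: extra slack, in microseconds, allowed on top of @us when sleeping
 *
 * Delays shorter than 10 us busy-wait with udelay(), longer ones sleep with
 * usleep_range().
 */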
569void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
570{
571	if (!us)
572		return;
573
574	if (us < 10)
575		udelay(us);
576	else
577		usleep_range(us, us + tolerance);
578}
579EXPORT_SYMBOL_GPL(ufshcd_delay_us);
580
581/**
582 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter instance
584 * @reg: mmio register offset
585 * @mask: mask to apply to the read register value
586 * @val: value to wait for
587 * @interval_us: polling interval in microseconds
588 * @timeout_ms: timeout in milliseconds
589 *
590 * Return:
591 * -ETIMEDOUT on error, zero on success.
592 */
593int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
594				u32 val, unsigned long interval_us,
595				unsigned long timeout_ms)
596{
597	int err = 0;
598	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
599
600	/* ignore bits that we don't intend to wait on */
601	val = val & mask;
602
603	while ((ufshcd_readl(hba, reg) & mask) != val) {
604		usleep_range(interval_us, interval_us + 50);
605		if (time_after(jiffies, timeout)) {
606			if ((ufshcd_readl(hba, reg) & mask) != val)
607				err = -ETIMEDOUT;
608			break;
609		}
610	}
611
612	return err;
613}
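/*
 * Example usage (sketch): poll the HCE bit every 100 us for up to 1 ms until
 * the controller reports itself enabled:
 *
 *	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 *				       CONTROLLER_ENABLE, CONTROLLER_ENABLE,
 *				       100, 1);
 */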
614
615/**
616 * ufshcd_get_intr_mask - Get the interrupt bit mask
617 * @hba: Pointer to adapter instance
618 *
619 * Returns interrupt bit mask per version
620 */
621static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
622{
623	u32 intr_mask = 0;
624
625	switch (hba->ufs_version) {
626	case UFSHCI_VERSION_10:
627		intr_mask = INTERRUPT_MASK_ALL_VER_10;
628		break;
629	case UFSHCI_VERSION_11:
630	case UFSHCI_VERSION_20:
631		intr_mask = INTERRUPT_MASK_ALL_VER_11;
632		break;
633	case UFSHCI_VERSION_21:
634	default:
635		intr_mask = INTERRUPT_MASK_ALL_VER_21;
636		break;
637	}
638
639	return intr_mask;
640}
641
642/**
643 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
644 * @hba: Pointer to adapter instance
645 *
646 * Returns UFSHCI version supported by the controller
647 */
648static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
649{
650	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
651		return ufshcd_vops_get_ufs_hci_version(hba);
652
653	return ufshcd_readl(hba, REG_UFS_VERSION);
654}
655
656/**
657 * ufshcd_is_device_present - Check if any device connected to
658 *			      the host controller
659 * @hba: pointer to adapter instance
660 *
661 * Returns true if device present, false if no device detected
662 */
663static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
664{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
667}
668
669/**
670 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
671 * @lrbp: pointer to local command reference block
672 *
673 * This function is used to get the OCS field from UTRD
674 * Returns the OCS field in the UTRD
675 */
676static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
677{
678	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
679}
680
681/**
682 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
683 * @hba: per adapter instance
684 * @pos: position of the bit to be cleared
685 */
686static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
687{
688	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
689		ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
690	else
691		ufshcd_writel(hba, ~(1 << pos),
692				REG_UTP_TRANSFER_REQ_LIST_CLEAR);
693}
694
695/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
697 * @hba: per adapter instance
698 * @pos: position of the bit to be cleared
699 */
700static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
701{
702	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
703		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
704	else
705		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
706}
707
708/**
709 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
710 * @hba: per adapter instance
711 * @tag: position of the bit to be cleared
712 */
713static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
714{
715	__clear_bit(tag, &hba->outstanding_reqs);
716}
717
718/**
719 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
720 * @reg: Register value of host controller status
721 *
 * Returns 0 on success and a positive value on failure
723 */
724static inline int ufshcd_get_lists_status(u32 reg)
725{
726	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
727}
728
729/**
730 * ufshcd_get_uic_cmd_result - Get the UIC command result
731 * @hba: Pointer to adapter instance
732 *
733 * This function gets the result of UIC command completion
 * Returns 0 on success, non-zero value on error
735 */
736static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
737{
738	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
739	       MASK_UIC_COMMAND_RESULT;
740}
741
742/**
743 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
744 * @hba: Pointer to adapter instance
745 *
 * This function reads UIC command argument3
 * Returns the attribute value carried in UIC command argument3
748 */
749static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
750{
751	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
752}
753
754/**
755 * ufshcd_get_req_rsp - returns the TR response transaction type
756 * @ucd_rsp_ptr: pointer to response UPIU
757 */
758static inline int
759ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
760{
761	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
762}
763
764/**
765 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
766 * @ucd_rsp_ptr: pointer to response UPIU
767 *
768 * This function gets the response status and scsi_status from response UPIU
769 * Returns the response result code.
770 */
771static inline int
772ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
773{
774	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
775}
776
777/*
778 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
779 *				from response UPIU
780 * @ucd_rsp_ptr: pointer to response UPIU
781 *
782 * Return the data segment length.
783 */
784static inline unsigned int
785ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
786{
787	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
788		MASK_RSP_UPIU_DATA_SEG_LEN;
789}
790
791/**
792 * ufshcd_is_exception_event - Check if the device raised an exception event
793 * @ucd_rsp_ptr: pointer to response UPIU
794 *
795 * The function checks if the device raised an exception event indicated in
796 * the Device Information field of response UPIU.
797 *
798 * Returns true if exception is raised, false otherwise.
799 */
800static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
801{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT;
804}
805
806/**
807 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
808 * @hba: per adapter instance
809 */
810static inline void
811ufshcd_reset_intr_aggr(struct ufs_hba *hba)
812{
813	ufshcd_writel(hba, INT_AGGR_ENABLE |
814		      INT_AGGR_COUNTER_AND_TIMER_RESET,
815		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
816}
817
818/**
819 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
820 * @hba: per adapter instance
821 * @cnt: Interrupt aggregation counter threshold
822 * @tmout: Interrupt aggregation timeout value
823 */
824static inline void
825ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
826{
827	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
828		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
829		      INT_AGGR_TIMEOUT_VAL(tmout),
830		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
831}
832
833/**
834 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
835 * @hba: per adapter instance
836 */
837static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
838{
839	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
840}
841
842/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers
 *			Setting the run-stop registers to 1 indicates to the
 *			host controller that it can process requests
846 * @hba: per adapter instance
847 */
848static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
849{
850	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
851		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
852	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
853		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
854}
855
856/**
857 * ufshcd_hba_start - Start controller initialization sequence
858 * @hba: per adapter instance
859 */
860static inline void ufshcd_hba_start(struct ufs_hba *hba)
861{
862	u32 val = CONTROLLER_ENABLE;
863
864	if (ufshcd_crypto_enable(hba))
865		val |= CRYPTO_GENERAL_ENABLE;
866
867	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
868}
869
870/**
871 * ufshcd_is_hba_active - Get controller state
872 * @hba: per adapter instance
873 *
874 * Returns false if controller is active, true otherwise
875 */
876static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
877{
	return !(ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE);
880}
881
882u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
883{
884	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
885	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
886	    (hba->ufs_version == UFSHCI_VERSION_11))
887		return UFS_UNIPRO_VER_1_41;
888	else
889		return UFS_UNIPRO_VER_1_6;
890}
891EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
892
893static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
894{
895	/*
896	 * If both host and device support UniPro ver1.6 or later, PA layer
897	 * parameters tuning happens during link startup itself.
898	 *
899	 * We can manually tune PA layer parameters if either host or device
900	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
901	 * logic simple, we will only do manual tuning if local unipro version
902	 * doesn't support ver1.6 or later.
903	 */
	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
908}
909
910/**
911 * ufshcd_set_clk_freq - set UFS controller clock frequencies
912 * @hba: per adapter instance
 * @scale_up: If true, set the maximum possible frequency; otherwise set the minimum frequency
914 *
915 * Returns 0 if successful
916 * Returns < 0 for any other errors
917 */
918static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
919{
920	int ret = 0;
921	struct ufs_clk_info *clki;
922	struct list_head *head = &hba->clk_list_head;
923
924	if (list_empty(head))
925		goto out;
926
927	list_for_each_entry(clki, head, list) {
928		if (!IS_ERR_OR_NULL(clki->clk)) {
929			if (scale_up && clki->max_freq) {
930				if (clki->curr_freq == clki->max_freq)
931					continue;
932
933				ret = clk_set_rate(clki->clk, clki->max_freq);
934				if (ret) {
935					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
936						__func__, clki->name,
937						clki->max_freq, ret);
938					break;
939				}
940				trace_ufshcd_clk_scaling(dev_name(hba->dev),
941						"scaled up", clki->name,
942						clki->curr_freq,
943						clki->max_freq);
944
945				clki->curr_freq = clki->max_freq;
946
947			} else if (!scale_up && clki->min_freq) {
948				if (clki->curr_freq == clki->min_freq)
949					continue;
950
951				ret = clk_set_rate(clki->clk, clki->min_freq);
952				if (ret) {
953					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
954						__func__, clki->name,
955						clki->min_freq, ret);
956					break;
957				}
958				trace_ufshcd_clk_scaling(dev_name(hba->dev),
959						"scaled down", clki->name,
960						clki->curr_freq,
961						clki->min_freq);
962				clki->curr_freq = clki->min_freq;
963			}
964		}
965		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
966				clki->name, clk_get_rate(clki->clk));
967	}
968
969out:
970	return ret;
971}
972
973/**
974 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
975 * @hba: per adapter instance
976 * @scale_up: True if scaling up and false if scaling down
977 *
978 * Returns 0 if successful
979 * Returns < 0 for any other errors
980 */
981static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
982{
983	int ret = 0;
984	ktime_t start = ktime_get();
985
986	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
987	if (ret)
988		goto out;
989
990	ret = ufshcd_set_clk_freq(hba, scale_up);
991	if (ret)
992		goto out;
993
994	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
995	if (ret)
996		ufshcd_set_clk_freq(hba, !scale_up);
997
998out:
999	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1000			(scale_up ? "up" : "down"),
1001			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1002	return ret;
1003}
1004
1005/**
1006 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1007 * @hba: per adapter instance
1008 * @scale_up: True if scaling up and false if scaling down
1009 *
1010 * Returns true if scaling is required, false otherwise.
1011 */
1012static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1013					       bool scale_up)
1014{
1015	struct ufs_clk_info *clki;
1016	struct list_head *head = &hba->clk_list_head;
1017
1018	if (list_empty(head))
1019		return false;
1020
1021	list_for_each_entry(clki, head, list) {
1022		if (!IS_ERR_OR_NULL(clki->clk)) {
1023			if (scale_up && clki->max_freq) {
1024				if (clki->curr_freq == clki->max_freq)
1025					continue;
1026				return true;
1027			} else if (!scale_up && clki->min_freq) {
1028				if (clki->curr_freq == clki->min_freq)
1029					continue;
1030				return true;
1031			}
1032		}
1033	}
1034
1035	return false;
1036}
1037
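/**
 * ufshcd_wait_for_doorbell_clr - wait for the task and transfer doorbells to clear
 * @hba: per adapter instance
 * @wait_timeout_us: maximum time to wait, in microseconds
 *
 * Returns 0 once both doorbell registers read back as zero, or -EBUSY if the
 * host is not operational or the timeout expires first.
 */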
1038static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1039					u64 wait_timeout_us)
1040{
1041	unsigned long flags;
1042	int ret = 0;
1043	u32 tm_doorbell;
1044	u32 tr_doorbell;
1045	bool timeout = false, do_last_check = false;
1046	ktime_t start;
1047
1048	ufshcd_hold(hba, false);
1049	spin_lock_irqsave(hba->host->host_lock, flags);
1050	/*
1051	 * Wait for all the outstanding tasks/transfer requests.
1052	 * Verify by checking the doorbell registers are clear.
1053	 */
1054	start = ktime_get();
1055	do {
1056		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1057			ret = -EBUSY;
1058			goto out;
1059		}
1060
1061		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1062		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1063		if (!tm_doorbell && !tr_doorbell) {
1064			timeout = false;
1065			break;
1066		} else if (do_last_check) {
1067			break;
1068		}
1069
1070		spin_unlock_irqrestore(hba->host->host_lock, flags);
1071		schedule();
1072		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1073		    wait_timeout_us) {
1074			timeout = true;
1075			/*
			 * We might have been scheduled out for a long time,
			 * so do one final check of the doorbells before
			 * giving up.
1079			 */
1080			do_last_check = true;
1081		}
1082		spin_lock_irqsave(hba->host->host_lock, flags);
1083	} while (tm_doorbell || tr_doorbell);
1084
1085	if (timeout) {
1086		dev_err(hba->dev,
			"%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1088			__func__, tm_doorbell, tr_doorbell);
1089		ret = -EBUSY;
1090	}
1091out:
1092	spin_unlock_irqrestore(hba->host->host_lock, flags);
1093	ufshcd_release(hba);
1094	return ret;
1095}
1096
1097/**
1098 * ufshcd_scale_gear - scale up/down UFS gear
1099 * @hba: per adapter instance
1100 * @scale_up: True for scaling up gear and false for scaling down
1101 *
1102 * Returns 0 for success,
1103 * Returns -EBUSY if scaling can't happen at this time
1104 * Returns non-zero for any other errors
1105 */
1106static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1107{
1108	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
1109	int ret = 0;
1110	struct ufs_pa_layer_attr new_pwr_info;
1111
1112	if (scale_up) {
1113		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1114		       sizeof(struct ufs_pa_layer_attr));
1115	} else {
1116		memcpy(&new_pwr_info, &hba->pwr_info,
1117		       sizeof(struct ufs_pa_layer_attr));
1118
1119		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1120		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1121			/* save the current power mode */
1122			memcpy(&hba->clk_scaling.saved_pwr_info.info,
1123				&hba->pwr_info,
1124				sizeof(struct ufs_pa_layer_attr));
1125
1126			/* scale down gear */
1127			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1128			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1129		}
1130	}
1131
	/* check if the power mode needs to be changed */
1133	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1134	if (ret)
1135		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1136			__func__, ret,
1137			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1138			new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1139
1140	return ret;
1141}
1142
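/*
 * Quiesce the host before changing clock frequency or gear: block new SCSI
 * requests, take the clock scaling lock and wait for the doorbells to drain.
 * On failure the lock is released and requests are unblocked again.
 */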
1143static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1144{
1145	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
1146	int ret = 0;
1147	/*
1148	 * make sure that there are no outstanding requests when
1149	 * clock scaling is in progress
1150	 */
1151	ufshcd_scsi_block_requests(hba);
1152	down_write(&hba->clk_scaling_lock);
1153	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1154		ret = -EBUSY;
1155		up_write(&hba->clk_scaling_lock);
1156		ufshcd_scsi_unblock_requests(hba);
1157	}
1158
1159	return ret;
1160}
1161
1162static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1163{
1164	up_write(&hba->clk_scaling_lock);
1165	ufshcd_scsi_unblock_requests(hba);
1166}
1167
1168/**
1169 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1170 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
1172 *
1173 * Returns 0 for success,
1174 * Returns -EBUSY if scaling can't happen at this time
1175 * Returns non-zero for any other errors
1176 */
1177static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1178{
1179	int ret = 0;
1180
1181	/* let's not get into low power until clock scaling is completed */
1182	ufshcd_hold(hba, false);
1183
1184	ret = ufshcd_clock_scaling_prepare(hba);
1185	if (ret)
1186		goto out;
1187
1188	/* scale down the gear before scaling down clocks */
1189	if (!scale_up) {
1190		ret = ufshcd_scale_gear(hba, false);
1191		if (ret)
1192			goto out_unprepare;
1193	}
1194
1195	ret = ufshcd_scale_clks(hba, scale_up);
1196	if (ret) {
1197		if (!scale_up)
1198			ufshcd_scale_gear(hba, true);
1199		goto out_unprepare;
1200	}
1201
1202	/* scale up the gear after scaling up clocks */
1203	if (scale_up) {
1204		ret = ufshcd_scale_gear(hba, true);
1205		if (ret) {
1206			ufshcd_scale_clks(hba, false);
1207			goto out_unprepare;
1208		}
1209	}
1210
1211	/* Enable Write Booster if we have scaled up else disable it */
1212	up_write(&hba->clk_scaling_lock);
1213	ufshcd_wb_ctrl(hba, scale_up);
1214	down_write(&hba->clk_scaling_lock);
1215
1216out_unprepare:
1217	ufshcd_clock_scaling_unprepare(hba);
1218out:
1219	ufshcd_release(hba);
1220	return ret;
1221}
1222
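/*
 * Deferred suspend/resume of devfreq-based clock scaling: the is_suspended
 * flag is updated under the host lock, while devfreq itself is stopped or
 * restarted only after the lock has been dropped.
 */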
1223static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1224{
1225	struct ufs_hba *hba = container_of(work, struct ufs_hba,
1226					   clk_scaling.suspend_work);
1227	unsigned long irq_flags;
1228
1229	spin_lock_irqsave(hba->host->host_lock, irq_flags);
1230	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1231		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1232		return;
1233	}
1234	hba->clk_scaling.is_suspended = true;
1235	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1236
1237	__ufshcd_suspend_clkscaling(hba);
1238}
1239
1240static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1241{
1242	struct ufs_hba *hba = container_of(work, struct ufs_hba,
1243					   clk_scaling.resume_work);
1244	unsigned long irq_flags;
1245
1246	spin_lock_irqsave(hba->host->host_lock, irq_flags);
1247	if (!hba->clk_scaling.is_suspended) {
1248		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1249		return;
1250	}
1251	hba->clk_scaling.is_suspended = false;
1252	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1253
1254	devfreq_resume_device(hba->devfreq);
1255}
1256
1257static int ufshcd_devfreq_target(struct device *dev,
1258				unsigned long *freq, u32 flags)
1259{
1260	int ret = 0;
1261	struct ufs_hba *hba = dev_get_drvdata(dev);
1262	ktime_t start;
1263	bool scale_up, sched_clk_scaling_suspend_work = false;
1264	struct list_head *clk_list = &hba->clk_list_head;
1265	struct ufs_clk_info *clki;
1266	unsigned long irq_flags;
1267
1268	if (!ufshcd_is_clkscaling_supported(hba))
1269		return -EINVAL;
1270
1271	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1272	/* Override with the closest supported frequency */
1273	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1274	spin_lock_irqsave(hba->host->host_lock, irq_flags);
1275	if (ufshcd_eh_in_progress(hba)) {
1276		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1277		return 0;
1278	}
1279
1280	if (!hba->clk_scaling.active_reqs)
1281		sched_clk_scaling_suspend_work = true;
1282
1283	if (list_empty(clk_list)) {
1284		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1285		goto out;
1286	}
1287
1288	/* Decide based on the rounded-off frequency and update */
	scale_up = *freq == clki->max_freq;
1290	if (!scale_up)
1291		*freq = clki->min_freq;
1292	/* Update the frequency */
1293	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1294		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1295		ret = 0;
1296		goto out; /* no state change required */
1297	}
1298	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1299
1300	pm_runtime_get_noresume(hba->dev);
1301	if (!pm_runtime_active(hba->dev)) {
1302		pm_runtime_put_noidle(hba->dev);
1303		ret = -EAGAIN;
1304		goto out;
1305	}
1306	start = ktime_get();
1307	ret = ufshcd_devfreq_scale(hba, scale_up);
1308	pm_runtime_put(hba->dev);
1309
1310	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1311		(scale_up ? "up" : "down"),
1312		ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1313
1314out:
1315	if (sched_clk_scaling_suspend_work)
1316		queue_work(hba->clk_scaling.workq,
1317			   &hba->clk_scaling.suspend_work);
1318
1319	return ret;
1320}
1321
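/* blk_mq_tagset_busy_iter() callback: count every request that is in flight */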
1322static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1323{
1324	int *busy = priv;
1325
1326	WARN_ON_ONCE(reserved);
1327	(*busy)++;
1328	return false;
1329}
1330
1331/* Whether or not any tag is in use by a request that is in progress. */
1332static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1333{
1334	struct request_queue *q = hba->cmd_queue;
1335	int busy = 0;
1336
1337	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1338	return busy;
1339}
1340
1341static int ufshcd_devfreq_get_dev_status(struct device *dev,
1342		struct devfreq_dev_status *stat)
1343{
1344	struct ufs_hba *hba = dev_get_drvdata(dev);
1345	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1346	unsigned long flags;
1347	struct list_head *clk_list = &hba->clk_list_head;
1348	struct ufs_clk_info *clki;
1349	ktime_t curr_t;
1350
1351	if (!ufshcd_is_clkscaling_supported(hba))
1352		return -EINVAL;
1353
1354	memset(stat, 0, sizeof(*stat));
1355
1356	spin_lock_irqsave(hba->host->host_lock, flags);
1357	curr_t = ktime_get();
1358	if (!scaling->window_start_t)
1359		goto start_window;
1360
1361	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1362	/*
	 * If the current frequency is 0, the ondemand governor assumes that no
	 * initial frequency has been set and always requests the maximum
	 * frequency.
1366	 */
1367	stat->current_frequency = clki->curr_freq;
1368	if (scaling->is_busy_started)
1369		scaling->tot_busy_t += ktime_us_delta(curr_t,
1370				scaling->busy_start_t);
1371
1372	stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1373	stat->busy_time = scaling->tot_busy_t;
1374start_window:
1375	scaling->window_start_t = curr_t;
1376	scaling->tot_busy_t = 0;
1377
1378	if (hba->outstanding_reqs) {
1379		scaling->busy_start_t = curr_t;
1380		scaling->is_busy_started = true;
1381	} else {
1382		scaling->busy_start_t = 0;
1383		scaling->is_busy_started = false;
1384	}
1385	spin_unlock_irqrestore(hba->host->host_lock, flags);
1386	return 0;
1387}
1388
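/*
 * Register this host with devfreq using the simple_ondemand governor. The
 * minimum and maximum frequencies of the first clock in the list are exported
 * as the two OPPs that devfreq can choose between.
 */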
1389static int ufshcd_devfreq_init(struct ufs_hba *hba)
1390{
1391	struct list_head *clk_list = &hba->clk_list_head;
1392	struct ufs_clk_info *clki;
1393	struct devfreq *devfreq;
1394	int ret;
1395
1396	/* Skip devfreq if we don't have any clocks in the list */
1397	if (list_empty(clk_list))
1398		return 0;
1399
1400	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1401	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1402	dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1403
1404	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1405					 &hba->vps->ondemand_data);
1406	devfreq = devfreq_add_device(hba->dev,
1407			&hba->vps->devfreq_profile,
1408			DEVFREQ_GOV_SIMPLE_ONDEMAND,
1409			&hba->vps->ondemand_data);
1410	if (IS_ERR(devfreq)) {
1411		ret = PTR_ERR(devfreq);
1412		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1413
1414		dev_pm_opp_remove(hba->dev, clki->min_freq);
1415		dev_pm_opp_remove(hba->dev, clki->max_freq);
1416		return ret;
1417	}
1418
1419	hba->devfreq = devfreq;
1420
1421	return 0;
1422}
1423
1424static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1425{
1426	struct list_head *clk_list = &hba->clk_list_head;
1427	struct ufs_clk_info *clki;
1428
1429	if (!hba->devfreq)
1430		return;
1431
1432	devfreq_remove_device(hba->devfreq);
1433	hba->devfreq = NULL;
1434
1435	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1436	dev_pm_opp_remove(hba->dev, clki->min_freq);
1437	dev_pm_opp_remove(hba->dev, clki->max_freq);
1438}
1439
1440static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1441{
1442	unsigned long flags;
1443
1444	devfreq_suspend_device(hba->devfreq);
1445	spin_lock_irqsave(hba->host->host_lock, flags);
1446	hba->clk_scaling.window_start_t = 0;
1447	spin_unlock_irqrestore(hba->host->host_lock, flags);
1448}
1449
1450static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1451{
1452	unsigned long flags;
1453	bool suspend = false;
1454
1455	if (!ufshcd_is_clkscaling_supported(hba))
1456		return;
1457
1458	spin_lock_irqsave(hba->host->host_lock, flags);
1459	if (!hba->clk_scaling.is_suspended) {
1460		suspend = true;
1461		hba->clk_scaling.is_suspended = true;
1462	}
1463	spin_unlock_irqrestore(hba->host->host_lock, flags);
1464
1465	if (suspend)
1466		__ufshcd_suspend_clkscaling(hba);
1467}
1468
1469static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1470{
1471	unsigned long flags;
1472	bool resume = false;
1473
1474	if (!ufshcd_is_clkscaling_supported(hba))
1475		return;
1476
1477	spin_lock_irqsave(hba->host->host_lock, flags);
1478	if (hba->clk_scaling.is_suspended) {
1479		resume = true;
1480		hba->clk_scaling.is_suspended = false;
1481	}
1482	spin_unlock_irqrestore(hba->host->host_lock, flags);
1483
1484	if (resume)
1485		devfreq_resume_device(hba->devfreq);
1486}
1487
1488static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1489		struct device_attribute *attr, char *buf)
1490{
1491	struct ufs_hba *hba = dev_get_drvdata(dev);
1492
1493	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1494}
1495
1496static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1497		struct device_attribute *attr, const char *buf, size_t count)
1498{
1499	struct ufs_hba *hba = dev_get_drvdata(dev);
1500	u32 value;
1501	int err;
1502
1503	if (kstrtou32(buf, 0, &value))
1504		return -EINVAL;
1505
1506	value = !!value;
1507	if (value == hba->clk_scaling.is_allowed)
1508		goto out;
1509
1510	pm_runtime_get_sync(hba->dev);
1511	ufshcd_hold(hba, false);
1512
1513	cancel_work_sync(&hba->clk_scaling.suspend_work);
1514	cancel_work_sync(&hba->clk_scaling.resume_work);
1515
1516	hba->clk_scaling.is_allowed = value;
1517
1518	if (value) {
1519		ufshcd_resume_clkscaling(hba);
1520	} else {
1521		ufshcd_suspend_clkscaling(hba);
1522		err = ufshcd_devfreq_scale(hba, true);
1523		if (err)
1524			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1525					__func__, err);
1526	}
1527
1528	ufshcd_release(hba);
1529	pm_runtime_put_sync(hba->dev);
1530out:
1531	return count;
1532}
1533
1534static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1535{
1536	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1537	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1538	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1539	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1540	hba->clk_scaling.enable_attr.attr.mode = 0644;
1541	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1542		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1543}
1544
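/**
 * ufshcd_ungate_work - turn the clocks back on and bring the link out of hibern8
 * @work: pointer to the clk_gating.ungate_work member of struct ufs_hba
 *
 * Counterpart of ufshcd_gate_work(); once the clocks and the link are active
 * again, SCSI requests blocked by ufshcd_hold() are released.
 */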
1545static void ufshcd_ungate_work(struct work_struct *work)
1546{
1547	int ret;
1548	unsigned long flags;
1549	struct ufs_hba *hba = container_of(work, struct ufs_hba,
1550			clk_gating.ungate_work);
1551
1552	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1553
1554	spin_lock_irqsave(hba->host->host_lock, flags);
1555	if (hba->clk_gating.state == CLKS_ON) {
1556		spin_unlock_irqrestore(hba->host->host_lock, flags);
1557		goto unblock_reqs;
1558	}
1559
1560	spin_unlock_irqrestore(hba->host->host_lock, flags);
1561	ufshcd_setup_clocks(hba, true);
1562
1563	ufshcd_enable_irq(hba);
1564
1565	/* Exit from hibern8 */
1566	if (ufshcd_can_hibern8_during_gating(hba)) {
1567		/* Prevent gating in this path */
1568		hba->clk_gating.is_suspended = true;
1569		if (ufshcd_is_link_hibern8(hba)) {
1570			ret = ufshcd_uic_hibern8_exit(hba);
1571			if (ret)
1572				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1573					__func__, ret);
1574			else
1575				ufshcd_set_link_active(hba);
1576		}
1577		hba->clk_gating.is_suspended = false;
1578	}
1579unblock_reqs:
1580	ufshcd_scsi_unblock_requests(hba);
1581}
1582
1583/**
1584 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1585 * Also, exit from hibern8 mode and set the link as active.
1586 * @hba: per adapter instance
1587 * @async: This indicates whether caller should ungate clocks asynchronously.
1588 */
1589int ufshcd_hold(struct ufs_hba *hba, bool async)
1590{
1591	int rc = 0;
1592	bool flush_result;
1593	unsigned long flags;
1594
1595	if (!ufshcd_is_clkgating_allowed(hba))
1596		goto out;
1597	spin_lock_irqsave(hba->host->host_lock, flags);
1598	hba->clk_gating.active_reqs++;
1599
1600start:
1601	switch (hba->clk_gating.state) {
1602	case CLKS_ON:
1603		/*
1604		 * Wait for the ungate work to complete if in progress.
1605		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
1607		 * during clock gating.
1608		 * Make sure we exit hibern8 state also in addition to
1609		 * clocks being ON.
1610		 */
1611		if (ufshcd_can_hibern8_during_gating(hba) &&
1612		    ufshcd_is_link_hibern8(hba)) {
1613			if (async) {
1614				rc = -EAGAIN;
1615				hba->clk_gating.active_reqs--;
1616				break;
1617			}
1618			spin_unlock_irqrestore(hba->host->host_lock, flags);
1619			flush_result = flush_work(&hba->clk_gating.ungate_work);
1620			if (hba->clk_gating.is_suspended && !flush_result)
1621				goto out;
1622			spin_lock_irqsave(hba->host->host_lock, flags);
1623			goto start;
1624		}
1625		break;
1626	case REQ_CLKS_OFF:
1627		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1628			hba->clk_gating.state = CLKS_ON;
1629			trace_ufshcd_clk_gating(dev_name(hba->dev),
1630						hba->clk_gating.state);
1631			break;
1632		}
1633		/*
1634		 * If we are here, it means gating work is either done or
1635		 * currently running. Hence, fall through to cancel gating
1636		 * work and to enable clocks.
1637		 */
1638		fallthrough;
1639	case CLKS_OFF:
1640		hba->clk_gating.state = REQ_CLKS_ON;
1641		trace_ufshcd_clk_gating(dev_name(hba->dev),
1642					hba->clk_gating.state);
1643		if (queue_work(hba->clk_gating.clk_gating_workq,
1644			       &hba->clk_gating.ungate_work))
1645			ufshcd_scsi_block_requests(hba);
1646		/*
1647		 * fall through to check if we should wait for this
1648		 * work to be done or not.
1649		 */
1650		fallthrough;
1651	case REQ_CLKS_ON:
1652		if (async) {
1653			rc = -EAGAIN;
1654			hba->clk_gating.active_reqs--;
1655			break;
1656		}
1657
1658		spin_unlock_irqrestore(hba->host->host_lock, flags);
1659		flush_work(&hba->clk_gating.ungate_work);
1660		/* Make sure state is CLKS_ON before returning */
1661		spin_lock_irqsave(hba->host->host_lock, flags);
1662		goto start;
1663	default:
1664		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1665				__func__, hba->clk_gating.state);
1666		break;
1667	}
1668	spin_unlock_irqrestore(hba->host->host_lock, flags);
1669out:
1670	return rc;
1671}
1672EXPORT_SYMBOL_GPL(ufshcd_hold);
1673
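/**
 * ufshcd_gate_work - gate (turn off) the UFS clocks after the idle delay
 * @work: pointer to the work member embedded in clk_gating.gate_work
 *
 * Bails out if new activity has arrived in the meantime; otherwise puts the
 * link into hibern8 (when allowed), disables interrupts and switches the
 * clocks off.
 */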
1674static void ufshcd_gate_work(struct work_struct *work)
1675{
1676	struct ufs_hba *hba = container_of(work, struct ufs_hba,
1677			clk_gating.gate_work.work);
1678	unsigned long flags;
1679	int ret;
1680
1681	spin_lock_irqsave(hba->host->host_lock, flags);
1682	/*
	 * If this work is being cancelled, the gating state will already have
	 * been marked REQ_CLKS_ON. In that case save time by skipping the
	 * gating work and exit after setting the clock state to CLKS_ON.
1687	 */
1688	if (hba->clk_gating.is_suspended ||
1689		(hba->clk_gating.state != REQ_CLKS_OFF)) {
1690		hba->clk_gating.state = CLKS_ON;
1691		trace_ufshcd_clk_gating(dev_name(hba->dev),
1692					hba->clk_gating.state);
1693		goto rel_lock;
1694	}
1695
1696	if (hba->clk_gating.active_reqs
1697		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1698		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1699		|| hba->active_uic_cmd || hba->uic_async_done)
1700		goto rel_lock;
1701
1702	spin_unlock_irqrestore(hba->host->host_lock, flags);
1703
1704	/* put the link into hibern8 mode before turning off clocks */
1705	if (ufshcd_can_hibern8_during_gating(hba)) {
1706		ret = ufshcd_uic_hibern8_enter(hba);
1707		if (ret) {
1708			hba->clk_gating.state = CLKS_ON;
1709			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1710					__func__, ret);
1711			trace_ufshcd_clk_gating(dev_name(hba->dev),
1712						hba->clk_gating.state);
1713			goto out;
1714		}
1715		ufshcd_set_link_hibern8(hba);
1716	}
1717
1718	ufshcd_disable_irq(hba);
1719
1720	ufshcd_setup_clocks(hba, false);
1721
1722	/*
	 * If this work is being cancelled, the gating state will already have
	 * been marked REQ_CLKS_ON. In that case keep the state as REQ_CLKS_ON,
	 * which still implies that the clocks are off and a request to turn
	 * them on is pending. This keeps the state machine intact and prevents
	 * the cancel work from running multiple times when new requests arrive
	 * before the current cancel work is done.
1730	 */
1731	spin_lock_irqsave(hba->host->host_lock, flags);
1732	if (hba->clk_gating.state == REQ_CLKS_OFF) {
1733		hba->clk_gating.state = CLKS_OFF;
1734		trace_ufshcd_clk_gating(dev_name(hba->dev),
1735					hba->clk_gating.state);
1736	}
1737rel_lock:
1738	spin_unlock_irqrestore(hba->host->host_lock, flags);
1739out:
1740	return;
1741}
1742
1743/* host lock must be held before calling this variant */
1744static void __ufshcd_release(struct ufs_hba *hba)
1745{
1746	if (!ufshcd_is_clkgating_allowed(hba))
1747		return;
1748
1749	hba->clk_gating.active_reqs--;
1750
1751	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1752	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1753	    hba->outstanding_tasks ||
1754	    hba->active_uic_cmd || hba->uic_async_done ||
1755	    hba->clk_gating.state == CLKS_OFF)
1756		return;
1757
1758	hba->clk_gating.state = REQ_CLKS_OFF;
1759	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1760	queue_delayed_work(hba->clk_gating.clk_gating_workq,
1761			   &hba->clk_gating.gate_work,
1762			   msecs_to_jiffies(hba->clk_gating.delay_ms));
1763}
1764
1765void ufshcd_release(struct ufs_hba *hba)
1766{
1767	unsigned long flags;
1768
1769	spin_lock_irqsave(hba->host->host_lock, flags);
1770	__ufshcd_release(hba);
1771	spin_unlock_irqrestore(hba->host->host_lock, flags);
1772}
1773EXPORT_SYMBOL_GPL(ufshcd_release);
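/*
 * Typical usage (sketch): callers that need the controller clocks running
 * bracket the access with a hold/release pair:
 *
 *	ufshcd_hold(hba, false);
 *	... access host registers or issue commands ...
 *	ufshcd_release(hba);
 *
 * With @async set, ufshcd_hold() may return -EAGAIN instead of waiting for
 * the ungate work, in which case the caller must retry later.
 */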
1774
1775static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1776		struct device_attribute *attr, char *buf)
1777{
1778	struct ufs_hba *hba = dev_get_drvdata(dev);
1779
1780	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1781}
1782
1783static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1784		struct device_attribute *attr, const char *buf, size_t count)
1785{
1786	struct ufs_hba *hba = dev_get_drvdata(dev);
1787	unsigned long flags, value;
1788
1789	if (kstrtoul(buf, 0, &value))
1790		return -EINVAL;
1791
1792	spin_lock_irqsave(hba->host->host_lock, flags);
1793	hba->clk_gating.delay_ms = value;
1794	spin_unlock_irqrestore(hba->host->host_lock, flags);
1795	return count;
1796}
1797
1798static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1799		struct device_attribute *attr, char *buf)
1800{
1801	struct ufs_hba *hba = dev_get_drvdata(dev);
1802
1803	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1804}
1805
1806static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1807		struct device_attribute *attr, const char *buf, size_t count)
1808{
1809	struct ufs_hba *hba = dev_get_drvdata(dev);
1810	unsigned long flags;
1811	u32 value;
1812
1813	if (kstrtou32(buf, 0, &value))
1814		return -EINVAL;
1815
1816	value = !!value;
1817	if (value == hba->clk_gating.is_enabled)
1818		goto out;
1819
1820	if (value) {
1821		ufshcd_release(hba);
1822	} else {
1823		spin_lock_irqsave(hba->host->host_lock, flags);
1824		hba->clk_gating.active_reqs++;
1825		spin_unlock_irqrestore(hba->host->host_lock, flags);
1826	}
1827
1828	hba->clk_gating.is_enabled = value;
1829out:
1830	return count;
1831}
1832
1833static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1834{
1835	char wq_name[sizeof("ufs_clkscaling_00")];
1836
1837	if (!ufshcd_is_clkscaling_supported(hba))
1838		return;
1839
1840	INIT_WORK(&hba->clk_scaling.suspend_work,
1841		  ufshcd_clk_scaling_suspend_work);
1842	INIT_WORK(&hba->clk_scaling.resume_work,
1843		  ufshcd_clk_scaling_resume_work);
1844
1845	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1846		 hba->host->host_no);
1847	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1848
1849	ufshcd_clkscaling_init_sysfs(hba);
1850}
1851
1852static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1853{
1854	if (!ufshcd_is_clkscaling_supported(hba))
1855		return;
1856
1857	destroy_workqueue(hba->clk_scaling.workq);
1858	ufshcd_devfreq_remove(hba);
1859}
1860
1861static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1862{
1863	char wq_name[sizeof("ufs_clk_gating_00")];
1864
1865	if (!ufshcd_is_clkgating_allowed(hba))
1866		return;
1867
1868	hba->clk_gating.state = CLKS_ON;
1869
1870	hba->clk_gating.delay_ms = 150;
1871	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1872	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1873
1874	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1875		 hba->host->host_no);
1876	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1877							   WQ_MEM_RECLAIM);
1878
1879	hba->clk_gating.is_enabled = true;
1880
1881	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1882	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1883	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1884	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1885	hba->clk_gating.delay_attr.attr.mode = 0644;
1886	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1887		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1888
1889	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1890	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1891	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1892	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1893	hba->clk_gating.enable_attr.attr.mode = 0644;
1894	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1895		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1896}
1897
1898static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1899{
1900	if (!ufshcd_is_clkgating_allowed(hba))
1901		return;
1902	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1903	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1904	cancel_work_sync(&hba->clk_gating.ungate_work);
1905	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1906	destroy_workqueue(hba->clk_gating.clk_gating_workq);
1907}
1908
1909/* Must be called with host lock acquired */
1910static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1911{
1912	bool queue_resume_work = false;
1913	ktime_t curr_t = ktime_get();
1914
1915	if (!ufshcd_is_clkscaling_supported(hba))
1916		return;
1917
1918	if (!hba->clk_scaling.active_reqs++)
1919		queue_resume_work = true;
1920
1921	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1922		return;
1923
1924	if (queue_resume_work)
1925		queue_work(hba->clk_scaling.workq,
1926			   &hba->clk_scaling.resume_work);
1927
1928	if (!hba->clk_scaling.window_start_t) {
1929		hba->clk_scaling.window_start_t = curr_t;
1930		hba->clk_scaling.tot_busy_t = 0;
1931		hba->clk_scaling.is_busy_started = false;
1932	}
1933
1934	if (!hba->clk_scaling.is_busy_started) {
1935		hba->clk_scaling.busy_start_t = curr_t;
1936		hba->clk_scaling.is_busy_started = true;
1937	}
1938}
1939
1940static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1941{
1942	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1943
1944	if (!ufshcd_is_clkscaling_supported(hba))
1945		return;
1946
1947	if (!hba->outstanding_reqs && scaling->is_busy_started) {
1948		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1949					scaling->busy_start_t));
1950		scaling->busy_start_t = 0;
1951		scaling->is_busy_started = false;
1952	}
1953}

1954/**
1955 * ufshcd_send_command - Send SCSI or device management commands
1956 * @hba: per adapter instance
1957 * @task_tag: Task tag of the command
1958 */
1959static inline
1960void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1961{
1962	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
1963
1964	lrbp->issue_time_stamp = ktime_get();
1965	lrbp->compl_time_stamp = ktime_set(0, 0);
1966	ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
1967	ufshcd_add_command_trace(hba, task_tag, "send");
1968	ufshcd_clk_scaling_start_busy(hba);
1969	__set_bit(task_tag, &hba->outstanding_reqs);
1970	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1971	/* Make sure that doorbell is committed immediately */
1972	wmb();
1973}
1974
1975/**
1976 * ufshcd_copy_sense_data - Copy sense data in case of check condition
1977 * @lrbp: pointer to local reference block
1978 */
1979static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1980{
1981	int len;
1982	if (lrbp->sense_buffer &&
1983	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1984		int len_to_copy;
1985
1986		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1987		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
1988
1989		memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1990		       len_to_copy);
1991	}
1992}
1993
1994/**
1995 * ufshcd_copy_query_response() - Copy the Query Response and the data
1996 * descriptor
1997 * @hba: per adapter instance
1998 * @lrbp: pointer to local reference block
1999 */
2000static
2001int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2002{
2003	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2004
2005	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2006
2007	/* Get the descriptor */
2008	if (hba->dev_cmd.query.descriptor &&
2009	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2010		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2011				GENERAL_UPIU_REQUEST_SIZE;
2012		u16 resp_len;
2013		u16 buf_len;
2014
2015		/* data segment length */
2016		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2017						MASK_QUERY_DATA_SEG_LEN;
2018		buf_len = be16_to_cpu(
2019				hba->dev_cmd.query.request.upiu_req.length);
2020		if (likely(buf_len >= resp_len)) {
2021			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2022		} else {
2023			dev_warn(hba->dev,
2024				 "%s: rsp size %d is bigger than buffer size %d",
2025				 __func__, resp_len, buf_len);
2026			return -EINVAL;
2027		}
2028	}
2029
2030	return 0;
2031}
2032
2033/**
2034 * ufshcd_hba_capabilities - Read controller capabilities
2035 * @hba: per adapter instance
2036 *
2037 * Return: 0 on success, negative on error.
2038 */
2039static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2040{
2041	int err;
2042
2043	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2044
2045	/* nutrs and nutmrs are 0 based values */
2046	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2047	hba->nutmrs =
2048	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2049
2050	/* Read crypto capabilities */
2051	err = ufshcd_hba_init_crypto_capabilities(hba);
2052	if (err)
2053		dev_err(hba->dev, "crypto setup failed\n");
2054
2055	return err;
2056}
2057
2058/**
2059 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2060 *                            to accept UIC commands
2061 * @hba: per adapter instance
2062 * Return true if the controller is ready to accept UIC commands, else false
2063 */
2064static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2065{
2066	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2067		return true;
2068	else
2069		return false;
2070}
2071
2072/**
2073 * ufshcd_get_upmcrs - Get the power mode change request status
2074 * @hba: Pointer to adapter instance
2075 *
2076 * This function reads the UPMCRS field of the HCS register.
2077 * Returns the value of the UPMCRS field.
2078 */
2079static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2080{
2081	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2082}
2083
2084/**
2085 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2086 * @hba: per adapter instance
2087 * @uic_cmd: UIC command
2088 *
2089 * Mutex must be held.
2090 */
2091static inline void
2092ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2093{
2094	WARN_ON(hba->active_uic_cmd);
2095
2096	hba->active_uic_cmd = uic_cmd;
2097
2098	/* Write Args */
2099	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2100	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2101	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2102
2103	ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
2104
2105	/* Write UIC Cmd */
2106	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2107		      REG_UIC_COMMAND);
2108}
2109
2110/**
2111 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2112 * @hba: per adapter instance
2113 * @uic_cmd: UIC command
2114 *
2115 * Must be called with mutex held.
2116 * Returns 0 only if success.
2117 */
2118static int
2119ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2120{
2121	int ret;
2122	unsigned long flags;
2123
2124	if (wait_for_completion_timeout(&uic_cmd->done,
2125					msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2126		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2127	} else {
2128		ret = -ETIMEDOUT;
2129		dev_err(hba->dev,
2130			"uic cmd 0x%x with arg3 0x%x completion timeout\n",
2131			uic_cmd->command, uic_cmd->argument3);
2132
2133		if (!uic_cmd->cmd_active) {
2134			dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2135				__func__);
2136			ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2137		}
2138	}
2139
2140	spin_lock_irqsave(hba->host->host_lock, flags);
2141	hba->active_uic_cmd = NULL;
2142	spin_unlock_irqrestore(hba->host->host_lock, flags);
2143
2144	return ret;
2145}
2146
2147/**
2148 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2149 * @hba: per adapter instance
2150 * @uic_cmd: UIC command
2151 * @completion: initialize the completion only if this is set to true
2152 *
2153 * Identical to ufshcd_send_uic_cmd() except for the locking: it must be
2154 * called with the uic_cmd_mutex held and the host_lock locked.
2155 * Returns 0 only if success.
2156 */
2157static int
2158__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2159		      bool completion)
2160{
2161	if (!ufshcd_ready_for_uic_cmd(hba)) {
2162		dev_err(hba->dev,
2163			"Controller not ready to accept UIC commands\n");
2164		return -EIO;
2165	}
2166
2167	if (completion)
2168		init_completion(&uic_cmd->done);
2169
2170	uic_cmd->cmd_active = 1;
2171	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2172
2173	return 0;
2174}
2175
2176/**
2177 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2178 * @hba: per adapter instance
2179 * @uic_cmd: UIC command
2180 *
2181 * Returns 0 only if success.
2182 */
2183int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2184{
2185	int ret;
2186	unsigned long flags;
2187
2188	ufshcd_hold(hba, false);
2189	mutex_lock(&hba->uic_cmd_mutex);
2190	ufshcd_add_delay_before_dme_cmd(hba);
2191
2192	spin_lock_irqsave(hba->host->host_lock, flags);
2193	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2194	spin_unlock_irqrestore(hba->host->host_lock, flags);
2195	if (!ret)
2196		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2197
2198	mutex_unlock(&hba->uic_cmd_mutex);
2199
2200	ufshcd_release(hba);
2201	return ret;
2202}
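
/*
 * Illustrative sketch (not part of the driver): issuing a DME_GET for a
 * local UniPro attribute via ufshcd_send_uic_cmd(); the result is
 * returned in argument3 ("lanes" is a hypothetical local variable):
 *
 *	struct uic_command uic_cmd = {0};
 *
 *	uic_cmd.command = UIC_CMD_DME_GET;
 *	uic_cmd.argument1 = UIC_ARG_MIB(PA_AVAILTXDATALANES);
 *	if (!ufshcd_send_uic_cmd(hba, &uic_cmd))
 *		lanes = uic_cmd.argument3;
 *
 * This is essentially what the ufshcd_dme_get_attr() helper wraps with
 * retries.
 */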
2203
2204/**
2205 * ufshcd_map_sg - Map scatter-gather list to prdt
2206 * @hba: per adapter instance
2207 * @lrbp: pointer to local reference block
2208 *
2209 * Returns 0 in case of success, non-zero value in case of failure
2210 */
2211static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2212{
2213	struct ufshcd_sg_entry *prd_table;
2214	struct scatterlist *sg;
2215	struct scsi_cmnd *cmd;
2216	int sg_segments;
2217	int i;
2218
2219	cmd = lrbp->cmd;
2220	sg_segments = scsi_dma_map(cmd);
2221	if (sg_segments < 0)
2222		return sg_segments;
2223
2224	if (sg_segments) {
2225
2226		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2227			lrbp->utr_descriptor_ptr->prd_table_length =
2228				cpu_to_le16((sg_segments *
2229					sizeof(struct ufshcd_sg_entry)));
2230		else
2231			lrbp->utr_descriptor_ptr->prd_table_length =
2232				cpu_to_le16((u16) (sg_segments));
2233
2234		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2235
2236		scsi_for_each_sg(cmd, sg, sg_segments, i) {
2237			prd_table[i].size  =
2238				cpu_to_le32(((u32) sg_dma_len(sg))-1);
2239			prd_table[i].base_addr =
2240				cpu_to_le32(lower_32_bits(sg->dma_address));
2241			prd_table[i].upper_addr =
2242				cpu_to_le32(upper_32_bits(sg->dma_address));
2243			prd_table[i].reserved = 0;
2244		}
2245	} else {
2246		lrbp->utr_descriptor_ptr->prd_table_length = 0;
2247	}
2248
2249	return 0;
2250}
2251
2252/**
2253 * ufshcd_enable_intr - enable interrupts
2254 * @hba: per adapter instance
2255 * @intrs: interrupt bits
2256 */
2257static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2258{
2259	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2260
2261	if (hba->ufs_version == UFSHCI_VERSION_10) {
2262		u32 rw;
2263		rw = set & INTERRUPT_MASK_RW_VER_10;
2264		set = rw | ((set ^ intrs) & intrs);
2265	} else {
2266		set |= intrs;
2267	}
2268
2269	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2270}
2271
2272/**
2273 * ufshcd_disable_intr - disable interrupts
2274 * @hba: per adapter instance
2275 * @intrs: interrupt bits
2276 */
2277static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2278{
2279	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2280
2281	if (hba->ufs_version == UFSHCI_VERSION_10) {
2282		u32 rw;
2283		rw = (set & INTERRUPT_MASK_RW_VER_10) &
2284			~(intrs & INTERRUPT_MASK_RW_VER_10);
2285		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2286
2287	} else {
2288		set &= ~intrs;
2289	}
2290
2291	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2292}
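
/*
 * Illustrative sketch (not part of the driver): at initialization the core
 * enables the aggregate mask it handles, and individual sources can be
 * masked later, e.g.
 *
 *	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 *	...
 *	ufshcd_disable_intr(hba, UTP_TRANSFER_REQ_COMPL);
 */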
2293
2294/**
2295 * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
2296 * according to the request
2297 * @lrbp: pointer to local reference block
2298 * @upiu_flags: flags required in the header
2299 * @cmd_dir: requests data direction
2300 */
2301static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2302			u8 *upiu_flags, enum dma_data_direction cmd_dir)
2303{
2304	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2305	u32 data_direction;
2306	u32 dword_0;
2307	u32 dword_1 = 0;
2308	u32 dword_3 = 0;
2309
2310	if (cmd_dir == DMA_FROM_DEVICE) {
2311		data_direction = UTP_DEVICE_TO_HOST;
2312		*upiu_flags = UPIU_CMD_FLAGS_READ;
2313	} else if (cmd_dir == DMA_TO_DEVICE) {
2314		data_direction = UTP_HOST_TO_DEVICE;
2315		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
2316	} else {
2317		data_direction = UTP_NO_DATA_TRANSFER;
2318		*upiu_flags = UPIU_CMD_FLAGS_NONE;
2319	}
2320
2321	dword_0 = data_direction | (lrbp->command_type
2322				<< UPIU_COMMAND_TYPE_OFFSET);
2323	if (lrbp->intr_cmd)
2324		dword_0 |= UTP_REQ_DESC_INT_CMD;
2325
2326	/* Prepare crypto related dwords */
2327	ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2328
2329	/* Transfer request descriptor header fields */
2330	req_desc->header.dword_0 = cpu_to_le32(dword_0);
2331	req_desc->header.dword_1 = cpu_to_le32(dword_1);
2332	/*
2333	 * assigning invalid value for command status. Controller
2334	 * updates OCS on command completion, with the command
2335	 * status
2336	 */
2337	req_desc->header.dword_2 =
2338		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2339	req_desc->header.dword_3 = cpu_to_le32(dword_3);
2340
2341	req_desc->prd_table_length = 0;
2342}
2343
2344/**
2345 * ufshcd_prepare_utp_scsi_cmd_upiu() - fill the utp_transfer_req_desc
2346 * for SCSI commands
2347 * @lrbp: local reference block pointer
2348 * @upiu_flags: flags
2349 */
2350static
2351void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2352{
2353	struct scsi_cmnd *cmd = lrbp->cmd;
2354	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2355	unsigned short cdb_len;
2356
2357	/* command descriptor fields */
2358	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2359				UPIU_TRANSACTION_COMMAND, upiu_flags,
2360				lrbp->lun, lrbp->task_tag);
2361	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2362				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2363
2364	/* Total EHS length and Data segment length will be zero */
2365	ucd_req_ptr->header.dword_2 = 0;
2366
2367	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2368
2369	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2370	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2371	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2372
2373	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2374}
2375
2376/**
2377 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc
2378 * for query requests
2379 * @hba: UFS hba
2380 * @lrbp: local reference block pointer
2381 * @upiu_flags: flags
2382 */
2383static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2384				struct ufshcd_lrb *lrbp, u8 upiu_flags)
2385{
2386	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2387	struct ufs_query *query = &hba->dev_cmd.query;
2388	u16 len = be16_to_cpu(query->request.upiu_req.length);
2389
2390	/* Query request header */
2391	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2392			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2393			lrbp->lun, lrbp->task_tag);
2394	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2395			0, query->request.query_func, 0, 0);
2396
2397	/* Data segment length only need for WRITE_DESC */
2398	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2399		ucd_req_ptr->header.dword_2 =
2400			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2401	else
2402		ucd_req_ptr->header.dword_2 = 0;
2403
2404	/* Copy the Query Request buffer as is */
2405	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2406			QUERY_OSF_SIZE);
2407
2408	/* Copy the Descriptor */
2409	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2410		memcpy(ucd_req_ptr + 1, query->descriptor, len);
2411
2412	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2413}
2414
2415static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2416{
2417	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2418
2419	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2420
2421	/* command descriptor fields */
2422	ucd_req_ptr->header.dword_0 =
2423		UPIU_HEADER_DWORD(
2424			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2425	/* clear rest of the fields of basic header */
2426	ucd_req_ptr->header.dword_1 = 0;
2427	ucd_req_ptr->header.dword_2 = 0;
2428
2429	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2430}
2431
2432/**
2433 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
2434 *			     for Device Management Purposes
2435 * @hba: per adapter instance
2436 * @lrbp: pointer to local reference block
2437 */
2438static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2439				      struct ufshcd_lrb *lrbp)
2440{
2441	u8 upiu_flags;
2442	int ret = 0;
2443
2444	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2445	    (hba->ufs_version == UFSHCI_VERSION_11))
2446		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2447	else
2448		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2449
2450	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2451	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2452		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2453	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2454		ufshcd_prepare_utp_nop_upiu(lrbp);
2455	else
2456		ret = -EINVAL;
2457
2458	return ret;
2459}
2460
2461/**
2462 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
2463 *			   for SCSI Purposes
2464 * @hba: per adapter instance
2465 * @lrbp: pointer to local reference block
2466 */
2467static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2468{
2469	u8 upiu_flags;
2470	int ret = 0;
2471
2472	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2473	    (hba->ufs_version == UFSHCI_VERSION_11))
2474		lrbp->command_type = UTP_CMD_TYPE_SCSI;
2475	else
2476		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2477
2478	if (likely(lrbp->cmd)) {
2479		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2480						lrbp->cmd->sc_data_direction);
2481		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2482	} else {
2483		ret = -EINVAL;
2484	}
2485
2486	return ret;
2487}
2488
2489/**
2490 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2491 * @upiu_wlun_id: UPIU W-LUN id
2492 *
2493 * Returns SCSI W-LUN id
2494 */
2495static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2496{
2497	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2498}
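
/*
 * For example, assuming the usual constant values (UFS_UPIU_UFS_DEVICE_WLUN
 * = 0xD0, UFS_UPIU_WLUN_ID = 0x80, SCSI_W_LUN_BASE = 0xc100), the UFS
 * device well known LU maps to SCSI W-LUN (0xD0 & ~0x80) | 0xc100 = 0xc150.
 */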
2499
2500static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2501{
2502	struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2503	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2504	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2505		i * sizeof(struct utp_transfer_cmd_desc);
2506	u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2507				       response_upiu);
2508	u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2509
2510	lrb->utr_descriptor_ptr = utrdlp + i;
2511	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2512		i * sizeof(struct utp_transfer_req_desc);
2513	lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2514	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2515	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2516	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2517	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2518	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2519}
2520
2521/**
2522 * ufshcd_queuecommand - main entry point for SCSI requests
2523 * @host: SCSI host pointer
2524 * @cmd: command from SCSI Midlayer
2525 *
2526 * Returns 0 for success, non-zero in case of failure
2527 */
2528static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2529{
2530	struct ufshcd_lrb *lrbp;
2531	struct ufs_hba *hba;
2532	unsigned long flags;
2533	int tag;
2534	int err = 0;
2535
2536	hba = shost_priv(host);
2537
2538	tag = cmd->request->tag;
2539	if (!ufshcd_valid_tag(hba, tag)) {
2540		dev_err(hba->dev,
2541			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2542			__func__, tag, cmd, cmd->request);
2543		BUG();
2544	}
2545
2546	if (!down_read_trylock(&hba->clk_scaling_lock))
2547		return SCSI_MLQUEUE_HOST_BUSY;
2548
2549	hba->req_abort_count = 0;
2550
2551	err = ufshcd_hold(hba, true);
2552	if (err) {
2553		err = SCSI_MLQUEUE_HOST_BUSY;
2554		goto out;
2555	}
2556	WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2557		(hba->clk_gating.state != CLKS_ON));
2558
2559	lrbp = &hba->lrb[tag];
2560
2561	WARN_ON(lrbp->cmd);
2562	lrbp->cmd = cmd;
2563	lrbp->sense_bufflen = UFS_SENSE_SIZE;
2564	lrbp->sense_buffer = cmd->sense_buffer;
2565	lrbp->task_tag = tag;
2566	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2567	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2568
2569	ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
2570
2571	lrbp->req_abort_skip = false;
2572
2573	ufshcd_comp_scsi_upiu(hba, lrbp);
2574
2575	err = ufshcd_map_sg(hba, lrbp);
2576	if (err) {
2577		lrbp->cmd = NULL;
2578		ufshcd_release(hba);
2579		goto out;
2580	}
2581	/* Make sure descriptors are ready before ringing the doorbell */
2582	wmb();
2583
2584	spin_lock_irqsave(hba->host->host_lock, flags);
2585	switch (hba->ufshcd_state) {
2586	case UFSHCD_STATE_OPERATIONAL:
2587	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2588		break;
2589	case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2590		/*
2591		 * pm_runtime_get_sync() is used at error handling preparation
2592		 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2593		 * PM ops, it can never finish if we let the SCSI layer keep
2594		 * retrying it, which leaves the err handler stuck forever.
2595		 * Nor can we let the scsi cmd pass through, because UFS is in
2596		 * a bad state and the cmd may eventually time out, blocking
2597		 * the err handler for too long. So just fail the scsi cmd
2598		 * sent from PM ops; the err handler can recover the PM error anyway.
2599		 */
2600		if (hba->pm_op_in_progress) {
2601			hba->force_reset = true;
2602			set_host_byte(cmd, DID_BAD_TARGET);
2603			goto out_compl_cmd;
2604		}
2605		fallthrough;
2606	case UFSHCD_STATE_RESET:
2607		err = SCSI_MLQUEUE_HOST_BUSY;
2608		goto out_compl_cmd;
2609	case UFSHCD_STATE_ERROR:
2610		set_host_byte(cmd, DID_ERROR);
2611		goto out_compl_cmd;
2612	default:
2613		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2614				__func__, hba->ufshcd_state);
2615		set_host_byte(cmd, DID_BAD_TARGET);
2616		goto out_compl_cmd;
2617	}
2618	ufshcd_send_command(hba, tag);
2619	spin_unlock_irqrestore(hba->host->host_lock, flags);
2620	goto out;
2621
2622out_compl_cmd:
2623	scsi_dma_unmap(lrbp->cmd);
2624	lrbp->cmd = NULL;
2625	spin_unlock_irqrestore(hba->host->host_lock, flags);
2626	ufshcd_release(hba);
2627	if (!err)
2628		cmd->scsi_done(cmd);
2629out:
2630	up_read(&hba->clk_scaling_lock);
2631	return err;
2632}
2633
2634static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2635		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2636{
2637	lrbp->cmd = NULL;
2638	lrbp->sense_bufflen = 0;
2639	lrbp->sense_buffer = NULL;
2640	lrbp->task_tag = tag;
2641	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2642	lrbp->intr_cmd = true; /* No interrupt aggregation */
2643	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2644	hba->dev_cmd.type = cmd_type;
2645
2646	return ufshcd_compose_devman_upiu(hba, lrbp);
2647}
2648
2649static int
2650ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2651{
2652	int err = 0;
2653	unsigned long flags;
2654	u32 mask = 1 << tag;
2655
2656	/* clear outstanding transaction before retry */
2657	spin_lock_irqsave(hba->host->host_lock, flags);
2658	ufshcd_utrl_clear(hba, tag);
2659	spin_unlock_irqrestore(hba->host->host_lock, flags);
2660
2661	/*
2662	 * wait for h/w to clear the corresponding bit in the doorbell.
2663	 * max. wait is 1 sec.
2664	 */
2665	err = ufshcd_wait_for_register(hba,
2666			REG_UTP_TRANSFER_REQ_DOOR_BELL,
2667			mask, ~mask, 1000, 1000);
2668
2669	return err;
2670}
2671
2672static int
2673ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2674{
2675	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2676
2677	/* Get the UPIU response */
2678	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2679				UPIU_RSP_CODE_OFFSET;
2680	return query_res->response;
2681}
2682
2683/**
2684 * ufshcd_dev_cmd_completion() - handles device management command responses
2685 * @hba: per adapter instance
2686 * @lrbp: pointer to local reference block
2687 */
2688static int
2689ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2690{
2691	int resp;
2692	int err = 0;
2693
2694	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2695	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2696
2697	switch (resp) {
2698	case UPIU_TRANSACTION_NOP_IN:
2699		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2700			err = -EINVAL;
2701			dev_err(hba->dev, "%s: unexpected response %x\n",
2702					__func__, resp);
2703		}
2704		break;
2705	case UPIU_TRANSACTION_QUERY_RSP:
2706		err = ufshcd_check_query_response(hba, lrbp);
2707		if (!err)
2708			err = ufshcd_copy_query_response(hba, lrbp);
2709		break;
2710	case UPIU_TRANSACTION_REJECT_UPIU:
2711		/* TODO: handle Reject UPIU Response */
2712		err = -EPERM;
2713		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2714				__func__);
2715		break;
2716	default:
2717		err = -EINVAL;
2718		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2719				__func__, resp);
2720		break;
2721	}
2722
2723	return err;
2724}
2725
2726static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2727		struct ufshcd_lrb *lrbp, int max_timeout)
2728{
2729	int err = 0;
2730	unsigned long time_left;
2731	unsigned long flags;
2732
2733	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2734			msecs_to_jiffies(max_timeout));
2735
2736	/* Make sure descriptors are ready before ringing the doorbell */
2737	wmb();
2738	spin_lock_irqsave(hba->host->host_lock, flags);
2739	hba->dev_cmd.complete = NULL;
2740	if (likely(time_left)) {
2741		err = ufshcd_get_tr_ocs(lrbp);
2742		if (!err)
2743			err = ufshcd_dev_cmd_completion(hba, lrbp);
2744	}
2745	spin_unlock_irqrestore(hba->host->host_lock, flags);
2746
2747	if (!time_left) {
2748		err = -ETIMEDOUT;
2749		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2750			__func__, lrbp->task_tag);
2751		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2752			/* successfully cleared the command, retry if needed */
2753			err = -EAGAIN;
2754		/*
2755		 * in case of an error, after clearing the doorbell,
2756		 * we also need to clear the outstanding_request
2757		 * field in hba
2758		 */
2759		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2760	}
2761
2762	return err;
2763}
2764
2765/**
2766 * ufshcd_exec_dev_cmd - API for sending device management requests
2767 * @hba: UFS hba
2768 * @cmd_type: specifies the type (NOP, Query...)
2769 * @timeout: timeout in milliseconds
2770 *
2771 * NOTE: Since there is only one available tag for device management commands,
2772 * the caller is expected to hold the hba->dev_cmd.lock mutex.
2773 */
2774static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2775		enum dev_cmd_type cmd_type, int timeout)
2776{
2777	struct request_queue *q = hba->cmd_queue;
2778	struct request *req;
2779	struct ufshcd_lrb *lrbp;
2780	int err;
2781	int tag;
2782	struct completion wait;
2783	unsigned long flags;
2784
2785	down_read(&hba->clk_scaling_lock);
2786
2787	/*
2788	 * Get a free slot; blk_get_request() sleeps if no tag is available.
2789	 * Even though it may sleep for a while, the maximum wait time is
2790	 * bounded by the SCSI request timeout.
2791	 */
2792	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2793	if (IS_ERR(req)) {
2794		err = PTR_ERR(req);
2795		goto out_unlock;
2796	}
2797	tag = req->tag;
2798	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2799	/* Set the timeout such that the SCSI error handler is not activated. */
2800	req->timeout = msecs_to_jiffies(2 * timeout);
2801	blk_mq_start_request(req);
2802
2803	init_completion(&wait);
2804	lrbp = &hba->lrb[tag];
2805	WARN_ON(lrbp->cmd);
2806	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2807	if (unlikely(err))
2808		goto out_put_tag;
2809
2810	hba->dev_cmd.complete = &wait;
2811
2812	ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2813	/* Make sure descriptors are ready before ringing the doorbell */
2814	wmb();
2815	spin_lock_irqsave(hba->host->host_lock, flags);
2816	ufshcd_send_command(hba, tag);
2817	spin_unlock_irqrestore(hba->host->host_lock, flags);
2818
2819	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2820
2821	ufshcd_add_query_upiu_trace(hba, tag,
2822			err ? "query_complete_err" : "query_complete");
2823
2824out_put_tag:
2825	blk_put_request(req);
2826out_unlock:
2827	up_read(&hba->clk_scaling_lock);
2828	return err;
2829}
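
/*
 * Illustrative sketch (not part of the driver): sending a NOP OUT to check
 * that the device responds, roughly what the NOP verification elsewhere in
 * this driver does. The dev_cmd.lock must be held around the call:
 *
 *	ufshcd_hold(hba, false);
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);
 */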
2830
2831/**
2832 * ufshcd_init_query() - init the query response and request parameters
2833 * @hba: per-adapter instance
2834 * @request: address of the request pointer to be initialized
2835 * @response: address of the response pointer to be initialized
2836 * @opcode: operation to perform
2837 * @idn: flag idn to access
2838 * @index: LU number to access
2839 * @selector: query/flag/descriptor further identification
2840 */
2841static inline void ufshcd_init_query(struct ufs_hba *hba,
2842		struct ufs_query_req **request, struct ufs_query_res **response,
2843		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2844{
2845	*request = &hba->dev_cmd.query.request;
2846	*response = &hba->dev_cmd.query.response;
2847	memset(*request, 0, sizeof(struct ufs_query_req));
2848	memset(*response, 0, sizeof(struct ufs_query_res));
2849	(*request)->upiu_req.opcode = opcode;
2850	(*request)->upiu_req.idn = idn;
2851	(*request)->upiu_req.index = index;
2852	(*request)->upiu_req.selector = selector;
2853}
2854
2855static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2856	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
2857{
2858	int ret;
2859	int retries;
2860
2861	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2862		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
2863		if (ret)
2864			dev_dbg(hba->dev,
2865				"%s: failed with error %d, retries %d\n",
2866				__func__, ret, retries);
2867		else
2868			break;
2869	}
2870
2871	if (ret)
2872		dev_err(hba->dev,
2873			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2874			__func__, opcode, idn, ret, retries);
2875	return ret;
2876}
2877
2878/**
2879 * ufshcd_query_flag() - API function for sending flag query requests
2880 * @hba: per-adapter instance
2881 * @opcode: flag query to perform
2882 * @idn: flag idn to access
2883 * @index: flag index to access
2884 * @flag_res: the flag value after the query request completes
2885 *
2886 * Returns 0 for success, non-zero in case of failure
2887 */
2888int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2889			enum flag_idn idn, u8 index, bool *flag_res)
2890{
2891	struct ufs_query_req *request = NULL;
2892	struct ufs_query_res *response = NULL;
2893	int err, selector = 0;
2894	int timeout = QUERY_REQ_TIMEOUT;
2895
2896	BUG_ON(!hba);
2897
2898	ufshcd_hold(hba, false);
2899	mutex_lock(&hba->dev_cmd.lock);
2900	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2901			selector);
2902
2903	switch (opcode) {
2904	case UPIU_QUERY_OPCODE_SET_FLAG:
2905	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2906	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2907		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2908		break;
2909	case UPIU_QUERY_OPCODE_READ_FLAG:
2910		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2911		if (!flag_res) {
2912			/* No dummy reads */
2913			dev_err(hba->dev, "%s: Invalid argument for read request\n",
2914					__func__);
2915			err = -EINVAL;
2916			goto out_unlock;
2917		}
2918		break;
2919	default:
2920		dev_err(hba->dev,
2921			"%s: Expected query flag opcode but got = %d\n",
2922			__func__, opcode);
2923		err = -EINVAL;
2924		goto out_unlock;
2925	}
2926
2927	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2928
2929	if (err) {
2930		dev_err(hba->dev,
2931			"%s: Sending flag query for idn %d failed, err = %d\n",
2932			__func__, idn, err);
2933		goto out_unlock;
2934	}
2935
2936	if (flag_res)
2937		*flag_res = (be32_to_cpu(response->upiu_res.value) &
2938				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2939
2940out_unlock:
2941	mutex_unlock(&hba->dev_cmd.lock);
2942	ufshcd_release(hba);
2943	return err;
2944}
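
/*
 * Illustrative sketch (not part of the driver): polling the fDeviceInit
 * flag after device initialization could look like:
 *
 *	bool flag_res;
 *	int err;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 *	if (!err && !flag_res)
 *		dev_dbg(hba->dev, "fDeviceInit is cleared\n");
 */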
2945
2946/**
2947 * ufshcd_query_attr - API function for sending attribute requests
2948 * @hba: per-adapter instance
2949 * @opcode: attribute opcode
2950 * @idn: attribute idn to access
2951 * @index: index field
2952 * @selector: selector field
2953 * @attr_val: the attribute value after the query request completes
2954 *
2955 * Returns 0 for success, non-zero in case of failure
2956*/
2957int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2958		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2959{
2960	struct ufs_query_req *request = NULL;
2961	struct ufs_query_res *response = NULL;
2962	int err;
2963
2964	BUG_ON(!hba);
2965
2966	ufshcd_hold(hba, false);
2967	if (!attr_val) {
2968		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2969				__func__, opcode);
2970		err = -EINVAL;
2971		goto out;
2972	}
2973
2974	mutex_lock(&hba->dev_cmd.lock);
2975	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2976			selector);
2977
2978	switch (opcode) {
2979	case UPIU_QUERY_OPCODE_WRITE_ATTR:
2980		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2981		request->upiu_req.value = cpu_to_be32(*attr_val);
2982		break;
2983	case UPIU_QUERY_OPCODE_READ_ATTR:
2984		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2985		break;
2986	default:
2987		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2988				__func__, opcode);
2989		err = -EINVAL;
2990		goto out_unlock;
2991	}
2992
2993	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2994
2995	if (err) {
2996		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2997				__func__, opcode, idn, index, err);
2998		goto out_unlock;
2999	}
3000
3001	*attr_val = be32_to_cpu(response->upiu_res.value);
3002
3003out_unlock:
3004	mutex_unlock(&hba->dev_cmd.lock);
3005out:
3006	ufshcd_release(hba);
3007	return err;
3008}
3009
3010/**
3011 * ufshcd_query_attr_retry() - API function for sending query
3012 * attribute with retries
3013 * @hba: per-adapter instance
3014 * @opcode: attribute opcode
3015 * @idn: attribute idn to access
3016 * @index: index field
3017 * @selector: selector field
3018 * @attr_val: the attribute value after the query request
3019 * completes
3020 *
3021 * Returns 0 for success, non-zero in case of failure
3022*/
3023static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3024	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3025	u32 *attr_val)
3026{
3027	int ret = 0;
3028	u32 retries;
3029
3030	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3031		ret = ufshcd_query_attr(hba, opcode, idn, index,
3032						selector, attr_val);
3033		if (ret)
3034			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3035				__func__, ret, retries);
3036		else
3037			break;
3038	}
3039
3040	if (ret)
3041		dev_err(hba->dev,
3042			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
3043			__func__, idn, ret, QUERY_REQ_RETRIES);
3044	return ret;
3045}
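
/*
 * Illustrative sketch (not part of the driver): reading the current active
 * ICC level attribute with retries:
 *
 *	u32 icc_level;
 *	int err;
 *
 *	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				      QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 *				      &icc_level);
 */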
3046
3047static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3048			enum query_opcode opcode, enum desc_idn idn, u8 index,
3049			u8 selector, u8 *desc_buf, int *buf_len)
3050{
3051	struct ufs_query_req *request = NULL;
3052	struct ufs_query_res *response = NULL;
3053	int err;
3054
3055	BUG_ON(!hba);
3056
3057	ufshcd_hold(hba, false);
3058	if (!desc_buf) {
3059		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3060				__func__, opcode);
3061		err = -EINVAL;
3062		goto out;
3063	}
3064
3065	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3066		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3067				__func__, *buf_len);
3068		err = -EINVAL;
3069		goto out;
3070	}
3071
3072	mutex_lock(&hba->dev_cmd.lock);
3073	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3074			selector);
3075	hba->dev_cmd.query.descriptor = desc_buf;
3076	request->upiu_req.length = cpu_to_be16(*buf_len);
3077
3078	switch (opcode) {
3079	case UPIU_QUERY_OPCODE_WRITE_DESC:
3080		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3081		break;
3082	case UPIU_QUERY_OPCODE_READ_DESC:
3083		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3084		break;
3085	default:
3086		dev_err(hba->dev,
3087				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
3088				__func__, opcode);
3089		err = -EINVAL;
3090		goto out_unlock;
3091	}
3092
3093	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3094
3095	if (err) {
3096		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3097				__func__, opcode, idn, index, err);
3098		goto out_unlock;
3099	}
3100
3101	*buf_len = be16_to_cpu(response->upiu_res.length);
3102
3103out_unlock:
3104	hba->dev_cmd.query.descriptor = NULL;
3105	mutex_unlock(&hba->dev_cmd.lock);
3106out:
3107	ufshcd_release(hba);
3108	return err;
3109}
3110
3111/**
3112 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3113 * @hba: per-adapter instance
3114 * @opcode: attribute opcode
3115 * @idn: attribute idn to access
3116 * @index: index field
3117 * @selector: selector field
3118 * @desc_buf: the buffer that contains the descriptor
3119 * @buf_len: length parameter passed to the device
3120 *
3121 * Returns 0 for success, non-zero in case of failure.
3122 * The buf_len parameter will contain, on return, the length parameter
3123 * received on the response.
3124 */
3125int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3126				  enum query_opcode opcode,
3127				  enum desc_idn idn, u8 index,
3128				  u8 selector,
3129				  u8 *desc_buf, int *buf_len)
3130{
3131	int err;
3132	int retries;
3133
3134	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3135		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3136						selector, desc_buf, buf_len);
3137		if (!err || err == -EINVAL)
3138			break;
3139	}
3140
3141	return err;
3142}
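
/*
 * Illustrative sketch (not part of the driver): reading the Device
 * descriptor into a caller supplied buffer:
 *
 *	u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *	int buf_len = QUERY_DESC_MAX_SIZE;
 *	int err;
 *
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *					    QUERY_DESC_IDN_DEVICE, 0, 0,
 *					    desc_buf, &buf_len);
 *
 * On success, buf_len holds the length reported by the device.
 */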
3143
3144/**
3145 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3146 * @hba: Pointer to adapter instance
3147 * @desc_id: descriptor idn value
3148 * @desc_len: mapped desc length (out)
3149 */
3150void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3151				  int *desc_len)
3152{
3153	if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3154	    desc_id == QUERY_DESC_IDN_RFU_1)
3155		*desc_len = 0;
3156	else
3157		*desc_len = hba->desc_size[desc_id];
3158}
3159EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
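
/*
 * Illustrative sketch (not part of the driver): querying the cached length
 * of the unit descriptor before allocating a read buffer:
 *
 *	int len;
 *
 *	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &len);
 */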
3160
3161static void ufshcd_update_desc_length(struct ufs_hba *hba,
3162				      enum desc_idn desc_id, int desc_index,
3163				      unsigned char desc_len)
3164{
3165	if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3166	    desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3167		/* For UFS 3.1, the normal unit descriptor is 10 bytes larger
3168		 * than the RPMB unit; however, both descriptors share the same
3169		 * desc_idn. To cover both unit descriptors with one length, we
3170		 * choose the normal unit descriptor length based on desc_index.
3171		 */
3172		hba->desc_size[desc_id] = desc_len;
3173}
3174
3175/**
3176 * ufshcd_read_desc_param - read the specified descriptor parameter
3177 * @hba: Pointer to adapter instance
3178 * @desc_id: descriptor idn value
3179 * @desc_index: descriptor index
3180 * @param_offset: offset of the parameter to read
3181 * @param_read_buf: pointer to buffer where parameter would be read
3182 * @param_size: sizeof(param_read_buf)
3183 *
3184 * Return 0 in case of success, non-zero otherwise
3185 */
3186int ufshcd_read_desc_param(struct ufs_hba *hba,
3187			   enum desc_idn desc_id,
3188			   int desc_index,
3189			   u8 param_offset,
3190			   u8 *param_read_buf,
3191			   u8 param_size)
3192{
3193	int ret;
3194	u8 *desc_buf;
3195	int buff_len;
3196	bool is_kmalloc = true;
3197
3198	/* Safety check */
3199	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3200		return -EINVAL;
3201
3202	/* Get the length of descriptor */
3203	ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3204	if (!buff_len) {
3205		dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3206		return -EINVAL;
3207	}
3208
3209	if (param_offset >= buff_len) {
3210		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3211			__func__, param_offset, desc_id, buff_len);
3212		return -EINVAL;
3213	}
3214
3215	/* Check whether we need temp memory */
3216	if (param_offset != 0 || param_size < buff_len) {
3217		desc_buf = kzalloc(buff_len, GFP_KERNEL);
3218		if (!desc_buf)
3219			return -ENOMEM;
3220	} else {
3221		desc_buf = param_read_buf;
3222		is_kmalloc = false;
3223	}
3224
3225	/* Request for full descriptor */
3226	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3227					desc_id, desc_index, 0,
3228					desc_buf, &buff_len);
3229
3230	if (ret) {
3231		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3232			__func__, desc_id, desc_index, param_offset, ret);
3233		goto out;
3234	}
3235
3236	/* Sanity check */
3237	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3238		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3239			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3240		ret = -EINVAL;
3241		goto out;
3242	}
3243
3244	/* Update descriptor length */
3245	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3246	ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
3247
3248	if (is_kmalloc) {
3249		/* Make sure we don't copy more data than available */
3250		if (param_offset >= buff_len)
3251			ret = -EINVAL;
3252		else
3253			memcpy(param_read_buf, &desc_buf[param_offset],
3254			       min_t(u32, param_size, buff_len - param_offset));
3255	}
3256out:
3257	if (is_kmalloc)
3258		kfree(desc_buf);
3259	return ret;
3260}
3261
3262/**
3263 * struct uc_string_id - unicode string
3264 *
3265 * @len: size of this descriptor inclusive
3266 * @type: descriptor type
3267 * @uc: unicode string character
3268 */
3269struct uc_string_id {
3270	u8 len;
3271	u8 type;
3272	wchar_t uc[];
3273} __packed;
3274
3275/* replace non-printable or non-ASCII characters with spaces */
3276static inline char ufshcd_remove_non_printable(u8 ch)
3277{
3278	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3279}
3280
3281/**
3282 * ufshcd_read_string_desc - read string descriptor
3283 * @hba: pointer to adapter instance
3284 * @desc_index: descriptor index
3285 * @buf: pointer to buffer where descriptor would be read,
3286 *       the caller should free the memory.
3287 * @ascii: if true, convert from unicode to a null terminated
3288 *         ascii string.
3289 *
3290 * Return:
3291 * *      string size on success.
3292 * *      -ENOMEM: on allocation failure
3293 * *      -EINVAL: on a wrong parameter
3294 */
3295int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3296			    u8 **buf, bool ascii)
3297{
3298	struct uc_string_id *uc_str;
3299	u8 *str;
3300	int ret;
3301
3302	if (!buf)
3303		return -EINVAL;
3304
3305	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3306	if (!uc_str)
3307		return -ENOMEM;
3308
3309	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3310				     (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3311	if (ret < 0) {
3312		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3313			QUERY_REQ_RETRIES, ret);
3314		str = NULL;
3315		goto out;
3316	}
3317
3318	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3319		dev_dbg(hba->dev, "String Desc is of zero length\n");
3320		str = NULL;
3321		ret = 0;
3322		goto out;
3323	}
3324
3325	if (ascii) {
3326		ssize_t ascii_len;
3327		int i;
3328		/* remove header and divide by 2 to move from UTF16 to UTF8 */
3329		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3330		str = kzalloc(ascii_len, GFP_KERNEL);
3331		if (!str) {
3332			ret = -ENOMEM;
3333			goto out;
3334		}
3335
3336		/*
3337		 * the descriptor contains string in UTF16 format
3338		 * we need to convert to utf-8 so it can be displayed
3339		 */
3340		ret = utf16s_to_utf8s(uc_str->uc,
3341				      uc_str->len - QUERY_DESC_HDR_SIZE,
3342				      UTF16_BIG_ENDIAN, str, ascii_len - 1);
3343
3344		/* replace non-printable or non-ASCII characters with spaces */
3345		for (i = 0; i < ret; i++)
3346			str[i] = ufshcd_remove_non_printable(str[i]);
3347
3348		str[ret++] = '\0';
3349
3350	} else {
3351		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3352		if (!str) {
3353			ret = -ENOMEM;
3354			goto out;
3355		}
3356		ret = uc_str->len;
3357	}
3358out:
3359	*buf = str;
3360	kfree(uc_str);
3361	return ret;
3362}
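
/*
 * Illustrative sketch (not part of the driver): reading a string descriptor
 * (e.g. the product name) in ASCII form; the helper allocates the buffer
 * and the caller must free it. "model_index" stands for the iProductName
 * index read from the device descriptor (hypothetical local variable):
 *
 *	u8 *model = NULL;
 *	int ret;
 *
 *	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
 *	if (ret > 0)
 *		dev_info(hba->dev, "model: %s\n", model);
 *	kfree(model);
 */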
3363
3364/**
3365 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3366 * @hba: Pointer to adapter instance
3367 * @lun: lun id
3368 * @param_offset: offset of the parameter to read
3369 * @param_read_buf: pointer to buffer where parameter would be read
3370 * @param_size: sizeof(param_read_buf)
3371 *
3372 * Return 0 in case of success, non-zero otherwise
3373 */
3374static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3375					      int lun,
3376					      enum unit_desc_param param_offset,
3377					      u8 *param_read_buf,
3378					      u32 param_size)
3379{
3380	/*
3381	 * Unit descriptors are only available for general purpose LUs (LUN id
3382	 * from 0 to 7) and RPMB Well known LU.
3383	 */
3384	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
3385		return -EOPNOTSUPP;
3386
3387	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3388				      param_offset, param_read_buf, param_size);
3389}
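
/*
 * Illustrative sketch (not part of the driver): reading the bLUWriteProtect
 * field of the unit descriptor for a caller supplied LUN:
 *
 *	u8 lun_wp;
 *	int err;
 *
 *	err = ufshcd_read_unit_desc_param(hba, lun,
 *					  UNIT_DESC_PARAM_LU_WR_PROTECT,
 *					  &lun_wp, sizeof(lun_wp));
 */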
3390
3391static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3392{
3393	int err = 0;
3394	u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3395
3396	if (hba->dev_info.wspecversion >= 0x300) {
3397		err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3398				QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3399				&gating_wait);
3400		if (err)
3401			dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3402					 err, gating_wait);
3403
3404		if (gating_wait == 0) {
3405			gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3406			dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3407					 gating_wait);
3408		}
3409
3410		hba->dev_info.clk_gating_wait_us = gating_wait;
3411	}
3412
3413	return err;
3414}
3415
3416/**
3417 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3418 * @hba: per adapter instance
3419 *
3420 * 1. Allocate DMA memory for Command Descriptor array
3421 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3422 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3423 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3424 *	(UTMRDL)
3425 * 4. Allocate memory for local reference block(lrb).
3426 *
3427 * Returns 0 for success, non-zero in case of failure
3428 */
3429static int ufshcd_memory_alloc(struct ufs_hba *hba)
3430{
3431	size_t utmrdl_size, utrdl_size, ucdl_size;
3432
3433	/* Allocate memory for UTP command descriptors */
3434	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3435	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3436						  ucdl_size,
3437						  &hba->ucdl_dma_addr,
3438						  GFP_KERNEL);
3439
3440	/*
3441	 * UFSHCI requires UTP command descriptors to be 128 byte aligned.
3442	 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
3443	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3444	 * be aligned to 128 bytes as well.
3445	 */
3446	if (!hba->ucdl_base_addr ||
3447	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3448		dev_err(hba->dev,
3449			"Command Descriptor Memory allocation failed\n");
3450		goto out;
3451	}
3452
3453	/*
3454	 * Allocate memory for UTP Transfer descriptors
3455	 * UFSHCI requires 1024 byte alignment of UTRD
3456	 */
3457	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3458	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3459						   utrdl_size,
3460						   &hba->utrdl_dma_addr,
3461						   GFP_KERNEL);
3462	if (!hba->utrdl_base_addr ||
3463	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3464		dev_err(hba->dev,
3465			"Transfer Descriptor Memory allocation failed\n");
3466		goto out;
3467	}
3468
3469	/*
3470	 * Allocate memory for UTP Task Management descriptors
3471	 * UFSHCI requires 1024 byte alignment of UTMRD
3472	 */
3473	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3474	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3475						    utmrdl_size,
3476						    &hba->utmrdl_dma_addr,
3477						    GFP_KERNEL);
3478	if (!hba->utmrdl_base_addr ||
3479	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3480		dev_err(hba->dev,
3481		"Task Management Descriptor Memory allocation failed\n");
3482		goto out;
3483	}
3484
3485	/* Allocate memory for local reference block */
3486	hba->lrb = devm_kcalloc(hba->dev,
3487				hba->nutrs, sizeof(struct ufshcd_lrb),
3488				GFP_KERNEL);
3489	if (!hba->lrb) {
3490		dev_err(hba->dev, "LRB Memory allocation failed\n");
3491		goto out;
3492	}
3493	return 0;
3494out:
3495	return -ENOMEM;
3496}
3497
3498/**
3499 * ufshcd_host_memory_configure - configure local reference block with
3500 *				memory offsets
3501 * @hba: per adapter instance
3502 *
3503 * Configure Host memory space
3504 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3505 * address.
3506 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3507 * and PRDT offset.
3508 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3509 * into local reference block.
3510 */
3511static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3512{
3513	struct utp_transfer_req_desc *utrdlp;
3514	dma_addr_t cmd_desc_dma_addr;
3515	dma_addr_t cmd_desc_element_addr;
3516	u16 response_offset;
3517	u16 prdt_offset;
3518	int cmd_desc_size;
3519	int i;
3520
3521	utrdlp = hba->utrdl_base_addr;
3522
3523	response_offset =
3524		offsetof(struct utp_transfer_cmd_desc, response_upiu);
3525	prdt_offset =
3526		offsetof(struct utp_transfer_cmd_desc, prd_table);
3527
3528	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3529	cmd_desc_dma_addr = hba->ucdl_dma_addr;
3530
3531	for (i = 0; i < hba->nutrs; i++) {
3532		/* Configure UTRD with command descriptor base address */
3533		cmd_desc_element_addr =
3534				(cmd_desc_dma_addr + (cmd_desc_size * i));
3535		utrdlp[i].command_desc_base_addr_lo =
3536				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3537		utrdlp[i].command_desc_base_addr_hi =
3538				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3539
3540		/* Response upiu and prdt offset should be in double words */
3541		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3542			utrdlp[i].response_upiu_offset =
3543				cpu_to_le16(response_offset);
3544			utrdlp[i].prd_table_offset =
3545				cpu_to_le16(prdt_offset);
3546			utrdlp[i].response_upiu_length =
3547				cpu_to_le16(ALIGNED_UPIU_SIZE);
3548		} else {
3549			utrdlp[i].response_upiu_offset =
3550				cpu_to_le16(response_offset >> 2);
3551			utrdlp[i].prd_table_offset =
3552				cpu_to_le16(prdt_offset >> 2);
3553			utrdlp[i].response_upiu_length =
3554				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3555		}
3556
3557		ufshcd_init_lrb(hba, &hba->lrb[i], i);
3558	}
3559}
3560
3561/**
3562 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3563 * @hba: per adapter instance
3564 *
3565 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3566 * in order to initialize the Unipro link startup procedure.
3567 * Once the Unipro links are up, the device connected to the controller
3568 * is detected.
3569 *
3570 * Returns 0 on success, non-zero value on failure
3571 */
3572static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3573{
3574	struct uic_command uic_cmd = {0};
3575	int ret;
3576
3577	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3578
3579	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3580	if (ret)
3581		dev_dbg(hba->dev,
3582			"dme-link-startup: error code %d\n", ret);
3583	return ret;
3584}

3585/**
3586 * ufshcd_dme_reset - UIC command for DME_RESET
3587 * @hba: per adapter instance
3588 *
3589 * DME_RESET command is issued in order to reset UniPro stack.
3590 * This function now deals with cold reset.
3591 *
3592 * Returns 0 on success, non-zero value on failure
3593 */
3594static int ufshcd_dme_reset(struct ufs_hba *hba)
3595{
3596	struct uic_command uic_cmd = {0};
3597	int ret;
3598
3599	uic_cmd.command = UIC_CMD_DME_RESET;
3600
3601	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3602	if (ret)
3603		dev_err(hba->dev,
3604			"dme-reset: error code %d\n", ret);
3605
3606	return ret;
3607}
3608
3609/**
3610 * ufshcd_dme_enable - UIC command for DME_ENABLE
3611 * @hba: per adapter instance
3612 *
3613 * DME_ENABLE command is issued in order to enable UniPro stack.
3614 *
3615 * Returns 0 on success, non-zero value on failure
3616 */
3617static int ufshcd_dme_enable(struct ufs_hba *hba)
3618{
3619	struct uic_command uic_cmd = {0};
3620	int ret;
3621
3622	uic_cmd.command = UIC_CMD_DME_ENABLE;
3623
3624	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3625	if (ret)
3626		dev_err(hba->dev,
3627			"dme-enable: error code %d\n", ret);
3628
3629	return ret;
3630}
3631
3632static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3633{
3634	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
3635	unsigned long min_sleep_time_us;
3636
3637	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3638		return;
3639
3640	/*
3641	 * last_dme_cmd_tstamp will be 0 only for 1st call to
3642	 * this function
3643	 */
3644	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3645		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3646	} else {
3647		unsigned long delta =
3648			(unsigned long) ktime_to_us(
3649				ktime_sub(ktime_get(),
3650				hba->last_dme_cmd_tstamp));
3651
3652		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3653			min_sleep_time_us =
3654				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3655		else
3656			return; /* no more delay required */
3657	}
3658
3659	/* allow sleep for extra 50us if needed */
3660	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3661}
3662
3663/**
3664 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3665 * @hba: per adapter instance
3666 * @attr_sel: uic command argument1
3667 * @attr_set: attribute set type as uic command argument2
3668 * @mib_val: setting value as uic command argument3
3669 * @peer: indicate whether peer or local
3670 *
3671 * Returns 0 on success, non-zero value on failure
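 *
 * Callers typically use the ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers,
 * e.g. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx) as done
 * in ufshcd_change_power_mode().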
3672 */
3673int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3674			u8 attr_set, u32 mib_val, u8 peer)
3675{
3676	struct uic_command uic_cmd = {0};
3677	static const char *const action[] = {
3678		"dme-set",
3679		"dme-peer-set"
3680	};
3681	const char *set = action[!!peer];
3682	int ret;
3683	int retries = UFS_UIC_COMMAND_RETRIES;
3684
3685	uic_cmd.command = peer ?
3686		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3687	uic_cmd.argument1 = attr_sel;
3688	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3689	uic_cmd.argument3 = mib_val;
3690
3691	do {
3692		/* for peer attributes we retry upon failure */
3693		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3694		if (ret)
3695			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3696				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3697	} while (ret && peer && --retries);
3698
3699	if (ret)
3700		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3701			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3702			UFS_UIC_COMMAND_RETRIES - retries);
3703
3704	return ret;
3705}
3706EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3707
3708/**
3709 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3710 * @hba: per adapter instance
3711 * @attr_sel: uic command argument1
3712 * @mib_val: the value of the attribute as returned by the UIC command
3713 * @peer: indicate whether peer or local
3714 *
3715 * Returns 0 on success, non-zero value on failure
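 *
 * Callers typically use the ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers,
 * e.g. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx) as
 * done in ufshcd_get_max_pwr_mode().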
3716 */
3717int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3718			u32 *mib_val, u8 peer)
3719{
3720	struct uic_command uic_cmd = {0};
3721	static const char *const action[] = {
3722		"dme-get",
3723		"dme-peer-get"
3724	};
3725	const char *get = action[!!peer];
3726	int ret;
3727	int retries = UFS_UIC_COMMAND_RETRIES;
3728	struct ufs_pa_layer_attr orig_pwr_info;
3729	struct ufs_pa_layer_attr temp_pwr_info;
3730	bool pwr_mode_change = false;
3731
3732	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
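		/*
		 * Such hosts can only access peer attributes while the link is
		 * in an AUTO power mode, so temporarily switch to FASTAUTO or
		 * SLOWAUTO and restore the original power mode afterwards.
		 */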
3733		orig_pwr_info = hba->pwr_info;
3734		temp_pwr_info = orig_pwr_info;
3735
3736		if (orig_pwr_info.pwr_tx == FAST_MODE ||
3737		    orig_pwr_info.pwr_rx == FAST_MODE) {
3738			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3739			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3740			pwr_mode_change = true;
3741		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3742		    orig_pwr_info.pwr_rx == SLOW_MODE) {
3743			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3744			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3745			pwr_mode_change = true;
3746		}
3747		if (pwr_mode_change) {
3748			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3749			if (ret)
3750				goto out;
3751		}
3752	}
3753
3754	uic_cmd.command = peer ?
3755		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3756	uic_cmd.argument1 = attr_sel;
3757
3758	do {
3759		/* for peer attributes we retry upon failure */
3760		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3761		if (ret)
3762			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3763				get, UIC_GET_ATTR_ID(attr_sel), ret);
3764	} while (ret && peer && --retries);
3765
3766	if (ret)
3767		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3768			get, UIC_GET_ATTR_ID(attr_sel),
3769			UFS_UIC_COMMAND_RETRIES - retries);
3770
3771	if (mib_val && !ret)
3772		*mib_val = uic_cmd.argument3;
3773
3774	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3775	    && pwr_mode_change)
3776		ufshcd_change_power_mode(hba, &orig_pwr_info);
3777out:
3778	return ret;
3779}
3780EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3781
3782/**
3783 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3784 * state) and waits for it to take effect.
3785 *
3786 * @hba: per adapter instance
3787 * @cmd: UIC command to execute
3788 *
3789 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3790 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3791 * and device UniPro links, hence their final completion is indicated by
3792 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
3793 * addition to the normal UIC command completion status (UCCS). This function
3794 * only returns after the relevant status bits indicate completion.
3795 *
3796 * Returns 0 on success, non-zero value on failure
3797 */
3798static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3799{
3800	struct completion uic_async_done;
3801	unsigned long flags;
3802	u8 status;
3803	int ret;
3804	bool reenable_intr = false;
3805
3806	mutex_lock(&hba->uic_cmd_mutex);
3807	init_completion(&uic_async_done);
3808	ufshcd_add_delay_before_dme_cmd(hba);
3809
3810	spin_lock_irqsave(hba->host->host_lock, flags);
3811	if (ufshcd_is_link_broken(hba)) {
3812		ret = -ENOLINK;
3813		goto out_unlock;
3814	}
3815	hba->uic_async_done = &uic_async_done;
3816	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3817		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3818		/*
3819		 * Make sure UIC command completion interrupt is disabled before
3820		 * issuing UIC command.
3821		 */
3822		wmb();
3823		reenable_intr = true;
3824	}
3825	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3826	spin_unlock_irqrestore(hba->host->host_lock, flags);
3827	if (ret) {
3828		dev_err(hba->dev,
3829			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3830			cmd->command, cmd->argument3, ret);
3831		goto out;
3832	}
3833
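	/* Wait for the completion that the ISR signals via uic_async_done */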
3834	if (!wait_for_completion_timeout(hba->uic_async_done,
3835					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3836		dev_err(hba->dev,
3837			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3838			cmd->command, cmd->argument3);
3839
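		/*
		 * The interrupt handler may already have marked the command as
		 * completed (cmd_active cleared) even though our wait timed out;
		 * in that case read UPMCRS instead of treating this as a failure.
		 */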
3840		if (!cmd->cmd_active) {
3841			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
3842				__func__);
3843			goto check_upmcrs;
3844		}
3845
3846		ret = -ETIMEDOUT;
3847		goto out;
3848	}
3849
3850check_upmcrs:
3851	status = ufshcd_get_upmcrs(hba);
3852	if (status != PWR_LOCAL) {
3853		dev_err(hba->dev,
3854			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3855			cmd->command, status);
3856		ret = (status != PWR_OK) ? status : -1;
3857	}
3858out:
3859	if (ret) {
3860		ufshcd_print_host_state(hba);
3861		ufshcd_print_pwr_info(hba);
3862		ufshcd_print_host_regs(hba);
3863	}
3864
3865	spin_lock_irqsave(hba->host->host_lock, flags);
3866	hba->active_uic_cmd = NULL;
3867	hba->uic_async_done = NULL;
3868	if (reenable_intr)
3869		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3870	if (ret) {
3871		ufshcd_set_link_broken(hba);
3872		ufshcd_schedule_eh_work(hba);
3873	}
3874out_unlock:
3875	spin_unlock_irqrestore(hba->host->host_lock, flags);
3876	mutex_unlock(&hba->uic_cmd_mutex);
3877
3878	return ret;
3879}
3880
3881/**
3882 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3883 *				using DME_SET primitives.
3884 * @hba: per adapter instance
3885 * @mode: power mode value
3886 *
3887 * Returns 0 on success, non-zero value on failure
3888 */
3889static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3890{
3891	struct uic_command uic_cmd = {0};
3892	int ret;
3893
3894	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3895		ret = ufshcd_dme_set(hba,
3896				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3897		if (ret) {
3898			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3899						__func__, ret);
3900			goto out;
3901		}
3902	}
3903
3904	uic_cmd.command = UIC_CMD_DME_SET;
3905	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3906	uic_cmd.argument3 = mode;
3907	ufshcd_hold(hba, false);
3908	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3909	ufshcd_release(hba);
3910
3911out:
3912	return ret;
3913}
3914
3915int ufshcd_link_recovery(struct ufs_hba *hba)
3916{
3917	int ret;
3918	unsigned long flags;
3919
3920	spin_lock_irqsave(hba->host->host_lock, flags);
3921	hba->ufshcd_state = UFSHCD_STATE_RESET;
3922	ufshcd_set_eh_in_progress(hba);
3923	spin_unlock_irqrestore(hba->host->host_lock, flags);
3924
3925	/* Reset the attached device */
3926	ufshcd_vops_device_reset(hba);
3927
3928	ret = ufshcd_host_reset_and_restore(hba);
3929
3930	spin_lock_irqsave(hba->host->host_lock, flags);
3931	if (ret)
3932		hba->ufshcd_state = UFSHCD_STATE_ERROR;
3933	ufshcd_clear_eh_in_progress(hba);
3934	spin_unlock_irqrestore(hba->host->host_lock, flags);
3935
3936	if (ret)
3937		dev_err(hba->dev, "%s: link recovery failed, err %d",
3938			__func__, ret);
3939
3940	return ret;
3941}
3942EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
3943
3944static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3945{
3946	int ret;
3947	struct uic_command uic_cmd = {0};
3948	ktime_t start = ktime_get();
3949
3950	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3951
3952	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3953	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3954	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3955			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3956
3957	if (ret)
3958		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3959			__func__, ret);
3960	else
3961		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3962								POST_CHANGE);
3963
3964	return ret;
3965}
3966
3967int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3968{
3969	struct uic_command uic_cmd = {0};
3970	int ret;
3971	ktime_t start = ktime_get();
3972
3973	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3974
3975	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3976	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3977	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3978			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3979
3980	if (ret) {
3981		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3982			__func__, ret);
3983	} else {
3984		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3985								POST_CHANGE);
3986		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3987		hba->ufs_stats.hibern8_exit_cnt++;
3988	}
3989
3990	return ret;
3991}
3992EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
3993
3994void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
3995{
3996	unsigned long flags;
3997	bool update = false;
3998
3999	if (!ufshcd_is_auto_hibern8_supported(hba))
4000		return;
4001
4002	spin_lock_irqsave(hba->host->host_lock, flags);
4003	if (hba->ahit != ahit) {
4004		hba->ahit = ahit;
4005		update = true;
4006	}
4007	spin_unlock_irqrestore(hba->host->host_lock, flags);
4008
4009	if (update && !pm_runtime_suspended(hba->dev)) {
4010		pm_runtime_get_sync(hba->dev);
4011		ufshcd_hold(hba, false);
4012		ufshcd_auto_hibern8_enable(hba);
4013		ufshcd_release(hba);
4014		pm_runtime_put(hba->dev);
4015	}
4016}
4017EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4018
4019void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4020{
4021	unsigned long flags;
4022
4023	if (!ufshcd_is_auto_hibern8_supported(hba))
4024		return;
4025
4026	spin_lock_irqsave(hba->host->host_lock, flags);
4027	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4028	spin_unlock_irqrestore(hba->host->host_lock, flags);
4029}
4030
4031/**
4032 * ufshcd_init_pwr_info - setting the POR (power on reset)
4033 * values in hba power info
4034 * @hba: per-adapter instance
4035 */
4036static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4037{
4038	hba->pwr_info.gear_rx = UFS_PWM_G1;
4039	hba->pwr_info.gear_tx = UFS_PWM_G1;
4040	hba->pwr_info.lane_rx = 1;
4041	hba->pwr_info.lane_tx = 1;
4042	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4043	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4044	hba->pwr_info.hs_rate = 0;
4045}
4046
4047/**
4048 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4049 * @hba: per-adapter instance
4050 */
4051static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4052{
4053	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4054
4055	if (hba->max_pwr_info.is_valid)
4056		return 0;
4057
4058	pwr_info->pwr_tx = FAST_MODE;
4059	pwr_info->pwr_rx = FAST_MODE;
4060	pwr_info->hs_rate = PA_HS_MODE_B;
4061
4062	/* Get the connected lane count */
4063	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4064			&pwr_info->lane_rx);
4065	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4066			&pwr_info->lane_tx);
4067
4068	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4069		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4070				__func__,
4071				pwr_info->lane_rx,
4072				pwr_info->lane_tx);
4073		return -EINVAL;
4074	}
4075
4076	/*
4077	 * First, get the maximum gears of HS speed.
4078	 * A zero value means there is no HS gear capability.
4079	 * Then, get the maximum gears of PWM speed.
4080	 */
4081	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4082	if (!pwr_info->gear_rx) {
4083		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4084				&pwr_info->gear_rx);
4085		if (!pwr_info->gear_rx) {
4086			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4087				__func__, pwr_info->gear_rx);
4088			return -EINVAL;
4089		}
4090		pwr_info->pwr_rx = SLOW_MODE;
4091	}
4092
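	/*
	 * The peer's (device's) maximum RX gear capability determines the
	 * maximum gear usable on the host's TX lanes.
	 */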
4093	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4094			&pwr_info->gear_tx);
4095	if (!pwr_info->gear_tx) {
4096		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4097				&pwr_info->gear_tx);
4098		if (!pwr_info->gear_tx) {
4099			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4100				__func__, pwr_info->gear_tx);
4101			return -EINVAL;
4102		}
4103		pwr_info->pwr_tx = SLOW_MODE;
4104	}
4105
4106	hba->max_pwr_info.is_valid = true;
4107	return 0;
4108}
4109
4110static int ufshcd_change_power_mode(struct ufs_hba *hba,
4111			     struct ufs_pa_layer_attr *pwr_mode)
4112{
4113	int ret;
4114
4115	/* if already configured to the requested pwr_mode */
4116	if (!hba->force_pmc &&
4117	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4118	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4119	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4120	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4121	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4122	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4123	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4124		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4125		return 0;
4126	}
4127
4128	/*
4129	 * Configure attributes for power mode change with below.
4130	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4131	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4132	 * - PA_HSSERIES
4133	 */
4134	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4135	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4136			pwr_mode->lane_rx);
4137	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4138			pwr_mode->pwr_rx == FAST_MODE)
4139		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4140	else
4141		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4142
4143	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4144	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4145			pwr_mode->lane_tx);
4146	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4147			pwr_mode->pwr_tx == FAST_MODE)
4148		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4149	else
4150		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4151
4152	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4153	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
4154	    pwr_mode->pwr_rx == FAST_MODE ||
4155	    pwr_mode->pwr_tx == FAST_MODE)
4156		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4157						pwr_mode->hs_rate);
4158
4159	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4160		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4161				DL_FC0ProtectionTimeOutVal_Default);
4162		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4163				DL_TC0ReplayTimeOutVal_Default);
4164		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4165				DL_AFC0ReqTimeOutVal_Default);
4166		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4167				DL_FC1ProtectionTimeOutVal_Default);
4168		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4169				DL_TC1ReplayTimeOutVal_Default);
4170		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4171				DL_AFC1ReqTimeOutVal_Default);
4172
4173		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4174				DL_FC0ProtectionTimeOutVal_Default);
4175		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4176				DL_TC0ReplayTimeOutVal_Default);
4177		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4178				DL_AFC0ReqTimeOutVal_Default);
4179	}
4180
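	/* PA_PWRMODE encodes the RX mode in bits [7:4] and the TX mode in bits [3:0] */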
4181	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4182			| pwr_mode->pwr_tx);
4183
4184	if (ret) {
4185		dev_err(hba->dev,
4186			"%s: power mode change failed %d\n", __func__, ret);
4187	} else {
4188		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4189								pwr_mode);
4190
4191		memcpy(&hba->pwr_info, pwr_mode,
4192			sizeof(struct ufs_pa_layer_attr));
4193	}
4194
4195	return ret;
4196}
4197
4198/**
4199 * ufshcd_config_pwr_mode - configure a new power mode
4200 * @hba: per-adapter instance
4201 * @desired_pwr_mode: desired power configuration
4202 */
4203int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4204		struct ufs_pa_layer_attr *desired_pwr_mode)
4205{
4206	struct ufs_pa_layer_attr final_params = { 0 };
4207	int ret;
4208
4209	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4210					desired_pwr_mode, &final_params);
4211
4212	if (ret)
4213		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4214
4215	ret = ufshcd_change_power_mode(hba, &final_params);
4216
4217	return ret;
4218}
4219EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4220
4221/**
4222 * ufshcd_complete_dev_init() - checks device readiness
4223 * @hba: per-adapter instance
4224 *
4225 * Set fDeviceInit flag and poll until device toggles it.
4226 */
4227static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4228{
4229	int err;
4230	bool flag_res = true;
4231	ktime_t timeout;
4232
4233	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4234		QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4235	if (err) {
4236		dev_err(hba->dev,
4237			"%s setting fDeviceInit flag failed with error %d\n",
4238			__func__, err);
4239		goto out;
4240	}
4241
4242	/* Poll fDeviceInit flag to be cleared */
4243	timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4244	do {
4245		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4246					QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4247		if (!flag_res)
4248			break;
4249		usleep_range(5000, 10000);
4250	} while (ktime_before(ktime_get(), timeout));
4251
4252	if (err) {
4253		dev_err(hba->dev,
4254				"%s reading fDeviceInit flag failed with error %d\n",
4255				__func__, err);
4256	} else if (flag_res) {
4257		dev_err(hba->dev,
4258				"%s fDeviceInit was not cleared by the device\n",
4259				__func__);
4260		err = -EBUSY;
4261	}
4262out:
4263	return err;
4264}
4265
4266/**
4267 * ufshcd_make_hba_operational - Make UFS controller operational
4268 * @hba: per adapter instance
4269 *
4270 * To bring UFS host controller to operational state,
4271 * 1. Enable required interrupts
4272 * 2. Configure interrupt aggregation
4273 * 3. Program UTRL and UTMRL base address
4274 * 4. Configure run-stop-registers
4275 *
4276 * Returns 0 on success, non-zero value on failure
4277 */
4278int ufshcd_make_hba_operational(struct ufs_hba *hba)
4279{
4280	int err = 0;
4281	u32 reg;
4282
4283	/* Enable required interrupts */
4284	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4285
4286	/* Configure interrupt aggregation */
4287	if (ufshcd_is_intr_aggr_allowed(hba))
4288		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4289	else
4290		ufshcd_disable_intr_aggr(hba);
4291
4292	/* Configure UTRL and UTMRL base address registers */
4293	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4294			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4295	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4296			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4297	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4298			REG_UTP_TASK_REQ_LIST_BASE_L);
4299	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4300			REG_UTP_TASK_REQ_LIST_BASE_H);
4301
4302	/*
4303	 * Make sure base address and interrupt setup are updated before
4304	 * enabling the run/stop registers below.
4305	 */
4306	wmb();
4307
4308	/*
4309	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4310	 */
4311	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4312	if (!(ufshcd_get_lists_status(reg))) {
4313		ufshcd_enable_run_stop_reg(hba);
4314	} else {
4315		dev_err(hba->dev,
4316			"Host controller not ready to process requests");
4317		err = -EIO;
4318	}
4319
4320	return err;
4321}
4322EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4323
4324/**
4325 * ufshcd_hba_stop - Send controller to reset state
4326 * @hba: per adapter instance
4327 */
4328static inline void ufshcd_hba_stop(struct ufs_hba *hba)
4329{
4330	unsigned long flags;
4331	int err;
4332
4333	/*
4334	 * Obtain the host lock to prevent the controller from being disabled
4335	 * while the UFS interrupt handler is active on another CPU.
4336	 */
4337	spin_lock_irqsave(hba->host->host_lock, flags);
4338	ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4339	spin_unlock_irqrestore(hba->host->host_lock, flags);
4340
4341	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4342					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4343					10, 1);
4344	if (err)
4345		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4346}
4347
4348/**
4349 * ufshcd_hba_execute_hce - initialize the controller
4350 * @hba: per adapter instance
4351 *
4352 * The controller resets itself and controller firmware initialization
4353 * sequence kicks off. When controller is ready it will set
4354 * the Host Controller Enable bit to 1.
4355 *
4356 * Returns 0 on success, non-zero value on failure
4357 */
4358static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4359{
4360	int retry;
4361
4362	if (!ufshcd_is_hba_active(hba))
4363		/* change controller state to "reset state" */
4364		ufshcd_hba_stop(hba);
4365
4366	/* UniPro link is disabled at this point */
4367	ufshcd_set_link_off(hba);
4368
4369	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4370
4371	/* start controller initialization sequence */
4372	ufshcd_hba_start(hba);
4373
4374	/*
4375	 * To initialize a UFS host controller the HCE bit must be set to 1.
4376	 * During initialization the HCE bit value changes from 1->0->1.
4377	 * When the host controller completes the initialization sequence it
4378	 * sets HCE back to 1, and the same bit is read back to check whether
4379	 * initialization has completed. Without this delay, the HCE = 1 value
4380	 * written by the previous instruction might be read back before the
4381	 * controller has actually cleared and re-set it.
4382	 * This delay can be tuned per controller.
4383	 */
4384	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4385
4386	/* wait for the host controller to complete initialization */
4387	retry = 50;
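	/* up to ~50 iterations of ~1 ms each, i.e. roughly 50 ms in total */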
4388	while (ufshcd_is_hba_active(hba)) {
4389		if (retry) {
4390			retry--;
4391		} else {
4392			dev_err(hba->dev,
4393				"Controller enable failed\n");
4394			return -EIO;
4395		}
4396		usleep_range(1000, 1100);
4397	}
4398
4399	/* enable UIC related interrupts */
4400	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4401
4402	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4403
4404	return 0;
4405}
4406
4407int ufshcd_hba_enable(struct ufs_hba *hba)
4408{
4409	int ret;
4410
4411	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
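		/*
		 * Controllers with a broken HCE sequence are brought up via
		 * DME_RESET followed by DME_ENABLE instead.
		 */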
4412		ufshcd_set_link_off(hba);
4413		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4414
4415		/* enable UIC related interrupts */
4416		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4417		ret = ufshcd_dme_reset(hba);
4418		if (!ret) {
4419			ret = ufshcd_dme_enable(hba);
4420			if (!ret)
4421				ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4422			if (ret)
4423				dev_err(hba->dev,
4424					"Host controller enable failed with non-hce\n");
4425		}
4426	} else {
4427		ret = ufshcd_hba_execute_hce(hba);
4428	}
4429
4430	return ret;
4431}
4432EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4433
4434static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4435{
4436	int tx_lanes = 0, i, err = 0;
4437
4438	if (!peer)
4439		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4440			       &tx_lanes);
4441	else
4442		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4443				    &tx_lanes);
4444	for (i = 0; i < tx_lanes; i++) {
4445		if (!peer)
4446			err = ufshcd_dme_set(hba,
4447				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4448					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4449					0);
4450		else
4451			err = ufshcd_dme_peer_set(hba,
4452				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4453					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4454					0);
4455		if (err) {
4456			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4457				__func__, peer, i, err);
4458			break;
4459		}
4460	}
4461
4462	return err;
4463}
4464
4465static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4466{
4467	return ufshcd_disable_tx_lcc(hba, true);
4468}
4469
4470void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4471			    u32 reg)
4472{
4473	reg_hist->reg[reg_hist->pos] = reg;
4474	reg_hist->tstamp[reg_hist->pos] = ktime_get();
4475	reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4476}
4477EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
4478
4479/**
4480 * ufshcd_link_startup - Initialize UniPro link startup
4481 * @hba: per adapter instance
4482 *
4483 * Returns 0 for success, non-zero in case of failure
4484 */
4485static int ufshcd_link_startup(struct ufs_hba *hba)
4486{
4487	int ret;
4488	int retries = DME_LINKSTARTUP_RETRIES;
4489	bool link_startup_again = false;
4490
4491	/*
4492	 * If the UFS device isn't active then we will have to issue link startup
4493	 * twice to make sure the device state moves to active.
4494	 */
4495	if (!ufshcd_is_ufs_dev_active(hba))
4496		link_startup_again = true;
4497
4498link_startup:
4499	do {
4500		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4501
4502		ret = ufshcd_dme_link_startup(hba);
4503
4504		/* check if device is detected by inter-connect layer */
4505		if (!ret && !ufshcd_is_device_present(hba)) {
4506			ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4507					       0);
4508			dev_err(hba->dev, "%s: Device not present\n", __func__);
4509			ret = -ENXIO;
4510			goto out;
4511		}
4512
4513		/*
4514		 * DME link lost indication is only received when link is up,
4515		 * but we can't be sure if the link is up until link startup
4516		 * succeeds. So reset the local UniPro stack and try again.
4517		 */
4518		if (ret && ufshcd_hba_enable(hba)) {
4519			ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4520					       (u32)ret);
4521			goto out;
4522		}
4523	} while (ret && retries--);
4524
4525	if (ret) {
4526		/* failed to get the link up; give up */
4527		ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4528				       (u32)ret);
4529		goto out;
4530	}
4531
4532	if (link_startup_again) {
4533		link_startup_again = false;
4534		retries = DME_LINKSTARTUP_RETRIES;
4535		goto link_startup;
4536	}
4537
4538	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4539	ufshcd_init_pwr_info(hba);
4540	ufshcd_print_pwr_info(hba);
4541
4542	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4543		ret = ufshcd_disable_device_tx_lcc(hba);
4544		if (ret)
4545			goto out;
4546	}
4547
4548	/* Include any host controller configuration via UIC commands */
4549	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4550	if (ret)
4551		goto out;
4552
4553	/* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
4554	ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4555	ret = ufshcd_make_hba_operational(hba);
4556out:
4557	if (ret) {
4558		dev_err(hba->dev, "link startup failed %d\n", ret);
4559		ufshcd_print_host_state(hba);
4560		ufshcd_print_pwr_info(hba);
4561		ufshcd_print_host_regs(hba);
4562	}
4563	return ret;
4564}
4565
4566/**
4567 * ufshcd_verify_dev_init() - Verify device initialization
4568 * @hba: per-adapter instance
4569 *
4570 * Send NOP OUT UPIU and wait for a NOP IN response to check whether the
4571 * device's UFS Transport Protocol (UTP) layer is ready after a reset.
4572 * If the UTP layer at the device side is not initialized, it may
4573 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4574 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4575 */
4576static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4577{
4578	int err = 0;
4579	int retries;
4580
4581	ufshcd_hold(hba, false);
4582	mutex_lock(&hba->dev_cmd.lock);
4583	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4584		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4585					       NOP_OUT_TIMEOUT);
4586
4587		if (!err || err == -ETIMEDOUT)
4588			break;
4589
4590		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4591	}
4592	mutex_unlock(&hba->dev_cmd.lock);
4593	ufshcd_release(hba);
4594
4595	if (err)
4596		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4597	return err;
4598}
4599
4600/**
4601 * ufshcd_set_queue_depth - set lun queue depth
4602 * @sdev: pointer to SCSI device
4603 *
4604 * Read the bLUQueueDepth value and activate SCSI tagged command
4605 * queueing. For a WLUN, the queue depth is set to 1. For best-effort
4606 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4607 * number of requests the host can queue.
4608 */
4609static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4610{
4611	int ret = 0;
4612	u8 lun_qdepth;
4613	struct ufs_hba *hba;
4614
4615	hba = shost_priv(sdev->host);
4616
4617	lun_qdepth = hba->nutrs;
4618	ret = ufshcd_read_unit_desc_param(hba,
4619					  ufshcd_scsi_to_upiu_lun(sdev->lun),
4620					  UNIT_DESC_PARAM_LU_Q_DEPTH,
4621					  &lun_qdepth,
4622					  sizeof(lun_qdepth));
4623
4624	/* Some WLUNs don't support the unit descriptor */
4625	if (ret == -EOPNOTSUPP)
4626		lun_qdepth = 1;
4627	else if (!lun_qdepth)
4628		/* eventually, we can figure out the real queue depth */
4629		lun_qdepth = hba->nutrs;
4630	else
4631		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4632
4633	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4634			__func__, lun_qdepth);
4635	scsi_change_queue_depth(sdev, lun_qdepth);
4636}
4637
4638/**
4639 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4640 * @hba: per-adapter instance
4641 * @lun: UFS device lun id
4642 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4643 *
4644 * Returns 0 in case of success and the b_lu_write_protect status is returned
4645 * in the @b_lu_write_protect parameter.
4646 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4647 * Returns -EINVAL in case of invalid parameters passed to this function.
4648 */
4649static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4650			    u8 lun,
4651			    u8 *b_lu_write_protect)
4652{
4653	int ret;
4654
4655	if (!b_lu_write_protect)
4656		ret = -EINVAL;
4657	/*
4658	 * According to UFS device spec, RPMB LU can't be write
4659	 * protected so skip reading bLUWriteProtect parameter for
4660	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4661	 */
4662	else if (lun >= hba->dev_info.max_lu_supported)
4663		ret = -ENOTSUPP;
4664	else
4665		ret = ufshcd_read_unit_desc_param(hba,
4666					  lun,
4667					  UNIT_DESC_PARAM_LU_WR_PROTECT,
4668					  b_lu_write_protect,
4669					  sizeof(*b_lu_write_protect));
4670	return ret;
4671}
4672
4673/**
4674 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4675 * status
4676 * @hba: per-adapter instance
4677 * @sdev: pointer to SCSI device
4678 *
4679 */
4680static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4681						    struct scsi_device *sdev)
4682{
4683	if (hba->dev_info.f_power_on_wp_en &&
4684	    !hba->dev_info.is_lu_power_on_wp) {
4685		u8 b_lu_write_protect;
4686
4687		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4688				      &b_lu_write_protect) &&
4689		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4690			hba->dev_info.is_lu_power_on_wp = true;
4691	}
4692}
4693
4694/**
4695 * ufshcd_slave_alloc - handle initial SCSI device configurations
4696 * @sdev: pointer to SCSI device
4697 *
4698 * Returns 0 (success)
4699 */
4700static int ufshcd_slave_alloc(struct scsi_device *sdev)
4701{
4702	struct ufs_hba *hba;
4703
4704	hba = shost_priv(sdev->host);
4705
4706	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4707	sdev->use_10_for_ms = 1;
4708
4709	/* DBD field should be set to 1 in mode sense(10) */
4710	sdev->set_dbd_for_ms = 1;
4711
4712	/* allow SCSI layer to restart the device in case of errors */
4713	sdev->allow_restart = 1;
4714
4715	/* REPORT SUPPORTED OPERATION CODES is not supported */
4716	sdev->no_report_opcodes = 1;
4717
4718	/* WRITE_SAME command is not supported */
4719	sdev->no_write_same = 1;
4720
4721	ufshcd_set_queue_depth(sdev);
4722
4723	ufshcd_get_lu_power_on_wp_status(hba, sdev);
4724
4725	return 0;
4726}
4727
4728/**
4729 * ufshcd_change_queue_depth - change queue depth
4730 * @sdev: pointer to SCSI device
4731 * @depth: required depth to set
4732 *
4733 * Change queue depth and make sure the max. limits are not crossed.
4734 */
4735static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4736{
4737	struct ufs_hba *hba = shost_priv(sdev->host);
4738
4739	if (depth > hba->nutrs)
4740		depth = hba->nutrs;
4741	return scsi_change_queue_depth(sdev, depth);
4742}
4743
4744/**
4745 * ufshcd_slave_configure - adjust SCSI device configurations
4746 * @sdev: pointer to SCSI device
4747 */
4748static int ufshcd_slave_configure(struct scsi_device *sdev)
4749{
4750	struct ufs_hba *hba = shost_priv(sdev->host);
4751	struct request_queue *q = sdev->request_queue;
4752
4753	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4754	if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
4755		blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
4756
4757	if (ufshcd_is_rpm_autosuspend_allowed(hba))
4758		sdev->rpm_autosuspend = 1;
4759
4760	ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
4761
4762	return 0;
4763}
4764
4765/**
4766 * ufshcd_slave_destroy - remove SCSI device configurations
4767 * @sdev: pointer to SCSI device
4768 */
4769static void ufshcd_slave_destroy(struct scsi_device *sdev)
4770{
4771	struct ufs_hba *hba;
4772
4773	hba = shost_priv(sdev->host);
4774	/* Drop the reference as it won't be needed anymore */
4775	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4776		unsigned long flags;
4777
4778		spin_lock_irqsave(hba->host->host_lock, flags);
4779		hba->sdev_ufs_device = NULL;
4780		spin_unlock_irqrestore(hba->host->host_lock, flags);
4781	}
4782}
4783
4784/**
4785 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4786 * @lrbp: pointer to local reference block of completed command
4787 * @scsi_status: SCSI command status
4788 *
4789 * Returns value based on SCSI command status
4790 */
4791static inline int
4792ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4793{
4794	int result = 0;
4795
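	/*
	 * The SCSI result word packs the host byte into bits 16-23, the message
	 * byte into bits 8-15 and the SCSI status byte into bits 0-7.
	 */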
4796	switch (scsi_status) {
4797	case SAM_STAT_CHECK_CONDITION:
4798		ufshcd_copy_sense_data(lrbp);
4799		fallthrough;
4800	case SAM_STAT_GOOD:
4801		result |= DID_OK << 16 |
4802			  COMMAND_COMPLETE << 8 |
4803			  scsi_status;
4804		break;
4805	case SAM_STAT_TASK_SET_FULL:
4806	case SAM_STAT_BUSY:
4807	case SAM_STAT_TASK_ABORTED:
4808		ufshcd_copy_sense_data(lrbp);
4809		result |= scsi_status;
4810		break;
4811	default:
4812		result |= DID_ERROR << 16;
4813		break;
4814	} /* end of switch */
4815
4816	return result;
4817}
4818
4819/**
4820 * ufshcd_transfer_rsp_status - Get overall status of the response
4821 * @hba: per adapter instance
4822 * @lrbp: pointer to local reference block of completed command
4823 *
4824 * Returns result of the command to notify SCSI midlayer
4825 */
4826static inline int
4827ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4828{
4829	int result = 0;
4830	int scsi_status;
4831	int ocs;
4832
4833	/* overall command status of utrd */
4834	ocs = ufshcd_get_tr_ocs(lrbp);
4835
4836	if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
4837		if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
4838					MASK_RSP_UPIU_RESULT)
4839			ocs = OCS_SUCCESS;
4840	}
4841
4842	switch (ocs) {
4843	case OCS_SUCCESS:
4844		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4845		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4846		switch (result) {
4847		case UPIU_TRANSACTION_RESPONSE:
4848			/*
4849			 * get the response UPIU result to extract
4850			 * the SCSI command status
4851			 */
4852			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4853
4854			/*
4855			 * get the result based on SCSI status response
4856			 * to notify the SCSI midlayer of the command status
4857			 */
4858			scsi_status = result & MASK_SCSI_STATUS;
4859			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4860
4861			/*
4862			 * Currently we are only supporting BKOPs exception
4863			 * events hence we can ignore BKOPs exception event
4864			 * during power management callbacks. BKOPs exception
4865			 * event is not expected to be raised in runtime suspend
4866			 * callback as it allows the urgent bkops.
4867			 * During system suspend, we are anyway forcefully
4868			 * disabling the bkops and if urgent bkops is needed
4869			 * it will be enabled on system resume. Long term
4870			 * solution could be to abort the system suspend if
4871			 * UFS device needs urgent BKOPs.
4872			 */
4873			if (!hba->pm_op_in_progress &&
4874			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4875			    schedule_work(&hba->eeh_work)) {
4876				/*
4877				 * Prevent suspend once eeh_work is scheduled
4878				 * to avoid deadlock between ufshcd_suspend
4879				 * and exception event handler.
4880				 */
4881				pm_runtime_get_noresume(hba->dev);
4882			}
4883			break;
4884		case UPIU_TRANSACTION_REJECT_UPIU:
4885			/* TODO: handle Reject UPIU Response */
4886			result = DID_ERROR << 16;
4887			dev_err(hba->dev,
4888				"Reject UPIU not fully implemented\n");
4889			break;
4890		default:
4891			dev_err(hba->dev,
4892				"Unexpected request response code = %x\n",
4893				result);
4894			result = DID_ERROR << 16;
4895			break;
4896		}
4897		break;
4898	case OCS_ABORTED:
4899		result |= DID_ABORT << 16;
4900		break;
4901	case OCS_INVALID_COMMAND_STATUS:
4902		result |= DID_REQUEUE << 16;
4903		break;
4904	case OCS_INVALID_CMD_TABLE_ATTR:
4905	case OCS_INVALID_PRDT_ATTR:
4906	case OCS_MISMATCH_DATA_BUF_SIZE:
4907	case OCS_MISMATCH_RESP_UPIU_SIZE:
4908	case OCS_PEER_COMM_FAILURE:
4909	case OCS_FATAL_ERROR:
4910	case OCS_DEVICE_FATAL_ERROR:
4911	case OCS_INVALID_CRYPTO_CONFIG:
4912	case OCS_GENERAL_CRYPTO_ERROR:
4913	default:
4914		result |= DID_ERROR << 16;
4915		dev_err(hba->dev,
4916				"OCS error from controller = %x for tag %d\n",
4917				ocs, lrbp->task_tag);
4918		ufshcd_print_host_regs(hba);
4919		ufshcd_print_host_state(hba);
4920		break;
4921	} /* end of switch */
4922
4923	if ((host_byte(result) != DID_OK) &&
4924	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
4925		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4926	return result;
4927}
4928
4929/**
4930 * ufshcd_uic_cmd_compl - handle completion of uic command
4931 * @hba: per adapter instance
4932 * @intr_status: interrupt status generated by the controller
4933 *
4934 * Returns
4935 *  IRQ_HANDLED - If interrupt is valid
4936 *  IRQ_NONE    - If invalid interrupt
4937 */
4938static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4939{
4940	irqreturn_t retval = IRQ_NONE;
4941
4942	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4943		hba->active_uic_cmd->argument2 |=
4944			ufshcd_get_uic_cmd_result(hba);
4945		hba->active_uic_cmd->argument3 =
4946			ufshcd_get_dme_attr_val(hba);
4947		if (!hba->uic_async_done)
4948			hba->active_uic_cmd->cmd_active = 0;
4949		complete(&hba->active_uic_cmd->done);
4950		retval = IRQ_HANDLED;
4951	}
4952
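	/* UPMS/UHES/UHXS bits signal completion of power mode and hibernate ops */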
4953	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
4954		hba->active_uic_cmd->cmd_active = 0;
4955		complete(hba->uic_async_done);
4956		retval = IRQ_HANDLED;
4957	}
4958
4959	if (retval == IRQ_HANDLED)
4960		ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
4961					     "complete");
4962	return retval;
4963}
4964
4965/**
4966 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4967 * @hba: per adapter instance
4968 * @completed_reqs: requests to complete
4969 */
4970static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4971					unsigned long completed_reqs)
4972{
4973	struct ufshcd_lrb *lrbp;
4974	struct scsi_cmnd *cmd;
4975	int result;
4976	int index;
4977
4978	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4979		lrbp = &hba->lrb[index];
4980		lrbp->compl_time_stamp = ktime_get();
4981		cmd = lrbp->cmd;
4982		if (cmd) {
4983			ufshcd_add_command_trace(hba, index, "complete");
4984			result = ufshcd_transfer_rsp_status(hba, lrbp);
4985			scsi_dma_unmap(cmd);
4986			cmd->result = result;
4987			/* Mark completed command as NULL in LRB */
4988			lrbp->cmd = NULL;
4989			/* Do not touch lrbp after scsi done */
4990			cmd->scsi_done(cmd);
4991			__ufshcd_release(hba);
4992		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4993			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4994			if (hba->dev_cmd.complete) {
4995				ufshcd_add_command_trace(hba, index,
4996						"dev_complete");
4997				complete(hba->dev_cmd.complete);
4998			}
4999		}
5000		if (ufshcd_is_clkscaling_supported(hba))
5001			hba->clk_scaling.active_reqs--;
5002	}
5003
5004	/* clear corresponding bits of completed commands */
5005	hba->outstanding_reqs ^= completed_reqs;
5006
5007	ufshcd_clk_scaling_update_busy(hba);
5008}
5009
5010/**
5011 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5012 * @hba: per adapter instance
5013 *
5014 * Returns
5015 *  IRQ_HANDLED - If interrupt is valid
5016 *  IRQ_NONE    - If invalid interrupt
5017 */
5018static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5019{
5020	unsigned long completed_reqs;
5021	u32 tr_doorbell;
5022
5023	/* Resetting interrupt aggregation counters first and reading the
5024	 * DOOR_BELL afterward allows us to handle all the completed requests.
5025	 * In order to prevent starvation of other interrupts the DB is read once
5026	 * after reset. The down side of this solution is the possibility of
5027	 * false interrupt if device completes another request after resetting
5028	 * aggregation and before reading the DB.
5029	 */
5030	if (ufshcd_is_intr_aggr_allowed(hba) &&
5031	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5032		ufshcd_reset_intr_aggr(hba);
5033
5034	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
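	/* Bits set in outstanding_reqs but already cleared in the doorbell are done */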
5035	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5036
5037	if (completed_reqs) {
5038		__ufshcd_transfer_req_compl(hba, completed_reqs);
5039		return IRQ_HANDLED;
5040	} else {
5041		return IRQ_NONE;
5042	}
5043}
5044
5045/**
5046 * ufshcd_disable_ee - disable exception event
5047 * @hba: per-adapter instance
5048 * @mask: exception event to disable
5049 *
5050 * Disables exception event in the device so that the EVENT_ALERT
5051 * bit is not set.
5052 *
5053 * Returns zero on success, non-zero error value on failure.
5054 */
5055static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5056{
5057	int err = 0;
5058	u32 val;
5059
5060	if (!(hba->ee_ctrl_mask & mask))
5061		goto out;
5062
5063	val = hba->ee_ctrl_mask & ~mask;
5064	val &= MASK_EE_STATUS;
5065	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5066			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5067	if (!err)
5068		hba->ee_ctrl_mask &= ~mask;
5069out:
5070	return err;
5071}
5072
5073/**
5074 * ufshcd_enable_ee - enable exception event
5075 * @hba: per-adapter instance
5076 * @mask: exception event to enable
5077 *
5078 * Enable corresponding exception event in the device to allow
5079 * device to alert host in critical scenarios.
5080 *
5081 * Returns zero on success, non-zero error value on failure.
5082 */
5083static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5084{
5085	int err = 0;
5086	u32 val;
5087
5088	if (hba->ee_ctrl_mask & mask)
5089		goto out;
5090
5091	val = hba->ee_ctrl_mask | mask;
5092	val &= MASK_EE_STATUS;
5093	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5094			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5095	if (!err)
5096		hba->ee_ctrl_mask |= mask;
5097out:
5098	return err;
5099}
5100
5101/**
5102 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5103 * @hba: per-adapter instance
5104 *
5105 * Allow device to manage background operations on its own. Enabling
5106 * this might lead to inconsistent latencies during normal data transfers
5107 * as the device is allowed to manage its own way of handling background
5108 * operations.
5109 *
5110 * Returns zero on success, non-zero on failure.
5111 */
5112static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5113{
5114	int err = 0;
5115
5116	if (hba->auto_bkops_enabled)
5117		goto out;
5118
5119	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5120			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5121	if (err) {
5122		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5123				__func__, err);
5124		goto out;
5125	}
5126
5127	hba->auto_bkops_enabled = true;
5128	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5129
5130	/* No need of URGENT_BKOPS exception from the device */
5131	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5132	if (err)
5133		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5134				__func__, err);
5135out:
5136	return err;
5137}
5138
5139/**
5140 * ufshcd_disable_auto_bkops - block the device from doing background operations
5141 * @hba: per-adapter instance
5142 *
5143 * Disabling background operations improves command response latency but
5144 * has the drawback that the device may move into a critical state where it is
5145 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5146 * host is idle so that BKOPS are managed effectively without any negative
5147 * impacts.
5148 *
5149 * Returns zero on success, non-zero on failure.
5150 */
5151static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5152{
5153	int err = 0;
5154
5155	if (!hba->auto_bkops_enabled)
5156		goto out;
5157
5158	/*
5159	 * If host assisted BKOPs is to be enabled, make sure
5160	 * urgent bkops exception is allowed.
5161	 */
5162	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5163	if (err) {
5164		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5165				__func__, err);
5166		goto out;
5167	}
5168
5169	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5170			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5171	if (err) {
5172		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5173				__func__, err);
5174		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5175		goto out;
5176	}
5177
5178	hba->auto_bkops_enabled = false;
5179	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5180	hba->is_urgent_bkops_lvl_checked = false;
5181out:
5182	return err;
5183}
5184
5185/**
5186 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5187 * @hba: per adapter instance
5188 *
5189 * After a device reset the device may toggle the BKOPS_EN flag
5190 * to default value. The s/w tracking variables should be updated
5191 * as well. This function would change the auto-bkops state based on
5192 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5193 */
5194static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5195{
5196	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5197		hba->auto_bkops_enabled = false;
5198		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5199		ufshcd_enable_auto_bkops(hba);
5200	} else {
5201		hba->auto_bkops_enabled = true;
5202		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5203		ufshcd_disable_auto_bkops(hba);
5204	}
5205	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5206	hba->is_urgent_bkops_lvl_checked = false;
5207}
5208
5209static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5210{
5211	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5212			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5213}
5214
5215/**
5216 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5217 * @hba: per-adapter instance
5218 * @status: bkops_status value
5219 *
5220 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5221 * flag in the device to permit background operations if the device
5222 * bkops_status is greater than or equal to "status" argument passed to
5223 * this function, disable otherwise.
5224 *
5225 * Returns 0 for success, non-zero in case of failure.
5226 *
5227 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5228 * to know whether auto bkops is enabled or disabled after this function
5229 * returns control to it.
5230 */
5231static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5232			     enum bkops_status status)
5233{
5234	int err;
5235	u32 curr_status = 0;
5236
5237	err = ufshcd_get_bkops_status(hba, &curr_status);
5238	if (err) {
5239		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5240				__func__, err);
5241		goto out;
5242	} else if (curr_status > BKOPS_STATUS_MAX) {
5243		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5244				__func__, curr_status);
5245		err = -EINVAL;
5246		goto out;
5247	}
5248
5249	if (curr_status >= status)
5250		err = ufshcd_enable_auto_bkops(hba);
5251	else
5252		err = ufshcd_disable_auto_bkops(hba);
5253out:
5254	return err;
5255}
5256
5257/**
5258 * ufshcd_urgent_bkops - handle urgent bkops exception event
5259 * @hba: per-adapter instance
5260 *
5261 * Enable fBackgroundOpsEn flag in the device to permit background
5262 * operations.
5263 *
5264 * Returns 0 if BKOPS is enabled, 1 if it is not enabled, and a negative
5265 * error value for any other failure.
5266 */
5267static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5268{
5269	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5270}
5271
5272static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5273{
5274	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5275			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5276}
5277
5278static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5279{
5280	int err;
5281	u32 curr_status = 0;
5282
5283	if (hba->is_urgent_bkops_lvl_checked)
5284		goto enable_auto_bkops;
5285
5286	err = ufshcd_get_bkops_status(hba, &curr_status);
5287	if (err) {
5288		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5289				__func__, err);
5290		goto out;
5291	}
5292
5293	/*
5294	 * We are seeing that some devices are raising the urgent bkops
5295	 * exception events even when the BKOPS status doesn't indicate a
5296	 * performance-impacted or critical state. Handle such devices by
5297	 * determining their urgent bkops status at runtime.
5298	 */
5299	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5300		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5301				__func__, curr_status);
5302		/* update the current status as the urgent bkops level */
5303		hba->urgent_bkops_lvl = curr_status;
5304		hba->is_urgent_bkops_lvl_checked = true;
5305	}
5306
5307enable_auto_bkops:
5308	err = ufshcd_enable_auto_bkops(hba);
5309out:
5310	if (err < 0)
5311		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5312				__func__, err);
5313}
5314
5315static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5316{
5317	int ret;
5318	u8 index;
5319	enum query_opcode opcode;
5320
5321	if (!ufshcd_is_wb_allowed(hba))
5322		return 0;
5323
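	/* Nothing to do if WriteBooster is already in the requested state */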
5324	if (!(enable ^ hba->wb_enabled))
5325		return 0;
5326	if (enable)
5327		opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5328	else
5329		opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5330
5331	index = ufshcd_wb_get_query_index(hba);
5332	ret = ufshcd_query_flag_retry(hba, opcode,
5333				      QUERY_FLAG_IDN_WB_EN, index, NULL);
5334	if (ret) {
5335		dev_err(hba->dev, "%s write booster %s failed %d\n",
5336			__func__, enable ? "enable" : "disable", ret);
5337		return ret;
5338	}
5339
5340	hba->wb_enabled = enable;
5341	dev_dbg(hba->dev, "%s write booster %s %d\n",
5342			__func__, enable ? "enable" : "disable", ret);
5343
5344	return ret;
5345}
5346
5347static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5348{
5349	int val;
5350	u8 index;
5351
5352	if (set)
5353		val =  UPIU_QUERY_OPCODE_SET_FLAG;
5354	else
5355		val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5356
5357	index = ufshcd_wb_get_query_index(hba);
5358	return ufshcd_query_flag_retry(hba, val,
5359				QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5360				index, NULL);
5361}
5362
5363static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5364{
5365	if (enable)
5366		ufshcd_wb_buf_flush_enable(hba);
5367	else
5368		ufshcd_wb_buf_flush_disable(hba);
5369
5370}
5371
5372static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5373{
5374	int ret;
5375	u8 index;
5376
5377	if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
5378		return 0;
5379
5380	index = ufshcd_wb_get_query_index(hba);
5381	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5382				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5383				      index, NULL);
5384	if (ret)
5385		dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5386			__func__, ret);
5387	else
5388		hba->wb_buf_flush_enabled = true;
5389
5390	dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5391	return ret;
5392}
5393
5394static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5395{
5396	int ret;
5397	u8 index;
5398
5399	if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
5400		return 0;
5401
5402	index = ufshcd_wb_get_query_index(hba);
5403	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5404				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5405				      index, NULL);
5406	if (ret) {
5407		dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5408			 __func__, ret);
5409	} else {
5410		hba->wb_buf_flush_enabled = false;
5411		dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5412	}
5413
5414	return ret;
5415}
5416
5417static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5418						u32 avail_buf)
5419{
5420	u32 cur_buf;
5421	int ret;
5422	u8 index;
5423
5424	index = ufshcd_wb_get_query_index(hba);
5425	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5426					      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5427					      index, 0, &cur_buf);
5428	if (ret) {
5429		dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5430			__func__, ret);
5431		return false;
5432	}
5433
5434	if (!cur_buf) {
5435		dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5436			 cur_buf);
5437		return false;
5438	}
5439	/* Keep the flush going while the available buffer is below the threshold */
5440	if (avail_buf < hba->vps->wb_flush_threshold)
5441		return true;
5442
5443	return false;
5444}
5445
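/*
 * Decide whether the WriteBooster buffer needs to be flushed (and therefore
 * whether VCC must stay powered) based on dAvailableWriteBoosterBufferSize
 * and the user-space configuration mode of the device.
 */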
5446static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5447{
5448	int ret;
5449	u32 avail_buf;
5450	u8 index;
5451
5452	if (!ufshcd_is_wb_allowed(hba))
5453		return false;
5454	/*
5455	 * The UFS device needs VCC to be ON in order to flush.
5456	 * With user-space reduction enabled, checking the available buffer
5457	 * alone is enough to decide whether to flush; the threshold used
5458	 * here corresponds to the buffer being more than 90% full.
5459	 * With user-space preservation enabled, the current buffer must be
5460	 * checked as well because the WB buffer size can shrink as the disk
5461	 * fills up. That information is provided by the current buffer
5462	 * attribute (dCurrentWriteBoosterBufferSize). There is no point in
5463	 * keeping VCC on when the current buffer is empty.
5464	 */
5465	index = ufshcd_wb_get_query_index(hba);
5466	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5467				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5468				      index, 0, &avail_buf);
5469	if (ret) {
5470		dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5471			 __func__, ret);
5472		return false;
5473	}
5474
5475	if (!hba->dev_info.b_presrv_uspc_en) {
5476		if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5477			return true;
5478		return false;
5479	}
5480
5481	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5482}
5483
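/**
 * ufshcd_rpm_dev_flush_recheck_work - recheck the device flush state
 * @work: pointer to the delayed work item embedded in struct ufs_hba
 */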
5484static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5485{
5486	struct ufs_hba *hba = container_of(to_delayed_work(work),
5487					   struct ufs_hba,
5488					   rpm_dev_flush_recheck_work);
5489	/*
5490	 * To prevent unnecessary VCC power drain after device finishes
5491	 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5492	 * after a certain delay to recheck the threshold by next runtime
5493	 * suspend.
5494	 */
5495	pm_runtime_get_sync(hba->dev);
5496	pm_runtime_put_sync(hba->dev);
5497}
5498
5499/**
5500 * ufshcd_exception_event_handler - handle exceptions raised by device
5501 * @work: pointer to work data
5502 *
5503 * Read bExceptionEventStatus attribute from the device and handle the
5504 * exception event accordingly.
5505 */
5506static void ufshcd_exception_event_handler(struct work_struct *work)
5507{
5508	struct ufs_hba *hba;
5509	int err;
5510	u32 status = 0;
5511	hba = container_of(work, struct ufs_hba, eeh_work);
5512
5513	pm_runtime_get_sync(hba->dev);
5514	ufshcd_scsi_block_requests(hba);
5515	err = ufshcd_get_ee_status(hba, &status);
5516	if (err) {
5517		dev_err(hba->dev, "%s: failed to get exception status %d\n",
5518				__func__, err);
5519		goto out;
5520	}
5521
5522	status &= hba->ee_ctrl_mask;
5523
5524	if (status & MASK_EE_URGENT_BKOPS)
5525		ufshcd_bkops_exception_event_handler(hba);
5526
5527out:
5528	ufshcd_scsi_unblock_requests(hba);
5529	/*
5530	 * pm_runtime_get_noresume() is called while scheduling eeh_work
5531	 * to prevent suspend from racing with the exception work.
5532	 * Hence decrement the usage counter with pm_runtime_put_noidle()
5533	 * to allow suspend once the exception event handler completes.
5534	 */
5535	pm_runtime_put_noidle(hba->dev);
5536	pm_runtime_put(hba->dev);
5538}
5539
5540/* Complete requests that have door-bell cleared */
5541static void ufshcd_complete_requests(struct ufs_hba *hba)
5542{
5543	ufshcd_transfer_req_compl(hba);
5544	ufshcd_tmc_handler(hba);
5545}
5546
5547/**
5548 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
5549 *				to recover from DL NAC errors
5550 * @hba: per-adapter instance
5551 *
5552 * Returns true if error handling is required, false otherwise
5553 */
5554static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5555{
5556	unsigned long flags;
5557	bool err_handling = true;
5558
5559	spin_lock_irqsave(hba->host->host_lock, flags);
5560	/*
5561	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
5562	 * device fatal errors and/or DL NAC & REPLAY timeout errors.
5563	 */
5564	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5565		goto out;
5566
5567	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5568	    ((hba->saved_err & UIC_ERROR) &&
5569	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5570		goto out;
5571
5572	if ((hba->saved_err & UIC_ERROR) &&
5573	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5574		int err;
5575		/*
5576		 * Wait for 50 ms to see whether any other errors show up.
5577		 */
5578		spin_unlock_irqrestore(hba->host->host_lock, flags);
5579		msleep(50);
5580		spin_lock_irqsave(hba->host->host_lock, flags);
5581
5582		/*
5583		 * Now check whether any severe errors other than the DL NAC
5584		 * error have been reported.
5585		 */
5586		if ((hba->saved_err & INT_FATAL_ERRORS) ||
5587		    ((hba->saved_err & UIC_ERROR) &&
5588		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5589			goto out;
5590
5591		/*
5592		 * As DL NAC is the only error received so far, send out a NOP
5593		 * command to confirm whether the link is still active.
5594		 *   - If we don't get any response, do error recovery.
5595		 *   - If we get a response, clear the DL NAC error bit.
5596		 */
5597
5598		spin_unlock_irqrestore(hba->host->host_lock, flags);
5599		err = ufshcd_verify_dev_init(hba);
5600		spin_lock_irqsave(hba->host->host_lock, flags);
5601
5602		if (err)
5603			goto out;
5604
5605		/* Link seems to be alive hence ignore the DL NAC errors */
5606		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5607			hba->saved_err &= ~UIC_ERROR;
5608		/* clear NAC error */
5609		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5610		if (!hba->saved_uic_err)
5611			err_handling = false;
5612	}
5613out:
5614	spin_unlock_irqrestore(hba->host->host_lock, flags);
5615	return err_handling;
5616}
5617
5618/* host lock must be held before calling this func */
5619static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5620{
5621	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5622	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5623}
5624
5625/* host lock must be held before calling this func */
5626static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
5627{
5628	/* handle fatal errors only when link is not in error state */
5629	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5630		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5631		    ufshcd_is_saved_err_fatal(hba))
5632			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5633		else
5634			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5635		queue_work(hba->eh_wq, &hba->eh_work);
5636	}
5637}
5638
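/*
 * Prepare for error handling: make sure the device is runtime resumed with
 * power, IRQ and clocks available (restoring them manually if the runtime
 * resume itself failed), and quiesce clock scaling.
 */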
5639static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5640{
5641	pm_runtime_get_sync(hba->dev);
5642	if (pm_runtime_suspended(hba->dev)) {
5643		/*
5644		 * Don't assume anything about the outcome of
5645		 * pm_runtime_get_sync(): if the resume failed, IRQ and clocks
5646		 * may be OFF, and the power supplies may be OFF or in LPM.
5647		 */
5648		ufshcd_setup_hba_vreg(hba, true);
5649		ufshcd_enable_irq(hba);
5650		ufshcd_setup_vreg(hba, true);
5651		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5652		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5653		ufshcd_hold(hba, false);
5654		if (!ufshcd_is_clkgating_allowed(hba))
5655			ufshcd_setup_clocks(hba, true);
5656		ufshcd_release(hba);
5657		ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
5658	} else {
5659		ufshcd_hold(hba, false);
5660		if (hba->clk_scaling.is_allowed) {
5661			cancel_work_sync(&hba->clk_scaling.suspend_work);
5662			cancel_work_sync(&hba->clk_scaling.resume_work);
5663			ufshcd_suspend_clkscaling(hba);
5664		}
5665	}
5666}
5667
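/* Undo ufshcd_err_handling_prepare(): release clocks and resume scaling. */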
5668static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5669{
5670	ufshcd_release(hba);
5671	if (hba->clk_scaling.is_allowed)
5672		ufshcd_resume_clkscaling(hba);
5673	pm_runtime_put(hba->dev);
5674}
5675
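/*
 * Returns true when there is nothing left for the error handler to do:
 * either the host is already in the ERROR state, or no errors, forced
 * resets or broken-link conditions are pending.
 */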
5676static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
5677{
5678	return (hba->ufshcd_state == UFSHCD_STATE_ERROR ||
5679		(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
5680			ufshcd_is_link_broken(hba))));
5681}
5682
5683#ifdef CONFIG_PM
5684static void ufshcd_recover_pm_error(struct ufs_hba *hba)
5685{
5686	struct Scsi_Host *shost = hba->host;
5687	struct scsi_device *sdev;
5688	struct request_queue *q;
5689	int ret;
5690
5691	/*
5692	 * Set the RPM status of the hba device to RPM_ACTIVE;
5693	 * this also clears its runtime error.
5694	 */
5695	ret = pm_runtime_set_active(hba->dev);
5696	/*
5697	 * If the hba device had a runtime error, we also need to resume the
5698	 * scsi devices under it, in case any of them failed to resume due to
5699	 * the hba runtime resume failure. This unblocks blk_queue_enter() in
5700	 * case there are bios waiting inside it.
5701	 */
5702	if (!ret) {
5703		shost_for_each_device(sdev, shost) {
5704			q = sdev->request_queue;
5705			if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
5706				       q->rpm_status == RPM_SUSPENDING))
5707				pm_request_resume(q->dev);
5708		}
5709	}
5710}
5711#else
5712static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
5713{
5714}
5715#endif
5716
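/*
 * Read PA_PWRMODE from the local UniPro stack and report whether the RX/TX
 * power modes no longer match the cached hba->pwr_info (e.g. after a
 * LINERESET dropped the link to PWM mode).
 */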
5717static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
5718{
5719	struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
5720	u32 mode;
5721
5722	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
5723
5724	if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
5725		return true;
5726
5727	if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
5728		return true;
5729
5730	return false;
5731}
5732
5733/**
5734 * ufshcd_err_handler - handle UFS errors that require s/w attention
5735 * @work: pointer to work structure
5736 */
5737static void ufshcd_err_handler(struct work_struct *work)
5738{
5739	struct ufs_hba *hba;
5740	unsigned long flags;
5741	bool err_xfer = false;
5742	bool err_tm = false;
5743	int err = 0, pmc_err;
5744	int tag;
5745	bool needs_reset = false, needs_restore = false;
5746
5747	hba = container_of(work, struct ufs_hba, eh_work);
5748
5749	spin_lock_irqsave(hba->host->host_lock, flags);
5750	if (ufshcd_err_handling_should_stop(hba)) {
5751		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
5752			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5753		spin_unlock_irqrestore(hba->host->host_lock, flags);
5754		return;
5755	}
5756	ufshcd_set_eh_in_progress(hba);
5757	spin_unlock_irqrestore(hba->host->host_lock, flags);
5758	ufshcd_err_handling_prepare(hba);
5759	spin_lock_irqsave(hba->host->host_lock, flags);
5760	ufshcd_scsi_block_requests(hba);
5761	/*
5762	 * A full reset and restore might have happened after preparation
5763	 * is finished, double check whether we should stop.
5764	 */
5765	if (ufshcd_err_handling_should_stop(hba)) {
5766		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
5767			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5768		goto out;
5769	}
5770	hba->ufshcd_state = UFSHCD_STATE_RESET;
5771
5772	/* Complete requests that have door-bell cleared by h/w */
5773	ufshcd_complete_requests(hba);
5774
5775	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5776		bool ret;
5777
5778		spin_unlock_irqrestore(hba->host->host_lock, flags);
5779		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5780		ret = ufshcd_quirk_dl_nac_errors(hba);
5781		spin_lock_irqsave(hba->host->host_lock, flags);
5782		if (!ret && !hba->force_reset && ufshcd_is_link_active(hba))
5783			goto skip_err_handling;
5784	}
5785
5786	if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5787	    ufshcd_is_saved_err_fatal(hba) ||
5788	    ((hba->saved_err & UIC_ERROR) &&
5789	     (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5790				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5791		needs_reset = true;
5792
5793	if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
5794	    (hba->saved_uic_err &&
5795	     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
5796		bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
5797
5798		spin_unlock_irqrestore(hba->host->host_lock, flags);
5799		ufshcd_print_host_state(hba);
5800		ufshcd_print_pwr_info(hba);
5801		ufshcd_print_host_regs(hba);
5802		ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5803		ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
5804		spin_lock_irqsave(hba->host->host_lock, flags);
5805	}
5806
5807	/*
5808	 * If a host reset is required then skip forcefully clearing the
5809	 * pending transfers, because they will get cleared during the
5810	 * host reset and restore.
5811	 */
5812	if (needs_reset)
5813		goto do_reset;
5814
5815	/*
5816	 * If LINERESET was caught, UFS might have been put to PWM mode,
5817	 * check if power mode restore is needed.
5818	 */
5819	if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
5820		hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
5821		if (!hba->saved_uic_err)
5822			hba->saved_err &= ~UIC_ERROR;
5823		spin_unlock_irqrestore(hba->host->host_lock, flags);
5824		if (ufshcd_is_pwr_mode_restore_needed(hba))
5825			needs_restore = true;
5826		spin_lock_irqsave(hba->host->host_lock, flags);
5827		if (!hba->saved_err && !needs_restore)
5828			goto skip_err_handling;
5829	}
5830
5831	hba->silence_err_logs = true;
5832	/* release lock as clear command might sleep */
5833	spin_unlock_irqrestore(hba->host->host_lock, flags);
5834	/* Clear pending transfer requests */
5835	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5836		if (ufshcd_try_to_abort_task(hba, tag)) {
5837			err_xfer = true;
5838			goto lock_skip_pending_xfer_clear;
5839		}
5840	}
5841
5842	/* Clear pending task management requests */
5843	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5844		if (ufshcd_clear_tm_cmd(hba, tag)) {
5845			err_tm = true;
5846			goto lock_skip_pending_xfer_clear;
5847		}
5848	}
5849
5850lock_skip_pending_xfer_clear:
5851	spin_lock_irqsave(hba->host->host_lock, flags);
5852
5853	/* Complete the requests that are cleared by s/w */
5854	ufshcd_complete_requests(hba);
5855	hba->silence_err_logs = false;
5856
5857	if (err_xfer || err_tm) {
5858		needs_reset = true;
5859		goto do_reset;
5860	}
5861
5862	/*
5863	 * After all requests and tasks are cleared from the doorbell,
5864	 * it is now safe to restore the power mode.
5865	 */
5866	if (needs_restore) {
5867		spin_unlock_irqrestore(hba->host->host_lock, flags);
5868		/*
5869		 * Hold the scaling lock just in case dev cmds
5870		 * are sent via bsg and/or sysfs.
5871		 */
5872		down_write(&hba->clk_scaling_lock);
5873		hba->force_pmc = true;
5874		pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
5875		if (pmc_err) {
5876			needs_reset = true;
5877			dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
5878					__func__, pmc_err);
5879		}
5880		hba->force_pmc = false;
5881		ufshcd_print_pwr_info(hba);
5882		up_write(&hba->clk_scaling_lock);
5883		spin_lock_irqsave(hba->host->host_lock, flags);
5884	}
5885
5886do_reset:
5887	/* Fatal errors need reset */
5888	if (needs_reset) {
5889		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5890
5891		/*
5892		 * ufshcd_reset_and_restore() does the link reinitialization,
5893		 * which needs at least one empty doorbell slot to send the
5894		 * device management commands (NOP and query commands).
5895		 * If no slot is empty at this moment, forcefully free up the
5896		 * last slot.
5897		 */
5898		if (hba->outstanding_reqs == max_doorbells)
5899			__ufshcd_transfer_req_compl(hba,
5900						    (1UL << (hba->nutrs - 1)));
5901
5902		hba->force_reset = false;
5903		spin_unlock_irqrestore(hba->host->host_lock, flags);
5904		err = ufshcd_reset_and_restore(hba);
5905		if (err)
5906			dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
5907					__func__, err);
5908		else
5909			ufshcd_recover_pm_error(hba);
5910		spin_lock_irqsave(hba->host->host_lock, flags);
5911	}
5912
5913skip_err_handling:
5914	if (!needs_reset) {
5915		if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5916			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5917		if (hba->saved_err || hba->saved_uic_err)
5918			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5919			    __func__, hba->saved_err, hba->saved_uic_err);
5920	}
5921
5922out:
5923	ufshcd_clear_eh_in_progress(hba);
5924	spin_unlock_irqrestore(hba->host->host_lock, flags);
5925	ufshcd_scsi_unblock_requests(hba);
5926	ufshcd_err_handling_unprepare(hba);
5927}
5928
5929/**
5930 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5931 * @hba: per-adapter instance
5932 *
5933 * Returns
5934 *  IRQ_HANDLED - If interrupt is valid
5935 *  IRQ_NONE    - If invalid interrupt
5936 */
5937static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
5938{
5939	u32 reg;
5940	irqreturn_t retval = IRQ_NONE;
5941
5942	/* PHY layer error */
5943	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5944	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5945	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
5946		ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
5947		/*
5948		 * To know whether this error is fatal or not, the DB timeout
5949		 * must be checked; however, that error is handled separately.
5950		 */
5951		if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
5952			dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
5953					__func__);
5954
5955		/* Got a LINERESET indication. */
5956		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
5957			struct uic_command *cmd = NULL;
5958
5959			hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
5960			if (hba->uic_async_done && hba->active_uic_cmd)
5961				cmd = hba->active_uic_cmd;
5962			/*
5963			 * Ignore the LINERESET during power mode change
5964			 * operation via DME_SET command.
5965			 */
5966			if (cmd && (cmd->command == UIC_CMD_DME_SET))
5967				hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
5968		}
5969		retval |= IRQ_HANDLED;
5970	}
5971
5972	/* PA_INIT_ERROR is fatal and needs UIC reset */
5973	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5974	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
5975	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
5976		ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
5977
5978		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5979			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5980		else if (hba->dev_quirks &
5981				UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5982			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5983				hba->uic_error |=
5984					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5985			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5986				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5987		}
5988		retval |= IRQ_HANDLED;
5989	}
5990
5991	/* UIC NL/TL/DME errors need a software retry */
5992	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5993	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
5994	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
5995		ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
5996		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5997		retval |= IRQ_HANDLED;
5998	}
5999
6000	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6001	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6002	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6003		ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
6004		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6005		retval |= IRQ_HANDLED;
6006	}
6007
6008	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6009	if ((reg & UIC_DME_ERROR) &&
6010	    (reg & UIC_DME_ERROR_CODE_MASK)) {
6011		ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
6012		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6013		retval |= IRQ_HANDLED;
6014	}
6015
6016	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6017			__func__, hba->uic_error);
6018	return retval;
6019}
6020
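/*
 * Returns true when the hibern8 enter/exit error was raised by the
 * Auto-Hibern8 logic rather than by an explicit DME_HIBER_ENTER/EXIT
 * command issued by the driver.
 */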
6021static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
6022					 u32 intr_mask)
6023{
6024	if (!ufshcd_is_auto_hibern8_supported(hba) ||
6025	    !ufshcd_is_auto_hibern8_enabled(hba))
6026		return false;
6027
6028	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
6029		return false;
6030
6031	if (hba->active_uic_cmd &&
6032	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
6033	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
6034		return false;
6035
6036	return true;
6037}
6038
6039/**
6040 * ufshcd_check_errors - Check for errors that need s/w attention
6041 * @hba: per-adapter instance
6042 *
6043 * Returns
6044 *  IRQ_HANDLED - If interrupt is valid
6045 *  IRQ_NONE    - If invalid interrupt
6046 */
6047static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
6048{
6049	bool queue_eh_work = false;
6050	irqreturn_t retval = IRQ_NONE;
6051
6052	if (hba->errors & INT_FATAL_ERRORS) {
6053		ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
6054		queue_eh_work = true;
6055	}
6056
6057	if (hba->errors & UIC_ERROR) {
6058		hba->uic_error = 0;
6059		retval = ufshcd_update_uic_error(hba);
6060		if (hba->uic_error)
6061			queue_eh_work = true;
6062	}
6063
6064	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6065		dev_err(hba->dev,
6066			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6067			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6068			"Enter" : "Exit",
6069			hba->errors, ufshcd_get_upmcrs(hba));
6070		ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
6071				       hba->errors);
6072		ufshcd_set_link_broken(hba);
6073		queue_eh_work = true;
6074	}
6075
6076	if (queue_eh_work) {
6077		/*
6078		 * Update the transfer error masks to sticky bits; do this
6079		 * irrespective of the current ufshcd_state.
6080		 */
6081		hba->saved_err |= hba->errors;
6082		hba->saved_uic_err |= hba->uic_error;
6083
6084		/* dump controller state before resetting */
6085		if ((hba->saved_err & (INT_FATAL_ERRORS)) ||
6086		    (hba->saved_uic_err &&
6087		     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6088			dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6089					__func__, hba->saved_err,
6090					hba->saved_uic_err);
6091			ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6092					 "host_regs: ");
6093			ufshcd_print_pwr_info(hba);
6094		}
6095		ufshcd_schedule_eh_work(hba);
6096		retval |= IRQ_HANDLED;
6097	}
6098	/*
6099	 * If !queue_eh_work:
6100	 * the remaining errors are either non-fatal, where the host
6101	 * recovers by itself without s/w intervention, or errors that
6102	 * will be handled by the SCSI core layer.
6103	 */
6104	return retval;
6105}
6106
6107/**
6108 * ufshcd_tmc_handler - handle task management function completion
6109 * @hba: per adapter instance
6110 *
6111 * Returns
6112 *  IRQ_HANDLED - If interrupt is valid
6113 *  IRQ_NONE    - If invalid interrupt
6114 */
6115static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6116{
6117	unsigned long pending, issued;
6118	irqreturn_t ret = IRQ_NONE;
6119	int tag;
6120
6121	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6122
6123	issued = hba->outstanding_tasks & ~pending;
6124	for_each_set_bit(tag, &issued, hba->nutmrs) {
6125		struct request *req = hba->tmf_rqs[tag];
6126		struct completion *c = req->end_io_data;
6127
6128		complete(c);
6129		ret = IRQ_HANDLED;
6130	}
6131
6132	return ret;
6133}
6134
6135/**
6136 * ufshcd_sl_intr - Interrupt service routine
6137 * @hba: per adapter instance
6138 * @intr_status: contains interrupts generated by the controller
6139 *
6140 * Returns
6141 *  IRQ_HANDLED - If interrupt is valid
6142 *  IRQ_NONE    - If invalid interrupt
6143 */
6144static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6145{
6146	irqreturn_t retval = IRQ_NONE;
6147
6148	hba->errors = UFSHCD_ERROR_MASK & intr_status;
6149
6150	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
6151		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
6152
6153	if (hba->errors)
6154		retval |= ufshcd_check_errors(hba);
6155
6156	if (intr_status & UFSHCD_UIC_MASK)
6157		retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6158
6159	if (intr_status & UTP_TASK_REQ_COMPL)
6160		retval |= ufshcd_tmc_handler(hba);
6161
6162	if (intr_status & UTP_TRANSFER_REQ_COMPL)
6163		retval |= ufshcd_transfer_req_compl(hba);
6164
6165	return retval;
6166}
6167
6168/**
6169 * ufshcd_intr - Main interrupt service routine
6170 * @irq: irq number
6171 * @__hba: pointer to adapter instance
6172 *
6173 * Returns
6174 *  IRQ_HANDLED - If interrupt is valid
6175 *  IRQ_NONE    - If invalid interrupt
6176 */
6177static irqreturn_t ufshcd_intr(int irq, void *__hba)
6178{
6179	u32 intr_status, enabled_intr_status = 0;
6180	irqreturn_t retval = IRQ_NONE;
6181	struct ufs_hba *hba = __hba;
6182	int retries = hba->nutrs;
6183
6184	spin_lock(hba->host->host_lock);
6185	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6186	hba->ufs_stats.last_intr_status = intr_status;
6187	hba->ufs_stats.last_intr_ts = ktime_get();
6188
6189	/*
6190	 * There can be at most hba->nutrs requests in flight, and in the
6191	 * worst case they may finish one by one right after the interrupt
6192	 * status is read. Handle them by re-reading the interrupt status in
6193	 * a loop until all of the requests are processed before returning.
6194	 */
6195	while (intr_status && retries--) {
6196		enabled_intr_status =
6197			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6198		if (intr_status)
6199			ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6200		if (enabled_intr_status)
6201			retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6202
6203		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6204	}
6205
6206	if (enabled_intr_status && retval == IRQ_NONE &&
6207				!ufshcd_eh_in_progress(hba)) {
6208		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6209					__func__,
6210					intr_status,
6211					hba->ufs_stats.last_intr_status,
6212					enabled_intr_status);
6213		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6214	}
6215
6216	spin_unlock(hba->host->host_lock);
6217	return retval;
6218}
6219
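/*
 * Clear a task management request from the UTMR list and poll (for up to
 * one second) until the controller drops the corresponding doorbell bit.
 */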
6220static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6221{
6222	int err = 0;
6223	u32 mask = 1 << tag;
6224	unsigned long flags;
6225
6226	if (!test_bit(tag, &hba->outstanding_tasks))
6227		goto out;
6228
6229	spin_lock_irqsave(hba->host->host_lock, flags);
6230	ufshcd_utmrl_clear(hba, tag);
6231	spin_unlock_irqrestore(hba->host->host_lock, flags);
6232
6233	/* poll for max. 1 sec to clear door bell register by h/w */
6234	err = ufshcd_wait_for_register(hba,
6235			REG_UTP_TASK_REQ_DOOR_BELL,
6236			mask, 0, 1000, 1000);
6237out:
6238	return err;
6239}
6240
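/*
 * Send a task management request descriptor to the controller, using a tag
 * borrowed from the TMF request queue, and wait up to TM_CMD_TIMEOUT for its
 * completion. On timeout the driver tries to clear the slot and returns
 * -ETIMEDOUT; see ufshcd_issue_tm_cmd() for a typical caller.
 */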
6241static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6242		struct utp_task_req_desc *treq, u8 tm_function)
6243{
6244	struct request_queue *q = hba->tmf_queue;
6245	struct Scsi_Host *host = hba->host;
6246	DECLARE_COMPLETION_ONSTACK(wait);
6247	struct request *req;
6248	unsigned long flags;
6249	int task_tag, err;
6250
6251	/*
6252	 * blk_get_request() is used here only to get a free tag.
6253	 */
6254	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6255	if (IS_ERR(req))
6256		return PTR_ERR(req);
6257
6258	req->end_io_data = &wait;
6259	ufshcd_hold(hba, false);
6260
6261	spin_lock_irqsave(host->host_lock, flags);
6262
6263	task_tag = req->tag;
6264	hba->tmf_rqs[req->tag] = req;
6265	treq->req_header.dword_0 |= cpu_to_be32(task_tag);
6266
6267	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6268	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
6269
6270	/* send command to the controller */
6271	__set_bit(task_tag, &hba->outstanding_tasks);
6272
6273	/* Make sure descriptors are ready before ringing the task doorbell */
6274	wmb();
6275
6276	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
6277	/* Make sure that doorbell is committed immediately */
6278	wmb();
6279
6280	spin_unlock_irqrestore(host->host_lock, flags);
6281
6282	ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6283
6284	/* wait until the task management command is completed */
6285	err = wait_for_completion_io_timeout(&wait,
6286			msecs_to_jiffies(TM_CMD_TIMEOUT));
6287	if (!err) {
6288		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
6289		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6290				__func__, tm_function);
6291		if (ufshcd_clear_tm_cmd(hba, task_tag))
6292			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6293					__func__, task_tag);
6294		err = -ETIMEDOUT;
6295	} else {
6296		err = 0;
6297		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
6298
6299		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6300	}
6301
6302	spin_lock_irqsave(hba->host->host_lock, flags);
6303	hba->tmf_rqs[req->tag] = NULL;
6304	__clear_bit(task_tag, &hba->outstanding_tasks);
6305	spin_unlock_irqrestore(hba->host->host_lock, flags);
6306
6307	ufshcd_release(hba);
6308	blk_put_request(req);
6309
6310	return err;
6311}
6312
6313/**
6314 * ufshcd_issue_tm_cmd - issues task management commands to controller
6315 * @hba: per adapter instance
6316 * @lun_id: LUN ID to which TM command is sent
6317 * @task_id: task ID to which the TM command is applicable
6318 * @tm_function: task management function opcode
6319 * @tm_response: task management service response return value
6320 *
6321 * Returns non-zero value on error, zero on success.
6322 */
6323static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6324		u8 tm_function, u8 *tm_response)
6325{
6326	struct utp_task_req_desc treq = { { 0 }, };
6327	int ocs_value, err;
6328
6329	/* Configure task request descriptor */
6330	treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6331	treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6332
6333	/* Configure task request UPIU */
6334	treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6335				  cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6336	treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6337
6338	/*
6339	 * The host shall provide the same value for the LUN field in the basic
6340	 * header and for Input Parameter 1.
6341	 */
6342	treq.input_param1 = cpu_to_be32(lun_id);
6343	treq.input_param2 = cpu_to_be32(task_id);
6344
6345	err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6346	if (err == -ETIMEDOUT)
6347		return err;
6348
6349	ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6350	if (ocs_value != OCS_SUCCESS)
6351		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6352				__func__, ocs_value);
6353	else if (tm_response)
6354		*tm_response = be32_to_cpu(treq.output_param1) &
6355				MASK_TM_SERVICE_RESP;
6356	return err;
6357}
6358
6359/**
6360 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6361 * @hba:	per-adapter instance
6362 * @req_upiu:	upiu request
6363 * @rsp_upiu:	upiu reply
6364 * @desc_buff:	pointer to descriptor buffer, NULL if NA
6365 * @buff_len:	descriptor size, 0 if NA
6366 * @cmd_type:	specifies the type (NOP, Query...)
6367 * @desc_op:	descriptor operation
6368 *
6369 * These types of requests use a UTP Transfer Request Descriptor (UTRD).
6370 * Therefore they "ride" the device management infrastructure: they use its
6371 * tag and task work queues.
6372 *
6373 * Since there is only one available tag for device management commands,
6374 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6375 */
6376static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6377					struct utp_upiu_req *req_upiu,
6378					struct utp_upiu_req *rsp_upiu,
6379					u8 *desc_buff, int *buff_len,
6380					enum dev_cmd_type cmd_type,
6381					enum query_opcode desc_op)
6382{
6383	struct request_queue *q = hba->cmd_queue;
6384	struct request *req;
6385	struct ufshcd_lrb *lrbp;
6386	int err = 0;
6387	int tag;
6388	struct completion wait;
6389	unsigned long flags;
6390	u8 upiu_flags;
6391
6392	down_read(&hba->clk_scaling_lock);
6393
6394	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6395	if (IS_ERR(req)) {
6396		err = PTR_ERR(req);
6397		goto out_unlock;
6398	}
6399	tag = req->tag;
6400	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
6401
6402	init_completion(&wait);
6403	lrbp = &hba->lrb[tag];
6404	WARN_ON(lrbp->cmd);
6405
6406	lrbp->cmd = NULL;
6407	lrbp->sense_bufflen = 0;
6408	lrbp->sense_buffer = NULL;
6409	lrbp->task_tag = tag;
6410	lrbp->lun = 0;
6411	lrbp->intr_cmd = true;
6412	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6413	hba->dev_cmd.type = cmd_type;
6414
6415	switch (hba->ufs_version) {
6416	case UFSHCI_VERSION_10:
6417	case UFSHCI_VERSION_11:
6418		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6419		break;
6420	default:
6421		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6422		break;
6423	}
6424
6425	/* update the task tag in the request upiu */
6426	req_upiu->header.dword_0 |= cpu_to_be32(tag);
6427
6428	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6429
6430	/* just copy the upiu request as it is */
6431	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6432	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6433		/* The Data Segment Area is optional depending upon the query
6434		 * function value. For WRITE DESCRIPTOR, the data segment
6435		 * follows right after the Transaction Specific Fields (TSF).
6436		 */
6437		memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6438		*buff_len = 0;
6439	}
6440
6441	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6442
6443	hba->dev_cmd.complete = &wait;
6444
6445	/* Make sure descriptors are ready before ringing the doorbell */
6446	wmb();
6447	spin_lock_irqsave(hba->host->host_lock, flags);
6448	ufshcd_send_command(hba, tag);
6449	spin_unlock_irqrestore(hba->host->host_lock, flags);
6450
6451	/*
6452	 * Ignore the return value here - ufshcd_check_query_response() is
6453	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6454	 * Read the response directly, ignoring all errors.
6455	 */
6456	ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6457
6458	/* just copy the upiu response as it is */
6459	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6460	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6461		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6462		u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6463			       MASK_QUERY_DATA_SEG_LEN;
6464
6465		if (*buff_len >= resp_len) {
6466			memcpy(desc_buff, descp, resp_len);
6467			*buff_len = resp_len;
6468		} else {
6469			dev_warn(hba->dev,
6470				 "%s: rsp size %d is bigger than buffer size %d",
6471				 __func__, resp_len, *buff_len);
6472			*buff_len = 0;
6473			err = -EINVAL;
6474		}
6475	}
6476
6477	blk_put_request(req);
6478out_unlock:
6479	up_read(&hba->clk_scaling_lock);
6480	return err;
6481}
6482
6483/**
6484 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6485 * @hba:	per-adapter instance
6486 * @req_upiu:	upiu request
6487 * @rsp_upiu:	upiu reply - only 8 DW as we do not support scsi commands
6488 * @msgcode:	message code, one of UPIU Transaction Codes Initiator to Target
6489 * @desc_buff:	pointer to descriptor buffer, NULL if NA
6490 * @buff_len:	descriptor size, 0 if NA
6491 * @desc_op:	descriptor operation
6492 *
6493 * Supports UTP Transfer requests (NOP and query), and UTP Task
6494 * Management requests.
6495 * It is up to the caller to fill the UPIU content properly, as it will
6496 * be copied without any further input validation.
6497 */
6498int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6499			     struct utp_upiu_req *req_upiu,
6500			     struct utp_upiu_req *rsp_upiu,
6501			     int msgcode,
6502			     u8 *desc_buff, int *buff_len,
6503			     enum query_opcode desc_op)
6504{
6505	int err;
6506	enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6507	struct utp_task_req_desc treq = { { 0 }, };
6508	int ocs_value;
6509	u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6510
6511	switch (msgcode) {
6512	case UPIU_TRANSACTION_NOP_OUT:
6513		cmd_type = DEV_CMD_TYPE_NOP;
6514		fallthrough;
6515	case UPIU_TRANSACTION_QUERY_REQ:
6516		ufshcd_hold(hba, false);
6517		mutex_lock(&hba->dev_cmd.lock);
6518		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6519						   desc_buff, buff_len,
6520						   cmd_type, desc_op);
6521		mutex_unlock(&hba->dev_cmd.lock);
6522		ufshcd_release(hba);
6523
6524		break;
6525	case UPIU_TRANSACTION_TASK_REQ:
6526		treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6527		treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6528
6529		memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6530
6531		err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6532		if (err == -ETIMEDOUT)
6533			break;
6534
6535		ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6536		if (ocs_value != OCS_SUCCESS) {
6537			dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6538				ocs_value);
6539			break;
6540		}
6541
6542		memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6543
6544		break;
6545	default:
6546		err = -EINVAL;
6547
6548		break;
6549	}
6550
6551	return err;
6552}
6553
6554/**
6555 * ufshcd_eh_device_reset_handler - device reset handler registered to
6556 *                                    scsi layer.
6557 * @cmd: SCSI command pointer
6558 *
6559 * Returns SUCCESS/FAILED
6560 */
6561static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6562{
6563	struct Scsi_Host *host;
6564	struct ufs_hba *hba;
6565	u32 pos;
6566	int err;
6567	u8 resp = 0xF, lun;
6568	unsigned long flags;
6569
6570	host = cmd->device->host;
6571	hba = shost_priv(host);
6572
6573	lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
6574	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
6575	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6576		if (!err)
6577			err = resp;
6578		goto out;
6579	}
6580
6581	/* clear the commands that were pending for corresponding LUN */
6582	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6583		if (hba->lrb[pos].lun == lun) {
6584			err = ufshcd_clear_cmd(hba, pos);
6585			if (err)
6586				break;
6587		}
6588	}
6589	spin_lock_irqsave(host->host_lock, flags);
6590	ufshcd_transfer_req_compl(hba);
6591	spin_unlock_irqrestore(host->host_lock, flags);
6592
6593out:
6594	hba->req_abort_count = 0;
6595	ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
6596	if (!err) {
6597		err = SUCCESS;
6598	} else {
6599		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6600		err = FAILED;
6601	}
6602	return err;
6603}
6604
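/*
 * Mark every request in @bitmap so that subsequent aborts skip the
 * (already failed) UFS_ABORT_TASK step and fail fast instead.
 */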
6605static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6606{
6607	struct ufshcd_lrb *lrbp;
6608	int tag;
6609
6610	for_each_set_bit(tag, &bitmap, hba->nutrs) {
6611		lrbp = &hba->lrb[tag];
6612		lrbp->req_abort_skip = true;
6613	}
6614}
6615
6616/**
6617 * ufshcd_try_to_abort_task - abort a specific task
6618 * @hba: per-adapter instance
6619 * @tag: task tag/index of the command to abort
6620 *
6621 * Abort the pending command in the device by sending the UFS_ABORT_TASK task
6622 * management command, and in the host controller by clearing the door-bell
6623 * register. To avoid racing with the controller issuing the command to the
6624 * device, first send UFS_QUERY_TASK to check that the command was really issued.
6625 *
6626 * Returns zero on success, non-zero on failure
6627 */
6628static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
6629{
6630	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6631	int err = 0;
6632	int poll_cnt;
6633	u8 resp = 0xF;
6634	u32 reg;
6635
6636	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6637		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6638				UFS_QUERY_TASK, &resp);
6639		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6640			/* cmd pending in the device */
6641			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6642				__func__, tag);
6643			break;
6644		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6645			/*
6646			 * cmd not pending in the device, check if it is
6647			 * in transition.
6648			 */
6649			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6650				__func__, tag);
6651			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6652			if (reg & (1 << tag)) {
6653				/* sleep for max. 200us to stabilize */
6654				usleep_range(100, 200);
6655				continue;
6656			}
6657			/* command completed already */
6658			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6659				__func__, tag);
6660			goto out;
6661		} else {
6662			dev_err(hba->dev,
6663				"%s: no response from device. tag = %d, err %d\n",
6664				__func__, tag, err);
6665			if (!err)
6666				err = resp; /* service response error */
6667			goto out;
6668		}
6669	}
6670
6671	if (!poll_cnt) {
6672		err = -EBUSY;
6673		goto out;
6674	}
6675
6676	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6677			UFS_ABORT_TASK, &resp);
6678	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6679		if (!err) {
6680			err = resp; /* service response error */
6681			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6682				__func__, tag, err);
6683		}
6684		goto out;
6685	}
6686
6687	err = ufshcd_clear_cmd(hba, tag);
6688	if (err)
6689		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6690			__func__, tag, err);
6691
6692out:
6693	return err;
6694}
6695
6696/**
6697 * ufshcd_abort - scsi host template eh_abort_handler callback
6698 * @cmd: SCSI command pointer
6699 *
6700 * Returns SUCCESS/FAILED
6701 */
6702static int ufshcd_abort(struct scsi_cmnd *cmd)
6703{
6704	struct Scsi_Host *host;
6705	struct ufs_hba *hba;
6706	unsigned long flags;
6707	unsigned int tag;
6708	int err = 0;
6709	struct ufshcd_lrb *lrbp;
6710	u32 reg;
6711
6712	host = cmd->device->host;
6713	hba = shost_priv(host);
6714	tag = cmd->request->tag;
6715	lrbp = &hba->lrb[tag];
6716	if (!ufshcd_valid_tag(hba, tag)) {
6717		dev_err(hba->dev,
6718			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6719			__func__, tag, cmd, cmd->request);
6720		BUG();
6721	}
6722
6723	/*
6724	 * A task abort to the device W-LUN is illegal. When this command
6725	 * fails due to the spec violation, the next SCSI error handling
6726	 * step will be to send an LU reset which, again, is a spec
6727	 * violation. To avoid these unnecessary/illegal steps, skip
6728	 * straight to the last error handling stage: reset and restore.
6729	 */
6730	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6731		return ufshcd_eh_host_reset_handler(cmd);
6732
6733	ufshcd_hold(hba, false);
6734	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6735	/* If command is already aborted/completed, return SUCCESS */
6736	if (!(test_bit(tag, &hba->outstanding_reqs))) {
6737		dev_err(hba->dev,
6738			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6739			__func__, tag, hba->outstanding_reqs, reg);
6740		goto out;
6741	}
6742
6743	/* Print Transfer Request of aborted task */
6744	dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6745
6746	/*
6747	 * Print detailed info about aborted request.
6748	 * As more than one request might get aborted at the same time,
6749	 * print full information only for the first aborted request in order
6750	 * to reduce repeated printouts. For other aborted requests only print
6751	 * basic details.
6752	 */
6753	scsi_print_command(hba->lrb[tag].cmd);
6754	if (!hba->req_abort_count) {
6755		ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
6756		ufshcd_print_host_regs(hba);
6757		ufshcd_print_host_state(hba);
6758		ufshcd_print_pwr_info(hba);
6759		ufshcd_print_trs(hba, 1 << tag, true);
6760	} else {
6761		ufshcd_print_trs(hba, 1 << tag, false);
6762	}
6763	hba->req_abort_count++;
6764
6765	if (!(reg & (1 << tag))) {
6766		dev_err(hba->dev,
6767		"%s: cmd was completed, but without a notifying intr, tag = %d",
6768		__func__, tag);
6769		goto cleanup;
6770	}
6771
6772	/* Skip task abort in case previous aborts failed and report failure */
6773	if (lrbp->req_abort_skip)
6774		err = -EIO;
6775	else
6776		err = ufshcd_try_to_abort_task(hba, tag);
6777
6778	if (!err) {
6779cleanup:
6780		spin_lock_irqsave(host->host_lock, flags);
6781		__ufshcd_transfer_req_compl(hba, (1UL << tag));
6782		spin_unlock_irqrestore(host->host_lock, flags);
6783out:
6784		err = SUCCESS;
6785	} else {
6786		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6787		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6788		err = FAILED;
6789	}
6790
6791	/*
6792	 * This ufshcd_release() corresponds to the original scsi cmd that got
6793	 * aborted here (as we won't get any IRQ for it).
6794	 */
6795	ufshcd_release(hba);
6796	return err;
6797}
6798
6799/**
6800 * ufshcd_host_reset_and_restore - reset and restore host controller
6801 * @hba: per-adapter instance
6802 *
6803 * Note that the host controller reset may issue DME_RESET to the
6804 * local and remote (device) UniPro stacks, and the attributes
6805 * are reset to their default state.
6806 *
6807 * Returns zero on success, non-zero on failure
6808 */
6809static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6810{
6811	int err;
6812	unsigned long flags;
6813
6814	/*
6815	 * Stop the host controller and complete the requests
6816	 * cleared by h/w
6817	 */
6818	ufshcd_hba_stop(hba);
6819
6820	spin_lock_irqsave(hba->host->host_lock, flags);
6821	hba->silence_err_logs = true;
6822	ufshcd_complete_requests(hba);
6823	hba->silence_err_logs = false;
6824	spin_unlock_irqrestore(hba->host->host_lock, flags);
6825
6826	/* scale up clocks to max frequency before full reinitialization */
6827	ufshcd_set_clk_freq(hba, true);
6828
6829	err = ufshcd_hba_enable(hba);
6830	if (err)
6831		goto out;
6832
6833	/* Establish the link again and restore the device */
6834	err = ufshcd_probe_hba(hba, false);
6835
6836out:
6837	if (err)
6838		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6839	ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
6840	return err;
6841}
6842
6843/**
6844 * ufshcd_reset_and_restore - reset and re-initialize host/device
6845 * @hba: per-adapter instance
6846 *
6847 * Reset and recover device, host and re-establish link. This
6848 * is helpful to recover the communication in fatal error conditions.
6849 *
6850 * Returns zero on success, non-zero on failure
6851 */
6852static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6853{
6854	u32 saved_err;
6855	u32 saved_uic_err;
6856	int err = 0;
6857	unsigned long flags;
6858	int retries = MAX_HOST_RESET_RETRIES;
6859
6860	/*
6861	 * This is a fresh start; cache and clear the saved errors first,
6862	 * in case new errors are generated during reset and restore.
6863	 */
6864	spin_lock_irqsave(hba->host->host_lock, flags);
6865	saved_err = hba->saved_err;
6866	saved_uic_err = hba->saved_uic_err;
6867	hba->saved_err = 0;
6868	hba->saved_uic_err = 0;
6869	spin_unlock_irqrestore(hba->host->host_lock, flags);
6870
6871	do {
6872		/* Reset the attached device */
6873		ufshcd_vops_device_reset(hba);
6874
6875		err = ufshcd_host_reset_and_restore(hba);
6876	} while (err && --retries);
6877
6878	spin_lock_irqsave(hba->host->host_lock, flags);
6879	/*
6880	 * Inform the SCSI mid-layer that we did a reset and allow it to
6881	 * handle Unit Attention properly.
6882	 */
6883	scsi_report_bus_reset(hba->host, 0);
6884	if (err) {
6885		hba->saved_err |= saved_err;
6886		hba->saved_uic_err |= saved_uic_err;
6887	}
6888	spin_unlock_irqrestore(hba->host->host_lock, flags);
6889
6890	return err;
6891}
6892
6893/**
6894 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6895 * @cmd: SCSI command pointer
6896 *
6897 * Returns SUCCESS/FAILED
6898 */
6899static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6900{
6901	int err = SUCCESS;
6902	unsigned long flags;
6903	struct ufs_hba *hba;
6904
6905	hba = shost_priv(cmd->device->host);
6906
6907	spin_lock_irqsave(hba->host->host_lock, flags);
6908	hba->force_reset = true;
6909	ufshcd_schedule_eh_work(hba);
6910	dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
6911	spin_unlock_irqrestore(hba->host->host_lock, flags);
6912
6913	flush_work(&hba->eh_work);
6914
6915	spin_lock_irqsave(hba->host->host_lock, flags);
6916	if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
6917		err = FAILED;
6918	spin_unlock_irqrestore(hba->host->host_lock, flags);
6919
6920	return err;
6921}
6922
6923/**
6924 * ufshcd_get_max_icc_level - calculate the ICC level
6925 * @sup_curr_uA: max. current supported by the regulator
6926 * @start_scan: row in the descriptor table to start the scan from
6927 * @buff: power descriptor buffer
6928 *
6929 * Returns calculated max ICC level for specific regulator
6930 */
6931static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6932{
6933	int i;
6934	int curr_uA;
6935	u16 data;
6936	u16 unit;
6937
6938	for (i = start_scan; i >= 0; i--) {
6939		data = be16_to_cpup((__be16 *)&buff[2 * i]);
6940		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6941						ATTR_ICC_LVL_UNIT_OFFSET;
6942		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6943		switch (unit) {
6944		case UFSHCD_NANO_AMP:
6945			curr_uA = curr_uA / 1000;
6946			break;
6947		case UFSHCD_MILI_AMP:
6948			curr_uA = curr_uA * 1000;
6949			break;
6950		case UFSHCD_AMP:
6951			curr_uA = curr_uA * 1000 * 1000;
6952			break;
6953		case UFSHCD_MICRO_AMP:
6954		default:
6955			break;
6956		}
6957		if (sup_curr_uA >= curr_uA)
6958			break;
6959	}
6960	if (i < 0) {
6961		i = 0;
6962		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6963	}
6964
6965	return (u32)i;
6966}
6967
6968/**
6969 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
6970 * ICC level. In case the regulators are not initialized, 0 is returned.
6971 * @hba: per-adapter instance
6972 * @desc_buf: power descriptor buffer to extract ICC levels from.
6973 * @len: length of desc_buf
6974 *
6975 * Returns calculated ICC level
6976 */
6977static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6978							u8 *desc_buf, int len)
6979{
6980	u32 icc_level = 0;
6981
6982	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6983						!hba->vreg_info.vccq2) {
6984		dev_err(hba->dev,
6985			"%s: Regulator capability was not set, actvIccLevel=%d",
6986							__func__, icc_level);
6987		goto out;
6988	}
6989
6990	if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
6991		icc_level = ufshcd_get_max_icc_level(
6992				hba->vreg_info.vcc->max_uA,
6993				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6994				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6995
6996	if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
6997		icc_level = ufshcd_get_max_icc_level(
6998				hba->vreg_info.vccq->max_uA,
6999				icc_level,
7000				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7001
7002	if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
7003		icc_level = ufshcd_get_max_icc_level(
7004				hba->vreg_info.vccq2->max_uA,
7005				icc_level,
7006				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7007out:
7008	return icc_level;
7009}
7010
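/*
 * Read the power descriptor, derive the maximum supported active ICC level
 * from the regulator limits and program it into the bActiveICCLevel
 * attribute of the device.
 */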
7011static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7012{
7013	int ret;
7014	int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
7015	u8 *desc_buf;
7016	u32 icc_level;
7017
7018	desc_buf = kmalloc(buff_len, GFP_KERNEL);
7019	if (!desc_buf)
7020		return;
7021
7022	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7023				     desc_buf, buff_len);
7024	if (ret) {
7025		dev_err(hba->dev,
7026			"%s: Failed reading power descriptor.len = %d ret = %d",
7027			__func__, buff_len, ret);
7028		goto out;
7029	}
7030
7031	icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7032							 buff_len);
7033	dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7034
7035	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7036		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7037
7038	if (ret)
7039		dev_err(hba->dev,
7040			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
7041			__func__, icc_level, ret);
7042
7043out:
7044	kfree(desc_buf);
7045}
7046
7047static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7048{
7049	scsi_autopm_get_device(sdev);
7050	blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7051	if (sdev->rpm_autosuspend)
7052		pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7053						 RPM_AUTOSUSPEND_DELAY_MS);
7054	scsi_autopm_put_device(sdev);
7055}
7056
7057/**
7058 * ufshcd_scsi_add_wlus - Adds required W-LUs
7059 * @hba: per-adapter instance
7060 *
7061 * UFS device specification requires the UFS devices to support 4 well known
7062 * logical units:
7063 *	"REPORT_LUNS" (address: 01h)
7064 *	"UFS Device" (address: 50h)
7065 *	"RPMB" (address: 44h)
7066 *	"BOOT" (address: 30h)
7067 * The UFS device's power management is controlled by the "POWER CONDITION"
7068 * field of the SSU (START STOP UNIT) command, but this field takes effect
7069 * only when it is sent to the "UFS Device" well known logical unit. Hence we
7070 * require a scsi_device instance to represent this logical unit so that the
7071 * UFS host driver can send the SSU command for power management.
7072 *
7073 * We also require a scsi_device instance for the "RPMB" (Replay Protected
7074 * Memory Block) LU so that a user-space process can control this LU. User
7075 * space may also want to have access to the BOOT LU.
7076 *
7077 * This function adds scsi device instances for each of the well known LUs
7078 * (except the "REPORT LUNS" LU).
7079 *
7080 * Returns zero on success (all required W-LUs are added successfully),
7081 * non-zero error value on failure (if failed to add any of the required W-LU).
7082 */
7083static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7084{
7085	int ret = 0;
7086	struct scsi_device *sdev_boot;
7087
7088	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7089		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7090	if (IS_ERR(hba->sdev_ufs_device)) {
7091		ret = PTR_ERR(hba->sdev_ufs_device);
7092		hba->sdev_ufs_device = NULL;
7093		goto out;
7094	}
7095	ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
7096	scsi_device_put(hba->sdev_ufs_device);
7097
7098	hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7099		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7100	if (IS_ERR(hba->sdev_rpmb)) {
7101		ret = PTR_ERR(hba->sdev_rpmb);
7102		goto remove_sdev_ufs_device;
7103	}
7104	ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
7105	scsi_device_put(hba->sdev_rpmb);
7106
7107	sdev_boot = __scsi_add_device(hba->host, 0, 0,
7108		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7109	if (IS_ERR(sdev_boot)) {
7110		dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7111	} else {
7112		ufshcd_blk_pm_runtime_init(sdev_boot);
7113		scsi_device_put(sdev_boot);
7114	}
7115	goto out;
7116
7117remove_sdev_ufs_device:
7118	scsi_remove_device(hba->sdev_ufs_device);
7119out:
7120	return ret;
7121}
7122
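/*
 * Parse the device descriptor to decide whether WriteBooster can be used.
 * If the device does not support WB, or no WB buffer has been provisioned
 * (neither shared nor dedicated to a LUN), clear UFSHCD_CAP_WB_EN.
 */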
7123static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7124{
7125	struct ufs_dev_info *dev_info = &hba->dev_info;
7126	u8 lun;
7127	u32 d_lu_wb_buf_alloc;
7128
7129	if (!ufshcd_is_wb_allowed(hba))
7130		return;
7131	/*
7132	 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7133	 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7134	 * enabled
7135	 */
7136	if (!(dev_info->wspecversion >= 0x310 ||
7137	      dev_info->wspecversion == 0x220 ||
7138	     (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7139		goto wb_disabled;
7140
7141	if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7142	    DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
7143		goto wb_disabled;
7144
7145	dev_info->d_ext_ufs_feature_sup =
7146		get_unaligned_be32(desc_buf +
7147				   DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7148
7149	if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
7150		goto wb_disabled;
7151
7152	/*
7153	 * WB may be supported but not configured while provisioning. The
7154	 * spec says that, in dedicated WB buffer mode, at most one LUN
7155	 * will have a WB buffer configured. Both the shared buffer mode
7156	 * and the dedicated LU buffer mode are handled below.
7157	 */
7158	dev_info->b_wb_buffer_type =
7159		desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7160
7161	dev_info->b_presrv_uspc_en =
7162		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7163
7164	if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) {
7165		dev_info->d_wb_alloc_units =
7166		get_unaligned_be32(desc_buf +
7167				   DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
7168		if (!dev_info->d_wb_alloc_units)
7169			goto wb_disabled;
7170	} else {
7171		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7172			d_lu_wb_buf_alloc = 0;
7173			ufshcd_read_unit_desc_param(hba,
7174					lun,
7175					UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7176					(u8 *)&d_lu_wb_buf_alloc,
7177					sizeof(d_lu_wb_buf_alloc));
7178			if (d_lu_wb_buf_alloc) {
7179				dev_info->wb_dedicated_lu = lun;
7180				break;
7181			}
7182		}
7183
7184		if (!d_lu_wb_buf_alloc)
7185			goto wb_disabled;
7186	}
7187	return;
7188
7189wb_disabled:
7190	hba->caps &= ~UFSHCD_CAP_WB_EN;
7191}
7192
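/**
 * ufshcd_fixup_dev_quirks - apply device quirks from a fixup table
 * @hba: per-adapter instance
 * @fixups: quirk table, terminated by an entry with a zero quirk field
 *
 * OR each quirk whose manufacturer id and model match the attached device
 * (or use the UFS_ANY_VENDOR/UFS_ANY_MODEL wildcards) into hba->dev_quirks.
 */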
7193void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
7194{
7195	struct ufs_dev_fix *f;
7196	struct ufs_dev_info *dev_info = &hba->dev_info;
7197
7198	if (!fixups)
7199		return;
7200
7201	for (f = fixups; f->quirk; f++) {
7202		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7203		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
7204		     ((dev_info->model &&
7205		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7206		      !strcmp(f->model, UFS_ANY_MODEL)))
7207			hba->dev_quirks |= f->quirk;
7208	}
7209}
7210EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
7211
7212static void ufs_fixup_device_setup(struct ufs_hba *hba)
7213{
7214	/* fix by general quirk table */
7215	ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7216
7217	/* allow vendors to fix quirks */
7218	ufshcd_vops_fixup_dev_quirks(hba);
7219}
7220
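/**
 * ufs_get_device_desc - read the device descriptor and cache device info
 * @hba: per-adapter instance
 *
 * Caches the manufacturer id, specification version and product name in
 * hba->dev_info, then applies device quirks and probes WriteBooster support.
 *
 * Returns zero on success, non-zero error value on failure.
 */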
7221static int ufs_get_device_desc(struct ufs_hba *hba)
7222{
7223	int err;
7224	u8 model_index;
7225	u8 *desc_buf;
7226	struct ufs_dev_info *dev_info = &hba->dev_info;
7227
7228	desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7229	if (!desc_buf) {
7230		err = -ENOMEM;
7231		goto out;
7232	}
7233
7234	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7235				     hba->desc_size[QUERY_DESC_IDN_DEVICE]);
7236	if (err) {
7237		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7238			__func__, err);
7239		goto out;
7240	}
7241
7242	/*
7243	 * getting vendor (manufacturerID) and Bank Index in big endian
7244	 * format
7245	 */
7246	dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7247				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7248
7249	/* getting Specification Version in big endian format */
7250	dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7251				      desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7252
7253	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
7254
7255	err = ufshcd_read_string_desc(hba, model_index,
7256				      &dev_info->model, SD_ASCII_STD);
7257	if (err < 0) {
7258		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7259			__func__, err);
7260		goto out;
7261	}
7262
7263	ufs_fixup_device_setup(hba);
7264
7265	ufshcd_wb_probe(hba, desc_buf);
7266
	/*
	 * ufshcd_read_string_desc() returns the string size on success,
	 * so reset the error value here.
	 */
7271	err = 0;
7272
7273out:
7274	kfree(desc_buf);
7275	return err;
7276}
7277
7278static void ufs_put_device_desc(struct ufs_hba *hba)
7279{
7280	struct ufs_dev_info *dev_info = &hba->dev_info;
7281
7282	kfree(dev_info->model);
7283	dev_info->model = NULL;
7284}
7285
7286/**
7287 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7288 * @hba: per-adapter instance
7289 *
7290 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
7292 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7293 * the hibern8 exit latency.
7294 *
7295 * Returns zero on success, non-zero error value on failure.
7296 */
7297static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7298{
7299	int ret = 0;
7300	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7301
7302	ret = ufshcd_dme_peer_get(hba,
7303				  UIC_ARG_MIB_SEL(
7304					RX_MIN_ACTIVATETIME_CAPABILITY,
7305					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7306				  &peer_rx_min_activatetime);
7307	if (ret)
7308		goto out;
7309
7310	/* make sure proper unit conversion is applied */
7311	tuned_pa_tactivate =
7312		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7313		 / PA_TACTIVATE_TIME_UNIT_US);
7314	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7315			     tuned_pa_tactivate);
7316
7317out:
7318	return ret;
7319}
7320
7321/**
7322 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7323 * @hba: per-adapter instance
7324 *
7325 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7328 * This optimal value can help reduce the hibern8 exit latency.
7329 *
7330 * Returns zero on success, non-zero error value on failure.
7331 */
7332static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7333{
7334	int ret = 0;
7335	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7336	u32 max_hibern8_time, tuned_pa_hibern8time;
7337
7338	ret = ufshcd_dme_get(hba,
7339			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7340					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7341				  &local_tx_hibern8_time_cap);
7342	if (ret)
7343		goto out;
7344
7345	ret = ufshcd_dme_peer_get(hba,
7346				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7347					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7348				  &peer_rx_hibern8_time_cap);
7349	if (ret)
7350		goto out;
7351
7352	max_hibern8_time = max(local_tx_hibern8_time_cap,
7353			       peer_rx_hibern8_time_cap);
7354	/* make sure proper unit conversion is applied */
7355	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7356				/ PA_HIBERN8_TIME_UNIT_US);
7357	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7358			     tuned_pa_hibern8time);
7359out:
7360	return ret;
7361}
7362
7363/**
7364 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7365 * less than device PA_TACTIVATE time.
7366 * @hba: per-adapter instance
7367 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
 * enabled for such devices.
7371 *
7372 * Returns zero on success, non-zero error value on failure.
7373 */
7374static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7375{
7376	int ret = 0;
7377	u32 granularity, peer_granularity;
7378	u32 pa_tactivate, peer_pa_tactivate;
7379	u32 pa_tactivate_us, peer_pa_tactivate_us;
7380	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7381
7382	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7383				  &granularity);
7384	if (ret)
7385		goto out;
7386
7387	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7388				  &peer_granularity);
7389	if (ret)
7390		goto out;
7391
7392	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7393	    (granularity > PA_GRANULARITY_MAX_VAL)) {
7394		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7395			__func__, granularity);
7396		return -EINVAL;
7397	}
7398
7399	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7400	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7401		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7402			__func__, peer_granularity);
7403		return -EINVAL;
7404	}
7405
7406	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7407	if (ret)
7408		goto out;
7409
7410	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7411				  &peer_pa_tactivate);
7412	if (ret)
7413		goto out;
7414
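	/*
	 * gran_to_us_table[] is indexed by PA_GRANULARITY - 1; convert both
	 * PA_TACTIVATE values to microseconds before comparing them.
	 */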
7415	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7416	peer_pa_tactivate_us = peer_pa_tactivate *
7417			     gran_to_us_table[peer_granularity - 1];
7418
7419	if (pa_tactivate_us > peer_pa_tactivate_us) {
7420		u32 new_peer_pa_tactivate;
7421
7422		new_peer_pa_tactivate = pa_tactivate_us /
7423				      gran_to_us_table[peer_granularity - 1];
7424		new_peer_pa_tactivate++;
7425		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7426					  new_peer_pa_tactivate);
7427	}
7428
7429out:
7430	return ret;
7431}
7432
7433static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7434{
7435	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7436		ufshcd_tune_pa_tactivate(hba);
7437		ufshcd_tune_pa_hibern8time(hba);
7438	}
7439
7440	ufshcd_vops_apply_dev_quirks(hba);
7441
7442	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7443		/* set 1ms timeout for PA_TACTIVATE */
7444		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7445
7446	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7447		ufshcd_quirk_tune_host_pa_tactivate(hba);
7448}
7449
7450static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7451{
7452	hba->ufs_stats.hibern8_exit_cnt = 0;
7453	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7454	hba->req_abort_count = 0;
7455}
7456
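/**
 * ufshcd_device_geo_params_init - read the geometry descriptor
 * @hba: per-adapter instance
 *
 * Reads the geometry descriptor to determine the maximum number of logical
 * units (8 or 32) supported by the device.
 *
 * Returns zero on success, non-zero error value on failure.
 */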
7457static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7458{
7459	int err;
7460	size_t buff_len;
7461	u8 *desc_buf;
7462
7463	buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7464	desc_buf = kmalloc(buff_len, GFP_KERNEL);
7465	if (!desc_buf) {
7466		err = -ENOMEM;
7467		goto out;
7468	}
7469
7470	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7471				     desc_buf, buff_len);
7472	if (err) {
7473		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7474				__func__, err);
7475		goto out;
7476	}
7477
7478	if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7479		hba->dev_info.max_lu_supported = 32;
7480	else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7481		hba->dev_info.max_lu_supported = 8;
7482
7483out:
7484	kfree(desc_buf);
7485	return err;
7486}
7487
7488static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7489	{19200000, REF_CLK_FREQ_19_2_MHZ},
7490	{26000000, REF_CLK_FREQ_26_MHZ},
7491	{38400000, REF_CLK_FREQ_38_4_MHZ},
7492	{52000000, REF_CLK_FREQ_52_MHZ},
7493	{0, REF_CLK_FREQ_INVAL},
7494};
7495
7496static enum ufs_ref_clk_freq
7497ufs_get_bref_clk_from_hz(unsigned long freq)
7498{
7499	int i;
7500
7501	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7502		if (ufs_ref_clk_freqs[i].freq_hz == freq)
7503			return ufs_ref_clk_freqs[i].val;
7504
7505	return REF_CLK_FREQ_INVAL;
7506}
7507
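/**
 * ufshcd_parse_dev_ref_clk_freq - map the reference clock rate to bRefClkFreq
 * @hba: per-adapter instance
 * @refclk: device reference clock
 *
 * Stores the bRefClkFreq value matching the clock rate in
 * hba->dev_ref_clk_freq, or REF_CLK_FREQ_INVAL if the rate is not one of the
 * frequencies listed in ufs_ref_clk_freqs[].
 */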
7508void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7509{
7510	unsigned long freq;
7511
7512	freq = clk_get_rate(refclk);
7513
7514	hba->dev_ref_clk_freq =
7515		ufs_get_bref_clk_from_hz(freq);
7516
7517	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7518		dev_err(hba->dev,
		"invalid ref_clk setting = %lu\n", freq);
7520}
7521
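/**
 * ufshcd_set_dev_ref_clk - program the bRefClkFreq attribute in the device
 * @hba: per-adapter instance
 *
 * Reads bRefClkFreq and, if it differs from the reference clock frequency
 * parsed from the host clock tree, writes the new value to the device.
 *
 * Returns zero on success, non-zero error value on failure.
 */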
7522static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7523{
7524	int err;
7525	u32 ref_clk;
7526	u32 freq = hba->dev_ref_clk_freq;
7527
7528	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7529			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7530
7531	if (err) {
7532		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7533			err);
7534		goto out;
7535	}
7536
7537	if (ref_clk == freq)
7538		goto out; /* nothing to update */
7539
7540	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7541			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7542
7543	if (err) {
7544		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7545			ufs_ref_clk_freqs[freq].freq_hz);
7546		goto out;
7547	}
7548
7549	dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7550			ufs_ref_clk_freqs[freq].freq_hz);
7551
7552out:
7553	return err;
7554}
7555
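/**
 * ufshcd_device_params_init - initialize UFS device related parameters
 * @hba: per-adapter instance
 *
 * Reads the geometry and device descriptors, applies device quirks, caches
 * the reference clock gating wait time and the power-on write protect flag,
 * and probes the maximum power mode supported by both host and device.
 *
 * Returns zero on success, non-zero error value on failure.
 */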
7556static int ufshcd_device_params_init(struct ufs_hba *hba)
7557{
7558	bool flag;
7559	int ret, i;
7560
	/* Init device descriptor sizes */
7562	for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7563		hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
7564
7565	/* Init UFS geometry descriptor related parameters */
7566	ret = ufshcd_device_geo_params_init(hba);
7567	if (ret)
7568		goto out;
7569
7570	/* Check and apply UFS device quirks */
7571	ret = ufs_get_device_desc(hba);
7572	if (ret) {
7573		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7574			__func__, ret);
7575		goto out;
7576	}
7577
7578	ufshcd_get_ref_clk_gating_wait(hba);
7579
7580	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7581			QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7582		hba->dev_info.f_power_on_wp_en = flag;
7583
7584	/* Probe maximum power mode co-supported by both UFS host and device */
7585	if (ufshcd_get_max_pwr_mode(hba))
7586		dev_err(hba->dev,
7587			"%s: Failed getting max supported power mode\n",
7588			__func__);
7589out:
7590	return ret;
7591}
7592
7593/**
7594 * ufshcd_add_lus - probe and add UFS logical units
7595 * @hba: per-adapter instance
 *
 * Returns zero on success, non-zero error value on failure.
 */
7597static int ufshcd_add_lus(struct ufs_hba *hba)
7598{
7599	int ret;
7600
7601	/* Add required well known logical units to scsi mid layer */
7602	ret = ufshcd_scsi_add_wlus(hba);
7603	if (ret)
7604		goto out;
7605
7606	/* Initialize devfreq after UFS device is detected */
7607	if (ufshcd_is_clkscaling_supported(hba)) {
7608		memcpy(&hba->clk_scaling.saved_pwr_info.info,
7609			&hba->pwr_info,
7610			sizeof(struct ufs_pa_layer_attr));
7611		hba->clk_scaling.saved_pwr_info.is_valid = true;
7612		if (!hba->devfreq) {
7613			ret = ufshcd_devfreq_init(hba);
7614			if (ret)
7615				goto out;
7616		}
7617
7618		hba->clk_scaling.is_allowed = true;
7619	}
7620
7621	ufs_bsg_probe(hba);
7622	scsi_scan_host(hba->host);
7623	pm_runtime_put_sync(hba->dev);
7624
7625out:
7626	return ret;
7627}
7628
7629static int
7630ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
7631
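/**
 * ufshcd_clear_ua_wlun - clear the unit attention condition of a W-LU
 * @hba: per-adapter instance
 * @wlun: either UFS_UPIU_UFS_DEVICE_WLUN or UFS_UPIU_RPMB_WLUN
 *
 * Sends a REQUEST SENSE command to the given well known LU so that a pending
 * unit attention condition does not fail subsequent commands.
 *
 * Returns zero on success, non-zero error value on failure.
 */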
7632static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
7633{
7634	struct scsi_device *sdp;
7635	unsigned long flags;
7636	int ret = 0;
7637
7638	spin_lock_irqsave(hba->host->host_lock, flags);
7639	if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
7640		sdp = hba->sdev_ufs_device;
7641	else if (wlun == UFS_UPIU_RPMB_WLUN)
7642		sdp = hba->sdev_rpmb;
7643	else
7644		BUG();
7645	if (sdp) {
7646		ret = scsi_device_get(sdp);
7647		if (!ret && !scsi_device_online(sdp)) {
7648			ret = -ENODEV;
7649			scsi_device_put(sdp);
7650		}
7651	} else {
7652		ret = -ENODEV;
7653	}
7654	spin_unlock_irqrestore(hba->host->host_lock, flags);
7655	if (ret)
7656		goto out_err;
7657
7658	ret = ufshcd_send_request_sense(hba, sdp);
7659	scsi_device_put(sdp);
7660out_err:
7661	if (ret)
7662		dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
7663				__func__, wlun, ret);
7664	return ret;
7665}
7666
7667static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
7668{
7669	int ret = 0;
7670
7671	if (!hba->wlun_dev_clr_ua)
7672		goto out;
7673
7674	ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
7675	if (!ret)
7676		ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
7677	if (!ret)
7678		hba->wlun_dev_clr_ua = false;
7679out:
7680	if (ret)
7681		dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
7682				__func__, ret);
7683	return ret;
7684}
7685
7686/**
7687 * ufshcd_probe_hba - probe hba to detect device and initialize
7688 * @hba: per-adapter instance
7689 * @async: asynchronous execution or not
7690 *
 * Execute link-startup and verify device initialization.
 *
 * Returns zero on success, non-zero error value on failure.
 */
7693static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
7694{
7695	int ret;
7696	unsigned long flags;
7697	ktime_t start = ktime_get();
7698
7699	ret = ufshcd_link_startup(hba);
7700	if (ret)
7701		goto out;
7702
7703	/* Debug counters initialization */
7704	ufshcd_clear_dbg_ufs_stats(hba);
7705
7706	/* UniPro link is active now */
7707	ufshcd_set_link_active(hba);
7708
7709	/* Verify device initialization by sending NOP OUT UPIU */
7710	ret = ufshcd_verify_dev_init(hba);
7711	if (ret)
7712		goto out;
7713
	/* Initiate UFS initialization and wait until completion */
7715	ret = ufshcd_complete_dev_init(hba);
7716	if (ret)
7717		goto out;
7718
7719	/*
7720	 * Initialize UFS device parameters used by driver, these
7721	 * parameters are associated with UFS descriptors.
7722	 */
7723	if (async) {
7724		ret = ufshcd_device_params_init(hba);
7725		if (ret)
7726			goto out;
7727	}
7728
7729	ufshcd_tune_unipro_params(hba);
7730
7731	/* UFS device is also active now */
7732	ufshcd_set_ufs_dev_active(hba);
7733	ufshcd_force_reset_auto_bkops(hba);
7734	hba->wlun_dev_clr_ua = true;
7735
7736	/* Gear up to HS gear if supported */
7737	if (hba->max_pwr_info.is_valid) {
7738		/*
7739		 * Set the right value to bRefClkFreq before attempting to
7740		 * switch to HS gears.
7741		 */
7742		if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7743			ufshcd_set_dev_ref_clk(hba);
7744		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
7745		if (ret) {
7746			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7747					__func__, ret);
7748			goto out;
7749		}
7750		ufshcd_print_pwr_info(hba);
7751	}
7752
7753	/*
	 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
	 * and for removable UFS cards as well, hence always set the parameter.
	 * Note: the error handler may issue a device reset, which also resets
	 * bActiveICCLevel, so it is always safe to set it here.
7758	 */
7759	ufshcd_set_active_icc_lvl(hba);
7760
7761	ufshcd_wb_config(hba);
7762	/* Enable Auto-Hibernate if configured */
7763	ufshcd_auto_hibern8_enable(hba);
7764
7765out:
7766	spin_lock_irqsave(hba->host->host_lock, flags);
7767	if (ret)
7768		hba->ufshcd_state = UFSHCD_STATE_ERROR;
7769	else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
7770		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7771	spin_unlock_irqrestore(hba->host->host_lock, flags);
7772
7773	trace_ufshcd_init(dev_name(hba->dev), ret,
7774		ktime_to_us(ktime_sub(ktime_get(), start)),
7775		hba->curr_dev_pwr_mode, hba->uic_link_state);
7776	return ret;
7777}
7778
7779/**
7780 * ufshcd_async_scan - asynchronous execution for probing hba
7781 * @data: data pointer to pass to this function
7782 * @cookie: cookie data
7783 */
7784static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7785{
7786	struct ufs_hba *hba = (struct ufs_hba *)data;
7787	int ret;
7788
7789	/* Initialize hba, detect and initialize UFS device */
7790	ret = ufshcd_probe_hba(hba, true);
7791	if (ret)
7792		goto out;
7793
7794	/* Probe and add UFS logical units  */
7795	ret = ufshcd_add_lus(hba);
7796out:
7797	/*
7798	 * If we failed to initialize the device or the device is not
7799	 * present, turn off the power/clocks etc.
7800	 */
7801	if (ret) {
7802		pm_runtime_put_sync(hba->dev);
7803		ufshcd_exit_clk_scaling(hba);
7804		ufshcd_hba_exit(hba);
7805	} else {
7806		ufshcd_clear_ua_wluns(hba);
7807	}
7808}
7809
7810static const struct attribute_group *ufshcd_driver_groups[] = {
7811	&ufs_sysfs_unit_descriptor_group,
7812	&ufs_sysfs_lun_attributes_group,
7813	NULL,
7814};
7815
7816static struct ufs_hba_variant_params ufs_hba_vps = {
7817	.hba_enable_delay_us		= 1000,
7818	.wb_flush_threshold		= UFS_WB_BUF_REMAIN_PERCENT(40),
7819	.devfreq_profile.polling_ms	= 100,
7820	.devfreq_profile.target		= ufshcd_devfreq_target,
7821	.devfreq_profile.get_dev_status	= ufshcd_devfreq_get_dev_status,
7822	.ondemand_data.upthreshold	= 70,
7823	.ondemand_data.downdifferential	= 5,
7824};
7825
7826static struct scsi_host_template ufshcd_driver_template = {
7827	.module			= THIS_MODULE,
7828	.name			= UFSHCD,
7829	.proc_name		= UFSHCD,
7830	.queuecommand		= ufshcd_queuecommand,
7831	.slave_alloc		= ufshcd_slave_alloc,
7832	.slave_configure	= ufshcd_slave_configure,
7833	.slave_destroy		= ufshcd_slave_destroy,
7834	.change_queue_depth	= ufshcd_change_queue_depth,
7835	.eh_abort_handler	= ufshcd_abort,
7836	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7837	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
7838	.this_id		= -1,
7839	.sg_tablesize		= SG_ALL,
7840	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
7841	.can_queue		= UFSHCD_CAN_QUEUE,
7842	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
7843	.max_host_blocked	= 1,
7844	.track_queue_depth	= 1,
7845	.sdev_groups		= ufshcd_driver_groups,
7846	.dma_boundary		= PAGE_SIZE - 1,
7847	.rpm_autosuspend_delay	= RPM_AUTOSUSPEND_DELAY_MS,
7848};
7849
7850static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7851				   int ua)
7852{
7853	int ret;
7854
7855	if (!vreg)
7856		return 0;
7857
7858	/*
	 * The "set_load" operation is only required for regulators that have
	 * an explicitly configured current limit. Otherwise a zero max_uA may
	 * cause unexpected behavior when the regulator is enabled or set to
	 * high power mode.
7863	 */
7864	if (!vreg->max_uA)
7865		return 0;
7866
7867	ret = regulator_set_load(vreg->reg, ua);
7868	if (ret < 0) {
7869		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7870				__func__, vreg->name, ua, ret);
7871	}
7872
7873	return ret;
7874}
7875
7876static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7877					 struct ufs_vreg *vreg)
7878{
7879	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
7880}
7881
7882static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7883					 struct ufs_vreg *vreg)
7884{
7885	if (!vreg)
7886		return 0;
7887
7888	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7889}
7890
7891static int ufshcd_config_vreg(struct device *dev,
7892		struct ufs_vreg *vreg, bool on)
7893{
7894	int ret = 0;
7895	struct regulator *reg;
7896	const char *name;
7897	int min_uV, uA_load;
7898
7899	BUG_ON(!vreg);
7900
7901	reg = vreg->reg;
7902	name = vreg->name;
7903
7904	if (regulator_count_voltages(reg) > 0) {
7905		uA_load = on ? vreg->max_uA : 0;
7906		ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7907		if (ret)
7908			goto out;
7909
7910		if (vreg->min_uV && vreg->max_uV) {
7911			min_uV = on ? vreg->min_uV : 0;
7912			ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7913			if (ret)
7914				dev_err(dev,
7915					"%s: %s set voltage failed, err=%d\n",
7916					__func__, name, ret);
7917		}
7918	}
7919out:
7920	return ret;
7921}
7922
7923static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7924{
7925	int ret = 0;
7926
7927	if (!vreg || vreg->enabled)
7928		goto out;
7929
7930	ret = ufshcd_config_vreg(dev, vreg, true);
7931	if (!ret)
7932		ret = regulator_enable(vreg->reg);
7933
7934	if (!ret)
7935		vreg->enabled = true;
7936	else
7937		dev_err(dev, "%s: %s enable failed, err=%d\n",
7938				__func__, vreg->name, ret);
7939out:
7940	return ret;
7941}
7942
7943static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7944{
7945	int ret = 0;
7946
7947	if (!vreg || !vreg->enabled)
7948		goto out;
7949
7950	ret = regulator_disable(vreg->reg);
7951
7952	if (!ret) {
7953		/* ignore errors on applying disable config */
7954		ufshcd_config_vreg(dev, vreg, false);
7955		vreg->enabled = false;
7956	} else {
7957		dev_err(dev, "%s: %s disable failed, err=%d\n",
7958				__func__, vreg->name, ret);
7959	}
7960out:
7961	return ret;
7962}
7963
7964static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7965{
7966	int ret = 0;
7967	struct device *dev = hba->dev;
7968	struct ufs_vreg_info *info = &hba->vreg_info;
7969
7970	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7971	if (ret)
7972		goto out;
7973
7974	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7975	if (ret)
7976		goto out;
7977
7978	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7979
7980out:
7981	if (ret) {
7982		ufshcd_toggle_vreg(dev, info->vccq2, false);
7983		ufshcd_toggle_vreg(dev, info->vccq, false);
7984		ufshcd_toggle_vreg(dev, info->vcc, false);
7985	}
7986	return ret;
7987}
7988
7989static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7990{
7991	struct ufs_vreg_info *info = &hba->vreg_info;
7992
7993	return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7994}
7995
7996static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7997{
7998	int ret = 0;
7999
8000	if (!vreg)
8001		goto out;
8002
8003	vreg->reg = devm_regulator_get(dev, vreg->name);
8004	if (IS_ERR(vreg->reg)) {
8005		ret = PTR_ERR(vreg->reg);
8006		dev_err(dev, "%s: %s get failed, err=%d\n",
8007				__func__, vreg->name, ret);
8008	}
8009out:
8010	return ret;
8011}
8012
8013static int ufshcd_init_vreg(struct ufs_hba *hba)
8014{
8015	int ret = 0;
8016	struct device *dev = hba->dev;
8017	struct ufs_vreg_info *info = &hba->vreg_info;
8018
8019	ret = ufshcd_get_vreg(dev, info->vcc);
8020	if (ret)
8021		goto out;
8022
8023	ret = ufshcd_get_vreg(dev, info->vccq);
8024	if (!ret)
8025		ret = ufshcd_get_vreg(dev, info->vccq2);
8026out:
8027	return ret;
8028}
8029
8030static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8031{
8032	struct ufs_vreg_info *info = &hba->vreg_info;
8033
8034	if (info)
8035		return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8036
8037	return 0;
8038}
8039
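/**
 * ufshcd_setup_clocks - enable or disable the controller clocks
 * @hba: per-adapter instance
 * @on: true to prepare and enable the clocks, false to disable them
 *
 * Clocks flagged as keep_link_active are left untouched while the link is
 * active. If enabling any clock fails, all clocks that were enabled are
 * disabled again.
 *
 * Returns zero on success, non-zero error value on failure.
 */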
8040static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
8041{
8042	int ret = 0;
8043	struct ufs_clk_info *clki;
8044	struct list_head *head = &hba->clk_list_head;
8045	unsigned long flags;
8046	ktime_t start = ktime_get();
8047	bool clk_state_changed = false;
8048
8049	if (list_empty(head))
8050		goto out;
8051
8052	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8053	if (ret)
8054		return ret;
8055
8056	list_for_each_entry(clki, head, list) {
8057		if (!IS_ERR_OR_NULL(clki->clk)) {
8058			/*
8059			 * Don't disable clocks which are needed
8060			 * to keep the link active.
8061			 */
8062			if (ufshcd_is_link_active(hba) &&
8063			    clki->keep_link_active)
8064				continue;
8065
8066			clk_state_changed = on ^ clki->enabled;
8067			if (on && !clki->enabled) {
8068				ret = clk_prepare_enable(clki->clk);
8069				if (ret) {
8070					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8071						__func__, clki->name, ret);
8072					goto out;
8073				}
8074			} else if (!on && clki->enabled) {
8075				clk_disable_unprepare(clki->clk);
8076			}
8077			clki->enabled = on;
8078			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8079					clki->name, on ? "en" : "dis");
8080		}
8081	}
8082
8083	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8084	if (ret)
8085		return ret;
8086
8087out:
8088	if (ret) {
8089		list_for_each_entry(clki, head, list) {
8090			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8091				clk_disable_unprepare(clki->clk);
8092		}
8093	} else if (!ret && on) {
8094		spin_lock_irqsave(hba->host->host_lock, flags);
8095		hba->clk_gating.state = CLKS_ON;
8096		trace_ufshcd_clk_gating(dev_name(hba->dev),
8097					hba->clk_gating.state);
8098		spin_unlock_irqrestore(hba->host->host_lock, flags);
8099	}
8100
8101	if (clk_state_changed)
8102		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8103			(on ? "on" : "off"),
8104			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
8105	return ret;
8106}
8107
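/**
 * ufshcd_init_clocks - look up and configure the controller clocks
 * @hba: per-adapter instance
 *
 * Gets every clock listed in hba->clk_list_head, parses the device reference
 * clock frequency from the "ref_clk" entry and sets each clock that specifies
 * a max_freq to that frequency.
 *
 * Returns zero on success, non-zero error value on failure.
 */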
8108static int ufshcd_init_clocks(struct ufs_hba *hba)
8109{
8110	int ret = 0;
8111	struct ufs_clk_info *clki;
8112	struct device *dev = hba->dev;
8113	struct list_head *head = &hba->clk_list_head;
8114
8115	if (list_empty(head))
8116		goto out;
8117
8118	list_for_each_entry(clki, head, list) {
8119		if (!clki->name)
8120			continue;
8121
8122		clki->clk = devm_clk_get(dev, clki->name);
8123		if (IS_ERR(clki->clk)) {
8124			ret = PTR_ERR(clki->clk);
8125			dev_err(dev, "%s: %s clk get failed, %d\n",
8126					__func__, clki->name, ret);
8127			goto out;
8128		}
8129
8130		/*
8131		 * Parse device ref clk freq as per device tree "ref_clk".
8132		 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8133		 * in ufshcd_alloc_host().
8134		 */
8135		if (!strcmp(clki->name, "ref_clk"))
8136			ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8137
8138		if (clki->max_freq) {
8139			ret = clk_set_rate(clki->clk, clki->max_freq);
8140			if (ret) {
8141				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8142					__func__, clki->name,
8143					clki->max_freq, ret);
8144				goto out;
8145			}
8146			clki->curr_freq = clki->max_freq;
8147		}
8148		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8149				clki->name, clk_get_rate(clki->clk));
8150	}
8151out:
8152	return ret;
8153}
8154
8155static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8156{
8157	int err = 0;
8158
8159	if (!hba->vops)
8160		goto out;
8161
8162	err = ufshcd_vops_init(hba);
8163	if (err)
8164		goto out;
8165
8166	err = ufshcd_vops_setup_regulators(hba, true);
8167	if (err)
8168		ufshcd_vops_exit(hba);
8169out:
8170	if (err)
8171		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8172			__func__, ufshcd_get_var_name(hba), err);
8173	return err;
8174}
8175
8176static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8177{
8178	if (!hba->vops)
8179		return;
8180
8181	ufshcd_vops_setup_regulators(hba, false);
8182
8183	ufshcd_vops_exit(hba);
8184}
8185
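/**
 * ufshcd_hba_init - power up the host controller and its supplies
 * @hba: per-adapter instance
 *
 * Initializes and enables the host controller regulator, the controller
 * clocks, the device regulators and the vendor specific (variant) part of
 * the driver, then marks the HBA as powered.
 *
 * Returns zero on success, non-zero error value on failure.
 */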
8186static int ufshcd_hba_init(struct ufs_hba *hba)
8187{
8188	int err;
8189
8190	/*
8191	 * Handle host controller power separately from the UFS device power
	 * rails as this makes it easier to control host controller power
	 * collapse, which is different from UFS device power collapse.
	 * Also, enable the host controller power before going ahead with the
	 * rest of the initialization here.
8196	 */
8197	err = ufshcd_init_hba_vreg(hba);
8198	if (err)
8199		goto out;
8200
8201	err = ufshcd_setup_hba_vreg(hba, true);
8202	if (err)
8203		goto out;
8204
8205	err = ufshcd_init_clocks(hba);
8206	if (err)
8207		goto out_disable_hba_vreg;
8208
8209	err = ufshcd_setup_clocks(hba, true);
8210	if (err)
8211		goto out_disable_hba_vreg;
8212
8213	err = ufshcd_init_vreg(hba);
8214	if (err)
8215		goto out_disable_clks;
8216
8217	err = ufshcd_setup_vreg(hba, true);
8218	if (err)
8219		goto out_disable_clks;
8220
8221	err = ufshcd_variant_hba_init(hba);
8222	if (err)
8223		goto out_disable_vreg;
8224
8225	hba->is_powered = true;
8226	goto out;
8227
8228out_disable_vreg:
8229	ufshcd_setup_vreg(hba, false);
8230out_disable_clks:
8231	ufshcd_setup_clocks(hba, false);
8232out_disable_hba_vreg:
8233	ufshcd_setup_hba_vreg(hba, false);
8234out:
8235	return err;
8236}
8237
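/**
 * ufshcd_hba_exit - power down the host controller and its supplies
 * @hba: per-adapter instance
 */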
8238static void ufshcd_hba_exit(struct ufs_hba *hba)
8239{
8240	if (hba->is_powered) {
8241		ufshcd_variant_hba_exit(hba);
8242		ufshcd_setup_vreg(hba, false);
8243		ufshcd_suspend_clkscaling(hba);
8244		if (ufshcd_is_clkscaling_supported(hba))
8245			if (hba->devfreq)
8246				ufshcd_suspend_clkscaling(hba);
8247		ufshcd_setup_clocks(hba, false);
8248		ufshcd_setup_hba_vreg(hba, false);
8249		hba->is_powered = false;
8250		ufs_put_device_desc(hba);
8251	}
8252}
8253
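/**
 * ufshcd_send_request_sense - issue a REQUEST SENSE command
 * @hba: per-adapter instance
 * @sdp: scsi device to send the command to
 *
 * Used to clear a pending unit attention condition on the well known LUs.
 *
 * Returns zero on success, non-zero error value on failure.
 */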
8254static int
8255ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
8256{
8257	unsigned char cmd[6] = {REQUEST_SENSE,
8258				0,
8259				0,
8260				0,
8261				UFS_SENSE_SIZE,
8262				0};
8263	char *buffer;
8264	int ret;
8265
8266	buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
8267	if (!buffer) {
8268		ret = -ENOMEM;
8269		goto out;
8270	}
8271
8272	ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
8273			UFS_SENSE_SIZE, NULL, NULL,
8274			msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
8275	if (ret)
8276		pr_err("%s: failed with err %d\n", __func__, ret);
8277
8278	kfree(buffer);
8279out:
8280	return ret;
8281}
8282
8283/**
8284 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8285 *			     power mode
8286 * @hba: per adapter instance
8287 * @pwr_mode: device power mode to set
8288 *
8289 * Returns 0 if requested power mode is set successfully
8290 * Returns non-zero if failed to set the requested power mode
8291 */
8292static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8293				     enum ufs_dev_pwr_mode pwr_mode)
8294{
8295	unsigned char cmd[6] = { START_STOP };
8296	struct scsi_sense_hdr sshdr;
8297	struct scsi_device *sdp;
8298	unsigned long flags;
8299	int ret;
8300
8301	spin_lock_irqsave(hba->host->host_lock, flags);
8302	sdp = hba->sdev_ufs_device;
8303	if (sdp) {
8304		ret = scsi_device_get(sdp);
8305		if (!ret && !scsi_device_online(sdp)) {
8306			ret = -ENODEV;
8307			scsi_device_put(sdp);
8308		}
8309	} else {
8310		ret = -ENODEV;
8311	}
8312	spin_unlock_irqrestore(hba->host->host_lock, flags);
8313
8314	if (ret)
8315		return ret;
8316
8317	/*
8318	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
8319	 * handling, which would wait for host to be resumed. Since we know
8320	 * we are functional while we are here, skip host resume in error
8321	 * handling context.
8322	 */
8323	hba->host->eh_noresume = 1;
8324	if (hba->wlun_dev_clr_ua) {
8325		ret = ufshcd_send_request_sense(hba, sdp);
8326		if (ret)
8327			goto out;
8328		/* Unit attention condition is cleared now */
8329		hba->wlun_dev_clr_ua = false;
8330	}
8331
8332	cmd[4] = pwr_mode << 4;
8333
8334	/*
	 * This function is generally called from the power management
	 * callbacks, hence set the RQF_PM flag so that it doesn't resume
	 * already suspended children.
8338	 */
8339	ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8340			START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8341	if (ret) {
8342		sdev_printk(KERN_WARNING, sdp,
8343			    "START_STOP failed for power mode: %d, result %x\n",
8344			    pwr_mode, ret);
8345		if (driver_byte(ret) == DRIVER_SENSE)
8346			scsi_print_sense_hdr(sdp, NULL, &sshdr);
8347	}
8348
8349	if (!ret)
8350		hba->curr_dev_pwr_mode = pwr_mode;
8351out:
8352	scsi_device_put(sdp);
8353	hba->host->eh_noresume = 0;
8354	return ret;
8355}
8356
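/**
 * ufshcd_link_state_transition - put the UniPro link into the requested state
 * @hba: per-adapter instance
 * @req_link_state: requested link state
 * @check_for_bkops: if set, do not turn the link off while auto bkops is
 *	enabled
 *
 * Returns zero on success, non-zero error value on failure.
 */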
8357static int ufshcd_link_state_transition(struct ufs_hba *hba,
8358					enum uic_link_state req_link_state,
8359					int check_for_bkops)
8360{
8361	int ret = 0;
8362
8363	if (req_link_state == hba->uic_link_state)
8364		return 0;
8365
8366	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8367		ret = ufshcd_uic_hibern8_enter(hba);
8368		if (!ret) {
8369			ufshcd_set_link_hibern8(hba);
8370		} else {
8371			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8372					__func__, ret);
8373			goto out;
8374		}
8375	}
8376	/*
8377	 * If autobkops is enabled, link can't be turned off because
8378	 * turning off the link would also turn off the device.
8379	 */
8380	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8381		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
8382		/*
		 * Let's make sure that the link is in low power mode; we are
		 * doing this currently by putting the link in Hibern8. Another
		 * way to put the link in low power mode is to send a DME end
		 * point reset to the device and then send the DME reset command
		 * to the local UniPro. But putting the link in Hibern8 is much
		 * faster.
8388		 */
8389		ret = ufshcd_uic_hibern8_enter(hba);
8390		if (ret) {
8391			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8392					__func__, ret);
8393			goto out;
8394		}
8395		/*
8396		 * Change controller state to "reset state" which
8397		 * should also put the link in off/reset state
8398		 */
8399		ufshcd_hba_stop(hba);
8400		/*
8401		 * TODO: Check if we need any delay to make sure that
8402		 * controller is reset
8403		 */
8404		ufshcd_set_link_off(hba);
8405	}
8406
8407out:
8408	return ret;
8409}
8410
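/**
 * ufshcd_vreg_set_lpm - put the UFS device power rails into low power mode
 * @hba: per-adapter instance
 */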
8411static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8412{
8413	bool vcc_off = false;
8414
8415	/*
8416	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from the VCCQ rail).
	 * To avoid this situation, add a 2ms delay before putting these UFS
8419	 * rails in LPM mode.
8420	 */
8421	if (!ufshcd_is_link_active(hba) &&
8422	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8423		usleep_range(2000, 2100);
8424
8425	/*
	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
	 * save some power.
8428	 *
8429	 * If UFS device and link is in OFF state, all power supplies (VCC,
8430	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
8431	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
8432	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8433	 *
8434	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8435	 * in low power state which would save some power.
8436	 *
8437	 * If Write Booster is enabled and the device needs to flush the WB
8438	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
8439	 */
8440	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8441	    !hba->dev_info.is_lu_power_on_wp) {
8442		ufshcd_setup_vreg(hba, false);
8443		vcc_off = true;
8444	} else if (!ufshcd_is_ufs_dev_active(hba)) {
8445		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8446		vcc_off = true;
8447		if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
8448			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8449			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8450		}
8451	}
8452
8453	/*
	 * Some UFS devices require a delay after the VCC power rail is
	 * turned off.
8455	 */
8456	if (vcc_off && hba->vreg_info.vcc &&
8457		hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8458		usleep_range(5000, 5100);
8459}
8460
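/**
 * ufshcd_vreg_set_hpm - bring the UFS device power rails back to high power
 *	mode
 * @hba: per-adapter instance
 *
 * Reverses ufshcd_vreg_set_lpm(). Returns zero on success, non-zero error
 * value on failure.
 */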
8461static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8462{
8463	int ret = 0;
8464
8465	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8466	    !hba->dev_info.is_lu_power_on_wp) {
8467		ret = ufshcd_setup_vreg(hba, true);
8468	} else if (!ufshcd_is_ufs_dev_active(hba)) {
8469		if (!ufshcd_is_link_active(hba)) {
8470			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8471			if (ret)
8472				goto vcc_disable;
8473			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8474			if (ret)
8475				goto vccq_lpm;
8476		}
8477		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8478	}
8479	goto out;
8480
8481vccq_lpm:
8482	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8483vcc_disable:
8484	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8485out:
8486	return ret;
8487}
8488
8489static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8490{
8491	if (ufshcd_is_link_off(hba))
8492		ufshcd_setup_hba_vreg(hba, false);
8493}
8494
8495static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8496{
8497	if (ufshcd_is_link_off(hba))
8498		ufshcd_setup_hba_vreg(hba, true);
8499}
8500
8501/**
8502 * ufshcd_suspend - helper function for suspend operations
8503 * @hba: per adapter instance
8504 * @pm_op: desired low power operation type
8505 *
8506 * This function will try to put the UFS device and link into low power
8507 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8508 * (System PM level).
8509 *
8510 * If this function is called during shutdown, it will make sure that
8511 * both UFS device and UFS link is powered off.
8512 *
8513 * NOTE: UFS device & link must be active before we enter in this function.
8514 *
8515 * Returns 0 for success and non-zero for failure
8516 */
8517static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8518{
8519	int ret = 0;
8520	enum ufs_pm_level pm_lvl;
8521	enum ufs_dev_pwr_mode req_dev_pwr_mode;
8522	enum uic_link_state req_link_state;
8523
8524	hba->pm_op_in_progress = 1;
8525	if (!ufshcd_is_shutdown_pm(pm_op)) {
8526		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8527			 hba->rpm_lvl : hba->spm_lvl;
8528		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8529		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8530	} else {
8531		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8532		req_link_state = UIC_LINK_OFF_STATE;
8533	}
8534
8535	/*
8536	 * If we can't transition into any of the low power modes
8537	 * just gate the clocks.
8538	 */
8539	ufshcd_hold(hba, false);
8540	hba->clk_gating.is_suspended = true;
8541
8542	if (hba->clk_scaling.is_allowed) {
8543		cancel_work_sync(&hba->clk_scaling.suspend_work);
8544		cancel_work_sync(&hba->clk_scaling.resume_work);
8545		ufshcd_suspend_clkscaling(hba);
8546	}
8547
8548	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8549			req_link_state == UIC_LINK_ACTIVE_STATE) {
8550		goto disable_clks;
8551	}
8552
8553	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8554	    (req_link_state == hba->uic_link_state))
8555		goto enable_gating;
8556
8557	/* UFS device & link must be active before we enter in this function */
8558	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8559		ret = -EINVAL;
8560		goto enable_gating;
8561	}
8562
8563	if (ufshcd_is_runtime_pm(pm_op)) {
8564		if (ufshcd_can_autobkops_during_suspend(hba)) {
8565			/*
			 * The device is idle with no requests in the queue;
			 * allow background operations if bkops status shows
8568			 * that performance might be impacted.
8569			 */
8570			ret = ufshcd_urgent_bkops(hba);
8571			if (ret)
8572				goto enable_gating;
8573		} else {
8574			/* make sure that auto bkops is disabled */
8575			ufshcd_disable_auto_bkops(hba);
8576		}
8577		/*
		 * If the device needs to do BKOPs or a WB buffer flush during
		 * Hibern8, keep the device power mode as "active power mode"
		 * and keep the VCC supply on.
8581		 */
8582		hba->dev_info.b_rpm_dev_flush_capable =
8583			hba->auto_bkops_enabled ||
8584			(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8585			((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8586			ufshcd_is_auto_hibern8_enabled(hba))) &&
8587			ufshcd_wb_need_flush(hba));
8588	}
8589
8590	if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8591		if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8592		    !ufshcd_is_runtime_pm(pm_op)) {
8593			/* ensure that bkops is disabled */
8594			ufshcd_disable_auto_bkops(hba);
8595		}
8596
8597		if (!hba->dev_info.b_rpm_dev_flush_capable) {
8598			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8599			if (ret)
8600				goto enable_gating;
8601		}
8602	}
8603
8604	flush_work(&hba->eeh_work);
8605	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8606	if (ret)
8607		goto set_dev_active;
8608
8609	ufshcd_vreg_set_lpm(hba);
8610
8611disable_clks:
8612	/*
8613	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space, call them before the
	 * host clocks are turned off.
8616	 */
8617	ret = ufshcd_vops_suspend(hba, pm_op);
8618	if (ret)
8619		goto set_link_active;
8620	/*
	 * Disable the host irq as there won't be any host controller
	 * transactions expected till resume.
8623	 */
8624	ufshcd_disable_irq(hba);
8625
8626	ufshcd_setup_clocks(hba, false);
8627
8628	if (ufshcd_is_clkgating_allowed(hba)) {
8629		hba->clk_gating.state = CLKS_OFF;
8630		trace_ufshcd_clk_gating(dev_name(hba->dev),
8631					hba->clk_gating.state);
8632	}
8633
8634	/* Put the host controller in low power mode if possible */
8635	ufshcd_hba_vreg_set_lpm(hba);
8636	goto out;
8637
8638set_link_active:
8639	if (hba->clk_scaling.is_allowed)
8640		ufshcd_resume_clkscaling(hba);
8641	ufshcd_vreg_set_hpm(hba);
8642	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8643		ufshcd_set_link_active(hba);
8644	else if (ufshcd_is_link_off(hba))
8645		ufshcd_host_reset_and_restore(hba);
8646set_dev_active:
8647	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8648		ufshcd_disable_auto_bkops(hba);
8649enable_gating:
8650	if (hba->clk_scaling.is_allowed)
8651		ufshcd_resume_clkscaling(hba);
8652	hba->clk_gating.is_suspended = false;
8653	hba->dev_info.b_rpm_dev_flush_capable = false;
8654	ufshcd_release(hba);
8655out:
8656	if (hba->dev_info.b_rpm_dev_flush_capable) {
8657		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8658			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8659	}
8660
8661	hba->pm_op_in_progress = 0;
8662
8663	if (ret)
8664		ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
8665	return ret;
8666}
8667
8668/**
8669 * ufshcd_resume - helper function for resume operations
8670 * @hba: per adapter instance
8671 * @pm_op: runtime PM or system PM
8672 *
8673 * This function basically brings the UFS device, UniPro link and controller
8674 * to active state.
8675 *
8676 * Returns 0 for success and non-zero for failure
8677 */
8678static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8679{
8680	int ret;
8681	enum uic_link_state old_link_state;
8682
8683	hba->pm_op_in_progress = 1;
8684	old_link_state = hba->uic_link_state;
8685
8686	ufshcd_hba_vreg_set_hpm(hba);
8687	/* Make sure clocks are enabled before accessing controller */
8688	ret = ufshcd_setup_clocks(hba, true);
8689	if (ret)
8690		goto out;
8691
8692	/* enable the host irq as host controller would be active soon */
8693	ufshcd_enable_irq(hba);
8694
8695	ret = ufshcd_vreg_set_hpm(hba);
8696	if (ret)
8697		goto disable_irq_and_vops_clks;
8698
8699	/*
8700	 * Call vendor specific resume callback. As these callbacks may access
8701	 * vendor specific host controller register space call them when the
8702	 * host clocks are ON.
8703	 */
8704	ret = ufshcd_vops_resume(hba, pm_op);
8705	if (ret)
8706		goto disable_vreg;
8707
8708	if (ufshcd_is_link_hibern8(hba)) {
8709		ret = ufshcd_uic_hibern8_exit(hba);
8710		if (!ret) {
8711			ufshcd_set_link_active(hba);
8712		} else {
8713			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
8714					__func__, ret);
8715			goto vendor_suspend;
8716		}
8717	} else if (ufshcd_is_link_off(hba)) {
8718		/*
8719		 * A full initialization of the host and the device is
8720		 * required since the link was put to off during suspend.
8721		 */
8722		ret = ufshcd_reset_and_restore(hba);
8723		/*
8724		 * ufshcd_reset_and_restore() should have already
8725		 * set the link state as active
8726		 */
8727		if (ret || !ufshcd_is_link_active(hba))
8728			goto vendor_suspend;
8729	}
8730
8731	if (!ufshcd_is_ufs_dev_active(hba)) {
8732		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8733		if (ret)
8734			goto set_old_link_state;
8735	}
8736
8737	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8738		ufshcd_enable_auto_bkops(hba);
8739	else
8740		/*
8741		 * If BKOPs operations are urgently needed at this moment then
8742		 * keep auto-bkops enabled or else disable it.
8743		 */
8744		ufshcd_urgent_bkops(hba);
8745
8746	hba->clk_gating.is_suspended = false;
8747
8748	if (hba->clk_scaling.is_allowed)
8749		ufshcd_resume_clkscaling(hba);
8750
8751	/* Enable Auto-Hibernate if configured */
8752	ufshcd_auto_hibern8_enable(hba);
8753
8754	if (hba->dev_info.b_rpm_dev_flush_capable) {
8755		hba->dev_info.b_rpm_dev_flush_capable = false;
8756		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8757	}
8758
8759	/* Schedule clock gating in case of no access to UFS device yet */
8760	ufshcd_release(hba);
8761
8762	goto out;
8763
8764set_old_link_state:
8765	ufshcd_link_state_transition(hba, old_link_state, 0);
8766vendor_suspend:
8767	ufshcd_vops_suspend(hba, pm_op);
8768disable_vreg:
8769	ufshcd_vreg_set_lpm(hba);
8770disable_irq_and_vops_clks:
8771	ufshcd_disable_irq(hba);
8772	if (hba->clk_scaling.is_allowed)
8773		ufshcd_suspend_clkscaling(hba);
8774	ufshcd_setup_clocks(hba, false);
8775	if (ufshcd_is_clkgating_allowed(hba)) {
8776		hba->clk_gating.state = CLKS_OFF;
8777		trace_ufshcd_clk_gating(dev_name(hba->dev),
8778					hba->clk_gating.state);
8779	}
8780out:
8781	hba->pm_op_in_progress = 0;
8782	if (ret)
8783		ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
8784	return ret;
8785}
8786
8787/**
8788 * ufshcd_system_suspend - system suspend routine
8789 * @hba: per adapter instance
8790 *
8791 * Check the description of ufshcd_suspend() function for more details.
8792 *
8793 * Returns 0 for success and non-zero for failure
8794 */
8795int ufshcd_system_suspend(struct ufs_hba *hba)
8796{
8797	int ret = 0;
8798	ktime_t start = ktime_get();
8799
8800	if (!hba || !hba->is_powered)
8801		return 0;
8802
8803	cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
8804
8805	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8806	     hba->curr_dev_pwr_mode) &&
8807	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8808	     hba->uic_link_state) &&
8809	     pm_runtime_suspended(hba->dev) &&
8810	     !hba->dev_info.b_rpm_dev_flush_capable)
8811		goto out;
8812
8813	if (pm_runtime_suspended(hba->dev)) {
8814		/*
8815		 * UFS device and/or UFS link low power states during runtime
		 * suspend seem to be different from what is expected during
		 * system suspend. Hence runtime resume the device & link and
		 * let the system suspend low power states take effect.
		 * TODO: If resume takes a long time, we might optimize it in
		 * the future by not resuming everything if possible.
8821		 */
8822		ret = ufshcd_runtime_resume(hba);
8823		if (ret)
8824			goto out;
8825	}
8826
8827	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8828out:
8829	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8830		ktime_to_us(ktime_sub(ktime_get(), start)),
8831		hba->curr_dev_pwr_mode, hba->uic_link_state);
8832	if (!ret)
8833		hba->is_sys_suspended = true;
8834	return ret;
8835}
8836EXPORT_SYMBOL(ufshcd_system_suspend);
8837
8838/**
8839 * ufshcd_system_resume - system resume routine
8840 * @hba: per adapter instance
8841 *
8842 * Returns 0 for success and non-zero for failure
8843 */
8845int ufshcd_system_resume(struct ufs_hba *hba)
8846{
8847	int ret = 0;
8848	ktime_t start = ktime_get();
8849
8850	if (!hba)
8851		return -EINVAL;
8852
8853	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8854		/*
8855		 * Let the runtime resume take care of resuming
8856		 * if runtime suspended.
8857		 */
8858		goto out;
8859	else
8860		ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8861out:
8862	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8863		ktime_to_us(ktime_sub(ktime_get(), start)),
8864		hba->curr_dev_pwr_mode, hba->uic_link_state);
8865	if (!ret)
8866		hba->is_sys_suspended = false;
8867	return ret;
8868}
8869EXPORT_SYMBOL(ufshcd_system_resume);
8870
8871/**
8872 * ufshcd_runtime_suspend - runtime suspend routine
8873 * @hba: per adapter instance
8874 *
8875 * Check the description of ufshcd_suspend() function for more details.
8876 *
8877 * Returns 0 for success and non-zero for failure
8878 */
8879int ufshcd_runtime_suspend(struct ufs_hba *hba)
8880{
8881	int ret = 0;
8882	ktime_t start = ktime_get();
8883
8884	if (!hba)
8885		return -EINVAL;
8886
8887	if (!hba->is_powered)
8888		goto out;
8889	else
8890		ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8891out:
8892	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8893		ktime_to_us(ktime_sub(ktime_get(), start)),
8894		hba->curr_dev_pwr_mode, hba->uic_link_state);
8895	return ret;
8896}
8897EXPORT_SYMBOL(ufshcd_runtime_suspend);
8898
8899/**
8900 * ufshcd_runtime_resume - runtime resume routine
8901 * @hba: per adapter instance
8902 *
8903 * This function basically brings the UFS device, UniPro link and controller
8904 * to active state. Following operations are done in this function:
8905 *
8906 * 1. Turn on all the controller related clocks
8907 * 2. Bring the UniPro link out of Hibernate state
8908 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8909 *    to active state.
8910 * 4. If auto-bkops is enabled on the device, disable it.
8911 *
 * So the following would be the possible power state after this function
 * returns successfully:
8914 *	S1: UFS device in Active state with VCC rail ON
8915 *	    UniPro link in Active state
8916 *	    All the UFS/UniPro controller clocks are ON
8917 *
8918 * Returns 0 for success and non-zero for failure
8919 */
8920int ufshcd_runtime_resume(struct ufs_hba *hba)
8921{
8922	int ret = 0;
8923	ktime_t start = ktime_get();
8924
8925	if (!hba)
8926		return -EINVAL;
8927
8928	if (!hba->is_powered)
8929		goto out;
8930	else
8931		ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8932out:
8933	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8934		ktime_to_us(ktime_sub(ktime_get(), start)),
8935		hba->curr_dev_pwr_mode, hba->uic_link_state);
8936	return ret;
8937}
8938EXPORT_SYMBOL(ufshcd_runtime_resume);
8939
8940int ufshcd_runtime_idle(struct ufs_hba *hba)
8941{
8942	return 0;
8943}
8944EXPORT_SYMBOL(ufshcd_runtime_idle);
8945
8946/**
8947 * ufshcd_shutdown - shutdown routine
8948 * @hba: per adapter instance
8949 *
8950 * This function would power off both UFS device and UFS link.
8951 *
8952 * Returns 0 always to allow force shutdown even in case of errors.
8953 */
8954int ufshcd_shutdown(struct ufs_hba *hba)
8955{
8956	int ret = 0;
8957
8958	if (!hba->is_powered)
8959		goto out;
8960
8961	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8962		goto out;
8963
8964	pm_runtime_get_sync(hba->dev);
8965
8966	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8967out:
8968	if (ret)
8969		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8970	/* allow force shutdown even in case of errors */
8971	return 0;
8972}
8973EXPORT_SYMBOL(ufshcd_shutdown);
8974
8975/**
8976 * ufshcd_remove - de-allocate SCSI host and host memory space
8977 *		data structure memory
8978 * @hba: per adapter instance
8979 */
8980void ufshcd_remove(struct ufs_hba *hba)
8981{
8982	ufs_bsg_remove(hba);
8983	ufs_sysfs_remove_nodes(hba->dev);
8984	blk_cleanup_queue(hba->tmf_queue);
8985	blk_mq_free_tag_set(&hba->tmf_tag_set);
8986	blk_cleanup_queue(hba->cmd_queue);
8987	scsi_remove_host(hba->host);
8988	destroy_workqueue(hba->eh_wq);
8989	/* disable interrupts */
8990	ufshcd_disable_intr(hba, hba->intr_mask);
8991	ufshcd_hba_stop(hba);
8992
8993	ufshcd_exit_clk_scaling(hba);
8994	ufshcd_exit_clk_gating(hba);
8995	if (ufshcd_is_clkscaling_supported(hba))
8996		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8997	ufshcd_hba_exit(hba);
8998}
8999EXPORT_SYMBOL_GPL(ufshcd_remove);
9000
9001/**
9002 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9003 * @hba: pointer to Host Bus Adapter (HBA)
9004 */
9005void ufshcd_dealloc_host(struct ufs_hba *hba)
9006{
9007	ufshcd_crypto_destroy_keyslot_manager(hba);
9008	scsi_host_put(hba->host);
9009}
9010EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9011
9012/**
9013 * ufshcd_set_dma_mask - Set dma mask based on the controller
9014 *			 addressing capability
9015 * @hba: per adapter instance
9016 *
9017 * Returns 0 for success, non-zero for failure
9018 */
9019static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9020{
9021	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9022		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9023			return 0;
9024	}
9025	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9026}
9027
9028/**
9029 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
9030 * @dev: pointer to device handle
9031 * @hba_handle: driver private handle
9032 * Returns 0 on success, non-zero value on failure
9033 */
9034int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
9035{
9036	struct Scsi_Host *host;
9037	struct ufs_hba *hba;
9038	int err = 0;
9039
9040	if (!dev) {
9041		dev_err(dev,
9042		"Invalid memory reference for dev is NULL\n");
9043		err = -ENODEV;
9044		goto out_error;
9045	}
9046
9047	host = scsi_host_alloc(&ufshcd_driver_template,
9048				sizeof(struct ufs_hba));
9049	if (!host) {
9050		dev_err(dev, "scsi_host_alloc failed\n");
9051		err = -ENOMEM;
9052		goto out_error;
9053	}
9054	hba = shost_priv(host);
9055	hba->host = host;
9056	hba->dev = dev;
9057	*hba_handle = hba;
9058	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
9059
9060	INIT_LIST_HEAD(&hba->clk_list_head);
9061
9062out_error:
9063	return err;
9064}
9065EXPORT_SYMBOL(ufshcd_alloc_host);
9066
9067/* This function exists because blk_mq_alloc_tag_set() requires this. */
9068static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9069				     const struct blk_mq_queue_data *qd)
9070{
9071	WARN_ON_ONCE(true);
9072	return BLK_STS_NOTSUPP;
9073}
9074
9075static const struct blk_mq_ops ufshcd_tmf_ops = {
9076	.queue_rq = ufshcd_queue_tmf,
9077};
9078
9079/**
9080 * ufshcd_init - Driver initialization routine
9081 * @hba: per-adapter instance
9082 * @mmio_base: base register address
9083 * @irq: Interrupt line of device
9084 * Returns 0 on success, non-zero value on failure
9085 */
9086int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9087{
9088	int err;
9089	struct Scsi_Host *host = hba->host;
9090	struct device *dev = hba->dev;
9091	char eh_wq_name[sizeof("ufs_eh_wq_00")];
9092
9093	/*
9094	 * dev_set_drvdata() must be called before any callbacks are registered
9095	 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
9096	 * sysfs).
9097	 */
9098	dev_set_drvdata(dev, hba);
9099
9100	if (!mmio_base) {
9101		dev_err(hba->dev,
9102		"Invalid memory reference for mmio_base is NULL\n");
9103		err = -ENODEV;
9104		goto out_error;
9105	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;
	hba->vps = &ufs_hba_vps;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	err = ufshcd_hba_capabilities(hba);
	if (err)
		goto out_disable;

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	if ((hba->ufs_version != UFSHCI_VERSION_10) &&
	    (hba->ufs_version != UFSHCI_VERSION_11) &&
	    (hba->ufs_version != UFSHCI_VERSION_20) &&
	    (hba->ufs_version != UFSHCI_VERSION_21))
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->ufs_version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = UFS_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize work queues */
	snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
		 hba->host->host_no);
	hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
	if (!hba->eh_wq) {
		dev_err(hba->dev, "%s: failed to create eh workqueue\n",
				__func__);
		err = -ENOMEM;
		goto out_disable;
	}
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	init_rwsem(&hba->clk_scaling_lock);

	ufshcd_init_clk_gating(hba);

	ufshcd_init_clk_scaling(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
	if (IS_ERR(hba->cmd_queue)) {
		err = PTR_ERR(hba->cmd_queue);
		goto out_remove_scsi_host;
	}

	hba->tmf_tag_set = (struct blk_mq_tag_set) {
		.nr_hw_queues	= 1,
		.queue_depth	= hba->nutmrs,
		.ops		= &ufshcd_tmf_ops,
		.flags		= BLK_MQ_F_NO_SCHED,
	};
	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
	if (err < 0)
		goto free_cmd_queue;
	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
	if (IS_ERR(hba->tmf_queue)) {
		err = PTR_ERR(hba->tmf_queue);
		goto free_tmf_tag_set;
	}
	hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
				    sizeof(*hba->tmf_rqs), GFP_KERNEL);
	if (!hba->tmf_rqs) {
		err = -ENOMEM;
		goto free_tmf_queue;
	}

	/* Reset the attached device */
	ufshcd_vops_device_reset(hba);

	ufshcd_init_crypto(hba);

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		goto free_tmf_queue;
	}

	/*
	 * Set the default power management level for runtime and system PM.
	 * Default power saving mode is to keep UFS link in Hibern8 state
	 * and UFS device in sleep state.
	 */
	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);

	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
			  ufshcd_rpm_dev_flush_recheck_work);

	/* Set the default auto-hibernate idle timer value to 150 ms */
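	/*
	 * AHIT encoding note: the timer field below holds the count and the
	 * scale field selects its unit; scale value 3 selects millisecond
	 * granularity, so a timer value of 150 corresponds to 150 ms.
	 */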
	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);
	atomic_set(&hba->scsi_block_reqs_cnt, 0);
	/*
	 * Assume the boot stage did not leave the device in a sleep or
	 * power-down state before handing control to the kernel. This
	 * assumption avoids doing link startup twice during
	 * ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufs_sysfs_add_nodes(hba->dev);

	return 0;

free_tmf_queue:
	blk_cleanup_queue(hba->tmf_queue);
free_tmf_tag_set:
	blk_mq_free_tag_set(&hba->tmf_tag_set);
free_cmd_queue:
	blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_scaling(hba);
	ufshcd_exit_clk_gating(hba);
	destroy_workqueue(hba->eh_wq);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
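
/*
 * A minimal sketch (not compiled) of how a platform glue driver typically
 * feeds its MMIO region and IRQ into ufshcd_init(); this is a simplified,
 * illustrative version of what the pltfrm/PCI glue code does, with a
 * hypothetical probe function name.
 */
#if 0
static int example_ufs_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *mmio_base;
	struct ufs_hba *hba;
	int irq, err;

	mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = ufshcd_alloc_host(dev, &hba);
	if (err)
		return err;

	err = ufshcd_init(hba, mmio_base, irq);
	if (err)
		ufshcd_dealloc_host(hba);

	return err;
}
#endif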

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);