xref: /kernel/linux/linux-5.10/drivers/mmc/host/cqhci.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

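/* Per-slot bookkeeping: the request occupying the slot plus error/recovery flags. */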
struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

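/*
 * Initialise a slot's link descriptor to point at that slot's transfer
 * descriptor region.  The DCMD slot carries no data, so its link descriptor
 * is written as not-valid/end-of-chain instead.
 */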
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

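/* Program the same mask into both the interrupt status-enable and signal-enable registers. */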
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			       CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * A transfer descriptor may be 96 bits long instead of 128 bits, in
	 * which case ADMA expects the next valid descriptor at the 96-bit
	 * rather than the 128-bit boundary.
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * Allocate a DMA-mapped chunk of memory for the task/link descriptors,
	 * allocate another DMA-mapped chunk for the transfer descriptors, and
	 * point each slot's link descriptor at its transfer descriptor region.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
					      cq_host->data_size,
					      &cq_host->trans_desc_dma_base,
					      GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		(unsigned long long)cq_host->desc_dma_base,
		(unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

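/*
 * Program the controller: CFG may only be changed while CQE is disabled, so
 * clear ENABLE first, set the DCMD and task-descriptor-size bits, write the
 * descriptor list base address and SSC2 (RCA), then enable and un-halt.
 */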
static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
		cqhci_writel(cq_host, 0, CQHCI_CTL);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (!card->ext_csd.cmdq_en)
		return -EINVAL;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err) {
		pr_err("%s: Failed to enable CQE, error %d\n",
		       mmc_hostname(mmc), err);
		return err;
	}

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

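/* Halt the CQE (polling for the HALT bit) so the host can issue non-CQE commands again. */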
static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	if (cq_host->ops->post_disable)
		cq_host->ops->post_disable(mmc);

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

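/* Build the 64-bit task descriptor attributes from the request's block count, block address and data flags. */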
static void cqhci_prep_task_desc(struct mmc_request *mrq,
					u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

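/*
 * DMA-map the request's scatterlist and write one transfer descriptor per
 * mapped segment, marking the last descriptor with the END bit.
 */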
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
			       struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
				mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

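/*
 * Build the direct-command (DCMD) task descriptor.  The command argument is
 * stored where a data task keeps its block address, and the response type /
 * timing bits are derived from the command's response flags.
 */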
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		if (cq_host->ops->pre_enable)
			cq_host->ops->pre_enable(mmc);

		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

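/*
 * Handle a response error (RED) or host-reported error: flag the task(s)
 * identified by TERRI and trigger recovery.  If no task is identified, mark
 * one outstanding task so that recovery can make forward progress.
 */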
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

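/*
 * Top-level CQHCI interrupt handler, called by the host driver when a CQE
 * interrupt is pending.  Acknowledges CQHCI_IS, starts error handling for
 * RED/host errors, and completes the tasks reported in TCN.
 */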
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_warn("%s: cqhci: Failed to clear tasks\n",
			mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

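/* Request a halt and wait, with a timeout, for the HALT bit to latch. */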
static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), however failing to halt complicates
 * the recovery, so set a timeout that would reasonably allow I/O to complete.
 */
#define CQHCI_START_HALT_TIMEOUT	500

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

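/*
 * Second phase of recovery: halt, clear all tasks, toggle CQHCI_ENABLE to
 * flush internal state, then complete every outstanding request with an
 * appropriate error before re-enabling interrupts.
 */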
static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	/*
	 * The specification contradicts itself: it says tasks cannot be
	 * cleared if CQHCI does not halt, and that if CQHCI does not halt it
	 * should be disabled and re-enabled, yet also that it must not be
	 * disabled before clearing tasks.  Have a go anyway.
	 */
	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/* Disable to make sure tasks really are cleared */
	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg |= CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!ok)
		cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "cqhci");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	      bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

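/*
 * Usage sketch (not part of this file): a host driver is expected to locate
 * the CQHCI register block, call cqhci_init(), forward its interrupt via
 * cqhci_irq(), and fill in a struct cqhci_host_ops as needed.  The names
 * my_cqhci_host_ops and my_host_supports_64bit_dma below are illustrative
 * only, not real symbols.
 *
 *	struct cqhci_host *cq_host = cqhci_pltfm_init(pdev);
 *
 *	if (IS_ERR(cq_host))
 *		return PTR_ERR(cq_host);
 *	cq_host->ops = &my_cqhci_host_ops;	// hypothetical ops table
 *	err = cqhci_init(cq_host, mmc, my_host_supports_64bit_dma);
 *	if (err)
 *		return err;
 *
 *	// In the host's interrupt handler, when a CQE interrupt is pending:
 *	//	cqhci_irq(mmc, intmask, cmd_error, data_error);
 */
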
MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");