1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
4 *  Copyright (C) 2013, Imagination Technologies
5 *
6 *  JZ4740 SD/MMC controller driver
7 */
8
9#include <linux/bitops.h>
10#include <linux/clk.h>
11#include <linux/delay.h>
12#include <linux/dmaengine.h>
13#include <linux/dma-mapping.h>
14#include <linux/err.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/irq.h>
18#include <linux/mmc/host.h>
19#include <linux/mmc/slot-gpio.h>
20#include <linux/module.h>
21#include <linux/of_device.h>
22#include <linux/pinctrl/consumer.h>
23#include <linux/platform_device.h>
24#include <linux/scatterlist.h>
25
26#include <asm/cacheflush.h>
27
/* Register map (offsets from the controller base / memory resource). */
#define JZ_REG_MMC_STRPCL	0x00
#define JZ_REG_MMC_STATUS	0x04
#define JZ_REG_MMC_CLKRT	0x08
#define JZ_REG_MMC_CMDAT	0x0C
#define JZ_REG_MMC_RESTO	0x10
#define JZ_REG_MMC_RDTO		0x14
#define JZ_REG_MMC_BLKLEN	0x18
#define JZ_REG_MMC_NOB		0x1C
#define JZ_REG_MMC_SNOB		0x20
#define JZ_REG_MMC_IMASK	0x24
#define JZ_REG_MMC_IREG		0x28
#define JZ_REG_MMC_CMD		0x2C
#define JZ_REG_MMC_ARG		0x30
#define JZ_REG_MMC_RESP_FIFO	0x34
#define JZ_REG_MMC_RXFIFO	0x38
#define JZ_REG_MMC_TXFIFO	0x3C
#define JZ_REG_MMC_LPM		0x40
#define JZ_REG_MMC_DMAC		0x44

/* STRPCL: clock start/stop, reset and operation start control. */
#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
#define JZ_MMC_STRPCL_RESET BIT(3)
#define JZ_MMC_STRPCL_START_OP BIT(2)
#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
#define JZ_MMC_STRPCL_CLOCK_START BIT(1)


/* STATUS: controller state and error flags (read-only). */
#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
#define JZ_MMC_STATUS_PRG_DONE BIT(13)
#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
#define JZ_MMC_STATUS_CLK_EN BIT(8)
#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)

/* Combined read/write error conditions (timeout | CRC). */
#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))


/* CMDAT: per-command attributes (bus width, DMA, data direction, response). */
#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
#define	JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN BIT(8)
#define JZ_MMC_CMDAT_INIT BIT(7)
#define JZ_MMC_CMDAT_BUSY BIT(6)
#define JZ_MMC_CMDAT_STREAM BIT(5)
#define JZ_MMC_CMDAT_WRITE BIT(4)
#define JZ_MMC_CMDAT_DATA_EN BIT(3)
#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
#define JZ_MMC_CMDAT_RSP_R1 1
#define JZ_MMC_CMDAT_RSP_R2 2
#define JZ_MMC_CMDAT_RSP_R3 3

/* IREG/IMASK interrupt bits. */
#define JZ_MMC_IRQ_SDIO BIT(7)
#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
#define JZ_MMC_IRQ_PRG_DONE BIT(1)
#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)

/* DMAC: DMA control register (JZ4780 and later only). */
#define JZ_MMC_DMAC_DMA_SEL BIT(1)
#define JZ_MMC_DMAC_DMA_EN BIT(0)

/* LPM: low-power mode / clock-phase tuning (JZ4725B and later). */
#define	JZ_MMC_LPM_DRV_RISING BIT(31)
#define	JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
#define	JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
#define	JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
#define	JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)

#define JZ_MMC_CLK_RATE 24000000
#define JZ_MMC_REQ_TIMEOUT_MS 5000
112
/*
 * Supported controller generations, ordered so feature checks can use
 * ">=" comparisons (each SoC's controller is a superset of the previous).
 */
enum jz4740_mmc_version {
	JZ_MMC_JZ4740,
	JZ_MMC_JZ4725B,
	JZ_MMC_JZ4760,
	JZ_MMC_JZ4780,
	JZ_MMC_X1000,
};
120
/* Request state machine, advanced by jz_mmc_irq_worker(). */
enum jz4740_mmc_state {
	JZ4740_MMC_STATE_READ_RESPONSE,
	JZ4740_MMC_STATE_TRANSFER_DATA,
	JZ4740_MMC_STATE_SEND_STOP,
	JZ4740_MMC_STATE_DONE,
};
127
128/*
129 * The MMC core allows to prepare a mmc_request while another mmc_request
130 * is in-flight. This is used via the pre_req/post_req hooks.
131 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
132 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
133 * flags to keep track of the mmc_request mapping state.
134 *
135 * COOKIE_UNMAPPED: the request is not mapped.
136 * COOKIE_PREMAPPED: the request was mapped in pre_req,
137 * and should be unmapped in post_req.
138 * COOKIE_MAPPED: the request was mapped in the irq handler,
139 * and should be unmapped before mmc_request_done is called..
140 */
/* DMA mapping state stored in data->host_cookie; see comment above. */
enum jz4780_cookie {
	COOKIE_UNMAPPED = 0,
	COOKIE_PREMAPPED,
	COOKIE_MAPPED,
};
146
/* Per-controller driver state, allocated as part of the mmc_host. */
struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct clk *clk;

	/* Which SoC generation we are driving (selects register widths). */
	enum jz4740_mmc_version version;

	int irq;
	int card_detect_irq;

	void __iomem *base;		/* mapped controller registers */
	struct resource *mem_res;	/* physical range; used for DMA FIFO addresses */
	struct mmc_request *req;	/* request in flight, NULL when idle */
	struct mmc_command *cmd;	/* command in flight */

	/* Bit 0 is set while a request waits for an IRQ or the timeout timer;
	 * whoever clears it (IRQ handler or timer) completes the request. */
	unsigned long waiting;

	/* Sticky CMDAT bits (bus width, one-shot INIT) merged into each command. */
	uint32_t cmdat;

	/* Shadow of the IMASK register; protected by @lock. */
	uint32_t irq_mask;

	spinlock_t lock;

	struct timer_list timeout_timer;
	/* Scatterlist iterator for the PIO read/write paths. */
	struct sg_mapping_iter miter;
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	bool use_dma;

/* The DMA trigger level is 8 words, that is to say, the DMA read
 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
 * trigger is when data words in MSC_TXFIFO is < 8.
 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};
185
186static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
187				      uint32_t val)
188{
189	if (host->version >= JZ_MMC_JZ4725B)
190		return writel(val, host->base + JZ_REG_MMC_IMASK);
191	else
192		return writew(val, host->base + JZ_REG_MMC_IMASK);
193}
194
195static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
196				     uint32_t val)
197{
198	if (host->version >= JZ_MMC_JZ4780)
199		writel(val, host->base + JZ_REG_MMC_IREG);
200	else
201		writew(val, host->base + JZ_REG_MMC_IREG);
202}
203
204static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
205{
206	if (host->version >= JZ_MMC_JZ4780)
207		return readl(host->base + JZ_REG_MMC_IREG);
208	else
209		return readw(host->base + JZ_REG_MMC_IREG);
210}
211
212/*----------------------------------------------------------------------------*/
213/* DMA infrastructure */
214
215static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
216{
217	if (!host->use_dma)
218		return;
219
220	dma_release_channel(host->dma_tx);
221	dma_release_channel(host->dma_rx);
222}
223
224static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
225{
226	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
227	if (IS_ERR(host->dma_tx)) {
228		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
229		return PTR_ERR(host->dma_tx);
230	}
231
232	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
233	if (IS_ERR(host->dma_rx)) {
234		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
235		dma_release_channel(host->dma_tx);
236		return PTR_ERR(host->dma_rx);
237	}
238
239	/*
240	 * Limit the maximum segment size in any SG entry according to
241	 * the parameters of the DMA engine device.
242	 */
243	if (host->dma_tx) {
244		struct device *dev = host->dma_tx->device->dev;
245		unsigned int max_seg_size = dma_get_max_seg_size(dev);
246
247		if (max_seg_size < host->mmc->max_seg_size)
248			host->mmc->max_seg_size = max_seg_size;
249	}
250
251	if (host->dma_rx) {
252		struct device *dev = host->dma_rx->device->dev;
253		unsigned int max_seg_size = dma_get_max_seg_size(dev);
254
255		if (max_seg_size < host->mmc->max_seg_size)
256			host->mmc->max_seg_size = max_seg_size;
257	}
258
259	return 0;
260}
261
262static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
263						       struct mmc_data *data)
264{
265	return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
266}
267
268static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
269				 struct mmc_data *data)
270{
271	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
272	enum dma_data_direction dir = mmc_get_dma_dir(data);
273
274	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
275	data->host_cookie = COOKIE_UNMAPPED;
276}
277
278/* Prepares DMA data for current or next transfer.
279 * A request can be in-flight when this is called.
280 */
281static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
282				       struct mmc_data *data,
283				       int cookie)
284{
285	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
286	enum dma_data_direction dir = mmc_get_dma_dir(data);
287	int sg_count;
288
289	if (data->host_cookie == COOKIE_PREMAPPED)
290		return data->sg_count;
291
292	sg_count = dma_map_sg(chan->device->dev,
293			data->sg,
294			data->sg_len,
295			dir);
296
297	if (sg_count <= 0) {
298		dev_err(mmc_dev(host->mmc),
299			"Failed to map scatterlist for DMA operation\n");
300		return -EINVAL;
301	}
302
303	data->sg_count = sg_count;
304	data->host_cookie = cookie;
305
306	return data->sg_count;
307}
308
/*
 * Configure the DMA channel for @data's direction, map the scatterlist
 * (or reuse a pre_req mapping) and submit the transfer. Returns 0 on
 * success or a negative errno; on failure any mapping created here is
 * undone.
 */
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	/* The DMA engine needs the *physical* FIFO address, hence mem_res
	 * rather than the ioremapped base. */
	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
	}

	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
			conf.direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	/* Only unmap what this function mapped; pre-mapped requests are
	 * cleaned up later in post_req. */
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}
355
356static void jz4740_mmc_pre_request(struct mmc_host *mmc,
357				   struct mmc_request *mrq)
358{
359	struct jz4740_mmc_host *host = mmc_priv(mmc);
360	struct mmc_data *data = mrq->data;
361
362	if (!host->use_dma)
363		return;
364
365	data->host_cookie = COOKIE_UNMAPPED;
366	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
367		data->host_cookie = COOKIE_UNMAPPED;
368}
369
370static void jz4740_mmc_post_request(struct mmc_host *mmc,
371				    struct mmc_request *mrq,
372				    int err)
373{
374	struct jz4740_mmc_host *host = mmc_priv(mmc);
375	struct mmc_data *data = mrq->data;
376
377	if (data && data->host_cookie != COOKIE_UNMAPPED)
378		jz4740_mmc_dma_unmap(host, data);
379
380	if (err) {
381		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
382
383		dmaengine_terminate_all(chan);
384	}
385}
386
387/*----------------------------------------------------------------------------*/
388
389static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
390	unsigned int irq, bool enabled)
391{
392	unsigned long flags;
393
394	spin_lock_irqsave(&host->lock, flags);
395	if (enabled)
396		host->irq_mask &= ~irq;
397	else
398		host->irq_mask |= irq;
399
400	jz4740_mmc_write_irq_mask(host, host->irq_mask);
401	spin_unlock_irqrestore(&host->lock, flags);
402}
403
404static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
405	bool start_transfer)
406{
407	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
408
409	if (start_transfer)
410		val |= JZ_MMC_STRPCL_START_OP;
411
412	writew(val, host->base + JZ_REG_MMC_STRPCL);
413}
414
415static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
416{
417	uint32_t status;
418	unsigned int timeout = 1000;
419
420	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
421	do {
422		status = readl(host->base + JZ_REG_MMC_STATUS);
423	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
424}
425
426static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
427{
428	uint32_t status;
429	unsigned int timeout = 1000;
430
431	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
432	udelay(10);
433	do {
434		status = readl(host->base + JZ_REG_MMC_STATUS);
435	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
436}
437
438static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
439{
440	struct mmc_request *req;
441	struct mmc_data *data;
442
443	req = host->req;
444	data = req->data;
445	host->req = NULL;
446
447	if (data && data->host_cookie == COOKIE_MAPPED)
448		jz4740_mmc_dma_unmap(host, data);
449	mmc_request_done(host->mmc, req);
450}
451
452static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
453	unsigned int irq)
454{
455	unsigned int timeout = 0x800;
456	uint32_t status;
457
458	do {
459		status = jz4740_mmc_read_irq_reg(host);
460	} while (!(status & irq) && --timeout);
461
462	if (timeout == 0) {
463		set_bit(0, &host->waiting);
464		mod_timer(&host->timeout_timer,
465			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
466		jz4740_mmc_set_irq_enabled(host, irq, true);
467		return true;
468	}
469
470	return false;
471}
472
473static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
474	struct mmc_data *data)
475{
476	int status;
477
478	status = readl(host->base + JZ_REG_MMC_STATUS);
479	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
480		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
481			host->req->cmd->error = -ETIMEDOUT;
482			data->error = -ETIMEDOUT;
483		} else {
484			host->req->cmd->error = -EIO;
485			data->error = -EIO;
486		}
487	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
488		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
489			host->req->cmd->error = -ETIMEDOUT;
490			data->error = -ETIMEDOUT;
491		} else {
492			host->req->cmd->error = -EIO;
493			data->error = -EIO;
494		}
495	}
496}
497
/*
 * PIO write path: feed the TX FIFO from the scatterlist in bursts of 8
 * words (half the FIFO), waiting for the TXFIFO-write-request flag
 * before each burst. Returns true if polling timed out — the transfer
 * then resumes IRQ-driven from the saved miter position — or false when
 * everything was written.
 */
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
	struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		/* j = full 8-word bursts, i = leftover words (< 8) */
		i = miter->length / 4;
		j = i / 8;
		i = i & 0x7;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	/* Record how far we got so the IRQ handler continues from here. */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
552
/*
 * PIO read path: drain the RX FIFO into the scatterlist in bursts of 8
 * words, waiting for the RXFIFO-read-request flag before each burst.
 * Returns true on poll timeout (the transfer resumes IRQ-driven from
 * the saved miter position), false when all data was read.
 */
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		/* j = full 32-byte bursts, i = leftover bytes (< 32) */
		i = miter->length;
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			/* Whole words first, then the trailing partial word. */
			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;

		/* This can go away once MIPS implements
		 * flush_kernel_dcache_page */
		flush_dcache_page(miter->page);
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometime one word more in the fifo then
	 * requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	/* Record how far we got so the IRQ handler continues from here. */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
627
628static void jz4740_mmc_timeout(struct timer_list *t)
629{
630	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
631
632	if (!test_and_clear_bit(0, &host->waiting))
633		return;
634
635	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
636
637	host->req->cmd->error = -ETIMEDOUT;
638	jz4740_mmc_request_done(host);
639}
640
/*
 * Read the command response out of the 16-bit response FIFO and repack
 * it into cmd->resp[] in the layout the MMC core expects.
 */
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
	struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		/* 136-bit response: each 32-bit resp word straddles FIFO
		 * halfword boundaries, so the low byte of one read is
		 * carried over into the next word. */
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		/* 48-bit response: 32 payload bits spread over three reads. */
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}
663
/*
 * Program the controller for @cmd and issue it on the bus. The clock is
 * stopped while the registers are written and restarted (with START_OP)
 * at the end. host->cmdat carries the sticky bits set via set_ios; the
 * one-shot INIT bit is cleared so only the first command carries it.
 */
static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
	struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	/* Select the controller's response format for this command. */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The 4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		/* Block geometry for the data phase. */
		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	/* Restart the clock and start the programmed operation. */
	jz4740_mmc_clock_enable(host, 1);
}
726
727static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
728{
729	struct mmc_command *cmd = host->req->cmd;
730	struct mmc_data *data = cmd->data;
731	int direction;
732
733	if (data->flags & MMC_DATA_READ)
734		direction = SG_MITER_TO_SG;
735	else
736		direction = SG_MITER_FROM_SG;
737
738	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
739}
740
741
742static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
743{
744	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
745	struct mmc_command *cmd = host->req->cmd;
746	struct mmc_request *req = host->req;
747	struct mmc_data *data = cmd->data;
748	bool timeout = false;
749
750	if (cmd->error)
751		host->state = JZ4740_MMC_STATE_DONE;
752
753	switch (host->state) {
754	case JZ4740_MMC_STATE_READ_RESPONSE:
755		if (cmd->flags & MMC_RSP_PRESENT)
756			jz4740_mmc_read_response(host, cmd);
757
758		if (!data)
759			break;
760
761		jz_mmc_prepare_data_transfer(host);
762		fallthrough;
763
764	case JZ4740_MMC_STATE_TRANSFER_DATA:
765		if (host->use_dma) {
766			/* Use DMA if enabled.
767			 * Data transfer direction is defined later by
768			 * relying on data flags in
769			 * jz4740_mmc_prepare_dma_data() and
770			 * jz4740_mmc_start_dma_transfer().
771			 */
772			timeout = jz4740_mmc_start_dma_transfer(host, data);
773			data->bytes_xfered = data->blocks * data->blksz;
774		} else if (data->flags & MMC_DATA_READ)
775			/* Use PIO if DMA is not enabled.
776			 * Data transfer direction was defined before
777			 * by relying on data flags in
778			 * jz_mmc_prepare_data_transfer().
779			 */
780			timeout = jz4740_mmc_read_data(host, data);
781		else
782			timeout = jz4740_mmc_write_data(host, data);
783
784		if (unlikely(timeout)) {
785			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
786			break;
787		}
788
789		jz4740_mmc_transfer_check_state(host, data);
790
791		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
792		if (unlikely(timeout)) {
793			host->state = JZ4740_MMC_STATE_SEND_STOP;
794			break;
795		}
796		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
797		fallthrough;
798
799	case JZ4740_MMC_STATE_SEND_STOP:
800		if (!req->stop)
801			break;
802
803		jz4740_mmc_send_command(host, req->stop);
804
805		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
806			timeout = jz4740_mmc_poll_irq(host,
807						      JZ_MMC_IRQ_PRG_DONE);
808			if (timeout) {
809				host->state = JZ4740_MMC_STATE_DONE;
810				break;
811			}
812		}
813	case JZ4740_MMC_STATE_DONE:
814		break;
815	}
816
817	if (!timeout)
818		jz4740_mmc_request_done(host);
819
820	return IRQ_HANDLED;
821}
822
/*
 * Hard IRQ handler: acknowledges flags we won't process here, forwards
 * SDIO card interrupts, latches command errors from the status register
 * and wakes the threaded worker to continue the state machine.
 */
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint32_t irq_reg, status, tmp;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	irq_reg = jz4740_mmc_read_irq_reg(host);

	tmp = irq_reg;
	/* Only consider interrupts that are currently unmasked. */
	irq_reg &= ~host->irq_mask;

	/* Flags handled by the worker stay pending; ack the rest now. */
	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	if (tmp != irq_reg)
		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		/* Claim completion before the timeout timer can. */
		if (test_and_clear_bit(0, &host->waiting)) {
			del_timer(&host->timeout_timer);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			/* Mask and ack what fired, then run the worker. */
			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			jz4740_mmc_write_irq_reg(host, irq_reg);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}
871
872static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
873{
874	int div = 0;
875	int real_rate;
876
877	jz4740_mmc_clock_disable(host);
878	clk_set_rate(host->clk, host->mmc->f_max);
879
880	real_rate = clk_get_rate(host->clk);
881
882	while (real_rate > rate && div < 7) {
883		++div;
884		real_rate >>= 1;
885	}
886
887	writew(div, host->base + JZ_REG_MMC_CLKRT);
888
889	if (real_rate > 25000000) {
890		if (host->version >= JZ_MMC_X1000) {
891			writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
892				   JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
893				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
894				   host->base + JZ_REG_MMC_LPM);
895		} else if (host->version >= JZ_MMC_JZ4760) {
896			writel(JZ_MMC_LPM_DRV_RISING |
897				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
898				   host->base + JZ_REG_MMC_LPM);
899		} else if (host->version >= JZ_MMC_JZ4725B)
900			writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
901				   host->base + JZ_REG_MMC_LPM);
902	}
903
904	return real_rate;
905}
906
/*
 * mmc_host_ops.request: start a new request. Completion is driven by the
 * IRQ handler/worker; the 5 s timeout timer is the safety net if the
 * expected interrupt never arrives.
 */
static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	/* Ack all stale flags, then listen for command completion. */
	jz4740_mmc_write_irq_reg(host, ~0);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	/* Arm the software timeout before the command hits the bus. */
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer,
		  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
	jz4740_mmc_send_command(host, req->cmd);
}
922
/*
 * mmc_host_ops.set_ios: apply core-requested bus settings — clock rate,
 * power state (regulator + controller clock) and bus width (stored as
 * sticky CMDAT bits used by every subsequent command).
 */
static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	if (ios->clock)
		jz4740_mmc_set_clock_rate(host, ios->clock);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		jz4740_mmc_reset(host);
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		/* Request the 80-clock init sequence on the next command. */
		host->cmdat |= JZ_MMC_CMDAT_INIT;
		clk_prepare_enable(host->clk);
		break;
	case MMC_POWER_ON:
		break;
	default:
		/* Power off: drop the supply and gate the controller clock. */
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		clk_disable_unprepare(host->clk);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		break;
	case MMC_BUS_WIDTH_4:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
		break;
	default:
		break;
	}
}
962
963static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
964{
965	struct jz4740_mmc_host *host = mmc_priv(mmc);
966	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
967}
968
/* Host operations; card-detect and write-protect use the generic GPIO
 * helpers wired up by mmc_of_parse(). */
static const struct mmc_host_ops jz4740_mmc_ops = {
	.request	= jz4740_mmc_request,
	.pre_req	= jz4740_mmc_pre_request,
	.post_req	= jz4740_mmc_post_request,
	.set_ios	= jz4740_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};
978
979static const struct of_device_id jz4740_mmc_of_match[] = {
980	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
981	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
982	{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
983	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
984	{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
985	{},
986};
987MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
988
/*
 * Probe: allocate the mmc_host, discover the SoC variant from the OF
 * match data, acquire clock/registers/IRQ, try to grab DMA channels
 * (falling back to PIO on failure) and register with the MMC core.
 * Errors unwind through the goto ladder at the bottom.
 */
static int jz4740_mmc_probe(struct platform_device* pdev)
{
	int ret;
	struct mmc_host *mmc;
	struct jz4740_mmc_host *host;
	const struct of_device_id *match;

	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);

	match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
	if (match) {
		host->version = (enum jz4740_mmc_version)match->data;
	} else {
		/* JZ4740 should be the only one using legacy probe */
		host->version = JZ_MMC_JZ4740;
	}

	ret = mmc_of_parse(mmc);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
		goto err_free_host;
	}

	mmc_regulator_get_supply(mmc);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		goto err_free_host;
	}

	host->clk = devm_clk_get(&pdev->dev, "mmc");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(&pdev->dev, "Failed to get mmc clock\n");
		goto err_free_host;
	}

	/* mem_res is kept: the DMA setup needs the physical FIFO address. */
	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		dev_err(&pdev->dev, "Failed to ioremap base memory\n");
		goto err_free_host;
	}

	mmc->ops = &jz4740_mmc_ops;
	if (!mmc->f_max)
		mmc->f_max = JZ_MMC_CLK_RATE;

	/*
	 * There seems to be a problem with this driver on the JZ4760 and
	 * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
	 * the communication fails with many SD cards.
	 * Until this bug is sorted out, limit the maximum rate to 24 MHz.
	 */
	if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
		mmc->f_max = JZ_MMC_CLK_RATE;

	/* Smallest rate the 2^7 divider can reach from f_max. */
	mmc->f_min = mmc->f_max / 128;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * We use a fixed timeout of 5s, hence inform the core about it. A
	 * future improvement should instead respect the cmd->busy_timeout.
	 */
	mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;

	/* Hardware limits: 10-bit block length, 15-bit block count. */
	mmc->max_blk_size = (1 << 10) - 1;
	mmc->max_blk_count = (1 << 15) - 1;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	mmc->max_segs = 128;
	mmc->max_seg_size = mmc->max_req_size;

	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);
	host->irq_mask = ~0;	/* all interrupts masked until needed */

	jz4740_mmc_reset(host);

	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
			dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		goto err_free_host;
	}

	jz4740_mmc_clock_disable(host);
	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);

	/* DMA is optional: defer on -EPROBE_DEFER, otherwise fall back to PIO. */
	ret = jz4740_mmc_acquire_dma_channels(host);
	if (ret == -EPROBE_DEFER)
		goto err_free_irq;
	host->use_dma = !ret;

	platform_set_drvdata(pdev, host);
	ret = mmc_add_host(mmc);

	if (ret) {
		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
		goto err_release_dma;
	}
	dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");

	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
		 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));

	return 0;

err_release_dma:
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);
err_free_irq:
	free_irq(host->irq, host);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}
1118
/*
 * Remove: quiesce the hardware first (timer, interrupts, reset), then
 * unregister from the MMC core, then release IRQ/DMA/host resources —
 * the reverse of probe order.
 */
static int jz4740_mmc_remove(struct platform_device *pdev)
{
	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);

	del_timer_sync(&host->timeout_timer);
	jz4740_mmc_set_irq_enabled(host, 0xff, false);
	jz4740_mmc_reset(host);

	mmc_remove_host(host->mmc);

	free_irq(host->irq, host);

	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);

	mmc_free_host(host->mmc);

	return 0;
}
1138
/* System suspend: switch the MMC pins to their "sleep" pinctrl state. */
static int __maybe_unused jz4740_mmc_suspend(struct device *dev)
{
	return pinctrl_pm_select_sleep_state(dev);
}
1143
/* System resume: restore the default pinctrl state. */
static int __maybe_unused jz4740_mmc_resume(struct device *dev)
{
	return pinctrl_select_default_state(dev);
}
1148
/* PM callbacks; compiled out when CONFIG_PM_SLEEP is disabled. */
static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
	jz4740_mmc_resume);

static struct platform_driver jz4740_mmc_driver = {
	.probe = jz4740_mmc_probe,
	.remove = jz4740_mmc_remove,
	.driver = {
		.name = "jz4740-mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
		.pm = pm_ptr(&jz4740_mmc_pm_ops),
	},
};

module_platform_driver(jz4740_mmc_driver);

MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
1168