// SPDX-License-Identifier: GPL-2.0-only
/*
 *  WM8505/WM8650 SD/MMC Host Controller
 *
 *  Copyright (C) 2010 Tony Prisk
 *  Copyright (C) 2008 WonderMedia Technologies, Inc.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/interrupt.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/byteorder.h>


#define DRIVER_NAME "wmt-sdhc"


/* MMC/SD controller registers */
#define SDMMC_CTLR			0x00
#define SDMMC_CMD			0x01
#define SDMMC_RSPTYPE			0x02
#define SDMMC_ARG			0x04
#define SDMMC_BUSMODE			0x08
#define SDMMC_BLKLEN			0x0C
#define SDMMC_BLKCNT			0x0E
#define SDMMC_RSP			0x10
#define SDMMC_CBCR			0x20
#define SDMMC_INTMASK0			0x24
#define SDMMC_INTMASK1			0x25
#define SDMMC_STS0			0x28
#define SDMMC_STS1			0x29
#define SDMMC_STS2			0x2A
#define SDMMC_STS3			0x2B
#define SDMMC_RSPTIMEOUT		0x2C
#define SDMMC_CLK			0x30	/* VT8500 only */
#define SDMMC_EXTCTRL			0x34
#define SDMMC_SBLKLEN			0x38
#define SDMMC_DMATIMEOUT		0x3C


/* SDMMC_CTLR bit fields */
#define CTLR_CMD_START			0x01
#define CTLR_CMD_WRITE			0x04
#define CTLR_FIFO_RESET			0x08

/* SDMMC_BUSMODE bit fields */
#define BM_SPI_MODE			0x01
#define BM_FOURBIT_MODE			0x02
#define BM_EIGHTBIT_MODE		0x04
#define BM_SD_OFF			0x10
#define BM_SPI_CS			0x20
#define BM_SD_POWER			0x40
#define BM_SOFT_RESET			0x80

/* SDMMC_BLKLEN bit fields */
#define BLKL_CRCERR_ABORT		0x0800
#define BLKL_CD_POL_HIGH		0x1000
#define BLKL_GPI_CD			0x2000
#define BLKL_DATA3_CD			0x4000
#define BLKL_INT_ENABLE			0x8000

/* SDMMC_INTMASK0 bit fields */
#define INT0_MBLK_TRAN_DONE_INT_EN	0x10
#define INT0_BLK_TRAN_DONE_INT_EN	0x20
#define INT0_CD_INT_EN			0x40
#define INT0_DI_INT_EN			0x80

/* SDMMC_INTMASK1 bit fields */
#define INT1_CMD_RES_TRAN_DONE_INT_EN	0x02
#define INT1_CMD_RES_TOUT_INT_EN	0x04
#define INT1_MBLK_AUTO_STOP_INT_EN	0x08
#define INT1_DATA_TOUT_INT_EN		0x10
#define INT1_RESCRC_ERR_INT_EN		0x20
#define INT1_RCRC_ERR_INT_EN		0x40
#define INT1_WCRC_ERR_INT_EN		0x80

/* SDMMC_STS0 bit fields */
#define STS0_WRITE_PROTECT		0x02
#define STS0_CD_DATA3			0x04
#define STS0_CD_GPI			0x08
#define STS0_MBLK_DONE			0x10
#define STS0_BLK_DONE			0x20
#define STS0_CARD_DETECT		0x40
#define STS0_DEVICE_INS			0x80

/* SDMMC_STS1 bit fields */
#define STS1_SDIO_INT			0x01
#define STS1_CMDRSP_DONE		0x02
#define STS1_RSP_TIMEOUT		0x04
#define STS1_AUTOSTOP_DONE		0x08
#define STS1_DATA_TIMEOUT		0x10
#define STS1_RSP_CRC_ERR		0x20
#define STS1_RCRC_ERR			0x40
#define STS1_WCRC_ERR			0x80

/* SDMMC_STS2 bit fields */
#define STS2_CMD_RES_BUSY		0x10
#define STS2_DATARSP_BUSY		0x20
#define STS2_DIS_FORCECLK		0x80

/* SDMMC_EXTCTRL bit fields */
#define EXT_EIGHTBIT			0x04

/* MMC/SD DMA Controller Registers */
#define SDDMA_GCR			0x100
#define SDDMA_IER			0x104
#define SDDMA_ISR			0x108
#define SDDMA_DESPR			0x10C
#define SDDMA_RBR			0x110
#define SDDMA_DAR			0x114
#define SDDMA_BAR			0x118
#define SDDMA_CPR			0x11C
#define SDDMA_CCR			0x120


/* SDDMA_GCR bit fields */
#define DMA_GCR_DMA_EN			0x00000001
#define DMA_GCR_SOFT_RESET		0x00000100

/* SDDMA_IER bit fields */
#define DMA_IER_INT_EN			0x00000001

/* SDDMA_ISR bit fields */
#define DMA_ISR_INT_STS			0x00000001

/* SDDMA_RBR bit fields */
#define DMA_RBR_FORMAT			0x40000000
#define DMA_RBR_END			0x80000000

/* SDDMA_CCR bit fields */
#define DMA_CCR_RUN			0x00000080
#define DMA_CCR_IF_TO_PERIPHERAL	0x00000000
#define DMA_CCR_PERIPHERAL_TO_IF	0x00400000

/* SDDMA_CCR event status */
#define DMA_CCR_EVT_NO_STATUS		0x00000000
#define DMA_CCR_EVT_UNDERRUN		0x00000001
#define DMA_CCR_EVT_OVERRUN		0x00000002
#define DMA_CCR_EVT_DESP_READ		0x00000003
#define DMA_CCR_EVT_DATA_RW		0x00000004
#define DMA_CCR_EVT_EARLY_END		0x00000005
#define DMA_CCR_EVT_SUCCESS		0x0000000F

#define PDMA_READ			0x00
#define PDMA_WRITE			0x01

#define WMT_SD_POWER_OFF		0
#define WMT_SD_POWER_ON			1

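/*
 * Hardware DMA descriptor, 16 bytes per entry. The layout notes here are
 * inferred purely from how this driver builds descriptors (see
 * wmt_dma_init_descriptor() and wmt_mci_request()): bits 15:0 of 'flags'
 * hold the request count, DMA_RBR_FORMAT marks a link-list descriptor,
 * DMA_RBR_END marks the last descriptor in the chain, and 'branch_addr'
 * points to the next descriptor. Treat this as a summary of the driver's
 * usage rather than a complete datasheet description.
 */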
struct wmt_dma_descriptor {
	u32 flags;
	u32 data_buffer_addr;
	u32 branch_addr;
	u32 reserved1;
};

struct wmt_mci_caps {
	unsigned int	f_min;
	unsigned int	f_max;
	u32		ocr_avail;
	u32		caps;
	u32		max_seg_size;
	u32		max_segs;
	u32		max_blk_size;
};

struct wmt_mci_priv {
	struct mmc_host *mmc;
	void __iomem *sdmmc_base;

	int irq_regular;
	int irq_dma;

	void *dma_desc_buffer;
	dma_addr_t dma_desc_device_addr;

	struct completion cmdcomp;
	struct completion datacomp;

	struct completion *comp_cmd;
	struct completion *comp_dma;

	struct mmc_request *req;
	struct mmc_command *cmd;

	struct clk *clk_sdmmc;
	struct device *dev;

	u8 power_inverted;
	u8 cd_inverted;
};

static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
	u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);

	if (enable ^ priv->power_inverted)
		reg_tmp &= ~BM_SD_OFF;
	else
		reg_tmp |= BM_SD_OFF;

	writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}

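/*
 * Gather the command response bytes from the SDMMC_RSP registers into
 * priv->cmd->resp[]. The loop below assembles four 32-bit words from the
 * controller's response byte registers; the special case for the final
 * byte (read from offset 0) and the cpu_to_be32() conversion are kept
 * exactly as in the original driver.
 */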
static void wmt_mci_read_response(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	int idx1, idx2;
	u8 tmp_resp;
	u32 response;

	priv = mmc_priv(mmc);

	for (idx1 = 0; idx1 < 4; idx1++) {
		response = 0;
		for (idx2 = 0; idx2 < 4; idx2++) {
			if ((idx1 == 3) && (idx2 == 3))
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
			else
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
						 (idx1 * 4) + idx2 + 1);
			response |= (tmp_resp << (idx2 * 8));
		}
		priv->cmd->resp[idx1] = cpu_to_be32(response);
	}
}

static void wmt_mci_start_command(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
}

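/*
 * Write the command, argument and response-type registers and prepare the
 * controller; the transfer is started later by wmt_mci_start_command().
 * The cmdtype values passed in by this driver are: 0 for plain commands,
 * 1/3 for single/multi-block writes, 2/4 for single/multi-block reads and
 * 7 for the stop command (see wmt_mci_request() and
 * wmt_complete_data_request()). These values are taken from the driver's
 * own call sites, not from a documented register description.
 */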
static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
				u32 arg, u8 rsptype)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* write command, arg, resptype registers */
	writeb(command, priv->sdmmc_base + SDMMC_CMD);
	writel(arg, priv->sdmmc_base + SDMMC_ARG);
	writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* ensure clock enabled - VT3465 */
	wmt_set_sd_power(priv, WMT_SD_POWER_ON);

	/* clear status bits */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);

	/* set command type */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb((reg_tmp & 0x0F) | (cmdtype << 4),
	       priv->sdmmc_base + SDMMC_CTLR);

	return 0;
}

static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
	writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
	writel(0, priv->sdmmc_base + SDDMA_IER);
}

static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
	struct mmc_request *req;

	req = priv->req;

	req->data->bytes_xfered = req->data->blksz * req->data->blocks;

	/* unmap the DMA pages used for write data */
	if (req->data->flags & MMC_DATA_WRITE)
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_TO_DEVICE);
	else
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_FROM_DEVICE);

	/* Check if the DMA ISR returned a data error */
	if ((req->cmd->error) || (req->data->error)) {
		mmc_request_done(priv->mmc, req);
	} else {
		wmt_mci_read_response(priv->mmc);
		if (!req->data->stop) {
			/* single-block read/write requests end here */
			mmc_request_done(priv->mmc, req);
		} else {
			/*
			 * we change the priv->cmd variable so the response is
			 * stored in the stop struct rather than the original
			 * calling command struct
			 */
			priv->comp_cmd = &priv->cmdcomp;
			init_completion(priv->comp_cmd);
			priv->cmd = req->data->stop;
			wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
					     7, req->data->stop->arg, 9);
			wmt_mci_start_command(priv);
		}
	}
}

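/*
 * Completion of a data request is coordinated between two interrupt
 * handlers: wmt_mci_dma_isr() fires when the PDMA engine finishes moving
 * the data, and wmt_mci_regular_isr() fires when the command/response side
 * completes. Whichever handler runs last (checked with completion_done()
 * on the other side's completion) calls wmt_complete_data_request() to
 * finish the request. This summary simply restates the in-line comments
 * below to make the hand-off easier to follow.
 */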
static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	int status;

	priv = (struct wmt_mci_priv *)data;

	status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;

	if (status != DMA_CCR_EVT_SUCCESS) {
		dev_err(priv->dev, "DMA Error: Status = %d\n", status);
		priv->req->data->error = -ETIMEDOUT;
		complete(priv->comp_dma);
		return IRQ_HANDLED;
	}

	priv->req->data->error = 0;

	wmt_mci_disable_dma(priv);

	complete(priv->comp_dma);

	if (priv->comp_cmd) {
		if (completion_done(priv->comp_cmd)) {
			/*
			 * if the command (regular) interrupt has already
			 * completed, finish off the request otherwise we wait
			 * for the command interrupt and finish from there.
			 */
			wmt_complete_data_request(priv);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	u32 status0;
	u32 status1;
	u32 status2;
	u32 reg_tmp;
	int cmd_done;

	priv = (struct wmt_mci_priv *)data;
	cmd_done = 0;
	status0 = readb(priv->sdmmc_base + SDMMC_STS0);
	status1 = readb(priv->sdmmc_base + SDMMC_STS1);
	status2 = readb(priv->sdmmc_base + SDMMC_STS2);

	/* Check for card insertion */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
	if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
		mmc_detect_change(priv->mmc, 0);
		if (priv->cmd)
			priv->cmd->error = -ETIMEDOUT;
		if (priv->comp_cmd)
			complete(priv->comp_cmd);
		if (priv->comp_dma) {
			wmt_mci_disable_dma(priv);
			complete(priv->comp_dma);
		}
		writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
		return IRQ_HANDLED;
	}

	if ((!priv->req->data) ||
	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
		/* handle non-data & stop_transmission requests */
		if (status1 & STS1_CMDRSP_DONE) {
			priv->cmd->error = 0;
			cmd_done = 1;
		} else if ((status1 & STS1_RSP_TIMEOUT) ||
			   (status1 & STS1_DATA_TIMEOUT)) {
			priv->cmd->error = -ETIMEDOUT;
			cmd_done = 1;
		}

		if (cmd_done) {
			priv->comp_cmd = NULL;

			if (!priv->cmd->error)
				wmt_mci_read_response(priv->mmc);

			priv->cmd = NULL;

			mmc_request_done(priv->mmc, priv->req);
		}
	} else {
		/* handle data requests */
		if (status1 & STS1_CMDRSP_DONE) {
			if (priv->cmd)
				priv->cmd->error = 0;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
		}

		if ((status1 & STS1_RSP_TIMEOUT) ||
		    (status1 & STS1_DATA_TIMEOUT)) {
			if (priv->cmd)
				priv->cmd->error = -ETIMEDOUT;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
			if (priv->comp_dma) {
				wmt_mci_disable_dma(priv);
				complete(priv->comp_dma);
			}
		}

		if (priv->comp_dma) {
			/*
			 * If the dma interrupt has already completed, finish
			 * off the request; otherwise we wait for the DMA
			 * interrupt and finish from there.
			 */
			if (completion_done(priv->comp_dma))
				wmt_complete_data_request(priv);
		}
	}

	writeb(status0, priv->sdmmc_base + SDMMC_STS0);
	writeb(status1, priv->sdmmc_base + SDMMC_STS1);
	writeb(status2, priv->sdmmc_base + SDMMC_STS2);

	return IRQ_HANDLED;
}

static void wmt_reset_hardware(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* reset controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* enable GPI pin to detect card */
	writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

	/* clear interrupt status */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* setup interrupts */
	writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
	       SDMMC_INTMASK0);
	writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
	       INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

	/* set the DMA timeout */
	writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

	/* auto clock freezing enable */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
	writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

	/* set a default clock speed of 400kHz */
	clk_set_rate(priv->clk_sdmmc, 400000);
}

static int wmt_dma_init(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;

	priv = mmc_priv(mmc);

	writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
	writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
	if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
		return 0;
	else
		return 1;
}

static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
		u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
	desc->flags = DMA_RBR_FORMAT | req_count;
	if (end)
		desc->flags |= DMA_RBR_END;
	desc->data_buffer_addr = buffer_addr;
	desc->branch_addr = branch_addr;
}

static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* Enable DMA Interrupts */
	writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

	/* Write DMA Descriptor Pointer Register */
	writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

	writel(0x00, priv->sdmmc_base + SDDMA_CCR);

	if (dir == PDMA_WRITE) {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
		       SDDMA_CCR);
	} else {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
		       SDDMA_CCR);
	}
}

static void wmt_dma_start(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
	writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
}

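/*
 * Issue an MMC request. For data transfers the function below builds one
 * 16-byte PDMA descriptor per block inside the coherent dma_desc_buffer,
 * each descriptor linked to the next via its branch address, with the
 * final descriptor flagged as the end of the chain (see the flag fix-up
 * after the scatterlist loop). The chain is then handed to the DMA engine
 * before the command itself is started. This comment only restates what
 * the code below does, as an orientation aid.
 */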
static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct wmt_mci_priv *priv;
	struct wmt_dma_descriptor *desc;
	u8 command;
	u8 cmdtype;
	u32 arg;
	u8 rsptype;
	u32 reg_tmp;

	struct scatterlist *sg;
	int i;
	int sg_cnt;
	int offset;
	u32 dma_address;
	int desc_cnt;

	priv = mmc_priv(mmc);
	priv->req = req;

	/*
	 * Use the cmd variable to pass a pointer to the resp[] structure
	 * This is required on multi-block requests to pass the pointer to the
	 * stop command
	 */
	priv->cmd = req->cmd;

	command = req->cmd->opcode;
	arg = req->cmd->arg;
	rsptype = mmc_resp_type(req->cmd);
	cmdtype = 0;

	/* rsptype=7 only valid for SPI commands - should be =2 for SD */
	if (rsptype == 7)
		rsptype = 2;
	/* rsptype=21 is R1B, convert for controller */
	if (rsptype == 21)
		rsptype = 9;

	if (!req->data) {
		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
		wmt_mci_start_command(priv);
		/* completion is now handled in the regular_isr() */
	}
	if (req->data) {
		priv->comp_cmd = &priv->cmdcomp;
		init_completion(priv->comp_cmd);

		wmt_dma_init(mmc);

		/* set controller data length */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		/* set controller block count */
		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

		desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

		if (req->data->flags & MMC_DATA_WRITE) {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_TO_DEVICE);
			cmdtype = 1;
			if (req->data->blocks > 1)
				cmdtype = 3;
		} else {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_FROM_DEVICE);
			cmdtype = 2;
			if (req->data->blocks > 1)
				cmdtype = 4;
		}

		dma_address = priv->dma_desc_device_addr + 16;
		desc_cnt = 0;

		for_each_sg(req->data->sg, sg, sg_cnt, i) {
			offset = 0;
			while (offset < sg_dma_len(sg)) {
				wmt_dma_init_descriptor(desc, req->data->blksz,
						sg_dma_address(sg) + offset,
						dma_address, 0);
				desc++;
				desc_cnt++;
				offset += req->data->blksz;
				dma_address += 16;
				if (desc_cnt == req->data->blocks)
					break;
			}
		}
		desc--;
		desc->flags |= DMA_RBR_END;

		if (req->data->flags & MMC_DATA_WRITE)
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_WRITE);
		else
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_READ);

		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

		priv->comp_dma = &priv->datacomp;
		init_completion(priv->comp_dma);

		wmt_dma_start(priv);
		wmt_mci_start_command(priv);
	}
}

static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wmt_mci_priv *priv;
	u32 busmode, extctrl;

	priv = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		wmt_reset_hardware(mmc);

		wmt_set_sd_power(priv, WMT_SD_POWER_ON);
	}
	if (ios->power_mode == MMC_POWER_OFF)
		wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

	if (ios->clock != 0)
		clk_set_rate(priv->clk_sdmmc, ios->clock);

	busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL);

	busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE);
	extctrl &= ~EXT_EIGHTBIT;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		busmode |= BM_EIGHTBIT_MODE;
		extctrl |= EXT_EIGHTBIT;
		break;
	case MMC_BUS_WIDTH_4:
		busmode |= BM_FOURBIT_MODE;
		break;
	case MMC_BUS_WIDTH_1:
		break;
	}

	writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL);
}

static int wmt_mci_get_ro(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);

	return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
}

static int wmt_mci_get_cd(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);
	u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;

	return !(cd ^ priv->cd_inverted);
}

static const struct mmc_host_ops wmt_mci_ops = {
	.request = wmt_mci_request,
	.set_ios = wmt_mci_set_ios,
	.get_ro = wmt_mci_get_ro,
	.get_cd = wmt_mci_get_cd,
};

/* Controller capabilities */
static struct wmt_mci_caps wm8505_caps = {
	.f_min = 390425,
	.f_max = 50000000,
	.ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
	.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
		MMC_CAP_SD_HIGHSPEED,
	.max_seg_size = 65024,
	.max_segs = 128,
	.max_blk_size = 2048,
};

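/*
 * Illustrative devicetree node only, matching what wmt_mci_probe() parses:
 * one "reg" window, two interrupts (regular then DMA), one clock, and the
 * optional "sdon-inverted" / "cd-inverted" flags. The address, interrupt
 * numbers and clock phandle below are placeholders, not values taken from
 * a real board file:
 *
 *	sdhc@d800a000 {
 *		compatible = "wm,wm8505-sdhc";
 *		reg = <0xd800a000 0x400>;
 *		interrupts = <20>, <21>;
 *		clocks = <&clksdhc>;
 *		cd-inverted;
 *	};
 */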
static const struct of_device_id wmt_mci_dt_ids[] = {
	{ .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
	{ /* Sentinel */ },
};

static int wmt_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
		of_match_device(wmt_mci_dt_ids, &pdev->dev);
	const struct wmt_mci_caps *wmt_caps;
	int ret;
	int regular_irq, dma_irq;

	if (!of_id || !of_id->data) {
		dev_err(&pdev->dev, "Controller capabilities data missing\n");
		return -EFAULT;
	}

	wmt_caps = of_id->data;

	if (!np) {
		dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
		return -EFAULT;
	}

	regular_irq = irq_of_parse_and_map(np, 0);
	dma_irq = irq_of_parse_and_map(np, 1);

	if (!regular_irq || !dma_irq) {
		dev_err(&pdev->dev, "Getting IRQs failed!\n");
		ret = -ENXIO;
		goto fail1;
	}

	mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
		ret = -ENOMEM;
		goto fail1;
	}

	mmc->ops = &wmt_mci_ops;
	mmc->f_min = wmt_caps->f_min;
	mmc->f_max = wmt_caps->f_max;
	mmc->ocr_avail = wmt_caps->ocr_avail;
	mmc->caps = wmt_caps->caps;

	mmc->max_seg_size = wmt_caps->max_seg_size;
	mmc->max_segs = wmt_caps->max_segs;
	mmc->max_blk_size = wmt_caps->max_blk_size;

	mmc->max_req_size = (16 * 512 * mmc->max_segs);
	mmc->max_blk_count = mmc->max_req_size / 512;

	priv = mmc_priv(mmc);
	priv->mmc = mmc;
	priv->dev = &pdev->dev;

	priv->power_inverted = 0;
	priv->cd_inverted = 0;

	if (of_get_property(np, "sdon-inverted", NULL))
		priv->power_inverted = 1;
	if (of_get_property(np, "cd-inverted", NULL))
		priv->cd_inverted = 1;

	priv->sdmmc_base = of_iomap(np, 0);
	if (!priv->sdmmc_base) {
		dev_err(&pdev->dev, "Failed to map IO space\n");
		ret = -ENOMEM;
		goto fail2;
	}

	priv->irq_regular = regular_irq;
	priv->irq_dma = dma_irq;

	ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register regular IRQ fail\n");
		goto fail3;
	}

	ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register DMA IRQ fail\n");
		goto fail4;
	}

	/* alloc some DMA buffers for descriptors/transfers */
	priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
						   mmc->max_blk_count * 16,
						   &priv->dma_desc_device_addr,
						   GFP_KERNEL);
	if (!priv->dma_desc_buffer) {
		dev_err(&pdev->dev, "DMA alloc fail\n");
		ret = -EPERM;
		goto fail5;
	}

	platform_set_drvdata(pdev, mmc);

	priv->clk_sdmmc = of_clk_get(np, 0);
	if (IS_ERR(priv->clk_sdmmc)) {
		dev_err(&pdev->dev, "Error getting clock\n");
		ret = PTR_ERR(priv->clk_sdmmc);
		goto fail5_and_a_half;
	}

	ret = clk_prepare_enable(priv->clk_sdmmc);
	if (ret)
		goto fail6;

	/* configure the controller to a known 'ready' state */
	wmt_reset_hardware(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto fail7;

	dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");

	return 0;
fail7:
	clk_disable_unprepare(priv->clk_sdmmc);
fail6:
	clk_put(priv->clk_sdmmc);
fail5_and_a_half:
	dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);
fail5:
	free_irq(dma_irq, priv);
fail4:
	free_irq(regular_irq, priv);
fail3:
	iounmap(priv->sdmmc_base);
fail2:
	mmc_free_host(mmc);
fail1:
	return ret;
}

static int wmt_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct resource *res;
	u32 reg_tmp;

	mmc = platform_get_drvdata(pdev);
	priv = mmc_priv(mmc);

	/* reset SD controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* release the dma buffers */
	dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);

	mmc_remove_host(mmc);

	free_irq(priv->irq_regular, priv);
	free_irq(priv->irq_dma, priv);

	iounmap(priv->sdmmc_base);

	clk_disable_unprepare(priv->clk_sdmmc);
	clk_put(priv->clk_sdmmc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	mmc_free_host(mmc);

	dev_info(&pdev->dev, "WMT MCI device removed\n");

	return 0;
}

#ifdef CONFIG_PM
static int wmt_mci_suspend(struct device *dev)
{
	u32 reg_tmp;
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wmt_mci_priv *priv;

	if (!mmc)
		return 0;

	priv = mmc_priv(mmc);
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
	       SDMMC_BUSMODE);

	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	clk_disable(priv->clk_sdmmc);
	return 0;
}

static int wmt_mci_resume(struct device *dev)
{
	u32 reg_tmp;
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wmt_mci_priv *priv;

	if (mmc) {
		priv = mmc_priv(mmc);
		clk_enable(priv->clk_sdmmc);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
		writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
		       SDMMC_INTMASK0);
	}

	return 0;
}

static const struct dev_pm_ops wmt_mci_pm = {
	.suspend        = wmt_mci_suspend,
	.resume         = wmt_mci_resume,
};

#define wmt_mci_pm_ops (&wmt_mci_pm)

#else	/* !CONFIG_PM */

#define wmt_mci_pm_ops NULL

#endif

static struct platform_driver wmt_mci_driver = {
	.probe = wmt_mci_probe,
	.remove = wmt_mci_remove,
	.driver = {
		.name = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = wmt_mci_pm_ops,
		.of_match_table = wmt_mci_dt_ids,
	},
};

module_platform_driver(wmt_mci_driver);

MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
MODULE_AUTHOR("Tony Prisk");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);