// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
 *
 * Copyright 2008 Embedded Alley Solutions, Inc.
 * Copyright 2009-2011 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME	"mxs-mmc"

#define MXS_MMC_IRQ_BITS	(BM_SSP_CTRL1_SDIO_IRQ		| \
				 BM_SSP_CTRL1_RESP_ERR_IRQ	| \
				 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ	| \
				 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ	| \
				 BM_SSP_CTRL1_DATA_CRC_IRQ	| \
				 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ	| \
				 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ	| \
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)

/* card detect polling timeout */
#define MXS_MMC_DETECT_TIMEOUT			(HZ/2)

struct mxs_mmc_host {
	struct mxs_ssp			ssp;

	struct mmc_host			*mmc;
	struct mmc_request		*mrq;
	struct mmc_command		*cmd;
	struct mmc_data			*data;

	unsigned char			bus_width;
	spinlock_t			lock;
	int				sdio_irq_en;
	bool				broken_cd;
};

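/*
 * Card-detect callback: hosts flagged as broken-cd return -ENOSYS, a GPIO
 * card-detect line is used when one is available, and otherwise the SSP
 * CARD_DETECT status bit (active low) is consulted, with the result
 * inverted for MMC_CAP2_CD_ACTIVE_HIGH slots.
 */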
static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	int present, ret;

	if (host->broken_cd)
		return -ENOSYS;

	ret = mmc_gpio_get_cd(mmc);
	if (ret >= 0)
		return ret;

	present = mmc->caps & MMC_CAP_NEEDS_POLL ||
		!(readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_CARD_DETECT);

	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
		present = !present;

	return present;
}

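/*
 * Soft-reset the SSP block and restore the baseline SD/MMC configuration:
 * SD/MMC mode, word length, DMA transfers and the error/timeout interrupt
 * enables, plus the default timing (timeout, clock divide and rate).
 */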
static int mxs_mmc_reset(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	u32 ctrl0, ctrl1;
	int ret;

	ret = stmp_reset_block(ssp->base);
	if (ret)
		return ret;

	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
		BM_SSP_CTRL1_DMA_ENABLE |
		BM_SSP_CTRL1_POLARITY |
		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;

	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
	       BF_SSP(0, TIMING_CLOCK_RATE),
	       ssp->base + HW_SSP_TIMING(ssp));

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
	}

	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
	return 0;
}

static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd);

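/*
 * Complete the command that just finished: read the response words from the
 * SDRESP registers, unmap any data buffers, then either chain the next
 * command of the request (the real command after CMD23, or a stop command)
 * or hand the finished request back to the MMC core.
 */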
static void mxs_mmc_request_done(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mmc_request *mrq = host->mrq;
	struct mxs_ssp *ssp = &host->ssp;

	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
		} else {
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
		}
	}

	if (cmd == mrq->sbc) {
		/* Finished CMD23, now send actual command. */
		mxs_mmc_start_cmd(host, mrq->cmd);
		return;
	} else if (data) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			     data->sg_len, ssp->dma_dir);
		/*
		 * If there was an error on any block, we mark all
		 * data blocks as being in error.
		 */
		if (!data->error)
			data->bytes_xfered = data->blocks * data->blksz;
		else
			data->bytes_xfered = 0;

		host->data = NULL;
		if (data->stop && (data->error || !mrq->sbc)) {
			mxs_mmc_start_cmd(host, mrq->stop);
			return;
		}
	}

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void mxs_mmc_dma_irq_callback(void *param)
{
	struct mxs_mmc_host *host = param;

	mxs_mmc_request_done(host);
}

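/*
 * SSP interrupt handler: acknowledge the handled status bits in CTRL1 via
 * the CLR register, forward a pending SDIO interrupt to the MMC core and
 * translate the error bits into error codes on the current command/data.
 */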
static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
{
	struct mxs_mmc_host *host = dev_id;
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mxs_ssp *ssp = &host->ssp;
	u32 stat;

	spin_lock(&host->lock);

	stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
	writel(stat & MXS_MMC_IRQ_BITS,
	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);

	spin_unlock(&host->lock);

	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
		mmc_signal_sdio_irq(host->mmc);

	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
		cmd->error = -ETIMEDOUT;
	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
		cmd->error = -EIO;

	if (data) {
		if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
			    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
			data->error = -ETIMEDOUT;
		else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
			data->error = -EILSEQ;
		else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
			data->error = -EIO;
	}

	return IRQ_HANDLED;
}

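/*
 * Build a DMA descriptor for the current transfer stage: either the PIO
 * words that program CTRL0/CMD0/CMD1, or the data scatterlist (which is
 * mapped here and unmapped again if descriptor preparation fails).
 */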
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
	struct mxs_mmc_host *host, unsigned long flags)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = host->data;
	struct scatterlist *sgl;
	unsigned int sg_len;

	if (data) {
		/* data */
		dma_map_sg(mmc_dev(host->mmc), data->sg,
			   data->sg_len, ssp->dma_dir);
		sgl = data->sg;
		sg_len = data->sg_len;
	} else {
		/* pio */
		sgl = (struct scatterlist *) ssp->ssp_pio_words;
		sg_len = SSP_PIO_NUM;
	}

	desc = dmaengine_prep_slave_sg(ssp->dmach,
				sgl, sg_len, ssp->slave_dirn, flags);
	if (desc) {
		desc->callback = mxs_mmc_dma_irq_callback;
		desc->callback_param = host;
	} else {
		if (data)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, ssp->dma_dir);
	}

	return desc;
}

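/*
 * Broadcast command: no response and no data transfer; only the PIO words
 * are sent through the DMA channel, with 8 extra clock cycles appended.
 */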
static void mxs_mmc_bc(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ctrl0, cmd0, cmd1;

	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

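/*
 * Addressed (and broadcast-with-response) command: a response is expected
 * but no data is transferred; MMC_STOP_TRANSMISSION additionally gets
 * 8 extra clock cycles appended.
 */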
static void mxs_mmc_ac(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ignore_crc, get_resp, long_resp;
	u32 ctrl0, cmd0, cmd1;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
	cmd1 = cmd->arg;

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

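/*
 * Convert a data timeout in nanoseconds into units of the TIMING_TIMEOUT
 * field, treating one timeout tick as ssp_timeout_mul (4096) SSP clock
 * cycles.
 */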
static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
{
	const unsigned int ssp_timeout_mul = 4096;
	/*
	 * Calculate ticks in ms since ns are large numbers
	 * and might overflow
	 */
	const unsigned int clock_per_ms = clock_rate / 1000;
	const unsigned int ms = ns / 1000;
	const unsigned int ticks = ms * clock_per_ms;
	const unsigned int ssp_ticks = ticks / ssp_timeout_mul;

	WARN_ON(ssp_ticks == 0);
	return ssp_ticks;
}

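/*
 * Addressed data-transfer command: program the transfer size and block
 * size/count (in CTRL0/CMD0 on old SSP, in dedicated registers otherwise),
 * update the data timeout, then queue two DMA descriptors, one for the PIO
 * command words and one for the data scatterlist.
 */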
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	unsigned int i;

	unsigned short dma_data_dir, timeout;
	enum dma_transfer_direction slave_dirn;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	struct mxs_ssp *ssp = &host->ssp;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* get logarithm to base 2 of block size for setting register */
	log2_blksz = ilog2(data->blksz);

	/*
	 * take special care of the case that data size from data->sg
	 * is not equal to blocks x blksz
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/* xfer count, block size and count need to be set differently */
	if (ssp_is_old(ssp)) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       ssp->base + HW_SSP_BLOCK_SIZE);
	}

	if (cmd->opcode == SD_IO_RW_EXTENDED)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the timeout count */
	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
	val = readl(ssp->base + HW_SSP_TIMING(ssp));
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, ssp->base + HW_SSP_TIMING(ssp));

	/* pio */
	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* append data sg */
	WARN_ON(host->data != NULL);
	host->data = data;
	ssp->dma_dir = dma_data_dir;
	ssp->slave_dirn = slave_dirn;
	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;
out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

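/* Dispatch a command to the handler matching its MMC command type. */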
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd)
{
	host->cmd = cmd;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		mxs_mmc_bc(host);
		break;
	case MMC_CMD_BCR:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_AC:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_ADTC:
		mxs_mmc_adtc(host);
		break;
	default:
		dev_warn(mmc_dev(host->mmc),
			 "%s: unknown MMC command\n", __func__);
		break;
	}
}

static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);
	host->mrq = mrq;

	if (mrq->sbc)
		mxs_mmc_start_cmd(host, mrq->sbc);
	else
		mxs_mmc_start_cmd(host, mrq->cmd);
}

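/*
 * Cache the bus width in the CTRL0 BUS_WIDTH encoding (0/1/2 for 1-/4-/8-bit)
 * and update the SSP clock when a rate is requested.
 */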
static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	if (ios->bus_width == MMC_BUS_WIDTH_8)
		host->bus_width = 2;
	else if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->bus_width = 1;
	else
		host->bus_width = 0;

	if (ios->clock)
		mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
}

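/*
 * Enable or disable the SDIO interrupt in CTRL0/CTRL1 and, when enabling,
 * signal an interrupt that is already pending in the status register.
 */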
static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	host->sdio_irq_en = enable;

	if (enable) {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
	} else {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
	}

	spin_unlock_irqrestore(&host->lock, flags);

	if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_SDIO_IRQ)
		mmc_signal_sdio_irq(host->mmc);
}

static const struct mmc_host_ops mxs_mmc_ops = {
	.request = mxs_mmc_request,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mxs_mmc_get_cd,
	.set_ios = mxs_mmc_set_ios,
	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};

static const struct of_device_id mxs_mmc_dt_ids[] = {
	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);

static void mxs_mmc_regulator_disable(void *regulator)
{
	regulator_disable(regulator);
}

static int mxs_mmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mxs_mmc_host *host;
	struct mmc_host *mmc;
	int ret = 0, irq_err;
	struct regulator *reg_vmmc;
	struct mxs_ssp *ssp;

	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	ssp = &host->ssp;
	ssp->dev = &pdev->dev;
	ssp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ssp->base)) {
		ret = PTR_ERR(ssp->base);
		goto out_mmc_free;
	}

	ssp->devid = (enum mxs_ssp_id)of_device_get_match_data(&pdev->dev);

	host->mmc = mmc;
	host->sdio_irq_en = 0;

	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
	if (!IS_ERR(reg_vmmc)) {
		ret = regulator_enable(reg_vmmc);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable vmmc regulator: %d\n", ret);
			goto out_mmc_free;
		}

		ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable,
					       reg_vmmc);
		if (ret)
			goto out_mmc_free;
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssp->clk)) {
		ret = PTR_ERR(ssp->clk);
		goto out_mmc_free;
	}
	ret = clk_prepare_enable(ssp->clk);
	if (ret)
		goto out_mmc_free;

	ret = mxs_mmc_reset(host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
		goto out_clk_disable;
	}

	ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(ssp->dmach)) {
		dev_err(mmc_dev(host->mmc),
			"%s: failed to request dma\n", __func__);
		ret = PTR_ERR(ssp->dmach);
		goto out_clk_disable;
	}

	/* set mmc core parameters */
	mmc->ops = &mxs_mmc_ops;
	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;

	host->broken_cd = of_property_read_bool(np, "broken-cd");

	mmc->f_min = 400000;
	mmc->f_max = 288000000;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_free_dma;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_segs = 52;
	mmc->max_blk_size = 1 << 0xf;
	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);

	platform_set_drvdata(pdev, mmc);

	spin_lock_init(&host->lock);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto out_free_dma;

	ret = mmc_add_host(mmc);
	if (ret)
		goto out_free_dma;

	dev_info(mmc_dev(host->mmc), "initialized\n");

	return 0;

out_free_dma:
	dma_release_channel(ssp->dmach);
out_clk_disable:
	clk_disable_unprepare(ssp->clk);
out_mmc_free:
	mmc_free_host(mmc);
	return ret;
}

static void mxs_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	mmc_remove_host(mmc);

	if (ssp->dmach)
		dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	mmc_free_host(mmc);
}

#ifdef CONFIG_PM_SLEEP
static int mxs_mmc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	clk_disable_unprepare(ssp->clk);
	return 0;
}

static int mxs_mmc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	return clk_prepare_enable(ssp->clk);
}
#endif

static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);

static struct platform_driver mxs_mmc_driver = {
	.probe		= mxs_mmc_probe,
	.remove_new	= mxs_mmc_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= &mxs_mmc_pm_ops,
		.of_match_table = mxs_mmc_dt_ids,
	},
};

module_platform_driver(mxs_mmc_driver);

MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);