xref: /kernel/linux/linux-6.6/drivers/mmc/host/mmci.c (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
4 *
5 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
6 *  Copyright (C) 2010 ST-Ericsson SA
7 */
8#include <linux/module.h>
9#include <linux/moduleparam.h>
10#include <linux/init.h>
11#include <linux/ioport.h>
12#include <linux/device.h>
13#include <linux/io.h>
14#include <linux/interrupt.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/delay.h>
18#include <linux/err.h>
19#include <linux/highmem.h>
20#include <linux/log2.h>
21#include <linux/mmc/mmc.h>
22#include <linux/mmc/pm.h>
23#include <linux/mmc/host.h>
24#include <linux/mmc/card.h>
25#include <linux/mmc/sd.h>
26#include <linux/mmc/slot-gpio.h>
27#include <linux/amba/bus.h>
28#include <linux/clk.h>
29#include <linux/scatterlist.h>
30#include <linux/of.h>
31#include <linux/regulator/consumer.h>
32#include <linux/dmaengine.h>
33#include <linux/dma-mapping.h>
34#include <linux/amba/mmci.h>
35#include <linux/pm_runtime.h>
36#include <linux/types.h>
37#include <linux/pinctrl/consumer.h>
38#include <linux/reset.h>
39#include <linux/gpio/consumer.h>
40#include <linux/workqueue.h>
41
42#include <asm/div64.h>
43#include <asm/io.h>
44
45#include "mmci.h"
46
47#define DRIVER_NAME "mmci-pl18x"
48
49static void mmci_variant_init(struct mmci_host *host);
50static void ux500_variant_init(struct mmci_host *host);
51static void ux500v2_variant_init(struct mmci_host *host);
52
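/* Default ceiling for the card clock frequency, in Hz. */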
53static unsigned int fmax = 515633;
54
55static struct variant_data variant_arm = {
56	.fifosize		= 16 * 4,
57	.fifohalfsize		= 8 * 4,
58	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
59	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
60	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
61	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
62	.datalength_bits	= 16,
63	.datactrl_blocksz	= 11,
64	.pwrreg_powerup		= MCI_PWR_UP,
65	.f_max			= 100000000,
66	.reversed_irq_handling	= true,
67	.mmcimask1		= true,
68	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
69	.start_err		= MCI_STARTBITERR,
70	.opendrain		= MCI_ROD,
71	.init			= mmci_variant_init,
72};
73
74static struct variant_data variant_arm_extended_fifo = {
75	.fifosize		= 128 * 4,
76	.fifohalfsize		= 64 * 4,
77	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
78	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
79	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
80	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
81	.datalength_bits	= 16,
82	.datactrl_blocksz	= 11,
83	.pwrreg_powerup		= MCI_PWR_UP,
84	.f_max			= 100000000,
85	.mmcimask1		= true,
86	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
87	.start_err		= MCI_STARTBITERR,
88	.opendrain		= MCI_ROD,
89	.init			= mmci_variant_init,
90};
91
92static struct variant_data variant_arm_extended_fifo_hwfc = {
93	.fifosize		= 128 * 4,
94	.fifohalfsize		= 64 * 4,
95	.clkreg_enable		= MCI_ARM_HWFCEN,
96	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
97	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
98	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
99	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
100	.datalength_bits	= 16,
101	.datactrl_blocksz	= 11,
102	.pwrreg_powerup		= MCI_PWR_UP,
103	.f_max			= 100000000,
104	.mmcimask1		= true,
105	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
106	.start_err		= MCI_STARTBITERR,
107	.opendrain		= MCI_ROD,
108	.init			= mmci_variant_init,
109};
110
111static struct variant_data variant_u300 = {
112	.fifosize		= 16 * 4,
113	.fifohalfsize		= 8 * 4,
114	.clkreg_enable		= MCI_ST_U300_HWFCEN,
115	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
116	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
117	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
118	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
119	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
120	.datalength_bits	= 16,
121	.datactrl_blocksz	= 11,
122	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
123	.st_sdio		= true,
124	.pwrreg_powerup		= MCI_PWR_ON,
125	.f_max			= 100000000,
126	.signal_direction	= true,
127	.pwrreg_clkgate		= true,
128	.pwrreg_nopower		= true,
129	.mmcimask1		= true,
130	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
131	.start_err		= MCI_STARTBITERR,
132	.opendrain		= MCI_OD,
133	.init			= mmci_variant_init,
134};
135
136static struct variant_data variant_nomadik = {
137	.fifosize		= 16 * 4,
138	.fifohalfsize		= 8 * 4,
139	.clkreg			= MCI_CLK_ENABLE,
140	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
141	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
142	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
143	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
144	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
145	.datalength_bits	= 24,
146	.datactrl_blocksz	= 11,
147	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
148	.st_sdio		= true,
149	.st_clkdiv		= true,
150	.pwrreg_powerup		= MCI_PWR_ON,
151	.f_max			= 100000000,
152	.signal_direction	= true,
153	.pwrreg_clkgate		= true,
154	.pwrreg_nopower		= true,
155	.mmcimask1		= true,
156	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
157	.start_err		= MCI_STARTBITERR,
158	.opendrain		= MCI_OD,
159	.init			= mmci_variant_init,
160};
161
162static struct variant_data variant_ux500 = {
163	.fifosize		= 30 * 4,
164	.fifohalfsize		= 8 * 4,
165	.clkreg			= MCI_CLK_ENABLE,
166	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
167	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
168	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
169	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
170	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
171	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
172	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
173	.datalength_bits	= 24,
174	.datactrl_blocksz	= 11,
175	.datactrl_any_blocksz	= true,
176	.dma_power_of_2		= true,
177	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
178	.st_sdio		= true,
179	.st_clkdiv		= true,
180	.pwrreg_powerup		= MCI_PWR_ON,
181	.f_max			= 100000000,
182	.signal_direction	= true,
183	.pwrreg_clkgate		= true,
184	.busy_detect		= true,
185	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
186	.busy_detect_flag	= MCI_ST_CARDBUSY,
187	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
188	.pwrreg_nopower		= true,
189	.mmcimask1		= true,
190	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
191	.start_err		= MCI_STARTBITERR,
192	.opendrain		= MCI_OD,
193	.init			= ux500_variant_init,
194};
195
196static struct variant_data variant_ux500v2 = {
197	.fifosize		= 30 * 4,
198	.fifohalfsize		= 8 * 4,
199	.clkreg			= MCI_CLK_ENABLE,
200	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
201	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
202	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
203	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
204	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
205	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
206	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
207	.datactrl_mask_ddrmode	= MCI_DPSM_ST_DDRMODE,
208	.datalength_bits	= 24,
209	.datactrl_blocksz	= 11,
210	.datactrl_any_blocksz	= true,
211	.dma_power_of_2		= true,
212	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
213	.st_sdio		= true,
214	.st_clkdiv		= true,
215	.pwrreg_powerup		= MCI_PWR_ON,
216	.f_max			= 100000000,
217	.signal_direction	= true,
218	.pwrreg_clkgate		= true,
219	.busy_detect		= true,
220	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
221	.busy_detect_flag	= MCI_ST_CARDBUSY,
222	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
223	.pwrreg_nopower		= true,
224	.mmcimask1		= true,
225	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
226	.start_err		= MCI_STARTBITERR,
227	.opendrain		= MCI_OD,
228	.init			= ux500v2_variant_init,
229};
230
231static struct variant_data variant_stm32 = {
232	.fifosize		= 32 * 4,
233	.fifohalfsize		= 8 * 4,
234	.clkreg			= MCI_CLK_ENABLE,
235	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
236	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
237	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
238	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
239	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
240	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
241	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
242	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
243	.datalength_bits	= 24,
244	.datactrl_blocksz	= 11,
245	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
246	.st_sdio		= true,
247	.st_clkdiv		= true,
248	.pwrreg_powerup		= MCI_PWR_ON,
249	.f_max			= 48000000,
250	.pwrreg_clkgate		= true,
251	.pwrreg_nopower		= true,
252	.init			= mmci_variant_init,
253};
254
255static struct variant_data variant_stm32_sdmmc = {
256	.fifosize		= 16 * 4,
257	.fifohalfsize		= 8 * 4,
258	.f_max			= 208000000,
259	.stm32_clkdiv		= true,
260	.cmdreg_cpsm_enable	= MCI_CPSM_STM32_ENABLE,
261	.cmdreg_lrsp_crc	= MCI_CPSM_STM32_LRSP_CRC,
262	.cmdreg_srsp_crc	= MCI_CPSM_STM32_SRSP_CRC,
263	.cmdreg_srsp		= MCI_CPSM_STM32_SRSP,
264	.cmdreg_stop		= MCI_CPSM_STM32_CMDSTOP,
265	.data_cmd_enable	= MCI_CPSM_STM32_CMDTRANS,
266	.irq_pio_mask		= MCI_IRQ_PIO_STM32_MASK,
267	.datactrl_first		= true,
268	.datacnt_useless	= true,
269	.datalength_bits	= 25,
270	.datactrl_blocksz	= 14,
271	.datactrl_any_blocksz	= true,
272	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
273	.stm32_idmabsize_mask	= GENMASK(12, 5),
274	.stm32_idmabsize_align	= BIT(5),
275	.busy_timeout		= true,
276	.busy_detect		= true,
277	.busy_detect_flag	= MCI_STM32_BUSYD0,
278	.busy_detect_mask	= MCI_STM32_BUSYD0ENDMASK,
279	.init			= sdmmc_variant_init,
280};
281
282static struct variant_data variant_stm32_sdmmcv2 = {
283	.fifosize		= 16 * 4,
284	.fifohalfsize		= 8 * 4,
285	.f_max			= 267000000,
286	.stm32_clkdiv		= true,
287	.cmdreg_cpsm_enable	= MCI_CPSM_STM32_ENABLE,
288	.cmdreg_lrsp_crc	= MCI_CPSM_STM32_LRSP_CRC,
289	.cmdreg_srsp_crc	= MCI_CPSM_STM32_SRSP_CRC,
290	.cmdreg_srsp		= MCI_CPSM_STM32_SRSP,
291	.cmdreg_stop		= MCI_CPSM_STM32_CMDSTOP,
292	.data_cmd_enable	= MCI_CPSM_STM32_CMDTRANS,
293	.irq_pio_mask		= MCI_IRQ_PIO_STM32_MASK,
294	.datactrl_first		= true,
295	.datacnt_useless	= true,
296	.datalength_bits	= 25,
297	.datactrl_blocksz	= 14,
298	.datactrl_any_blocksz	= true,
299	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
300	.stm32_idmabsize_mask	= GENMASK(16, 5),
301	.stm32_idmabsize_align	= BIT(5),
302	.dma_lli		= true,
303	.busy_timeout		= true,
304	.busy_detect		= true,
305	.busy_detect_flag	= MCI_STM32_BUSYD0,
306	.busy_detect_mask	= MCI_STM32_BUSYD0ENDMASK,
307	.init			= sdmmc_variant_init,
308};
309
310static struct variant_data variant_stm32_sdmmcv3 = {
311	.fifosize		= 256 * 4,
312	.fifohalfsize		= 128 * 4,
313	.f_max			= 267000000,
314	.stm32_clkdiv		= true,
315	.cmdreg_cpsm_enable	= MCI_CPSM_STM32_ENABLE,
316	.cmdreg_lrsp_crc	= MCI_CPSM_STM32_LRSP_CRC,
317	.cmdreg_srsp_crc	= MCI_CPSM_STM32_SRSP_CRC,
318	.cmdreg_srsp		= MCI_CPSM_STM32_SRSP,
319	.cmdreg_stop		= MCI_CPSM_STM32_CMDSTOP,
320	.data_cmd_enable	= MCI_CPSM_STM32_CMDTRANS,
321	.irq_pio_mask		= MCI_IRQ_PIO_STM32_MASK,
322	.datactrl_first		= true,
323	.datacnt_useless	= true,
324	.datalength_bits	= 25,
325	.datactrl_blocksz	= 14,
326	.datactrl_any_blocksz	= true,
327	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
328	.stm32_idmabsize_mask	= GENMASK(16, 6),
329	.stm32_idmabsize_align	= BIT(6),
330	.dma_lli		= true,
331	.busy_timeout		= true,
332	.busy_detect		= true,
333	.busy_detect_flag	= MCI_STM32_BUSYD0,
334	.busy_detect_mask	= MCI_STM32_BUSYD0ENDMASK,
335	.init			= sdmmc_variant_init,
336};
337
338static struct variant_data variant_qcom = {
339	.fifosize		= 16 * 4,
340	.fifohalfsize		= 8 * 4,
341	.clkreg			= MCI_CLK_ENABLE,
342	.clkreg_enable		= MCI_QCOM_CLK_FLOWENA |
343				  MCI_QCOM_CLK_SELECT_IN_FBCLK,
344	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
345	.datactrl_mask_ddrmode	= MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
346	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
347	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
348	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
349	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
350	.data_cmd_enable	= MCI_CPSM_QCOM_DATCMD,
351	.datalength_bits	= 24,
352	.datactrl_blocksz	= 11,
353	.datactrl_any_blocksz	= true,
354	.pwrreg_powerup		= MCI_PWR_UP,
355	.f_max			= 208000000,
356	.explicit_mclk_control	= true,
357	.qcom_fifo		= true,
358	.qcom_dml		= true,
359	.mmcimask1		= true,
360	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
361	.start_err		= MCI_STARTBITERR,
362	.opendrain		= MCI_ROD,
363	.init			= qcom_variant_init,
364};
365
366/* Busy detection for the ST Micro variant */
367static int mmci_card_busy(struct mmc_host *mmc)
368{
369	struct mmci_host *host = mmc_priv(mmc);
370	unsigned long flags;
371	int busy = 0;
372
373	spin_lock_irqsave(&host->lock, flags);
374	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
375		busy = 1;
376	spin_unlock_irqrestore(&host->lock, flags);
377
378	return busy;
379}
380
381static void mmci_reg_delay(struct mmci_host *host)
382{
383	/*
384	 * According to the spec, at least three feedback clock cycles
385	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
386	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
387	 * Worst delay time during card init is at 100 kHz => 30 us.
388	 * Worst delay time when up and running is at 25 MHz => 120 ns.
389	 */
390	if (host->cclk < 25000000)
391		udelay(30);
392	else
393		ndelay(120);
394}
395
396/*
397 * This must be called with host->lock held
398 */
399void mmci_write_clkreg(struct mmci_host *host, u32 clk)
400{
401	if (host->clk_reg != clk) {
402		host->clk_reg = clk;
403		writel(clk, host->base + MMCICLOCK);
404	}
405}
406
407/*
408 * This must be called with host->lock held
409 */
410void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
411{
412	if (host->pwr_reg != pwr) {
413		host->pwr_reg = pwr;
414		writel(pwr, host->base + MMCIPOWER);
415	}
416}
417
418/*
419 * This must be called with host->lock held
420 */
421static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
422{
423	/* Keep busy mode in DPSM if enabled */
424	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
425
426	if (host->datactrl_reg != datactrl) {
427		host->datactrl_reg = datactrl;
428		writel(datactrl, host->base + MMCIDATACTRL);
429	}
430}
431
432/*
433 * This must be called with host->lock held
434 */
435static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
436{
437	struct variant_data *variant = host->variant;
438	u32 clk = variant->clkreg;
439
440	/* Make sure cclk reflects the current calculated clock */
441	host->cclk = 0;
442
443	if (desired) {
444		if (variant->explicit_mclk_control) {
445			host->cclk = host->mclk;
446		} else if (desired >= host->mclk) {
447			clk = MCI_CLK_BYPASS;
448			if (variant->st_clkdiv)
449				clk |= MCI_ST_UX500_NEG_EDGE;
450			host->cclk = host->mclk;
451		} else if (variant->st_clkdiv) {
452			/*
453			 * DB8500 TRM says f = mclk / (clkdiv + 2)
454			 * => clkdiv = (mclk / f) - 2
455			 * Round the divider up so we don't exceed the max
456			 * frequency
457			 */
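			/*
			 * Worked example (illustrative numbers): with mclk at
			 * 100 MHz and 26 MHz desired, clkdiv becomes
			 * DIV_ROUND_UP(100000000, 26000000) - 2 = 2, so
			 * cclk = 100 MHz / (2 + 2) = 25 MHz, just under the
			 * requested rate.
			 */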
458			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
459			if (clk >= 256)
460				clk = 255;
461			host->cclk = host->mclk / (clk + 2);
462		} else {
463			/*
464			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
465			 * => clkdiv = mclk / (2 * f) - 1
466			 */
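			/*
			 * Worked example (illustrative numbers): with mclk at
			 * 100 MHz and the usual 400 kHz identification clock,
			 * clkdiv = 100000000 / (2 * 400000) - 1 = 124, so
			 * cclk = 100 MHz / (2 * 125) = 400 kHz exactly.
			 */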
467			clk = host->mclk / (2 * desired) - 1;
468			if (clk >= 256)
469				clk = 255;
470			host->cclk = host->mclk / (2 * (clk + 1));
471		}
472
473		clk |= variant->clkreg_enable;
474		clk |= MCI_CLK_ENABLE;
475		/* This hasn't proven to be worthwhile */
476		/* clk |= MCI_CLK_PWRSAVE; */
477	}
478
479	/* Set actual clock for debug */
480	host->mmc->actual_clock = host->cclk;
481
482	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
483		clk |= MCI_4BIT_BUS;
484	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
485		clk |= variant->clkreg_8bit_bus_enable;
486
487	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
488	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
489		clk |= variant->clkreg_neg_edge_enable;
490
491	mmci_write_clkreg(host, clk);
492}
493
494static void mmci_dma_release(struct mmci_host *host)
495{
496	if (host->ops && host->ops->dma_release)
497		host->ops->dma_release(host);
498
499	host->use_dma = false;
500}
501
502static void mmci_dma_setup(struct mmci_host *host)
503{
504	if (!host->ops || !host->ops->dma_setup)
505		return;
506
507	if (host->ops->dma_setup(host))
508		return;
509
510	/* initialize pre request cookie */
511	host->next_cookie = 1;
512
513	host->use_dma = true;
514}
515
516/*
517 * Validate mmc prerequisites
518 */
519static int mmci_validate_data(struct mmci_host *host,
520			      struct mmc_data *data)
521{
522	struct variant_data *variant = host->variant;
523
524	if (!data)
525		return 0;
526	if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
527		dev_err(mmc_dev(host->mmc),
528			"unsupported block size (%d bytes)\n", data->blksz);
529		return -EINVAL;
530	}
531
532	if (host->ops && host->ops->validate_data)
533		return host->ops->validate_data(host, data);
534
535	return 0;
536}
537
538static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
539{
540	int err;
541
542	if (!host->ops || !host->ops->prep_data)
543		return 0;
544
545	err = host->ops->prep_data(host, data, next);
546
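	/*
	 * Hand out a strictly positive cookie: if the counter has wrapped
	 * negative, use 1 instead, so 0 keeps meaning "not prepared".
	 */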
547	if (next && !err)
548		data->host_cookie = ++host->next_cookie < 0 ?
549			1 : host->next_cookie;
550
551	return err;
552}
553
554static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
555		      int err)
556{
557	if (host->ops && host->ops->unprep_data)
558		host->ops->unprep_data(host, data, err);
559
560	data->host_cookie = 0;
561}
562
563static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
564{
565	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
566
567	if (host->ops && host->ops->get_next_data)
568		host->ops->get_next_data(host, data);
569}
570
571static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
572{
573	struct mmc_data *data = host->data;
574	int ret;
575
576	if (!host->use_dma)
577		return -EINVAL;
578
579	ret = mmci_prep_data(host, data, false);
580	if (ret)
581		return ret;
582
583	if (!host->ops || !host->ops->dma_start)
584		return -EINVAL;
585
586	/* Okay, go for it. */
587	dev_vdbg(mmc_dev(host->mmc),
588		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
589		 data->sg_len, data->blksz, data->blocks, data->flags);
590
591	ret = host->ops->dma_start(host, &datactrl);
592	if (ret)
593		return ret;
594
595	/* Trigger the DMA transfer */
596	mmci_write_datactrlreg(host, datactrl);
597
598	/*
599	 * Let the MMCI say when the data is ended and it's time
600	 * to fire the next DMA request. When that happens, MMCI will
601	 * call mmci_data_end()
602	 */
603	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
604	       host->base + MMCIMASK0);
605	return 0;
606}
607
608static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
609{
610	if (!host->use_dma)
611		return;
612
613	if (host->ops && host->ops->dma_finalize)
614		host->ops->dma_finalize(host, data);
615}
616
617static void mmci_dma_error(struct mmci_host *host)
618{
619	if (!host->use_dma)
620		return;
621
622	if (host->ops && host->ops->dma_error)
623		host->ops->dma_error(host);
624}
625
626static void
627mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
628{
629	writel(0, host->base + MMCICOMMAND);
630
631	BUG_ON(host->data);
632
633	host->mrq = NULL;
634	host->cmd = NULL;
635
636	mmc_request_done(host->mmc, mrq);
637}
638
639static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
640{
641	void __iomem *base = host->base;
642	struct variant_data *variant = host->variant;
643
644	if (host->singleirq) {
645		unsigned int mask0 = readl(base + MMCIMASK0);
646
647		mask0 &= ~variant->irq_pio_mask;
648		mask0 |= mask;
649
650		writel(mask0, base + MMCIMASK0);
651	}
652
653	if (variant->mmcimask1)
654		writel(mask, base + MMCIMASK1);
655
656	host->mask1_reg = mask;
657}
658
659static void mmci_stop_data(struct mmci_host *host)
660{
661	mmci_write_datactrlreg(host, 0);
662	mmci_set_mask1(host, 0);
663	host->data = NULL;
664}
665
666static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
667{
668	unsigned int flags = SG_MITER_ATOMIC;
669
670	if (data->flags & MMC_DATA_READ)
671		flags |= SG_MITER_TO_SG;
672	else
673		flags |= SG_MITER_FROM_SG;
674
675	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
676}
677
678static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
679{
680	return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
681}
682
683static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
684{
685	return MCI_DPSM_ENABLE | (host->data->blksz << 16);
686}
687
688static void ux500_busy_clear_mask_done(struct mmci_host *host)
689{
690	void __iomem *base = host->base;
691
692	writel(host->variant->busy_detect_mask, base + MMCICLEAR);
693	writel(readl(base + MMCIMASK0) &
694	       ~host->variant->busy_detect_mask, base + MMCIMASK0);
695	host->busy_state = MMCI_BUSY_DONE;
696	host->busy_status = 0;
697}
698
699/*
700 * ux500_busy_complete() - this will wait until the busy status
701 * goes off, saving any status that occur in the meantime into
702 * host->busy_status until we know the card is not busy any more.
703 * The function returns true when the busy detection is ended
704 * and we should continue processing the command.
705 *
706 * The Ux500 typically fires two IRQs over a busy cycle like this:
707 *
708 *  DAT0 busy          +-----------------+
709 *                     |                 |
710 *  DAT0 not busy  ----+                 +--------
711 *
712 *                     ^                 ^
713 *                     |                 |
714 *                    IRQ1              IRQ2
715 */
716static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
717				u32 status, u32 err_msk)
718{
719	void __iomem *base = host->base;
720	int retries = 10;
721
722	if (status & err_msk) {
723		/* Stop any ongoing busy detection if an error occurs */
724		ux500_busy_clear_mask_done(host);
725		goto out_ret_state;
726	}
727
728	/*
729	 * The state transitions are encoded in a state machine crossing
730	 * the edges in this switch statement.
731	 */
732	switch (host->busy_state) {
733
734	/*
735	 * Before unmasking for the busy end IRQ, confirm that the
736	 * command was sent successfully. To keep track of having a
737	 * command in-progress, waiting for busy signaling to end,
738	 * store the status in host->busy_status.
739	 *
740	 * Note that, the card may need a couple of clock cycles before
741	 * it starts signaling busy on DAT0, hence re-read the
742	 * MMCISTATUS register here, to allow the busy bit to be set.
743	 */
744	case MMCI_BUSY_DONE:
745		/*
746		 * Save the first status register read to be sure to catch
747		 * all bits that may be lost will retrying. If the command
748		 * is still busy this will result in assigning 0 to
749		 * host->busy_status, which is what it should be in IDLE.
750		 */
751		host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
752		while (retries) {
753			status = readl(base + MMCISTATUS);
754			/* Keep accumulating status bits */
755			host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
756			if (status & host->variant->busy_detect_flag) {
757				writel(readl(base + MMCIMASK0) |
758				       host->variant->busy_detect_mask,
759				       base + MMCIMASK0);
760				host->busy_state = MMCI_BUSY_WAITING_FOR_START_IRQ;
761				schedule_delayed_work(&host->ux500_busy_timeout_work,
762				      msecs_to_jiffies(cmd->busy_timeout));
763				goto out_ret_state;
764			}
765			retries--;
766		}
767		dev_dbg(mmc_dev(host->mmc),
768			"no busy signalling in time CMD%02x\n", cmd->opcode);
769		ux500_busy_clear_mask_done(host);
770		break;
771
772	/*
773	 * If there is a command in-progress that has been successfully
774	 * sent, then bail out if busy status is set and wait for the
775	 * busy end IRQ.
776	 *
777	 * Note that, the HW triggers an IRQ on both edges while
778	 * monitoring DAT0 for busy completion, but there is only one
779	 * status bit in MMCISTATUS for the busy state. Therefore
780	 * both the start and the end interrupts need to be cleared,
781	 * one after the other. So, clear the busy start IRQ here.
782	 */
783	case MMCI_BUSY_WAITING_FOR_START_IRQ:
784		if (status & host->variant->busy_detect_flag) {
785			host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
786			writel(host->variant->busy_detect_mask, base + MMCICLEAR);
787			host->busy_state = MMCI_BUSY_WAITING_FOR_END_IRQ;
788		} else {
789			dev_dbg(mmc_dev(host->mmc),
790				"lost busy status when waiting for busy start IRQ CMD%02x\n",
791				cmd->opcode);
792			cancel_delayed_work(&host->ux500_busy_timeout_work);
793			ux500_busy_clear_mask_done(host);
794		}
795		break;
796
797	case MMCI_BUSY_WAITING_FOR_END_IRQ:
798		if (!(status & host->variant->busy_detect_flag)) {
799			host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
800			writel(host->variant->busy_detect_mask, base + MMCICLEAR);
801			cancel_delayed_work(&host->ux500_busy_timeout_work);
802			ux500_busy_clear_mask_done(host);
803		} else {
804			dev_dbg(mmc_dev(host->mmc),
805				"busy status still asserted when handling busy end IRQ - will keep waiting CMD%02x\n",
806				cmd->opcode);
807		}
808		break;
809
810	default:
811		dev_dbg(mmc_dev(host->mmc), "fell through on state %d, CMD%02x\n",
812			host->busy_state, cmd->opcode);
813		break;
814	}
815
816out_ret_state:
817	return (host->busy_state == MMCI_BUSY_DONE);
818}
819
820/*
821 * All the DMA operation mode stuff goes inside this ifdef.
822 * This assumes that you have a generic DMA device interface,
823 * no custom DMA interfaces are supported.
824 */
825#ifdef CONFIG_DMA_ENGINE
826struct mmci_dmae_next {
827	struct dma_async_tx_descriptor *desc;
828	struct dma_chan	*chan;
829};
830
831struct mmci_dmae_priv {
832	struct dma_chan	*cur;
833	struct dma_chan	*rx_channel;
834	struct dma_chan	*tx_channel;
835	struct dma_async_tx_descriptor	*desc_current;
836	struct mmci_dmae_next next_data;
837};
838
839int mmci_dmae_setup(struct mmci_host *host)
840{
841	const char *rxname, *txname;
842	struct mmci_dmae_priv *dmae;
843
844	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
845	if (!dmae)
846		return -ENOMEM;
847
848	host->dma_priv = dmae;
849
850	dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
851	if (IS_ERR(dmae->rx_channel)) {
852		int ret = PTR_ERR(dmae->rx_channel);
853		dmae->rx_channel = NULL;
854		return ret;
855	}
856
857	dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
858	if (IS_ERR(dmae->tx_channel)) {
859		if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
860			dev_warn(mmc_dev(host->mmc),
861				 "Deferred probe for TX channel ignored\n");
862		dmae->tx_channel = NULL;
863	}
864
865	/*
866	 * If only an RX channel is specified, the driver will
867	 * attempt to use it bidirectionally; however, if it
868	 * is specified but cannot be located, DMA will be disabled.
869	 */
870	if (dmae->rx_channel && !dmae->tx_channel)
871		dmae->tx_channel = dmae->rx_channel;
872
873	if (dmae->rx_channel)
874		rxname = dma_chan_name(dmae->rx_channel);
875	else
876		rxname = "none";
877
878	if (dmae->tx_channel)
879		txname = dma_chan_name(dmae->tx_channel);
880	else
881		txname = "none";
882
883	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
884		 rxname, txname);
885
886	/*
887	 * Limit the maximum segment size in any SG entry according to
888	 * the parameters of the DMA engine device.
889	 */
890	if (dmae->tx_channel) {
891		struct device *dev = dmae->tx_channel->device->dev;
892		unsigned int max_seg_size = dma_get_max_seg_size(dev);
893
894		if (max_seg_size < host->mmc->max_seg_size)
895			host->mmc->max_seg_size = max_seg_size;
896	}
897	if (dmae->rx_channel) {
898		struct device *dev = dmae->rx_channel->device->dev;
899		unsigned int max_seg_size = dma_get_max_seg_size(dev);
900
901		if (max_seg_size < host->mmc->max_seg_size)
902			host->mmc->max_seg_size = max_seg_size;
903	}
904
905	if (!dmae->tx_channel || !dmae->rx_channel) {
906		mmci_dmae_release(host);
907		return -EINVAL;
908	}
909
910	return 0;
911}
912
913/*
914 * This is only used from the DMA setup error path and at host
915 * teardown, so inline it so it can be discarded.
916 */
917void mmci_dmae_release(struct mmci_host *host)
918{
919	struct mmci_dmae_priv *dmae = host->dma_priv;
920
921	if (dmae->rx_channel)
922		dma_release_channel(dmae->rx_channel);
923	if (dmae->tx_channel)
924		dma_release_channel(dmae->tx_channel);
925	dmae->rx_channel = dmae->tx_channel = NULL;
926}
927
928static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
929{
930	struct mmci_dmae_priv *dmae = host->dma_priv;
931	struct dma_chan *chan;
932
933	if (data->flags & MMC_DATA_READ)
934		chan = dmae->rx_channel;
935	else
936		chan = dmae->tx_channel;
937
938	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
939		     mmc_get_dma_dir(data));
940}
941
942void mmci_dmae_error(struct mmci_host *host)
943{
944	struct mmci_dmae_priv *dmae = host->dma_priv;
945
946	if (!dma_inprogress(host))
947		return;
948
949	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
950	dmaengine_terminate_all(dmae->cur);
951	host->dma_in_progress = false;
952	dmae->cur = NULL;
953	dmae->desc_current = NULL;
954	host->data->host_cookie = 0;
955
956	mmci_dma_unmap(host, host->data);
957}
958
959void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
960{
961	struct mmci_dmae_priv *dmae = host->dma_priv;
962	u32 status;
963	int i;
964
965	if (!dma_inprogress(host))
966		return;
967
968	/* Wait up to 1ms for the DMA to complete */
969	for (i = 0; ; i++) {
970		status = readl(host->base + MMCISTATUS);
971		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
972			break;
973		udelay(10);
974	}
975
976	/*
977	 * Check to see whether we still have some data left in the FIFO -
978	 * this catches DMA controllers which are unable to monitor the
979	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
980	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
981	 */
982	if (status & MCI_RXDATAAVLBLMASK) {
983		mmci_dma_error(host);
984		if (!data->error)
985			data->error = -EIO;
986	} else if (!data->host_cookie) {
987		mmci_dma_unmap(host, data);
988	}
989
990	/*
991	 * Use of DMA with scatter-gather is impossible.
992	 * Give up on DMA and switch back to PIO mode.
993	 */
994	if (status & MCI_RXDATAAVLBLMASK) {
995		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
996		mmci_dma_release(host);
997	}
998
999	host->dma_in_progress = false;
1000	dmae->cur = NULL;
1001	dmae->desc_current = NULL;
1002}
1003
1004/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
1005static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
1006				struct dma_chan **dma_chan,
1007				struct dma_async_tx_descriptor **dma_desc)
1008{
1009	struct mmci_dmae_priv *dmae = host->dma_priv;
1010	struct variant_data *variant = host->variant;
1011	struct dma_slave_config conf = {
1012		.src_addr = host->phybase + MMCIFIFO,
1013		.dst_addr = host->phybase + MMCIFIFO,
1014		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
1015		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
1016		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
1017		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
1018		.device_fc = false,
1019	};
1020	struct dma_chan *chan;
1021	struct dma_device *device;
1022	struct dma_async_tx_descriptor *desc;
1023	int nr_sg;
1024	unsigned long flags = DMA_CTRL_ACK;
1025
1026	if (data->flags & MMC_DATA_READ) {
1027		conf.direction = DMA_DEV_TO_MEM;
1028		chan = dmae->rx_channel;
1029	} else {
1030		conf.direction = DMA_MEM_TO_DEV;
1031		chan = dmae->tx_channel;
1032	}
1033
1034	/* If there's no DMA channel, fall back to PIO */
1035	if (!chan)
1036		return -EINVAL;
1037
1038	/* If less than or equal to the fifo size, don't bother with DMA */
1039	if (data->blksz * data->blocks <= variant->fifosize)
1040		return -EINVAL;
1041
1042	/*
1043	 * This is necessary to get SDIO working on the Ux500. We do not yet
1044	 * know if this is a bug in:
1045	 * - The Ux500 DMA controller (DMA40)
1046	 * - The MMCI DMA interface on the Ux500
1047	 * Some power-of-two block sizes (such as 64 bytes) are sent regularly
1048	 * during SDIO traffic and those work fine, so for these we enable DMA
1049	 * transfers.
1050	 */
1051	if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
1052		return -EINVAL;
1053
1054	device = chan->device;
1055	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
1056			   mmc_get_dma_dir(data));
1057	if (nr_sg == 0)
1058		return -EINVAL;
1059
1060	if (host->variant->qcom_dml)
1061		flags |= DMA_PREP_INTERRUPT;
1062
1063	dmaengine_slave_config(chan, &conf);
1064	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
1065					    conf.direction, flags);
1066	if (!desc)
1067		goto unmap_exit;
1068
1069	*dma_chan = chan;
1070	*dma_desc = desc;
1071
1072	return 0;
1073
1074 unmap_exit:
1075	dma_unmap_sg(device->dev, data->sg, data->sg_len,
1076		     mmc_get_dma_dir(data));
1077	return -ENOMEM;
1078}
1079
1080int mmci_dmae_prep_data(struct mmci_host *host,
1081			struct mmc_data *data,
1082			bool next)
1083{
1084	struct mmci_dmae_priv *dmae = host->dma_priv;
1085	struct mmci_dmae_next *nd = &dmae->next_data;
1086
1087	if (!host->use_dma)
1088		return -EINVAL;
1089
1090	if (next)
1091		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
1092	/* Check if next job is already prepared. */
1093	if (dmae->cur && dmae->desc_current)
1094		return 0;
1095
1096	/* No job was prepared, thus do it now. */
1097	return _mmci_dmae_prep_data(host, data, &dmae->cur,
1098				    &dmae->desc_current);
1099}
1100
1101int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
1102{
1103	struct mmci_dmae_priv *dmae = host->dma_priv;
1104	int ret;
1105
1106	host->dma_in_progress = true;
1107	ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
1108	if (ret < 0) {
1109		host->dma_in_progress = false;
1110		return ret;
1111	}
1112	dma_async_issue_pending(dmae->cur);
1113
1114	*datactrl |= MCI_DPSM_DMAENABLE;
1115
1116	return 0;
1117}
1118
1119void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
1120{
1121	struct mmci_dmae_priv *dmae = host->dma_priv;
1122	struct mmci_dmae_next *next = &dmae->next_data;
1123
1124	if (!host->use_dma)
1125		return;
1126
1127	WARN_ON(!data->host_cookie && (next->desc || next->chan));
1128
1129	dmae->desc_current = next->desc;
1130	dmae->cur = next->chan;
1131	next->desc = NULL;
1132	next->chan = NULL;
1133}
1134
1135void mmci_dmae_unprep_data(struct mmci_host *host,
1136			   struct mmc_data *data, int err)
1137
1138{
1139	struct mmci_dmae_priv *dmae = host->dma_priv;
1140
1141	if (!host->use_dma)
1142		return;
1143
1144	mmci_dma_unmap(host, data);
1145
1146	if (err) {
1147		struct mmci_dmae_next *next = &dmae->next_data;
1148		struct dma_chan *chan;
1149		if (data->flags & MMC_DATA_READ)
1150			chan = dmae->rx_channel;
1151		else
1152			chan = dmae->tx_channel;
1153		dmaengine_terminate_all(chan);
1154
1155		if (dmae->desc_current == next->desc)
1156			dmae->desc_current = NULL;
1157
1158		if (dmae->cur == next->chan) {
1159			host->dma_in_progress = false;
1160			dmae->cur = NULL;
1161		}
1162
1163		next->desc = NULL;
1164		next->chan = NULL;
1165	}
1166}
1167
1168static struct mmci_host_ops mmci_variant_ops = {
1169	.prep_data = mmci_dmae_prep_data,
1170	.unprep_data = mmci_dmae_unprep_data,
1171	.get_datactrl_cfg = mmci_get_dctrl_cfg,
1172	.get_next_data = mmci_dmae_get_next_data,
1173	.dma_setup = mmci_dmae_setup,
1174	.dma_release = mmci_dmae_release,
1175	.dma_start = mmci_dmae_start,
1176	.dma_finalize = mmci_dmae_finalize,
1177	.dma_error = mmci_dmae_error,
1178};
1179#else
1180static struct mmci_host_ops mmci_variant_ops = {
1181	.get_datactrl_cfg = mmci_get_dctrl_cfg,
1182};
1183#endif
1184
1185static void mmci_variant_init(struct mmci_host *host)
1186{
1187	host->ops = &mmci_variant_ops;
1188}
1189
1190static void ux500_variant_init(struct mmci_host *host)
1191{
1192	host->ops = &mmci_variant_ops;
1193	host->ops->busy_complete = ux500_busy_complete;
1194}
1195
1196static void ux500v2_variant_init(struct mmci_host *host)
1197{
1198	host->ops = &mmci_variant_ops;
1199	host->ops->busy_complete = ux500_busy_complete;
1200	host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
1201}
1202
1203static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
1204{
1205	struct mmci_host *host = mmc_priv(mmc);
1206	struct mmc_data *data = mrq->data;
1207
1208	if (!data)
1209		return;
1210
1211	WARN_ON(data->host_cookie);
1212
1213	if (mmci_validate_data(host, data))
1214		return;
1215
1216	mmci_prep_data(host, data, true);
1217}
1218
1219static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
1220			      int err)
1221{
1222	struct mmci_host *host = mmc_priv(mmc);
1223	struct mmc_data *data = mrq->data;
1224
1225	if (!data || !data->host_cookie)
1226		return;
1227
1228	mmci_unprep_data(host, data, err);
1229}
1230
1231static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
1232{
1233	struct variant_data *variant = host->variant;
1234	unsigned int datactrl, timeout, irqmask;
1235	unsigned long long clks;
1236	void __iomem *base;
1237
1238	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
1239		data->blksz, data->blocks, data->flags);
1240
1241	host->data = data;
1242	host->size = data->blksz * data->blocks;
1243	data->bytes_xfered = 0;
1244
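	/*
	 * Convert the nanosecond part of the timeout into card clock cycles:
	 * clks = timeout_ns * cclk / NSEC_PER_SEC. As an illustrative
	 * example, 100 ms at a 25 MHz card clock is
	 * 100000000 * 25000000 / 1000000000 = 2500000 cycles.
	 */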
1245	clks = (unsigned long long)data->timeout_ns * host->cclk;
1246	do_div(clks, NSEC_PER_SEC);
1247
1248	timeout = data->timeout_clks + (unsigned int)clks;
1249
1250	base = host->base;
1251	writel(timeout, base + MMCIDATATIMER);
1252	writel(host->size, base + MMCIDATALENGTH);
1253
1254	datactrl = host->ops->get_datactrl_cfg(host);
1255	datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;
1256
1257	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
1258		u32 clk;
1259
1260		datactrl |= variant->datactrl_mask_sdio;
1261
1262		/*
1263		 * The ST Micro variant for SDIO small write transfers
1264		 * needs to have clock H/W flow control disabled,
1265		 * otherwise the transfer will not start. The threshold
1266		 * depends on the rate of MCLK.
1267		 */
1268		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
1269		    (host->size < 8 ||
1270		     (host->size <= 8 && host->mclk > 50000000)))
1271			clk = host->clk_reg & ~variant->clkreg_enable;
1272		else
1273			clk = host->clk_reg | variant->clkreg_enable;
1274
1275		mmci_write_clkreg(host, clk);
1276	}
1277
1278	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
1279	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
1280		datactrl |= variant->datactrl_mask_ddrmode;
1281
1282	/*
1283	 * Attempt to use DMA operation mode, if this
1284	 * should fail, fall back to PIO mode
1285	 */
1286	if (!mmci_dma_start(host, datactrl))
1287		return;
1288
1289	/* IRQ mode, map the SG list for CPU reading/writing */
1290	mmci_init_sg(host, data);
1291
1292	if (data->flags & MMC_DATA_READ) {
1293		irqmask = MCI_RXFIFOHALFFULLMASK;
1294
1295		/*
1296		 * If we have less than the fifo 'half-full' threshold to
1297		 * transfer, trigger a PIO interrupt as soon as any data
1298		 * is available.
1299		 */
1300		if (host->size < variant->fifohalfsize)
1301			irqmask |= MCI_RXDATAAVLBLMASK;
1302	} else {
1303		/*
1304		 * We don't actually need to include "FIFO empty" here
1305		 * since its implicit in "FIFO half empty".
1306		 */
1307		irqmask = MCI_TXFIFOHALFEMPTYMASK;
1308	}
1309
1310	mmci_write_datactrlreg(host, datactrl);
1311	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
1312	mmci_set_mask1(host, irqmask);
1313}
1314
1315static void
1316mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
1317{
1318	void __iomem *base = host->base;
1319	bool busy_resp = cmd->flags & MMC_RSP_BUSY;
1320	unsigned long long clks;
1321
1322	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
1323	    cmd->opcode, cmd->arg, cmd->flags);
1324
1325	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
1326		writel(0, base + MMCICOMMAND);
1327		mmci_reg_delay(host);
1328	}
1329
1330	if (host->variant->cmdreg_stop &&
1331	    cmd->opcode == MMC_STOP_TRANSMISSION)
1332		c |= host->variant->cmdreg_stop;
1333
1334	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
1335	if (cmd->flags & MMC_RSP_PRESENT) {
1336		if (cmd->flags & MMC_RSP_136)
1337			c |= host->variant->cmdreg_lrsp_crc;
1338		else if (cmd->flags & MMC_RSP_CRC)
1339			c |= host->variant->cmdreg_srsp_crc;
1340		else
1341			c |= host->variant->cmdreg_srsp;
1342	}
1343
1344	host->busy_status = 0;
1345	host->busy_state = MMCI_BUSY_DONE;
1346
1347	/* Assign a default timeout if the core does not provide one */
1348	if (busy_resp && !cmd->busy_timeout)
1349		cmd->busy_timeout = 10 * MSEC_PER_SEC;
1350
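	/*
	 * The busy timeout below is programmed in card clock cycles:
	 * clks = busy_timeout_ms * cclk / MSEC_PER_SEC. As an illustrative
	 * example, the 10 s default at a 100 MHz card clock is
	 * 10000 * 100000000 / 1000 = 1000000000 cycles.
	 */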
1351	if (busy_resp && host->variant->busy_timeout) {
1352		if (cmd->busy_timeout > host->mmc->max_busy_timeout)
1353			clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
1354		else
1355			clks = (unsigned long long)cmd->busy_timeout * host->cclk;
1356
1357		do_div(clks, MSEC_PER_SEC);
1358		writel_relaxed(clks, host->base + MMCIDATATIMER);
1359	}
1360
1361	if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
1362		host->ops->pre_sig_volt_switch(host);
1363
1364	if (/*interrupt*/0)
1365		c |= MCI_CPSM_INTERRUPT;
1366
1367	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
1368		c |= host->variant->data_cmd_enable;
1369
1370	host->cmd = cmd;
1371
1372	writel(cmd->arg, base + MMCIARGUMENT);
1373	writel(c, base + MMCICOMMAND);
1374}
1375
1376static void mmci_stop_command(struct mmci_host *host)
1377{
1378	host->stop_abort.error = 0;
1379	mmci_start_command(host, &host->stop_abort, 0);
1380}
1381
1382static void
1383mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
1384	      unsigned int status)
1385{
1386	unsigned int status_err;
1387
1388	/* Make sure we have data to handle */
1389	if (!data)
1390		return;
1391
1392	/* First check for errors */
1393	status_err = status & (host->variant->start_err |
1394			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
1395			       MCI_TXUNDERRUN | MCI_RXOVERRUN);
1396
1397	if (status_err) {
1398		u32 remain, success;
1399
1400		/* Terminate the DMA transfer */
1401		mmci_dma_error(host);
1402
1403		/*
1404		 * Calculate how far we are into the transfer.  Note that
1405		 * the data counter gives the number of bytes transferred
1406		 * on the MMC bus, not on the host side.  On reads, this
1407		 * can be as much as a FIFO-worth of data ahead.  This
1408		 * matters for FIFO overruns only.
1409		 */
1410		if (!host->variant->datacnt_useless) {
1411			remain = readl(host->base + MMCIDATACNT);
1412			success = data->blksz * data->blocks - remain;
1413		} else {
1414			success = 0;
1415		}
1416
1417		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
1418			status_err, success);
1419		if (status_err & MCI_DATACRCFAIL) {
1420			/* Last block was not successful */
1421			success -= 1;
1422			data->error = -EILSEQ;
1423		} else if (status_err & MCI_DATATIMEOUT) {
1424			data->error = -ETIMEDOUT;
1425		} else if (status_err & MCI_STARTBITERR) {
1426			data->error = -ECOMM;
1427		} else if (status_err & MCI_TXUNDERRUN) {
1428			data->error = -EIO;
1429		} else if (status_err & MCI_RXOVERRUN) {
1430			if (success > host->variant->fifosize)
1431				success -= host->variant->fifosize;
1432			else
1433				success = 0;
1434			data->error = -EIO;
1435		}
1436		data->bytes_xfered = round_down(success, data->blksz);
1437	}
1438
1439	if (status & MCI_DATABLOCKEND)
1440		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
1441
1442	if (status & MCI_DATAEND || data->error) {
1443		mmci_dma_finalize(host, data);
1444
1445		mmci_stop_data(host);
1446
1447		if (!data->error)
1448			/* The error clause is handled above, success! */
1449			data->bytes_xfered = data->blksz * data->blocks;
1450
1451		if (!data->stop) {
1452			if (host->variant->cmdreg_stop && data->error)
1453				mmci_stop_command(host);
1454			else
1455				mmci_request_end(host, data->mrq);
1456		} else if (host->mrq->sbc && !data->error) {
1457			mmci_request_end(host, data->mrq);
1458		} else {
1459			mmci_start_command(host, data->stop, 0);
1460		}
1461	}
1462}
1463
1464static void
1465mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
1466	     unsigned int status)
1467{
1468	u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
1469	void __iomem *base = host->base;
1470	bool sbc, busy_resp;
1471
1472	if (!cmd)
1473		return;
1474
1475	sbc = (cmd == host->mrq->sbc);
1476	busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
1477
1478	/*
1479	 * At least one of these interrupts must be raised for the command
1480	 * to be considered worth handling. Note that we tag on any latent
1481	 * IRQs postponed due to waiting for busy status.
1482	 */
1483	if (host->variant->busy_timeout && busy_resp)
1484		err_msk |= MCI_DATATIMEOUT;
1485
1486	if (!((status | host->busy_status) &
1487	      (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
1488		return;
1489
1490	/* Handle busy detection on DAT0 if the variant supports it. */
1491	if (busy_resp && host->variant->busy_detect)
1492		if (!host->ops->busy_complete(host, cmd, status, err_msk))
1493			return;
1494
1495	host->cmd = NULL;
1496
1497	if (status & MCI_CMDTIMEOUT) {
1498		cmd->error = -ETIMEDOUT;
1499	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
1500		cmd->error = -EILSEQ;
1501	} else if (host->variant->busy_timeout && busy_resp &&
1502		   status & MCI_DATATIMEOUT) {
1503		cmd->error = -ETIMEDOUT;
1504		/*
1505		 * This will wake up mmci_irq_thread() which will issue
1506		 * a hardware reset of the MMCI block.
1507		 */
1508		host->irq_action = IRQ_WAKE_THREAD;
1509	} else {
1510		cmd->resp[0] = readl(base + MMCIRESPONSE0);
1511		cmd->resp[1] = readl(base + MMCIRESPONSE1);
1512		cmd->resp[2] = readl(base + MMCIRESPONSE2);
1513		cmd->resp[3] = readl(base + MMCIRESPONSE3);
1514	}
1515
1516	if ((!sbc && !cmd->data) || cmd->error) {
1517		if (host->data) {
1518			/* Terminate the DMA transfer */
1519			mmci_dma_error(host);
1520
1521			mmci_stop_data(host);
1522			if (host->variant->cmdreg_stop && cmd->error) {
1523				mmci_stop_command(host);
1524				return;
1525			}
1526		}
1527
1528		if (host->irq_action != IRQ_WAKE_THREAD)
1529			mmci_request_end(host, host->mrq);
1530
1531	} else if (sbc) {
1532		mmci_start_command(host, host->mrq->cmd, 0);
1533	} else if (!host->variant->datactrl_first &&
1534		   !(cmd->data->flags & MMC_DATA_READ)) {
1535		mmci_start_data(host, cmd->data);
1536	}
1537}
1538
1539static char *ux500_state_str(struct mmci_host *host)
1540{
1541	switch (host->busy_state) {
1542	case MMCI_BUSY_WAITING_FOR_START_IRQ:
1543		return "waiting for start IRQ";
1544	case MMCI_BUSY_WAITING_FOR_END_IRQ:
1545		return "waiting for end IRQ";
1546	case MMCI_BUSY_DONE:
1547		return "not waiting for IRQs";
1548	default:
1549		return "unknown";
1550	}
1551}
1552
1553/*
1554 * This busy timeout worker is used to "kick" the command IRQ if a
1555 * busy detect IRQ fails to appear in reasonable time. Only used on
1556 * variants with busy detection IRQ delivery.
1557 */
1558static void ux500_busy_timeout_work(struct work_struct *work)
1559{
1560	struct mmci_host *host = container_of(work, struct mmci_host,
1561					ux500_busy_timeout_work.work);
1562	unsigned long flags;
1563	u32 status;
1564
1565	spin_lock_irqsave(&host->lock, flags);
1566
1567	if (host->cmd) {
1568		/* If we are still busy let's tag on a cmd-timeout error. */
1569		status = readl(host->base + MMCISTATUS);
1570		if (status & host->variant->busy_detect_flag) {
1571			status |= MCI_CMDTIMEOUT;
1572			dev_err(mmc_dev(host->mmc),
1573				"timeout in state %s still busy with CMD%02x\n",
1574				ux500_state_str(host), host->cmd->opcode);
1575		} else {
1576			dev_err(mmc_dev(host->mmc),
1577				"timeout in state %s waiting for busy CMD%02x\n",
1578				ux500_state_str(host), host->cmd->opcode);
1579		}
1580
1581		mmci_cmd_irq(host, host->cmd, status);
1582	}
1583
1584	spin_unlock_irqrestore(&host->lock, flags);
1585}
1586
1587static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
1588{
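	/*
	 * MMCIFIFOCNT holds the number of 32-bit words still to be
	 * transferred, so shifting it left by 2 converts it to bytes;
	 * subtracting that from the bytes remaining gives the number of
	 * bytes already sitting in the FIFO and ready to be read.
	 */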
1589	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
1590}
1591
1592static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
1593{
1594	/*
1595	 * On Qcom SDCC4, only 8 words are used in each burst, so only 8
1596	 * addresses from the FIFO range should be used.
1597	 */
1598	if (status & MCI_RXFIFOHALFFULL)
1599		return host->variant->fifohalfsize;
1600	else if (status & MCI_RXDATAAVLBL)
1601		return 4;
1602
1603	return 0;
1604}
1605
1606static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
1607{
1608	void __iomem *base = host->base;
1609	char *ptr = buffer;
1610	u32 status = readl(host->base + MMCISTATUS);
1611	int host_remain = host->size;
1612
1613	do {
1614		int count = host->get_rx_fifocnt(host, status, host_remain);
1615
1616		if (count > remain)
1617			count = remain;
1618
1619		if (count <= 0)
1620			break;
1621
1622		/*
1623		 * SDIO especially may want to send something that is
1624		 * not divisible by 4 (as opposed to card sectors
1625		 * etc). Therefore make sure to always read the last bytes
1626		 * while only doing full 32-bit reads towards the FIFO.
1627		 */
1628		if (unlikely(count & 0x3)) {
1629			if (count < 4) {
1630				unsigned char buf[4];
1631				ioread32_rep(base + MMCIFIFO, buf, 1);
1632				memcpy(ptr, buf, count);
1633			} else {
1634				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
1635				count &= ~0x3;
1636			}
1637		} else {
1638			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
1639		}
1640
1641		ptr += count;
1642		remain -= count;
1643		host_remain -= count;
1644
1645		if (remain == 0)
1646			break;
1647
1648		status = readl(base + MMCISTATUS);
1649	} while (status & MCI_RXDATAAVLBL);
1650
1651	return ptr - buffer;
1652}
1653
1654static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
1655{
1656	struct variant_data *variant = host->variant;
1657	void __iomem *base = host->base;
1658	char *ptr = buffer;
1659
1660	do {
1661		unsigned int count, maxcnt;
1662
1663		maxcnt = status & MCI_TXFIFOEMPTY ?
1664			 variant->fifosize : variant->fifohalfsize;
1665		count = min(remain, maxcnt);
1666
1667		/*
1668		 * SDIO especially may want to send something that is
1669		 * not divisible by 4 (as opposed to card sectors
1670		 * etc), and the FIFO only accept full 32-bit writes.
1671		 * So compensate by adding +3 on the count, a single
1672		 * byte become a 32bit write, 7 bytes will be two
1673		 * 32bit writes etc.
1674		 */
1675		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
1676
1677		ptr += count;
1678		remain -= count;
1679
1680		if (remain == 0)
1681			break;
1682
1683		status = readl(base + MMCISTATUS);
1684	} while (status & MCI_TXFIFOHALFEMPTY);
1685
1686	return ptr - buffer;
1687}
1688
1689/*
1690 * PIO data transfer IRQ handler.
1691 */
1692static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
1693{
1694	struct mmci_host *host = dev_id;
1695	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1696	struct variant_data *variant = host->variant;
1697	void __iomem *base = host->base;
1698	u32 status;
1699
1700	status = readl(base + MMCISTATUS);
1701
1702	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
1703
1704	do {
1705		unsigned int remain, len;
1706		char *buffer;
1707
1708		/*
1709		 * For write, we only need to test the half-empty flag
1710		 * here - if the FIFO is completely empty, then by
1711		 * definition it is more than half empty.
1712		 *
1713		 * For read, check for data available.
1714		 */
1715		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
1716			break;
1717
1718		if (!sg_miter_next(sg_miter))
1719			break;
1720
1721		buffer = sg_miter->addr;
1722		remain = sg_miter->length;
1723
1724		len = 0;
1725		if (status & MCI_RXACTIVE)
1726			len = mmci_pio_read(host, buffer, remain);
1727		if (status & MCI_TXACTIVE)
1728			len = mmci_pio_write(host, buffer, remain, status);
1729
1730		sg_miter->consumed = len;
1731
1732		host->size -= len;
1733		remain -= len;
1734
1735		if (remain)
1736			break;
1737
1738		status = readl(base + MMCISTATUS);
1739	} while (1);
1740
1741	sg_miter_stop(sg_miter);
1742
1743	/*
1744	 * If we have less than the fifo 'half-full' threshold to transfer,
1745	 * trigger a PIO interrupt as soon as any data is available.
1746	 */
1747	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
1748		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
1749
1750	/*
1751	 * If we run out of data, disable the data IRQs; this
1752	 * prevents a race where the FIFO becomes empty before
1753	 * the chip itself has disabled the data path, and
1754	 * stops us racing with our data end IRQ.
1755	 */
1756	if (host->size == 0) {
1757		mmci_set_mask1(host, 0);
1758		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
1759	}
1760
1761	return IRQ_HANDLED;
1762}
1763
1764/*
1765 * Handle completion of command and data transfers.
1766 */
1767static irqreturn_t mmci_irq(int irq, void *dev_id)
1768{
1769	struct mmci_host *host = dev_id;
1770	u32 status;
1771
1772	spin_lock(&host->lock);
1773	host->irq_action = IRQ_HANDLED;
1774
1775	do {
1776		status = readl(host->base + MMCISTATUS);
1777		if (!status)
1778			break;
1779
1780		if (host->singleirq) {
1781			if (status & host->mask1_reg)
1782				mmci_pio_irq(irq, dev_id);
1783
1784			status &= ~host->variant->irq_pio_mask;
1785		}
1786
1787		/*
1788		 * Busy detection is managed by mmci_cmd_irq(), including
1789		 * clearing the corresponding IRQ.
1790		 */
1791		status &= readl(host->base + MMCIMASK0);
1792		if (host->variant->busy_detect)
1793			writel(status & ~host->variant->busy_detect_mask,
1794			       host->base + MMCICLEAR);
1795		else
1796			writel(status, host->base + MMCICLEAR);
1797
1798		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
1799
1800		if (host->variant->reversed_irq_handling) {
1801			mmci_data_irq(host, host->data, status);
1802			mmci_cmd_irq(host, host->cmd, status);
1803		} else {
1804			mmci_cmd_irq(host, host->cmd, status);
1805			mmci_data_irq(host, host->data, status);
1806		}
1807
1808		/*
1809		 * Busy detection has been handled by mmci_cmd_irq() above.
1810		 * Clear the status bit to prevent polling in IRQ context.
1811		 */
1812		if (host->variant->busy_detect_flag)
1813			status &= ~host->variant->busy_detect_flag;
1814
1815	} while (status);
1816
1817	spin_unlock(&host->lock);
1818
1819	return host->irq_action;
1820}
1821
1822/*
1823 * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
1824 *
1825 * A reset is needed for some variants, where a datatimeout for a R1B request
1826 * causes the DPSM to stay busy (non-functional).
1827 */
1828static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
1829{
1830	struct mmci_host *host = dev_id;
1831	unsigned long flags;
1832
1833	if (host->rst) {
1834		reset_control_assert(host->rst);
1835		udelay(2);
1836		reset_control_deassert(host->rst);
1837	}
1838
1839	spin_lock_irqsave(&host->lock, flags);
1840	writel(host->clk_reg, host->base + MMCICLOCK);
1841	writel(host->pwr_reg, host->base + MMCIPOWER);
1842	writel(MCI_IRQENABLE | host->variant->start_err,
1843	       host->base + MMCIMASK0);
1844
1845	host->irq_action = IRQ_HANDLED;
1846	mmci_request_end(host, host->mrq);
1847	spin_unlock_irqrestore(&host->lock, flags);
1848
1849	return host->irq_action;
1850}
1851
1852static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1853{
1854	struct mmci_host *host = mmc_priv(mmc);
1855	unsigned long flags;
1856
1857	WARN_ON(host->mrq != NULL);
1858
1859	mrq->cmd->error = mmci_validate_data(host, mrq->data);
1860	if (mrq->cmd->error) {
1861		mmc_request_done(mmc, mrq);
1862		return;
1863	}
1864
1865	spin_lock_irqsave(&host->lock, flags);
1866
1867	host->mrq = mrq;
1868
1869	if (mrq->data)
1870		mmci_get_next_data(host, mrq->data);
1871
1872	if (mrq->data &&
1873	    (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
1874		mmci_start_data(host, mrq->data);
1875
1876	if (mrq->sbc)
1877		mmci_start_command(host, mrq->sbc, 0);
1878	else
1879		mmci_start_command(host, mrq->cmd, 0);
1880
1881	spin_unlock_irqrestore(&host->lock, flags);
1882}
1883
1884static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
1885{
1886	struct mmci_host *host = mmc_priv(mmc);
1887	u32 max_busy_timeout = 0;
1888
1889	if (!host->variant->busy_detect)
1890		return;
1891
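	/*
	 * The busy timer counts card clock cycles in a 32-bit register, so
	 * the longest timeout in ms is U32_MAX divided by the clock rate in
	 * kHz. As an illustrative example, at 100 MHz this is
	 * 4294967295 / 100000 = 42949 ms, roughly 43 seconds.
	 */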
1892	if (host->variant->busy_timeout && mmc->actual_clock)
1893		max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
1894							  MSEC_PER_SEC);
1895
1896	mmc->max_busy_timeout = max_busy_timeout;
1897}
1898
1899static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1900{
1901	struct mmci_host *host = mmc_priv(mmc);
1902	struct variant_data *variant = host->variant;
1903	u32 pwr = 0;
1904	unsigned long flags;
1905	int ret;
1906
1907	switch (ios->power_mode) {
1908	case MMC_POWER_OFF:
1909		if (!IS_ERR(mmc->supply.vmmc))
1910			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1911
1912		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
1913			regulator_disable(mmc->supply.vqmmc);
1914			host->vqmmc_enabled = false;
1915		}
1916
1917		break;
1918	case MMC_POWER_UP:
1919		if (!IS_ERR(mmc->supply.vmmc))
1920			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
1921
1922		/*
1923		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
1924		 * and instead uses MCI_PWR_ON, so apply whatever value is
1925		 * configured in the variant data.
1926		 */
1927		pwr |= variant->pwrreg_powerup;
1928
1929		break;
1930	case MMC_POWER_ON:
1931		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
1932			ret = regulator_enable(mmc->supply.vqmmc);
1933			if (ret < 0)
1934				dev_err(mmc_dev(mmc),
1935					"failed to enable vqmmc regulator\n");
1936			else
1937				host->vqmmc_enabled = true;
1938		}
1939
1940		pwr |= MCI_PWR_ON;
1941		break;
1942	}
1943
1944	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
1945		/*
1946		 * The ST Micro variant has some additional bits
1947		 * indicating signal direction for the signals in
1948		 * the SD/MMC bus and feedback-clock usage.
1949		 */
1950		pwr |= host->pwr_reg_add;
1951
1952		if (ios->bus_width == MMC_BUS_WIDTH_4)
1953			pwr &= ~MCI_ST_DATA74DIREN;
1954		else if (ios->bus_width == MMC_BUS_WIDTH_1)
1955			pwr &= (~MCI_ST_DATA74DIREN &
1956				~MCI_ST_DATA31DIREN &
1957				~MCI_ST_DATA2DIREN);
1958	}
1959
1960	if (variant->opendrain) {
1961		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
1962			pwr |= variant->opendrain;
1963	} else {
1964		/*
1965		 * If the variant cannot configure the pads on its own, we
1966		 * expect pinctrl to be able to do that for us.
1967		 */
1968		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
1969			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
1970		else
1971			pinctrl_select_default_state(mmc_dev(mmc));
1972	}
1973
1974	/*
1975	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
1976	 * gating the clock, the MCI_PWR_ON bit is cleared.
1977	 */
1978	if (!ios->clock && variant->pwrreg_clkgate)
1979		pwr &= ~MCI_PWR_ON;
1980
1981	if (host->variant->explicit_mclk_control &&
1982	    ios->clock != host->clock_cache) {
1983		ret = clk_set_rate(host->clk, ios->clock);
1984		if (ret < 0)
1985			dev_err(mmc_dev(host->mmc),
1986				"Error setting clock rate (%d)\n", ret);
1987		else
1988			host->mclk = clk_get_rate(host->clk);
1989	}
1990	host->clock_cache = ios->clock;
1991
1992	spin_lock_irqsave(&host->lock, flags);
1993
1994	if (host->ops && host->ops->set_clkreg)
1995		host->ops->set_clkreg(host, ios->clock);
1996	else
1997		mmci_set_clkreg(host, ios->clock);
1998
1999	mmci_set_max_busy_timeout(mmc);
2000
2001	if (host->ops && host->ops->set_pwrreg)
2002		host->ops->set_pwrreg(host, pwr);
2003	else
2004		mmci_write_pwrreg(host, pwr);
2005
2006	mmci_reg_delay(host);
2007
2008	spin_unlock_irqrestore(&host->lock, flags);
2009}
2010
2011static int mmci_get_cd(struct mmc_host *mmc)
2012{
2013	struct mmci_host *host = mmc_priv(mmc);
2014	struct mmci_platform_data *plat = host->plat;
2015	unsigned int status = mmc_gpio_get_cd(mmc);
2016
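	/* No CD GPIO available: fall back to the platform status callback, if any. */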
2017	if (status == -ENOSYS) {
2018		if (!plat->status)
2019			return 1; /* Assume always present */
2020
2021		status = plat->status(mmc_dev(host->mmc));
2022	}
2023	return status;
2024}
2025
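/*
 * Switch the I/O signalling voltage via the vqmmc regulator and let the
 * variant perform any post-switch handling it needs.
 */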
2026static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
2027{
2028	struct mmci_host *host = mmc_priv(mmc);
2029	int ret;
2030
2031	ret = mmc_regulator_set_vqmmc(mmc, ios);
2032
2033	if (!ret && host->ops && host->ops->post_sig_volt_switch)
2034		ret = host->ops->post_sig_volt_switch(host, ios);
2035	else if (ret)
2036		ret = 0;
2037
2038	if (ret < 0)
2039		dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
2040
2041	return ret;
2042}
2043
2044static struct mmc_host_ops mmci_ops = {
2045	.request	= mmci_request,
2046	.pre_req	= mmci_pre_request,
2047	.post_req	= mmci_post_request,
2048	.set_ios	= mmci_set_ios,
2049	.get_ro		= mmc_gpio_get_ro,
2050	.get_cd		= mmci_get_cd,
2051	.start_signal_voltage_switch = mmci_sig_volt_switch,
2052};
2053
2054static void mmci_probe_level_translator(struct mmc_host *mmc)
2055{
2056	struct device *dev = mmc_dev(mmc);
2057	struct mmci_host *host = mmc_priv(mmc);
2058	struct gpio_desc *cmd_gpio;
2059	struct gpio_desc *ck_gpio;
2060	struct gpio_desc *ckin_gpio;
2061	int clk_hi, clk_lo;
2062
2063	/*
2064	 * Assume the level translator is present if st,use-ckin is set.
2065	 * This is to cater for DTs which do not implement this test.
2066	 */
2067	host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
2068
2069	cmd_gpio = gpiod_get(dev, "st,cmd", GPIOD_OUT_HIGH);
2070	if (IS_ERR(cmd_gpio))
2071		goto exit_cmd;
2072
2073	ck_gpio = gpiod_get(dev, "st,ck", GPIOD_OUT_HIGH);
2074	if (IS_ERR(ck_gpio))
2075		goto exit_ck;
2076
2077	ckin_gpio = gpiod_get(dev, "st,ckin", GPIOD_IN);
2078	if (IS_ERR(ckin_gpio))
2079		goto exit_ckin;
2080
2081	/* All GPIOs are valid, test whether level translator works */
2082
2083	/* Sample CKIN */
2084	clk_hi = !!gpiod_get_value(ckin_gpio);
2085
2086	/* Set CK low */
2087	gpiod_set_value(ck_gpio, 0);
2088
2089	/* Sample CKIN */
2090	clk_lo = !!gpiod_get_value(ckin_gpio);
2091
2092	/* Tristate all */
2093	gpiod_direction_input(cmd_gpio);
2094	gpiod_direction_input(ck_gpio);
2095
2096	/* Level translator is present if CK signal is propagated to CKIN */
2097	if (!clk_hi || clk_lo) {
2098		host->clk_reg_add &= ~MCI_STM32_CLK_SELCKIN;
2099		dev_warn(dev,
2100			 "Level translator inoperable, CK signal not detected on CKIN, disabling.\n");
2101	}
2102
2103	gpiod_put(ckin_gpio);
2104
2105exit_ckin:
2106	gpiod_put(ck_gpio);
2107exit_ck:
2108	gpiod_put(cmd_gpio);
2109exit_cmd:
2110	pinctrl_select_default_state(dev);
2111}
2112
2113static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
2114{
2115	struct mmci_host *host = mmc_priv(mmc);
2116	int ret = mmc_of_parse(mmc);
2117
2118	if (ret)
2119		return ret;
2120
2121	if (of_property_read_bool(np, "st,sig-dir-dat0"))
2122		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
2123	if (of_property_read_bool(np, "st,sig-dir-dat2"))
2124		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
2125	if (of_property_read_bool(np, "st,sig-dir-dat31"))
2126		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
2127	if (of_property_read_bool(np, "st,sig-dir-dat74"))
2128		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
2129	if (of_property_read_bool(np, "st,sig-dir-cmd"))
2130		host->pwr_reg_add |= MCI_ST_CMDDIREN;
2131	if (of_property_read_bool(np, "st,sig-pin-fbclk"))
2132		host->pwr_reg_add |= MCI_ST_FBCLKEN;
2133	if (of_property_read_bool(np, "st,sig-dir"))
2134		host->pwr_reg_add |= MCI_STM32_DIRPOL;
2135	if (of_property_read_bool(np, "st,neg-edge"))
2136		host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
2137	if (of_property_read_bool(np, "st,use-ckin"))
2138		mmci_probe_level_translator(mmc);
2139
2140	if (of_property_read_bool(np, "mmc-cap-mmc-highspeed"))
2141		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
2142	if (of_property_read_bool(np, "mmc-cap-sd-highspeed"))
2143		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2144
2145	return 0;
2146}
2147
2148static int mmci_probe(struct amba_device *dev,
2149	const struct amba_id *id)
2150{
2151	struct mmci_platform_data *plat = dev->dev.platform_data;
2152	struct device_node *np = dev->dev.of_node;
2153	struct variant_data *variant = id->data;
2154	struct mmci_host *host;
2155	struct mmc_host *mmc;
2156	int ret;
2157
2158	/* Must have platform data or Device Tree. */
2159	if (!plat && !np) {
2160		dev_err(&dev->dev, "No plat data or DT found\n");
2161		return -EINVAL;
2162	}
2163
2164	if (!plat) {
2165		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
2166		if (!plat)
2167			return -ENOMEM;
2168	}
2169
2170	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
2171	if (!mmc)
2172		return -ENOMEM;
2173
2174	host = mmc_priv(mmc);
2175	host->mmc = mmc;
2176	host->mmc_ops = &mmci_ops;
2177	mmc->ops = &mmci_ops;
2178
2179	ret = mmci_of_parse(np, mmc);
2180	if (ret)
2181		goto host_free;
2182
2183	/*
2184	 * Some variants (STM32) don't have an opendrain bit; nevertheless,
2185	 * the pins can be set accordingly using pinctrl.
2186	 */
2187	if (!variant->opendrain) {
2188		host->pinctrl = devm_pinctrl_get(&dev->dev);
2189		if (IS_ERR(host->pinctrl)) {
2190			dev_err(&dev->dev, "failed to get pinctrl");
2191			ret = PTR_ERR(host->pinctrl);
2192			goto host_free;
2193		}
2194
2195		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
2196							    MMCI_PINCTRL_STATE_OPENDRAIN);
2197		if (IS_ERR(host->pins_opendrain)) {
2198			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
2199			ret = PTR_ERR(host->pins_opendrain);
2200			goto host_free;
2201		}
2202	}
2203
2204	host->hw_designer = amba_manf(dev);
2205	host->hw_revision = amba_rev(dev);
2206	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
2207	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
2208
2209	host->clk = devm_clk_get(&dev->dev, NULL);
2210	if (IS_ERR(host->clk)) {
2211		ret = PTR_ERR(host->clk);
2212		goto host_free;
2213	}
2214
2215	ret = clk_prepare_enable(host->clk);
2216	if (ret)
2217		goto host_free;
2218
2219	if (variant->qcom_fifo)
2220		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
2221	else
2222		host->get_rx_fifocnt = mmci_get_rx_fifocnt;
2223
2224	host->plat = plat;
2225	host->variant = variant;
2226	host->mclk = clk_get_rate(host->clk);
2227	/*
2228	 * According to the spec, mclk is max 100 MHz,
2229	 * so we try to adjust the clock down to this
2230	 * (if possible).
2231	 */
2232	if (host->mclk > variant->f_max) {
2233		ret = clk_set_rate(host->clk, variant->f_max);
2234		if (ret < 0)
2235			goto clk_disable;
2236		host->mclk = clk_get_rate(host->clk);
2237		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
2238			host->mclk);
2239	}
2240
2241	host->phybase = dev->res.start;
2242	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
2243	if (IS_ERR(host->base)) {
2244		ret = PTR_ERR(host->base);
2245		goto clk_disable;
2246	}
2247
2248	if (variant->init)
2249		variant->init(host);
2250
2251	/*
2252	 * The ARM and ST versions of the block have slightly different
2253	 * clock divider equations, which means that the minimum divider
2254	 * differs too.
2255	 * On Qualcomm-like controllers, get the nearest minimum clock to 100 kHz.
2256	 */
2257	if (variant->st_clkdiv)
2258		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
2259	else if (variant->stm32_clkdiv)
2260		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
2261	else if (variant->explicit_mclk_control)
2262		mmc->f_min = clk_round_rate(host->clk, 100000);
2263	else
2264		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
2265	/*
2266	 * If no maximum operating frequency is supplied, fall back to using
2267	 * the module parameter, which has a (low) default value in case it
2268	 * is not specified. In either case the value must not exceed the
2269	 * clock rate into the block, of course.
2270	 */
2271	if (mmc->f_max)
2272		mmc->f_max = variant->explicit_mclk_control ?
2273				min(variant->f_max, mmc->f_max) :
2274				min(host->mclk, mmc->f_max);
2275	else
2276		mmc->f_max = variant->explicit_mclk_control ?
2277				fmax : min(host->mclk, fmax);
2278
2279
2280	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
2281
2282	host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
2283	if (IS_ERR(host->rst)) {
2284		ret = PTR_ERR(host->rst);
2285		goto clk_disable;
2286	}
2287	ret = reset_control_deassert(host->rst);
2288	if (ret)
2289		dev_err(mmc_dev(mmc), "failed to de-assert reset\n");
2290
2291	/* Get regulators and the supported OCR mask */
2292	ret = mmc_regulator_get_supply(mmc);
2293	if (ret)
2294		goto clk_disable;
2295
2296	if (!mmc->ocr_avail)
2297		mmc->ocr_avail = plat->ocr_mask;
2298	else if (plat->ocr_mask)
2299		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
2300
2301	/* We support these capabilities. */
2302	mmc->caps |= MMC_CAP_CMD23;
2303
2304	/*
2305	 * Enable busy detection.
2306	 */
2307	if (variant->busy_detect) {
2308		mmci_ops.card_busy = mmci_card_busy;
2309		/*
2310		 * Not all variants have a flag to enable busy detection
2311		 * in the DPSM, but if they do, set it here.
2312		 */
2313		if (variant->busy_dpsm_flag)
2314			mmci_write_datactrlreg(host,
2315					       host->variant->busy_dpsm_flag);
2316		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
2317	}
2318
2319	/* Variants with a mandatory busy timeout in HW need R1B responses. */
2320	if (variant->busy_timeout)
2321		mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
2322
2323	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
2324	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
2325	host->stop_abort.arg = 0;
2326	host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
2327
2328	/* We support these PM capabilities. */
2329	mmc->pm_caps |= MMC_PM_KEEP_POWER;
2330
2331	/*
2332	 * We can do scatter/gather I/O (SGIO).
2333	 */
2334	mmc->max_segs = NR_SG;
2335
2336	/*
2337	 * Since only a certain number of bits are valid in the data length
2338	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
2339	 * single request.
2340	 */
2341	mmc->max_req_size = (1 << variant->datalength_bits) - 1;
2342
2343	/*
2344	 * Set the maximum segment size.  Since we aren't doing DMA
2345	 * (yet) we are only limited by the data length register.
2346	 */
2347	mmc->max_seg_size = mmc->max_req_size;
2348
2349	/*
2350	 * Block size can be up to 2048 bytes, but must be a power of two.
2351	 */
2352	mmc->max_blk_size = 1 << variant->datactrl_blocksz;
2353
2354	/*
2355	 * Limit the number of blocks transferred so that we don't overflow
2356	 * the maximum request size.
2357	 */
2358	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
2359
2360	spin_lock_init(&host->lock);
2361
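	/* Mask all interrupts and clear any pending status bits. */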
2362	writel(0, host->base + MMCIMASK0);
2363
2364	if (variant->mmcimask1)
2365		writel(0, host->base + MMCIMASK1);
2366
2367	writel(0xfff, host->base + MMCICLEAR);
2368
2369	/*
2370	 * If we are:
2371	 * - not using DT but using a descriptor table, or
2372	 * - using a table of descriptors ALONGSIDE DT,
2373	 * then look up the descriptors named "cd" and "wp" right here, and fail
2374	 * silently if these do not exist.
2375	 */
2376	if (!np) {
2377		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
2378		if (ret == -EPROBE_DEFER)
2379			goto clk_disable;
2380
2381		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
2382		if (ret == -EPROBE_DEFER)
2383			goto clk_disable;
2384	}
2385
2386	ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
2387					mmci_irq_thread, IRQF_SHARED,
2388					DRIVER_NAME " (cmd)", host);
2389	if (ret)
2390		goto clk_disable;
2391
2392	if (!dev->irq[1])
2393		host->singleirq = true;
2394	else {
2395		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
2396				IRQF_SHARED, DRIVER_NAME " (pio)", host);
2397		if (ret)
2398			goto clk_disable;
2399	}
2400
2401	if (host->variant->busy_detect)
2402		INIT_DELAYED_WORK(&host->ux500_busy_timeout_work,
2403				  ux500_busy_timeout_work);
2404
2405	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
2406
2407	amba_set_drvdata(dev, mmc);
2408
2409	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
2410		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
2411		 amba_rev(dev), (unsigned long long)dev->res.start,
2412		 dev->irq[0], dev->irq[1]);
2413
2414	mmci_dma_setup(host);
2415
2416	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
2417	pm_runtime_use_autosuspend(&dev->dev);
2418
2419	ret = mmc_add_host(mmc);
2420	if (ret)
2421		goto clk_disable;
2422
2423	pm_runtime_put(&dev->dev);
2424	return 0;
2425
2426 clk_disable:
2427	clk_disable_unprepare(host->clk);
2428 host_free:
2429	mmc_free_host(mmc);
2430	return ret;
2431}
2432
2433static void mmci_remove(struct amba_device *dev)
2434{
2435	struct mmc_host *mmc = amba_get_drvdata(dev);
2436
2437	if (mmc) {
2438		struct mmci_host *host = mmc_priv(mmc);
2439		struct variant_data *variant = host->variant;
2440
2441		/*
2442		 * Undo pm_runtime_put() in probe.  We use the _sync
2443		 * version here so that we can access the primecell.
2444		 */
2445		pm_runtime_get_sync(&dev->dev);
2446
2447		mmc_remove_host(mmc);
2448
2449		writel(0, host->base + MMCIMASK0);
2450
2451		if (variant->mmcimask1)
2452			writel(0, host->base + MMCIMASK1);
2453
2454		writel(0, host->base + MMCICOMMAND);
2455		writel(0, host->base + MMCIDATACTRL);
2456
2457		mmci_dma_release(host);
2458		clk_disable_unprepare(host->clk);
2459		mmc_free_host(mmc);
2460	}
2461}
2462
2463#ifdef CONFIG_PM
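/*
 * Mask all interrupts and, if the variant needs it, zero the data control,
 * power and clock registers before the clock is gated for runtime suspend.
 */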
2464static void mmci_save(struct mmci_host *host)
2465{
2466	unsigned long flags;
2467
2468	spin_lock_irqsave(&host->lock, flags);
2469
2470	writel(0, host->base + MMCIMASK0);
2471	if (host->variant->pwrreg_nopower) {
2472		writel(0, host->base + MMCIDATACTRL);
2473		writel(0, host->base + MMCIPOWER);
2474		writel(0, host->base + MMCICLOCK);
2475	}
2476	mmci_reg_delay(host);
2477
2478	spin_unlock_irqrestore(&host->lock, flags);
2479}
2480
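/*
 * Restore the clock, data control and power registers (if previously
 * cleared) and re-enable interrupts on runtime resume.
 */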
2481static void mmci_restore(struct mmci_host *host)
2482{
2483	unsigned long flags;
2484
2485	spin_lock_irqsave(&host->lock, flags);
2486
2487	if (host->variant->pwrreg_nopower) {
2488		writel(host->clk_reg, host->base + MMCICLOCK);
2489		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
2490		writel(host->pwr_reg, host->base + MMCIPOWER);
2491	}
2492	writel(MCI_IRQENABLE | host->variant->start_err,
2493	       host->base + MMCIMASK0);
2494	mmci_reg_delay(host);
2495
2496	spin_unlock_irqrestore(&host->lock, flags);
2497}
2498
2499static int mmci_runtime_suspend(struct device *dev)
2500{
2501	struct amba_device *adev = to_amba_device(dev);
2502	struct mmc_host *mmc = amba_get_drvdata(adev);
2503
2504	if (mmc) {
2505		struct mmci_host *host = mmc_priv(mmc);
2506		pinctrl_pm_select_sleep_state(dev);
2507		mmci_save(host);
2508		clk_disable_unprepare(host->clk);
2509	}
2510
2511	return 0;
2512}
2513
2514static int mmci_runtime_resume(struct device *dev)
2515{
2516	struct amba_device *adev = to_amba_device(dev);
2517	struct mmc_host *mmc = amba_get_drvdata(adev);
2518
2519	if (mmc) {
2520		struct mmci_host *host = mmc_priv(mmc);
2521		clk_prepare_enable(host->clk);
2522		mmci_restore(host);
2523		pinctrl_select_default_state(dev);
2524	}
2525
2526	return 0;
2527}
2528#endif
2529
2530static const struct dev_pm_ops mmci_dev_pm_ops = {
2531	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2532				pm_runtime_force_resume)
2533	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
2534};
2535
2536static const struct amba_id mmci_ids[] = {
2537	{
2538		.id	= 0x00041180,
2539		.mask	= 0xff0fffff,
2540		.data	= &variant_arm,
2541	},
2542	{
2543		.id	= 0x01041180,
2544		.mask	= 0xff0fffff,
2545		.data	= &variant_arm_extended_fifo,
2546	},
2547	{
2548		.id	= 0x02041180,
2549		.mask	= 0xff0fffff,
2550		.data	= &variant_arm_extended_fifo_hwfc,
2551	},
2552	{
2553		.id	= 0x00041181,
2554		.mask	= 0x000fffff,
2555		.data	= &variant_arm,
2556	},
2557	/* ST Micro variants */
2558	{
2559		.id     = 0x00180180,
2560		.mask   = 0x00ffffff,
2561		.data	= &variant_u300,
2562	},
2563	{
2564		.id     = 0x10180180,
2565		.mask   = 0xf0ffffff,
2566		.data	= &variant_nomadik,
2567	},
2568	{
2569		.id     = 0x00280180,
2570		.mask   = 0x00ffffff,
2571		.data	= &variant_nomadik,
2572	},
2573	{
2574		.id     = 0x00480180,
2575		.mask   = 0xf0ffffff,
2576		.data	= &variant_ux500,
2577	},
2578	{
2579		.id     = 0x10480180,
2580		.mask   = 0xf0ffffff,
2581		.data	= &variant_ux500v2,
2582	},
2583	{
2584		.id     = 0x00880180,
2585		.mask   = 0x00ffffff,
2586		.data	= &variant_stm32,
2587	},
2588	{
2589		.id     = 0x10153180,
2590		.mask	= 0xf0ffffff,
2591		.data	= &variant_stm32_sdmmc,
2592	},
2593	{
2594		.id     = 0x00253180,
2595		.mask	= 0xf0ffffff,
2596		.data	= &variant_stm32_sdmmcv2,
2597	},
2598	{
2599		.id     = 0x20253180,
2600		.mask	= 0xf0ffffff,
2601		.data	= &variant_stm32_sdmmcv2,
2602	},
2603	{
2604		.id     = 0x00353180,
2605		.mask	= 0xf0ffffff,
2606		.data	= &variant_stm32_sdmmcv3,
2607	},
2608	/* Qualcomm variants */
2609	{
2610		.id     = 0x00051180,
2611		.mask	= 0x000fffff,
2612		.data	= &variant_qcom,
2613	},
2614	{ 0, 0 },
2615};
2616
2617MODULE_DEVICE_TABLE(amba, mmci_ids);
2618
2619static struct amba_driver mmci_driver = {
2620	.drv		= {
2621		.name	= DRIVER_NAME,
2622		.pm	= &mmci_dev_pm_ops,
2623		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
2624	},
2625	.probe		= mmci_probe,
2626	.remove		= mmci_remove,
2627	.id_table	= mmci_ids,
2628};
2629
2630module_amba_driver(mmci_driver);
2631
2632module_param(fmax, uint, 0444);
2633
2634MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
2635MODULE_LICENSE("GPL");
2636