// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include <asm/div64.h>
#include <asm/io.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static void mmci_variant_init(struct mmci_host *host);
static void ux500_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);

static unsigned int fmax = 515633;

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.reversed_irq_handling	= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_any_blocksz	= true,
	.dma_power_of_2		= true,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= ux500_variant_init,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datactrl_mask_ddrmode	= MCI_DPSM_ST_DDRMODE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_any_blocksz	= true,
	.dma_power_of_2		= true,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= ux500v2_variant_init,
};

static struct variant_data variant_stm32 = {
	.fifosize		= 32 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 48000000,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.init			= mmci_variant_init,
};

static struct variant_data variant_stm32_sdmmc = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.f_max			= 208000000,
	.stm32_clkdiv		= true,
	.cmdreg_cpsm_enable	= MCI_CPSM_STM32_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_STM32_LRSP_CRC,
	.cmdreg_srsp_crc	= MCI_CPSM_STM32_SRSP_CRC,
	.cmdreg_srsp		= MCI_CPSM_STM32_SRSP,
	.cmdreg_stop		= MCI_CPSM_STM32_CMDSTOP,
	.data_cmd_enable	= MCI_CPSM_STM32_CMDTRANS,
	.irq_pio_mask		= MCI_IRQ_PIO_STM32_MASK,
	.datactrl_first		= true,
	.datacnt_useless	= true,
	.datalength_bits	= 25,
	.datactrl_blocksz	= 14,
	.datactrl_any_blocksz	= true,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.stm32_idmabsize_mask	= GENMASK(12, 5),
	.busy_timeout		= true,
	.busy_detect		= true,
	.busy_detect_flag	= MCI_STM32_BUSYD0,
	.busy_detect_mask	= MCI_STM32_BUSYD0ENDMASK,
	.init			= sdmmc_variant_init,
};

static struct variant_data variant_stm32_sdmmcv2 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.f_max			= 208000000,
	.stm32_clkdiv		= true,
	.cmdreg_cpsm_enable	= MCI_CPSM_STM32_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_STM32_LRSP_CRC,
	.cmdreg_srsp_crc	= MCI_CPSM_STM32_SRSP_CRC,
	.cmdreg_srsp		= MCI_CPSM_STM32_SRSP,
	.cmdreg_stop		= MCI_CPSM_STM32_CMDSTOP,
	.data_cmd_enable	= MCI_CPSM_STM32_CMDTRANS,
	.irq_pio_mask		= MCI_IRQ_PIO_STM32_MASK,
	.datactrl_first		= true,
	.datacnt_useless	= true,
	.datalength_bits	= 25,
	.datactrl_blocksz	= 14,
	.datactrl_any_blocksz	= true,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.stm32_idmabsize_mask	= GENMASK(16, 5),
	.dma_lli		= true,
	.busy_timeout		= true,
	.busy_detect		= true,
	.busy_detect_flag	= MCI_STM32_BUSYD0,
	.busy_detect_mask	= MCI_STM32_BUSYD0ENDMASK,
	.init			= sdmmc_variant_init,
};

static struct variant_data variant_qcom = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_QCOM_CLK_FLOWENA |
				  MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode	= MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.data_cmd_enable	= MCI_CPSM_QCOM_DATCMD,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_any_blocksz	= true,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 208000000,
	.explicit_mclk_control	= true,
	.qcom_fifo		= true,
	.qcom_dml		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= qcom_variant_init,
};

/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
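	/*
	 * Worked numbers for the bounds above (illustrative): three
	 * cycles at 100 kHz take 3 / 100e3 s = 30 us, and three cycles
	 * at 25 MHz take 3 / 25e6 s = 120 ns, which is where the two
	 * delay values below come from.
	 */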
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}
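
/*
 * Worked example for the divider equations above (illustrative, not
 * driver code): with mclk = 100 MHz and a desired rate of 400 kHz, the
 * ST variant computes clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 =
 * 248, giving cclk = 100000000 / (248 + 2) = 400 kHz, while the PL180
 * equation computes clkdiv = 100000000 / (2 * 400000) - 1 = 124,
 * giving cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
 */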

static void mmci_dma_release(struct mmci_host *host)
{
	if (host->ops && host->ops->dma_release)
		host->ops->dma_release(host);

	host->use_dma = false;
}

static void mmci_dma_setup(struct mmci_host *host)
{
	if (!host->ops || !host->ops->dma_setup)
		return;

	if (host->ops->dma_setup(host))
		return;

	/* initialize pre request cookie */
	host->next_cookie = 1;

	host->use_dma = true;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	struct variant_data *variant = host->variant;

	if (!data)
		return 0;
	if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	if (host->ops && host->ops->validate_data)
		return host->ops->validate_data(host, data);

	return 0;
}

static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
	int err;

	if (!host->ops || !host->ops->prep_data)
		return 0;

	err = host->ops->prep_data(host, data, next);

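	/*
	 * Hand out a fresh non-zero cookie below; on signed overflow the
	 * counter restarts at 1, so that 0 keeps meaning "not prepared".
	 */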
	if (next && !err)
		data->host_cookie = ++host->next_cookie < 0 ?
			1 : host->next_cookie;

	return err;
}

static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
		      int err)
{
	if (host->ops && host->ops->unprep_data)
		host->ops->unprep_data(host, data, err);

	data->host_cookie = 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);

	if (host->ops && host->ops->get_next_data)
		host->ops->get_next_data(host, data);
}

static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
	struct mmc_data *data = host->data;
	int ret;

	if (!host->use_dma)
		return -EINVAL;

	ret = mmci_prep_data(host, data, false);
	if (ret)
		return ret;

	if (!host->ops || !host->ops->dma_start)
		return -EINVAL;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);

	ret = host->ops->dma_start(host, &datactrl);
	if (ret)
		return ret;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_finalize)
		host->ops->dma_finalize(host, data);
}

static void mmci_dma_error(struct mmci_host *host)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_error)
		host->ops->dma_error(host);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~variant->irq_pio_mask;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}

static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
}

static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}

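/*
 * Summary of the DAT0 busy handshake below (descriptive note): the
 * function is invoked once per command IRQ and walks through three
 * phases: (1) command sent and busy asserted - unmask the busy end IRQ
 * and latch the command status in host->busy_status; (2) busy start IRQ
 * raised while the card is still busy - clear it and keep waiting;
 * (3) busy deasserted - clear and mask the busy IRQ and resume command
 * processing. Only the final phase returns true.
 */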
static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
{
	void __iomem *base = host->base;

	/*
	 * Before unmasking for the busy end IRQ, confirm that the
	 * command was sent successfully. To keep track of having a
	 * command in-progress, waiting for busy signaling to end,
	 * store the status in host->busy_status.
	 *
	 * Note that the card may need a couple of clock cycles before
	 * it starts signaling busy on DAT0; hence, re-read the
	 * MMCISTATUS register here to allow the busy bit to be set.
	 * Potentially we may even need to poll the register for a
	 * while to allow it to be set, but tests indicate that it
	 * isn't needed.
	 */
	if (!host->busy_status && !(status & err_msk) &&
	    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
		writel(readl(base + MMCIMASK0) |
		       host->variant->busy_detect_mask,
		       base + MMCIMASK0);

		host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
		return false;
	}

	/*
	 * If there is a command in-progress that has been successfully
	 * sent, then bail out if busy status is set and wait for the
	 * busy end IRQ.
	 *
	 * Note that the HW triggers an IRQ on both edges while
	 * monitoring DAT0 for busy completion, but there is only one
	 * status bit in MMCISTATUS for the busy state. Therefore
	 * both the start and the end interrupts need to be cleared,
	 * one after the other. So, clear the busy start IRQ here.
	 */
	if (host->busy_status &&
	    (status & host->variant->busy_detect_flag)) {
		writel(host->variant->busy_detect_mask, base + MMCICLEAR);
		return false;
	}

	/*
	 * If there is a command in-progress that has been successfully
	 * sent and the busy bit isn't set, it means we have received
	 * the busy end IRQ. Clear and mask the IRQ, then continue to
	 * process the command.
	 */
	if (host->busy_status) {
		writel(host->variant->busy_detect_mask, base + MMCICLEAR);

		writel(readl(base + MMCIMASK0) &
		       ~host->variant->busy_detect_mask, base + MMCIMASK0);
		host->busy_status = 0;
	}

	return true;
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
struct mmci_dmae_next {
	struct dma_async_tx_descriptor *desc;
	struct dma_chan	*chan;
};

struct mmci_dmae_priv {
	struct dma_chan	*cur;
	struct dma_chan	*rx_channel;
	struct dma_chan	*tx_channel;
	struct dma_async_tx_descriptor	*desc_current;
	struct mmci_dmae_next next_data;
};

int mmci_dmae_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	struct mmci_dmae_priv *dmae;

	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
	if (!dmae)
		return -ENOMEM;

	host->dma_priv = dmae;

	dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(dmae->rx_channel)) {
		int ret = PTR_ERR(dmae->rx_channel);
		dmae->rx_channel = NULL;
		return ret;
	}

	dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(dmae->tx_channel)) {
		if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
			dev_warn(mmc_dev(host->mmc),
				 "Deferred probe for TX channel ignored\n");
		dmae->tx_channel = NULL;
	}

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally; however, if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (dmae->rx_channel && !dmae->tx_channel)
		dmae->tx_channel = dmae->rx_channel;

	if (dmae->rx_channel)
		rxname = dma_chan_name(dmae->rx_channel);
	else
		rxname = "none";

	if (dmae->tx_channel)
		txname = dma_chan_name(dmae->tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (dmae->tx_channel) {
		struct device *dev = dmae->tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (dmae->rx_channel) {
		struct device *dev = dmae->rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (!dmae->tx_channel || !dmae->rx_channel) {
		mmci_dmae_release(host);
		return -EINVAL;
	}

	return 0;
}

/*
 * Release the DMA channels. This runs in the error path of
 * mmci_dmae_setup() and whenever DMA gets disabled for the host.
 */
void mmci_dmae_release(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (dmae->rx_channel)
		dma_release_channel(dmae->rx_channel);
	if (dmae->tx_channel)
		dma_release_channel(dmae->tx_channel);
	dmae->rx_channel = dmae->tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct dma_chan *chan;

	if (data->flags & MMC_DATA_READ)
		chan = dmae->rx_channel;
	else
		chan = dmae->tx_channel;

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}

void mmci_dmae_error(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!dma_inprogress(host))
		return;

	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(dmae->cur);
	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
	host->data->host_cookie = 0;

	mmci_dma_unmap(host, host->data);
}

void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	u32 status;
	int i;

	if (!dma_inprogress(host))
		return;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_error(host);
		if (!data->error)
			data->error = -EIO;
	} else if (!data->host_cookie) {
		mmci_dma_unmap(host, data);
	}

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = dmae->rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = dmae->tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	/*
	 * This is necessary to get SDIO working on the Ux500. We do not yet
	 * know if this is a bug in:
	 * - The Ux500 DMA controller (DMA40)
	 * - The MMCI DMA interface on the Ux500
	 * Some power-of-two block sizes (such as 64 bytes) are sent regularly
	 * during SDIO traffic and those work fine, so for those we enable DMA
	 * transfers.
	 */
	if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}

int mmci_dmae_prep_data(struct mmci_host *host,
			struct mmc_data *data,
			bool next)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *nd = &dmae->next_data;

	if (!host->use_dma)
		return -EINVAL;

	if (next)
		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
	/* Check if next job is already prepared. */
	if (dmae->cur && dmae->desc_current)
		return 0;

	/* No job was prepared, thus do it now. */
	return _mmci_dmae_prep_data(host, data, &dmae->cur,
				    &dmae->desc_current);
}

int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	int ret;

	host->dma_in_progress = true;
	ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
	if (ret < 0) {
		host->dma_in_progress = false;
		return ret;
	}
	dma_async_issue_pending(dmae->cur);

	*datactrl |= MCI_DPSM_DMAENABLE;

	return 0;
}

void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *next = &dmae->next_data;

	if (!host->use_dma)
		return;

	WARN_ON(!data->host_cookie && (next->desc || next->chan));

	dmae->desc_current = next->desc;
	dmae->cur = next->chan;
	next->desc = NULL;
	next->chan = NULL;
}

void mmci_dmae_unprep_data(struct mmci_host *host,
			   struct mmc_data *data, int err)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!host->use_dma)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_dmae_next *next = &dmae->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = dmae->rx_channel;
		else
			chan = dmae->tx_channel;
		dmaengine_terminate_all(chan);

		if (dmae->desc_current == next->desc)
			dmae->desc_current = NULL;

		if (dmae->cur == next->chan) {
			host->dma_in_progress = false;
			dmae->cur = NULL;
		}

		next->desc = NULL;
		next->chan = NULL;
	}
}

static struct mmci_host_ops mmci_variant_ops = {
	.prep_data = mmci_dmae_prep_data,
	.unprep_data = mmci_dmae_unprep_data,
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
	.get_next_data = mmci_dmae_get_next_data,
	.dma_setup = mmci_dmae_setup,
	.dma_release = mmci_dmae_release,
	.dma_start = mmci_dmae_start,
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};
#else
static struct mmci_host_ops mmci_variant_ops = {
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
};
#endif

static void mmci_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
}

static void ux500_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
	host->ops->busy_complete = ux500_busy_complete;
}

static void ux500v2_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
	host->ops->busy_complete = ux500_busy_complete;
	host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	WARN_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	mmci_prep_data(host, data, true);
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_unprep_data(host, data, err);
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

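	/*
	 * Convert the card's timeout into card-clock (cclk) ticks for the
	 * MMCIDATATIMER register. Illustrative numbers: timeout_ns =
	 * 100000000 (100 ms) at cclk = 26 MHz works out to 2600000 ticks,
	 * on top of whatever timeout_clks the core layer requested.
	 */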
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	datactrl = host->ops->get_datactrl_cfg(host);
	datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * For small SDIO write transfers, the ST Micro variant
		 * needs clock H/W flow control disabled, otherwise the
		 * transfer will not start. The threshold depends on the
		 * rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;
	unsigned long long clks;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	if (host->variant->cmdreg_stop &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		c |= host->variant->cmdreg_stop;

	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= host->variant->cmdreg_lrsp_crc;
		else if (cmd->flags & MMC_RSP_CRC)
			c |= host->variant->cmdreg_srsp_crc;
		else
			c |= host->variant->cmdreg_srsp;
	}

	if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
		if (!cmd->busy_timeout)
			cmd->busy_timeout = 10 * MSEC_PER_SEC;

		if (cmd->busy_timeout > host->mmc->max_busy_timeout)
			clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
		else
			clks = (unsigned long long)cmd->busy_timeout * host->cclk;

		do_div(clks, MSEC_PER_SEC);
		writel_relaxed(clks, host->base + MMCIDATATIMER);
	}

	if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
		host->ops->pre_sig_volt_switch(host);

	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void mmci_stop_command(struct mmci_host *host)
{
	host->stop_abort.error = 0;
	mmci_start_command(host, &host->stop_abort, 0);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	unsigned int status_err;

	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	status_err = status & (host->variant->start_err |
			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
			       MCI_TXUNDERRUN | MCI_RXOVERRUN);

	if (status_err) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		mmci_dma_error(host);

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		if (!host->variant->datacnt_useless) {
			remain = readl(host->base + MMCIDATACNT);
			success = data->blksz * data->blocks - remain;
		} else {
			success = 0;
		}

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status_err, success);
		if (status_err & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status_err & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status_err & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status_err & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status_err & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		mmci_dma_finalize(host, data);

		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			if (host->variant->cmdreg_stop && data->error)
				mmci_stop_command(host);
			else
				mmci_request_end(host, data->mrq);
		} else if (host->mrq->sbc && !data->error) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
	void __iomem *base = host->base;
	bool sbc, busy_resp;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);
	busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

	/*
	 * The status must contain at least one of these interrupt bits
	 * for the command to be worth handling. Note that we tag on any
	 * latent IRQs postponed due to waiting for busy status.
	 */
	if (host->variant->busy_timeout && busy_resp)
		err_msk |= MCI_DATATIMEOUT;

	if (!((status | host->busy_status) &
	      (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
		return;

	/* Handle busy detection on DAT0 if the variant supports it. */
	if (busy_resp && host->variant->busy_detect)
		if (!host->ops->busy_complete(host, status, err_msk))
			return;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else if (host->variant->busy_timeout && busy_resp &&
		   status & MCI_DATATIMEOUT) {
		cmd->error = -ETIMEDOUT;
		host->irq_action = IRQ_WAKE_THREAD;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			mmci_dma_error(host);

			mmci_stop_data(host);
			if (host->variant->cmdreg_stop && cmd->error) {
				mmci_stop_command(host);
				return;
			}
		}

		if (host->irq_action != IRQ_WAKE_THREAD)
			mmci_request_end(host, host->mrq);

	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!host->variant->datactrl_first &&
		   !(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

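/*
 * Descriptive note: MMCIFIFOCNT holds the number of words still to be
 * transferred, so shifting it left by 2 converts words to bytes; the
 * difference computed below is how many bytes are already available in
 * the FIFO for reading.
 */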
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}

static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * On Qcom SDCC4, only 8 words are used in each burst, so only 8
	 * addresses from the FIFO range should be used.
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
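		/*
		 * Illustrative walk-through: with count = 7, the else
		 * branch below reads 7 >> 2 = 1 full word (4 bytes) and
		 * rounds count down to 4; the remaining 3 bytes are
		 * picked up on a later pass through the count < 4
		 * bounce-buffer branch.
		 */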
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 to the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;

	spin_lock(&host->lock);
	host->irq_action = IRQ_HANDLED;

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~host->variant->irq_pio_mask;
		}

		/*
		 * Busy detection is managed by mmci_cmd_irq(), including to
		 * clear the corresponding IRQ.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Busy detection has been handled by mmci_cmd_irq() above.
		 * Clear the status bit to prevent polling in IRQ context.
		 */
		if (host->variant->busy_detect_flag)
			status &= ~host->variant->busy_detect_flag;

	} while (status);

	spin_unlock(&host->lock);

	return host->irq_action;
}

/*
 * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
 *
 * A reset is needed for some variants, where a datatimeout for a R1B request
 * causes the DPSM to stay busy (non-functional).
 */
static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	unsigned long flags;

	if (host->rst) {
		reset_control_assert(host->rst);
		udelay(2);
		reset_control_deassert(host->rst);
	}

	spin_lock_irqsave(&host->lock, flags);
	writel(host->clk_reg, host->base + MMCICLOCK);
	writel(host->pwr_reg, host->base + MMCIPOWER);
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);

	host->irq_action = IRQ_HANDLED;
	mmci_request_end(host, host->mrq);
	spin_unlock_irqrestore(&host->lock, flags);

	return host->irq_action;
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data &&
	    (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

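/*
 * The cap computed below keeps the millisecond-to-clock-cycles
 * conversion in mmci_start_command() from overflowing the 32-bit
 * MMCIDATATIMER register. Illustrative numbers: at actual_clock =
 * 52 MHz, the limit works out to U32_MAX / 52000 = ~82595 ms, i.e.
 * roughly 82 seconds.
 */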
1722static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
1723{
1724	struct mmci_host *host = mmc_priv(mmc);
1725	u32 max_busy_timeout = 0;
1726
1727	if (!host->variant->busy_detect)
1728		return;
1729
1730	if (host->variant->busy_timeout && mmc->actual_clock)
1731		max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
1732							  MSEC_PER_SEC);
1733
1734	mmc->max_busy_timeout = max_busy_timeout;
1735}
1736
1737static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1738{
1739	struct mmci_host *host = mmc_priv(mmc);
1740	struct variant_data *variant = host->variant;
1741	u32 pwr = 0;
1742	unsigned long flags;
1743	int ret;
1744
1745	if (host->plat->ios_handler &&
1746		host->plat->ios_handler(mmc_dev(mmc), ios))
1747			dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
1748
1749	switch (ios->power_mode) {
1750	case MMC_POWER_OFF:
1751		if (!IS_ERR(mmc->supply.vmmc))
1752			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1753
1754		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
1755			regulator_disable(mmc->supply.vqmmc);
1756			host->vqmmc_enabled = false;
1757		}
1758
1759		break;
1760	case MMC_POWER_UP:
1761		if (!IS_ERR(mmc->supply.vmmc))
1762			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
1763
1764		/*
1765		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
1766		 * and instead uses MCI_PWR_ON so apply whatever value is
1767		 * configured in the variant data.
1768		 */
1769		pwr |= variant->pwrreg_powerup;
1770
1771		break;
1772	case MMC_POWER_ON:
1773		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
1774			ret = regulator_enable(mmc->supply.vqmmc);
1775			if (ret < 0)
1776				dev_err(mmc_dev(mmc),
1777					"failed to enable vqmmc regulator\n");
1778			else
1779				host->vqmmc_enabled = true;
1780		}
1781
1782		pwr |= MCI_PWR_ON;
1783		break;
1784	}
1785
1786	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
1787		/*
1788		 * The ST Micro variant has some additional bits
1789		 * indicating signal direction for the signals in
1790		 * the SD/MMC bus and feedback-clock usage.
1791		 */
1792		pwr |= host->pwr_reg_add;
1793
1794		if (ios->bus_width == MMC_BUS_WIDTH_4)
1795			pwr &= ~MCI_ST_DATA74DIREN;
1796		else if (ios->bus_width == MMC_BUS_WIDTH_1)
1797			pwr &= (~MCI_ST_DATA74DIREN &
1798				~MCI_ST_DATA31DIREN &
1799				~MCI_ST_DATA2DIREN);
1800	}
1801
1802	if (variant->opendrain) {
1803		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
1804			pwr |= variant->opendrain;
1805	} else {
1806		/*
1807		 * If the variant cannot configure the pads by its own, then we
1808		 * expect the pinctrl to be able to do that for us
1809		 */
1810		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
1811			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
1812		else
1813			pinctrl_select_default_state(mmc_dev(mmc));
1814	}
1815
1816	/*
1817	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
1818	 * gating the clock, the MCI_PWR_ON bit is cleared.
1819	 */
1820	if (!ios->clock && variant->pwrreg_clkgate)
1821		pwr &= ~MCI_PWR_ON;
1822
1823	if (host->variant->explicit_mclk_control &&
1824	    ios->clock != host->clock_cache) {
1825		ret = clk_set_rate(host->clk, ios->clock);
1826		if (ret < 0)
1827			dev_err(mmc_dev(host->mmc),
1828				"Error setting clock rate (%d)\n", ret);
1829		else
1830			host->mclk = clk_get_rate(host->clk);
1831	}
1832	host->clock_cache = ios->clock;
1833
1834	spin_lock_irqsave(&host->lock, flags);
1835
1836	if (host->ops && host->ops->set_clkreg)
1837		host->ops->set_clkreg(host, ios->clock);
1838	else
1839		mmci_set_clkreg(host, ios->clock);
1840
1841	mmci_set_max_busy_timeout(mmc);
1842
1843	if (host->ops && host->ops->set_pwrreg)
1844		host->ops->set_pwrreg(host, pwr);
1845	else
1846		mmci_write_pwrreg(host, pwr);
1847
1848	mmci_reg_delay(host);
1849
1850	spin_unlock_irqrestore(&host->lock, flags);
1851}
1852
1853static int mmci_get_cd(struct mmc_host *mmc)
1854{
1855	struct mmci_host *host = mmc_priv(mmc);
1856	struct mmci_platform_data *plat = host->plat;
1857	unsigned int status = mmc_gpio_get_cd(mmc);
1858
1859	if (status == -ENOSYS) {
1860		if (!plat->status)
1861			return 1; /* Assume always present */
1862
1863		status = plat->status(mmc_dev(host->mmc));
1864	}
1865	return status;
1866}
1867
1868static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
1869{
1870	struct mmci_host *host = mmc_priv(mmc);
1871	int ret;
1872
1873	ret = mmc_regulator_set_vqmmc(mmc, ios);
1874
1875	if (!ret && host->ops && host->ops->post_sig_volt_switch)
1876		ret = host->ops->post_sig_volt_switch(host, ios);
1877	else if (ret)
1878		ret = 0;
1879
1880	if (ret < 0)
1881		dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
1882
1883	return ret;
1884}
1885
1886static struct mmc_host_ops mmci_ops = {
1887	.request	= mmci_request,
1888	.pre_req	= mmci_pre_request,
1889	.post_req	= mmci_post_request,
1890	.set_ios	= mmci_set_ios,
1891	.get_ro		= mmc_gpio_get_ro,
1892	.get_cd		= mmci_get_cd,
1893	.start_signal_voltage_switch = mmci_sig_volt_switch,
1894};
1895
1896static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
1897{
1898	struct mmci_host *host = mmc_priv(mmc);
1899	int ret = mmc_of_parse(mmc);
1900
1901	if (ret)
1902		return ret;
1903
1904	if (of_get_property(np, "st,sig-dir-dat0", NULL))
1905		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
1906	if (of_get_property(np, "st,sig-dir-dat2", NULL))
1907		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
1908	if (of_get_property(np, "st,sig-dir-dat31", NULL))
1909		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
1910	if (of_get_property(np, "st,sig-dir-dat74", NULL))
1911		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
1912	if (of_get_property(np, "st,sig-dir-cmd", NULL))
1913		host->pwr_reg_add |= MCI_ST_CMDDIREN;
1914	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
1915		host->pwr_reg_add |= MCI_ST_FBCLKEN;
1916	if (of_get_property(np, "st,sig-dir", NULL))
1917		host->pwr_reg_add |= MCI_STM32_DIRPOL;
1918	if (of_get_property(np, "st,neg-edge", NULL))
1919		host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
1920	if (of_get_property(np, "st,use-ckin", NULL))
1921		host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
1922
1923	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
1924		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
1925	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
1926		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1927
1928	return 0;
1929}
1930
1931static int mmci_probe(struct amba_device *dev,
1932	const struct amba_id *id)
1933{
1934	struct mmci_platform_data *plat = dev->dev.platform_data;
1935	struct device_node *np = dev->dev.of_node;
1936	struct variant_data *variant = id->data;
1937	struct mmci_host *host;
1938	struct mmc_host *mmc;
1939	int ret;
1940
1941	/* Must have platform data or Device Tree. */
1942	if (!plat && !np) {
1943		dev_err(&dev->dev, "No plat data or DT found\n");
1944		return -EINVAL;
1945	}
1946
1947	if (!plat) {
1948		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
1949		if (!plat)
1950			return -ENOMEM;
1951	}
1952
1953	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
1954	if (!mmc)
1955		return -ENOMEM;
1956
1957	ret = mmci_of_parse(np, mmc);
1958	if (ret)
1959		goto host_free;
1960
1961	host = mmc_priv(mmc);
1962	host->mmc = mmc;
1963	host->mmc_ops = &mmci_ops;
1964	mmc->ops = &mmci_ops;
1965
1966	/*
1967	 * Some variants (e.g. STM32) don't have an opendrain bit; the pins
1968	 * can nevertheless be configured accordingly using pinctrl.
1969	 */
1970	if (!variant->opendrain) {
1971		host->pinctrl = devm_pinctrl_get(&dev->dev);
1972		if (IS_ERR(host->pinctrl)) {
1973			dev_err(&dev->dev, "failed to get pinctrl");
1974			ret = PTR_ERR(host->pinctrl);
1975			goto host_free;
1976		}
1977
1978		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
1979							    MMCI_PINCTRL_STATE_OPENDRAIN);
1980		if (IS_ERR(host->pins_opendrain)) {
1981			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
1982			ret = PTR_ERR(host->pins_opendrain);
1983			goto host_free;
1984		}
1985	}
1986
1987	host->hw_designer = amba_manf(dev);
1988	host->hw_revision = amba_rev(dev);
1989	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
1990	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
1991
1992	host->clk = devm_clk_get(&dev->dev, NULL);
1993	if (IS_ERR(host->clk)) {
1994		ret = PTR_ERR(host->clk);
1995		goto host_free;
1996	}
1997
1998	ret = clk_prepare_enable(host->clk);
1999	if (ret)
2000		goto host_free;
2001
2002	if (variant->qcom_fifo)
2003		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
2004	else
2005		host->get_rx_fifocnt = mmci_get_rx_fifocnt;
2006
2007	host->plat = plat;
2008	host->variant = variant;
2009	host->mclk = clk_get_rate(host->clk);
2010	/*
2011	 * According to the spec, mclk is at most 100 MHz,
2012	 * so we try to adjust the clock down to this rate,
2013	 * if possible.
2014	 */
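	/*
	 * For example (hypothetical numbers): with a 200 MHz parent clock
	 * and f_max = 100 MHz, clk_set_rate() below would bring mclk down
	 * to 100 MHz before it feeds the divider maths further down.
	 */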
2015	if (host->mclk > variant->f_max) {
2016		ret = clk_set_rate(host->clk, variant->f_max);
2017		if (ret < 0)
2018			goto clk_disable;
2019		host->mclk = clk_get_rate(host->clk);
2020		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
2021			host->mclk);
2022	}
2023
2024	host->phybase = dev->res.start;
2025	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
2026	if (IS_ERR(host->base)) {
2027		ret = PTR_ERR(host->base);
2028		goto clk_disable;
2029	}
2030
2031	if (variant->init)
2032		variant->init(host);
2033
2034	/*
2035	 * The ARM and ST versions of the block have slightly different
2036	 * clock divider equations, which means that the minimum divider
2037	 * differs too. On Qualcomm-like controllers, pick the nearest
2038	 * achievable rate to 100 kHz as the minimum.
2039	 */
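	/*
	 * For reference, a sketch of the divider maths behind the
	 * constants below (assuming 8-bit ARM/ST clkdiv fields and a
	 * 10-bit STM32 SDMMC one):
	 *   ARM:   f = mclk / (2 * (clkdiv + 1))  ->  f_min = mclk / 512
	 *   ST:    f = mclk / (clkdiv + 2)        ->  f_min = mclk / 257
	 *   STM32: f = mclk / (2 * clkdiv)        ->  f_min = mclk / 2046
	 */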
2040	if (variant->st_clkdiv)
2041		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
2042	else if (variant->stm32_clkdiv)
2043		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
2044	else if (variant->explicit_mclk_control)
2045		mmc->f_min = clk_round_rate(host->clk, 100000);
2046	else
2047		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
2048	/*
2049	 * If no maximum operating frequency is supplied, fall back to the
2050	 * module parameter, which has a (low) default value in case it is
2051	 * not specified. In either case the value must not exceed the
2052	 * clock rate into the block, of course.
2053	 */
2054	if (mmc->f_max)
2055		mmc->f_max = variant->explicit_mclk_control ?
2056				min(variant->f_max, mmc->f_max) :
2057				min(host->mclk, mmc->f_max);
2058	else
2059		mmc->f_max = variant->explicit_mclk_control ?
2060				fmax : min(host->mclk, fmax);
2061
2063	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
2064
2065	host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
2066	if (IS_ERR(host->rst)) {
2067		ret = PTR_ERR(host->rst);
2068		goto clk_disable;
2069	}
2070
2071	/* Get regulators and the supported OCR mask */
2072	ret = mmc_regulator_get_supply(mmc);
2073	if (ret)
2074		goto clk_disable;
2075
2076	if (!mmc->ocr_avail)
2077		mmc->ocr_avail = plat->ocr_mask;
2078	else if (plat->ocr_mask)
2079		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
2080
2081	/* We support these capabilities. */
2082	mmc->caps |= MMC_CAP_CMD23;
2083
2084	/*
2085	 * Enable busy detection.
2086	 */
2087	if (variant->busy_detect) {
2088		mmci_ops.card_busy = mmci_card_busy;
2089		/*
2090		 * Not all variants have a flag to enable busy detection
2091		 * in the DPSM, but if they do, set it here.
2092		 */
2093		if (variant->busy_dpsm_flag)
2094			mmci_write_datactrlreg(host,
2095					       host->variant->busy_dpsm_flag);
2096		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
2097	}
2098
2099	/* Variants with a mandatory busy timeout in HW need R1B responses. */
2100	if (variant->busy_timeout)
2101		mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
2102
2103	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
2104	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
2105	host->stop_abort.arg = 0;
2106	host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
2107
2108	/* We support these PM capabilities. */
2109	mmc->pm_caps |= MMC_PM_KEEP_POWER;
2110
2111	/*
2112	 * We can do scatter-gather I/O.
2113	 */
2114	mmc->max_segs = NR_SG;
2115
2116	/*
2117	 * Since only a certain number of bits are valid in the data length
2118	 * register, we must ensure that we don't exceed
2119	 * 2^datalength_bits - 1 bytes in a single request.
2120	 */
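	/* e.g. datalength_bits = 16 gives a 65535-byte maximum request. */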
2121	mmc->max_req_size = (1 << variant->datalength_bits) - 1;
2122
2123	/*
2124	 * Set the maximum segment size.  Since we aren't doing DMA
2125	 * (yet) we are only limited by the data length register.
2126	 */
2127	mmc->max_seg_size = mmc->max_req_size;
2128
2129	/*
2130	 * Block size can be up to 2048 bytes, but must be a power of two.
2131	 */
2132	mmc->max_blk_size = 1 << variant->datactrl_blocksz;
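	/* e.g. datactrl_blocksz = 11 gives 1 << 11 = 2048-byte blocks. */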
2133
2134	/*
2135	 * Limit the number of blocks transferred so that we don't overflow
2136	 * the maximum request size.
2137	 */
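	/* e.g. 65535-byte requests with 2048-byte blocks: 65535 >> 11 = 31. */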
2138	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
2139
2140	spin_lock_init(&host->lock);
2141
2142	writel(0, host->base + MMCIMASK0);
2143
2144	if (variant->mmcimask1)
2145		writel(0, host->base + MMCIMASK1);
2146
2147	writel(0xfff, host->base + MMCICLEAR);
2148
2149	/*
2150	 * When not instantiated from DT, a board file may still have
2151	 * registered a GPIO descriptor table for this device. Look up
2152	 * the descriptors named "cd" and "wp" right here, failing
2153	 * silently if they do not exist; only probe deferral is
2154	 * propagated as an error.
2155	 */
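	/*
	 * A board file would provide something like the following
	 * (hypothetical chip label and device name):
	 *
	 *   static struct gpiod_lookup_table mmci_gpios_table = {
	 *           .dev_id = "fpga:05",
	 *           .table = {
	 *                   GPIO_LOOKUP("gpio0", 3, "cd", GPIO_ACTIVE_LOW),
	 *                   GPIO_LOOKUP("gpio0", 4, "wp", GPIO_ACTIVE_HIGH),
	 *                   { },
	 *           },
	 *   };
	 */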
2156	if (!np) {
2157		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
2158		if (ret == -EPROBE_DEFER)
2159			goto clk_disable;
2160
2161		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
2162		if (ret == -EPROBE_DEFER)
2163			goto clk_disable;
2164	}
2165
2166	ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
2167					mmci_irq_thread, IRQF_SHARED,
2168					DRIVER_NAME " (cmd)", host);
2169	if (ret)
2170		goto clk_disable;
2171
2172	if (!dev->irq[1])
2173		host->singleirq = true;
2174	else {
2175		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
2176				IRQF_SHARED, DRIVER_NAME " (pio)", host);
2177		if (ret)
2178			goto clk_disable;
2179	}
2180
2181	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
2182
2183	amba_set_drvdata(dev, mmc);
2184
2185	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
2186		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
2187		 amba_rev(dev), (unsigned long long)dev->res.start,
2188		 dev->irq[0], dev->irq[1]);
2189
2190	mmci_dma_setup(host);
2191
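	/*
	 * Runtime PM: keep the interface powered for 50 ms after the last
	 * access before letting it autosuspend.
	 */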
2192	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
2193	pm_runtime_use_autosuspend(&dev->dev);
2194
2195	ret = mmc_add_host(mmc);
2196	if (ret)
2197		goto clk_disable;
2198
2199	pm_runtime_put(&dev->dev);
2200	return 0;
2201
2202 clk_disable:
2203	clk_disable_unprepare(host->clk);
2204 host_free:
2205	mmc_free_host(mmc);
2206	return ret;
2207}
2208
2209static void mmci_remove(struct amba_device *dev)
2210{
2211	struct mmc_host *mmc = amba_get_drvdata(dev);
2212
2213	if (mmc) {
2214		struct mmci_host *host = mmc_priv(mmc);
2215		struct variant_data *variant = host->variant;
2216
2217		/*
2218		 * Undo pm_runtime_put() in probe.  We use the _sync
2219		 * version here so that we can access the primecell.
2220		 */
2221		pm_runtime_get_sync(&dev->dev);
2222
2223		mmc_remove_host(mmc);
2224
2225		writel(0, host->base + MMCIMASK0);
2226
2227		if (variant->mmcimask1)
2228			writel(0, host->base + MMCIMASK1);
2229
2230		writel(0, host->base + MMCICOMMAND);
2231		writel(0, host->base + MMCIDATACTRL);
2232
2233		mmci_dma_release(host);
2234		clk_disable_unprepare(host->clk);
2235		mmc_free_host(mmc);
2236	}
2237}
2238
2239#ifdef CONFIG_PM
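/*
 * On variants with pwrreg_nopower, the MMCIPOWER bits do not control an
 * external supply, so the power, clock and datactrl registers can safely
 * be zeroed while runtime suspended and rewritten from the cached values
 * on resume.
 */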
2240static void mmci_save(struct mmci_host *host)
2241{
2242	unsigned long flags;
2243
2244	spin_lock_irqsave(&host->lock, flags);
2245
2246	writel(0, host->base + MMCIMASK0);
2247	if (host->variant->pwrreg_nopower) {
2248		writel(0, host->base + MMCIDATACTRL);
2249		writel(0, host->base + MMCIPOWER);
2250		writel(0, host->base + MMCICLOCK);
2251	}
2252	mmci_reg_delay(host);
2253
2254	spin_unlock_irqrestore(&host->lock, flags);
2255}
2256
2257static void mmci_restore(struct mmci_host *host)
2258{
2259	unsigned long flags;
2260
2261	spin_lock_irqsave(&host->lock, flags);
2262
2263	if (host->variant->pwrreg_nopower) {
2264		writel(host->clk_reg, host->base + MMCICLOCK);
2265		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
2266		writel(host->pwr_reg, host->base + MMCIPOWER);
2267	}
2268	writel(MCI_IRQENABLE | host->variant->start_err,
2269	       host->base + MMCIMASK0);
2270	mmci_reg_delay(host);
2271
2272	spin_unlock_irqrestore(&host->lock, flags);
2273}
2274
2275static int mmci_runtime_suspend(struct device *dev)
2276{
2277	struct amba_device *adev = to_amba_device(dev);
2278	struct mmc_host *mmc = amba_get_drvdata(adev);
2279
2280	if (mmc) {
2281		struct mmci_host *host = mmc_priv(mmc);
2282		pinctrl_pm_select_sleep_state(dev);
2283		mmci_save(host);
2284		clk_disable_unprepare(host->clk);
2285	}
2286
2287	return 0;
2288}
2289
2290static int mmci_runtime_resume(struct device *dev)
2291{
2292	struct amba_device *adev = to_amba_device(dev);
2293	struct mmc_host *mmc = amba_get_drvdata(adev);
2294
2295	if (mmc) {
2296		struct mmci_host *host = mmc_priv(mmc);
2297		clk_prepare_enable(host->clk);
2298		mmci_restore(host);
2299		pinctrl_select_default_state(dev);
2300	}
2301
2302	return 0;
2303}
2304#endif
2305
2306static const struct dev_pm_ops mmci_dev_pm_ops = {
2307	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2308				pm_runtime_force_resume)
2309	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
2310};
2311
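/*
 * AMBA matching: a device binds to the first entry for which
 * (periphid & mask) == id. A mask of 0xff0fffff, for instance, masks out
 * the revision nibble so that every revision of the part maps to the
 * same variant data.
 */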
2312static const struct amba_id mmci_ids[] = {
2313	{
2314		.id	= 0x00041180,
2315		.mask	= 0xff0fffff,
2316		.data	= &variant_arm,
2317	},
2318	{
2319		.id	= 0x01041180,
2320		.mask	= 0xff0fffff,
2321		.data	= &variant_arm_extended_fifo,
2322	},
2323	{
2324		.id	= 0x02041180,
2325		.mask	= 0xff0fffff,
2326		.data	= &variant_arm_extended_fifo_hwfc,
2327	},
2328	{
2329		.id	= 0x00041181,
2330		.mask	= 0x000fffff,
2331		.data	= &variant_arm,
2332	},
2333	/* ST Micro variants */
2334	{
2335		.id     = 0x00180180,
2336		.mask   = 0x00ffffff,
2337		.data	= &variant_u300,
2338	},
2339	{
2340		.id     = 0x10180180,
2341		.mask   = 0xf0ffffff,
2342		.data	= &variant_nomadik,
2343	},
2344	{
2345		.id     = 0x00280180,
2346		.mask   = 0x00ffffff,
2347		.data	= &variant_nomadik,
2348	},
2349	{
2350		.id     = 0x00480180,
2351		.mask   = 0xf0ffffff,
2352		.data	= &variant_ux500,
2353	},
2354	{
2355		.id     = 0x10480180,
2356		.mask   = 0xf0ffffff,
2357		.data	= &variant_ux500v2,
2358	},
2359	{
2360		.id     = 0x00880180,
2361		.mask   = 0x00ffffff,
2362		.data	= &variant_stm32,
2363	},
2364	{
2365		.id     = 0x10153180,
2366		.mask	= 0xf0ffffff,
2367		.data	= &variant_stm32_sdmmc,
2368	},
2369	{
2370		.id     = 0x00253180,
2371		.mask	= 0xf0ffffff,
2372		.data	= &variant_stm32_sdmmcv2,
2373	},
2374	/* Qualcomm variants */
2375	{
2376		.id     = 0x00051180,
2377		.mask	= 0x000fffff,
2378		.data	= &variant_qcom,
2379	},
2380	{ 0, 0 },
2381};
2382
2383MODULE_DEVICE_TABLE(amba, mmci_ids);
2384
2385static struct amba_driver mmci_driver = {
2386	.drv		= {
2387		.name	= DRIVER_NAME,
2388		.pm	= &mmci_dev_pm_ops,
2389		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
2390	},
2391	.probe		= mmci_probe,
2392	.remove		= mmci_remove,
2393	.id_table	= mmci_ids,
2394};
2395
2396module_amba_driver(mmci_driver);
2397
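/*
 * fmax is the fallback maximum bus frequency, used when no maximum
 * operating frequency is supplied; 0444 exposes it read-only in sysfs.
 */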
2398module_param(fmax, uint, 0444);
2399
2400MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
2401MODULE_LICENSE("GPL");
2402