/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 * Authors:
 *   David Daney <david.daney@cavium.com>
 *   Peter Swain <pswain@cavium.com>
 *   Steven J. Hill <steven.hill@cavium.com>
 *   Jan Glauber <jglauber@cavium.com>
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>

#include "cavium.h"

const char *cvm_mmc_irq_names[] = {
	"MMC Buffer",
	"MMC Command",
	"MMC DMA",
	"MMC Command Error",
	"MMC DMA Error",
	"MMC Switch",
	"MMC Switch Error",
	"MMC DMA int Fifo",
	"MMC DMA int",
};

/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types.  These are correct if MMC devices are
 * being used.  However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type.  We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
 */
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
	{0, 0},		/* CMD0 */
	{0, 3},		/* CMD1 */
	{0, 2},		/* CMD2 */
	{0, 1},		/* CMD3 */
	{0, 0},		/* CMD4 */
	{0, 1},		/* CMD5 */
	{0, 1},		/* CMD6 */
	{0, 1},		/* CMD7 */
	{1, 1},		/* CMD8 */
	{0, 2},		/* CMD9 */
	{0, 2},		/* CMD10 */
	{1, 1},		/* CMD11 */
	{0, 1},		/* CMD12 */
	{0, 1},		/* CMD13 */
	{1, 1},		/* CMD14 */
	{0, 0},		/* CMD15 */
	{0, 1},		/* CMD16 */
	{1, 1},		/* CMD17 */
	{1, 1},		/* CMD18 */
	{3, 1},		/* CMD19 */
	{2, 1},		/* CMD20 */
	{0, 0},		/* CMD21 */
	{0, 0},		/* CMD22 */
	{0, 1},		/* CMD23 */
	{2, 1},		/* CMD24 */
	{2, 1},		/* CMD25 */
	{2, 1},		/* CMD26 */
	{2, 1},		/* CMD27 */
	{0, 1},		/* CMD28 */
	{0, 1},		/* CMD29 */
	{1, 1},		/* CMD30 */
	{1, 1},		/* CMD31 */
	{0, 0},		/* CMD32 */
	{0, 0},		/* CMD33 */
	{0, 0},		/* CMD34 */
	{0, 1},		/* CMD35 */
	{0, 1},		/* CMD36 */
	{0, 0},		/* CMD37 */
	{0, 1},		/* CMD38 */
	{0, 4},		/* CMD39 */
	{0, 5},		/* CMD40 */
	{0, 0},		/* CMD41 */
	{2, 1},		/* CMD42 */
	{0, 0},		/* CMD43 */
	{0, 0},		/* CMD44 */
	{0, 0},		/* CMD45 */
	{0, 0},		/* CMD46 */
	{0, 0},		/* CMD47 */
	{0, 0},		/* CMD48 */
	{0, 0},		/* CMD49 */
	{0, 0},		/* CMD50 */
	{0, 0},		/* CMD51 */
	{0, 0},		/* CMD52 */
	{0, 0},		/* CMD53 */
	{0, 0},		/* CMD54 */
	{0, 1},		/* CMD55 */
	{0xff, 0xff},	/* CMD56 */
	{0, 0},		/* CMD57 */
	{0, 0},		/* CMD58 */
	{0, 0},		/* CMD59 */
	{0, 0},		/* CMD60 */
	{0, 0},		/* CMD61 */
	{0, 0},		/* CMD62 */
	{0, 0}		/* CMD63 */
};

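/*
 * Compute the XOR values that, applied to the hardware's fixed command
 * and response types for this opcode, yield the command and response
 * types the MMC core actually requested.
 */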
static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
	struct cvm_mmc_cr_type *cr;
	u8 hardware_ctype, hardware_rtype;
	u8 desired_ctype = 0, desired_rtype = 0;
	struct cvm_mmc_cr_mods r;

	cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
	hardware_ctype = cr->ctype;
	hardware_rtype = cr->rtype;
	if (cmd->opcode == MMC_GEN_CMD)
		hardware_ctype = (cmd->arg & 1) ? 1 : 2;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_ADTC:
		desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
		break;
	case MMC_CMD_AC:
	case MMC_CMD_BC:
	case MMC_CMD_BCR:
		desired_ctype = 0;
		break;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		desired_rtype = 0;
		break;
	case MMC_RSP_R1: /* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
	case MMC_RSP_R1B:
		desired_rtype = 1;
		break;
	case MMC_RSP_R2:
		desired_rtype = 2;
		break;
	case MMC_RSP_R3: /* MMC_RSP_R4 */
		desired_rtype = 3;
		break;
	}
	r.ctype_xor = desired_ctype ^ hardware_ctype;
	r.rtype_xor = desired_rtype ^ hardware_rtype;
	return r;
}

static void check_switch_errors(struct cvm_mmc_host *host)
{
	u64 emm_switch;

	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
	if (emm_switch & MIO_EMM_SWITCH_ERR0)
		dev_err(host->dev, "Switch power class error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR1)
		dev_err(host->dev, "Switch hs timing error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR2)
		dev_err(host->dev, "Switch bus width error\n");
}

static void clear_bus_id(u64 *reg)
{
	u64 bus_id_mask = GENMASK_ULL(61, 60);

	*reg &= ~bus_id_mask;
}

static void set_bus_id(u64 *reg, int bus_id)
{
	clear_bus_id(reg);
	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}

static int get_bus_id(u64 reg)
{
	return FIELD_GET(GENMASK_ULL(61, 60), reg);
}

/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
	int retries = 100;
	u64 rsp_sts;
	int bus_id;

	/*
	 * Mode settings are only taken from slot 0. Work around that
	 * hardware issue by first switching to slot 0.
	 */
	bus_id = get_bus_id(emm_switch);
	clear_bus_id(&emm_switch);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	set_bus_id(&emm_switch, bus_id);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* wait for the switch to finish */
	do {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
			break;
		udelay(10);
	} while (--retries);

	check_switch_errors(host);
}

static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
	/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
	u64 match = 0x3001070fffffffffull;

	return (slot->cached_switch & match) != (new_val & match);
}

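/*
 * Program the bus watchdog. If no timeout is given, default to roughly
 * 850 ms worth of clock cycles at the current slot clock.
 */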
static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
	u64 timeout;

	if (!slot->clock)
		return;

	if (ns)
		timeout = (slot->clock * ns) / NSEC_PER_SEC;
	else
		timeout = (slot->clock * 850ull) / 1000ull;
	writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}

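/*
 * Reset the bus: re-issue the current switch settings for this slot
 * with the EXE and error bits cleared, then restore the watchdog
 * register afterwards.
 */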
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch, wdog;

	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
	emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
			MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
	set_bus_id(&emm_switch, slot->bus_id);

	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
	do_switch(slot->host, emm_switch);

	slot->cached_switch = emm_switch;

	msleep(20);

	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}

/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	if (slot->bus_id == host->last_slot)
		return;

	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}

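/*
 * PIO read: copy the received data out of the controller's internal
 * data buffer into the scatterlist, 8 bytes per BUF_DAT read.
 */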
static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;
	u64 dat = 0;

	/* Auto inc from offset zero */
	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}

static void do_write(struct mmc_request *req)
{
	req->data->bytes_xfered = req->data->blocks * req->data->blksz;
	req->data->error = 0;
}

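/*
 * Copy the command response from the RSP_LO/RSP_HI registers into the
 * request, according to the response type reported in rsp_sts.
 */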
static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
			     u64 rsp_sts)
{
	u64 rsp_hi, rsp_lo;

	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
		return;

	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
	case 1:
	case 3:
		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
		req->cmd->resp[1] = 0;
		req->cmd->resp[2] = 0;
		req->cmd->resp[3] = 0;
		break;
	case 2:
		req->cmd->resp[3] = rsp_lo & 0xffffffff;
		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
		req->cmd->resp[1] = rsp_hi & 0xffffffff;
		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
		break;
	}
}

static int get_dma_dir(struct mmc_data *data)
{
	return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* Check if there are any pending requests left */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* Clear and disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return finish_dma_sg(host, data);
	else
		return finish_dma_single(host, data);
}

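/* Map the error bits in MIO_EMM_RSP_STS to an errno value. */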
static int check_status(u64 rsp_sts)
{
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
	    rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
		return -EILSEQ;
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
		return -ETIMEDOUT;
	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
		return -EIO;
	return 0;
}

/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma;

	emm_dma = readq(host->base + MIO_EMM_DMA(host));
	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}

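/*
 * Interrupt handler: acknowledge the interrupt, complete PIO transfers
 * on BUF_DONE, tear down DMA mappings and complete the current request
 * once the command or DMA has finished (or failed).
 */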
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	unsigned long flags = 0;
	u64 emm_int, rsp_sts;
	bool host_done;

	if (host->need_irq_handler_lock)
		spin_lock_irqsave(&host->irq_handler_lock, flags);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * dma_val set means DMA is still in progress. Don't touch
	 * the request and wait for the interrupt indicating that
	 * the DMA is finished.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
		unsigned int type = (rsp_sts >> 7) & 3;

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}

	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
		    emm_int & MIO_EMM_INT_DMA_DONE ||
		    emm_int & MIO_EMM_INT_CMD_ERR  ||
		    emm_int & MIO_EMM_INT_DMA_ERR;

	if (!(host_done && req->done))
		goto no_req_done;

	req->cmd->error = check_status(rsp_sts);

	if (host->dma_active && req->data)
		if (!finish_dma(host, req->data))
			goto no_req_done;

	set_cmd_response(host, req, rsp_sts);
	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
		cleanup_dma(host, rsp_sts);

	host->current_req = NULL;
	req->done(req);

no_req_done:
	if (host->dmar_fixup_done)
		host->dmar_fixup_done(host);
	if (host_done)
		host->release_bus(host);
out:
	if (host->need_irq_handler_lock)
		spin_unlock_irqrestore(&host->irq_handler_lock, flags);
	else
		__release(&host->irq_handler_lock);
	return IRQ_RETVAL(emm_int != 0);
}

/*
 * Program DMA_CFG and if needed DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 dma_cfg, addr;
	int count, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;

	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
			      (sg_dma_len(&data->sg[0]) / 8) - 1);

	addr = sg_dma_address(&data->sg[0]);
	if (!host->big_dma_addr)
		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

	if (host->big_dma_addr)
		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
	return addr;
}

/*
 * Queue complete sg list into the FIFO.
 * Returns 0 on error, 1 otherwise.
 */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	struct scatterlist *sg;
	u64 fifo_cmd, addr;
	int count, i, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;
	if (count > 16)
		goto error;

	/* Enable FIFO by removing CLR bit */
	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	for_each_sg(data->sg, sg, count, i) {
		/* Program DMA address */
		addr = sg_dma_address(sg);
		if (addr & 7)
			goto error;
		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

		/*
		 * If we have scatter-gather support we also have an extra
		 * register for the DMA addr, so no need to check
		 * host->big_dma_addr here.
		 */
		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

		/* enable interrupts on the last element */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
				       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
				       sg_dma_len(sg) / 8 - 1);
		/*
		 * The write copies the address and the command to the FIFO
		 * and increments the FIFO's COUNT field.
		 */
		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
		pr_debug("[%s] sg_dma_len: %u  sg_elem: %d/%d\n",
			 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
	}

	/*
	 * Unlike prepare_dma_single we don't return the address here,
	 * as it would not make sense for scatter-gather. The DMA fixup
	 * is only required on models that don't support scatter-gather,
	 * so that is not a problem.
	 */
	return 1;

error:
	WARN_ON_ONCE(1);
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	/* Disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	return 0;
}

static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return prepare_dma_sg(host, data);
	else
		return prepare_dma_single(host, data);
}

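/*
 * Build the MIO_EMM_DMA register value describing the transfer:
 * direction, block count, card address and whether a multi-block
 * command may be used.
 */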
static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	u64 emm_dma;

	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
			     mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_RW,
			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
	set_bus_id(&emm_dma, slot->bus_id);

	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

	pr_debug("[%s] blocks: %u  multi: %d\n",
		(emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
	return emm_dma;
}

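/*
 * Issue a multi-block read or write request using the controller's
 * DMA engine instead of PIO.
 */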
static void cvm_mmc_dma_request(struct mmc_host *mmc,
				struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_data *data;
	u64 emm_dma, addr;

	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
		dev_err(&mmc->card->dev,
			"Error: cvm_mmc_dma_request no data\n");
		goto error;
	}

	cvm_mmc_switch_to(slot);

	data = mrq->data;
	pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
		 data->blocks, data->blksz, data->blocks * data->blksz);
	if (data->timeout_ns)
		set_wdog(slot, data->timeout_ns);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	emm_dma = prepare_ext_dma(mmc, mrq);
	addr = prepare_dma(host, data);
	if (!addr) {
		dev_err(host->dev, "prepare_dma failed\n");
		goto error;
	}

	host->dma_active = true;
	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
			 MIO_EMM_INT_DMA_ERR);

	if (host->dmar_fixup)
		host->dmar_fixup(host, mrq->cmd, data, addr);

	/*
	 * If we have a valid SD card in the slot, we set the response
	 * bit mask to check for CRC errors and timeouts only.
	 * Otherwise, use the default power reset value.
	 */
	if (mmc_card_sd(mmc->card))
		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
	else
		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
	return;

error:
	mrq->cmd->error = -EINVAL;
	if (mrq->done)
		mrq->done(mrq);
	host->release_bus(host);
}

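/*
 * Prepare a PIO read: just set up the scatterlist iterator, the data
 * is copied out of the hardware buffer in the interrupt handler
 * (do_read).
 */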
static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}

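/*
 * Prepare a PIO write: copy the request data into the controller's
 * transmit buffer, 8 bytes per BUF_DAT write, before the command is
 * issued.
 */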
static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
	struct sg_mapping_iter *smi = &host->smi;
	unsigned int bytes_xfered;
	int shift = 56;
	u64 dat = 0;

	/* Copy data to the xmit buffer before issuing the command. */
	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

	/* Auto inc from offset zero, dbuf zero */
	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}

		if (shift < 0) {
			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
			dat = 0;
		}
	}
	sg_miter_stop(smi);
}

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_command *cmd = mrq->cmd;
	struct cvm_mmc_cr_mods mods;
	u64 emm_cmd, rsp_sts;
	int retries = 100;

	/*
	 * Note about locking:
	 * All MMC devices share the same bus and controller. Allow only a
	 * single user of the bootbus/MMC bus at a time. The lock is acquired
	 * on all entry points from the MMC layer.
	 *
	 * For requests the lock is only released after the completion
	 * interrupt!
	 */
	host->acquire_bus(host);

	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		return cvm_mmc_dma_request(mmc, mrq);

	cvm_mmc_switch_to(slot);

	mods = cvm_mmc_get_cr_mods(cmd);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_READ)
			do_read_request(host, mrq);
		else
			do_write_request(host, mrq);

		if (cmd->data->timeout_ns)
			set_wdog(slot, cmd->data->timeout_ns);
	} else
		set_wdog(slot, 0);

	host->dma_active = false;
	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
	set_bus_id(&emm_cmd, slot->bus_id);
	if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

	writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
		udelay(10);
		if (--retries)
			goto retry;
	}
	if (!retries)
		dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}

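/*
 * Apply the MMC core's ios settings: power state, bus width, (DDR)
 * timing and clock divider, and issue a switch if anything changed.
 */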
static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	int clk_period = 0, power_class = 10, bus_width = 0;
	u64 clock, emm_switch;

	host->acquire_bus(host);
	cvm_mmc_switch_to(slot);

	/* Set the power state */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;

	case MMC_POWER_OFF:
		cvm_mmc_reset_bus(slot);
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 0);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;

	case MMC_POWER_UP:
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 1);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	}

	/* Convert bus width to HW definition */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		bus_width = 2;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = 1;
		break;
	case MMC_BUS_WIDTH_1:
		bus_width = 0;
		break;
	}

	/* DDR is available for 4/8 bit bus width */
	if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
		bus_width |= 4;

	/* Change the clock frequency. */
	clock = ios->clock;
	if (clock > 52000000)
		clock = 52000000;
	slot->clock = clock;

	if (clock)
		clk_period = (host->sys_freq + clock - 1) / (2 * clock);

	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
				(ios->timing == MMC_TIMING_MMC_HS)) |
		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
	set_bus_id(&emm_switch, slot->bus_id);

	if (!switch_val_changed(slot, emm_switch))
		goto out;

	set_wdog(slot, 0);
	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;
out:
	host->release_bus(host);
}

static const struct mmc_host_ops cvm_mmc_ops = {
	.request        = cvm_mmc_request,
	.set_ios        = cvm_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
};

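/* Clamp the requested clock to the host limits and remember it in slot->clock. */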
static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
	struct mmc_host *mmc = slot->mmc;

	clock = min(clock, mmc->f_max);
	clock = max(clock, mmc->f_min);
	slot->clock = clock;
}

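/*
 * Bring the slot hardware into a known state: enable the slot, program
 * an initial low-speed clock and set the default status mask and RCA.
 */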
static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch;

	/* Enable this bus slot. */
	host->emm_cfg |= (1ull << slot->bus_id);
	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
	udelay(10);

	/* Program initial clock speed and power. */
	cvm_mmc_set_clock(slot, slot->mmc->f_min);
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
				 (host->sys_freq / slot->clock) / 2);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
				 (host->sys_freq / slot->clock) / 2);

	/* Make the changes take effect on this bus slot. */
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	slot->cached_switch = emm_switch;

	/*
	 * Set watchdog timeout value and default reset value
	 * for the mask register. Finally, set the CARD_RCA
	 * bit so that we can get the card address relative
	 * to the CMD register for CMD7 transactions.
	 */
	set_wdog(slot, 0);
	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(1, host->base + MIO_EMM_RCA(host));
	return 0;
}

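/*
 * Parse the slot's device tree node: regulator/OCR, bus width, clock
 * limits and the command/data sampling skews. Returns the slot's bus
 * id on success or a negative error code.
 */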
static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
	struct device_node *node = dev->of_node;
	struct mmc_host *mmc = slot->mmc;
	u64 clock_period;
	int ret;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
		return ret;
	}

	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
		dev_err(dev, "Invalid reg property on %pOF\n", node);
		return -EINVAL;
	}

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;
	/*
	 * Legacy Octeon firmware has no regulator entry, fall back to
	 * a hard-coded voltage to get a sane OCR.
	 */
	if (IS_ERR(mmc->supply.vmmc))
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Common MMC bindings */
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	/* Set bus width */
	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
		if (bus_width == 8)
			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
		else if (bus_width == 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	/* Set maximum and minimum frequency */
	if (!mmc->f_max)
		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
	if (!mmc->f_max || mmc->f_max > 52000000)
		mmc->f_max = 52000000;
	mmc->f_min = 400000;

	/* Sampling register settings, period in picoseconds */
	clock_period = 1000000000000ull / slot->host->sys_freq;
	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

	return id;
}

int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
	struct cvm_mmc_slot *slot;
	struct mmc_host *mmc;
	int ret, id;

	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;

	ret = cvm_mmc_of_parse(dev, slot);
	if (ret < 0)
		goto error;
	id = ret;

	/* Set up host parameters */
	mmc->ops = &cvm_mmc_ops;

	/*
	 * We only have a 3.3V supply, so we cannot support any
	 * of the UHS modes. We do support the high speed DDR
	 * modes up to 52MHz.
	 *
	 * Disable bounce buffers for max_segs = 1
	 */
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

	if (host->use_sg)
		mmc->max_segs = 16;
	else
		mmc->max_segs = 1;

	/* DMA size field can address up to 8 MB */
	mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
				  dma_get_max_seg_size(host->dev));
	mmc->max_req_size = mmc->max_seg_size;
	/* External DMA is in 512 byte blocks */
	mmc->max_blk_size = 512;
	/* DMA block count field is 15 bits */
	mmc->max_blk_count = 32767;

	slot->clock = mmc->f_min;
	slot->bus_id = id;
	slot->cached_rca = 1;

	host->acquire_bus(host);
	host->slot[id] = slot;
	cvm_mmc_switch_to(slot);
	cvm_mmc_init_lowlevel(slot);
	host->release_bus(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(dev, "mmc_add_host() returned %d\n", ret);
		slot->host->slot[id] = NULL;
		goto error;
	}
	return 0;

error:
	mmc_free_host(slot->mmc);
	return ret;
}

int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
	mmc_remove_host(slot->mmc);
	slot->host->slot[slot->bus_id] = NULL;
	mmc_free_host(slot->mmc);
	return 0;
}