/*
 * Driver for MMC and SSD cards for Cavium ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cavium Inc.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include "cavium.h"

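/*
 * All slots hang off one controller and share the physical bus; the
 * semaphore serializes access so only one slot talks to the hardware
 * at a time.
 */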
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	down(&host->mmc_serializer);
}

static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
	up(&host->mmc_serializer);
}

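/* Ack any pending bits (MIO_EMM_INT is write-one-to-clear), then enable them. */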
static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
}

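/*
 * The controller exposes up to nine MSI-X vectors, one per interrupt
 * source; devm_request_irq() ties their lifetime to the PCI device.
 */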
static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
					   struct pci_dev *pdev)
{
	int nvec, ret, i;

	nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* register interrupts */
	for (i = 0; i < nvec; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       cvm_mmc_interrupt,
				       0, cvm_mmc_irq_names[i], host);
		if (ret)
			return ret;
	}
	return 0;
}

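/*
 * PCI probe: map BAR 0, set up the shared host state and interrupts,
 * then create one MMC slot per "mmc-slot" child node of the device.
 */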
static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base) {
		ret = -EINVAL;
		goto error;
	}

	/* On ThunderX these are identical */
	host->dma_base = host->base;

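	/* Offsets of the eMMC and DMA register blocks within BAR 0 on ThunderX */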
	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto error;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto error;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

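	/* The DMA engine generates 48-bit bus addresses */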
	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * the bootloader. Writing 1 to the bits clears them.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and set the node pointer to
		 * the slot. The easiest way to get this is using
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret) {
				of_node_put(child_node);
				goto error;
			}
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
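	/* Unwind any slots and dummy platform devices created above */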
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i]) {
			get_device(&host->slot_pdev[i]->dev);
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
			put_device(&host->slot_pdev[i]->dev);
		}
	}
	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
	return ret;
}

static void thunder_mmc_remove(struct pci_dev *pdev)
{
	struct cvm_mmc_host *host = pci_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

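	/* Disable the DMA engine so the controller stops mastering the bus */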
	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
}

static const struct pci_device_id thunder_mmc_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
	{ 0, }  /* end of table */
};

static struct pci_driver thunder_mmc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunder_mmc_id_table,
	.probe = thunder_mmc_probe,
	.remove = thunder_mmc_remove,
};

module_pci_driver(thunder_mmc_driver);

MODULE_AUTHOR("Cavium Inc.");
MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);