/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

struct qman_portal *qman_dma_portal;
EXPORT_SYMBOL(qman_dma_portal);

/* Enable portal interrupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW  1
#define CONFIG_FSL_DPA_PIRQ_FAST  1

static struct cpumask portal_cpus;
static int __qman_portals_probed;
/* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock);

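/*
 * Point dequeue stashing for one portal at @cpu. With PAMU compiled in, an
 * IOMMU domain is set up for the portal device first: a 36-bit aperture with
 * a single window, L1 cache stashing directed at @cpu, then the device is
 * attached and PAMU enabled. The channel's stash destination (SDEST) is
 * programmed via qman_set_sdest(); errors after domain allocation bail out
 * and tear the domain down instead.
 */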
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
	struct device *dev = pcfg->dev;
	int window_count = 1;
	struct iommu_domain_geometry geom_attr;
	struct pamu_stash_attribute stash_attr;
	int ret;

	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
	if (!pcfg->iommu_domain) {
		dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
		goto no_iommu;
	}
	geom_attr.aperture_start = 0;
	geom_attr.aperture_end =
		((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
	geom_attr.force_aperture = true;
	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
				    &geom_attr);
	if (ret < 0) {
		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
			ret);
		goto out_domain_free;
	}
	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
				    &window_count);
	if (ret < 0) {
		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
			ret);
		goto out_domain_free;
	}
	stash_attr.cpu = cpu;
	stash_attr.cache = PAMU_ATTR_CACHE_L1;
	ret = iommu_domain_set_attr(pcfg->iommu_domain,
				    DOMAIN_ATTR_FSL_PAMU_STASH,
				    &stash_attr);
	if (ret < 0) {
		dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
			__func__, ret);
		goto out_domain_free;
	}
	ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
					 IOMMU_READ | IOMMU_WRITE);
	if (ret < 0) {
		dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
			__func__, ret);
		goto out_domain_free;
	}
	ret = iommu_attach_device(pcfg->iommu_domain, dev);
	if (ret < 0) {
		dev_err(dev, "%s(): iommu_attach_device() = %d", __func__,
			ret);
		goto out_domain_free;
	}
	ret = iommu_domain_set_attr(pcfg->iommu_domain,
				    DOMAIN_ATTR_FSL_PAMU_ENABLE,
				    &window_count);
	if (ret < 0) {
		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
			ret);
		goto out_detach_device;
	}

no_iommu:
#endif
	qman_set_sdest(pcfg->channel, cpu);

	return;

#ifdef CONFIG_FSL_PAMU
out_detach_device:
	iommu_detach_device(pcfg->iommu_domain, dev);
out_domain_free:
	iommu_domain_free(pcfg->iommu_domain);
	pcfg->iommu_domain = NULL;
#endif
}

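/*
 * Bring up one CPU-affine portal: apply the LIODN fixup, configure stashing
 * for the portal's CPU, create the affine portal and choose which interrupt
 * sources are IRQ-driven rather than polled. Once every possible CPU has a
 * portal, the global CGRs are initialised; the first portal brought up is
 * also recorded in qman_dma_portal.
 */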
static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
{
	struct qman_portal *p;
	u32 irq_sources = 0;

	/* We need the same LIODN offset for all portals */
	qman_liodn_fixup(pcfg->channel);

	pcfg->iommu_domain = NULL;
	portal_set_cpu(pcfg, pcfg->cpu);

	p = qman_create_affine_portal(pcfg, NULL);
	if (!p) {
		dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
			 __func__, pcfg->cpu);
		return NULL;
	}

	/* Determine what should be interrupt-vs-poll driven */
#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
	irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
		       QM_PIRQ_CSCI;
#endif
#ifdef CONFIG_FSL_DPA_PIRQ_FAST
	irq_sources |= QM_PIRQ_DQRI;
#endif
	qman_p_irqsource_add(p, irq_sources);

	spin_lock(&qman_lock);
	if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
		/* all assigned portals are initialized now */
		qman_init_cgr_all();
	}

	if (!qman_dma_portal)
		qman_dma_portal = p;

	spin_unlock(&qman_lock);

	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);

	return p;
}

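/*
 * Re-target a portal's dequeue stashing at @cpu: update the PAMU stash
 * attribute if the portal has an IOMMU domain, then reprogram the channel's
 * SDEST.
 */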
static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
							unsigned int cpu)
{
#ifdef CONFIG_FSL_PAMU /* TODO */
	struct pamu_stash_attribute stash_attr;
	int ret;

	if (pcfg->iommu_domain) {
		stash_attr.cpu = cpu;
		stash_attr.cache = PAMU_ATTR_CACHE_L1;
		ret = iommu_domain_set_attr(pcfg->iommu_domain,
				DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
		if (ret < 0) {
			dev_err(pcfg->dev,
				"Failed to update pamu stash setting\n");
			return;
		}
	}
#endif
	qman_set_sdest(pcfg->channel, cpu);
}

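/*
 * CPU hotplug "offline" callback: if the departing CPU owns an affine portal,
 * retarget the portal interrupt and dequeue stashing at any other online CPU
 * so the portal keeps being serviced.
 */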
static int qman_offline_cpu(unsigned int cpu)
{
	struct qman_portal *p;
	const struct qm_portal_config *pcfg;

	p = affine_portals[cpu];
	if (p) {
		pcfg = qman_get_qm_portal_config(p);
		if (pcfg) {
			/* select any other online CPU */
			cpu = cpumask_any_but(cpu_online_mask, cpu);
			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
			qman_portal_update_sdest(pcfg, cpu);
		}
	}
	return 0;
}

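/*
 * CPU hotplug "online" callback: when a CPU that owns an affine portal comes
 * back, pull the portal interrupt and dequeue stashing back onto it.
 */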
static int qman_online_cpu(unsigned int cpu)
{
	struct qman_portal *p;
	const struct qm_portal_config *pcfg;

	p = affine_portals[cpu];
	if (p) {
		pcfg = qman_get_qm_portal_config(p);
		if (pcfg) {
			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
			qman_portal_update_sdest(pcfg, cpu);
		}
	}
	return 0;
}

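/*
 * Report portal probing progress to dependent drivers: -1 if any portal probe
 * failed, 1 once every possible CPU has been assigned a portal, 0 while
 * probing is still in progress.
 */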
int qman_portals_probed(void)
{
	return __qman_portals_probed;
}
EXPORT_SYMBOL_GPL(qman_portals_probed);

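/*
 * Probe one "fsl,qman-portal" node: map its cache-enabled (CE) and
 * cache-inhibited (CI) register regions, read the channel from "cell-index",
 * assign the portal to the next CPU that lacks one and initialise it. Portals
 * left over once every CPU is covered are recorded as probed but skipped.
 */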
static int qman_portal_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct qm_portal_config *pcfg;
	struct resource *addr_phys[2];
	int irq, cpu, err, i;
	u32 val;

	err = qman_is_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0) {
		dev_err(&pdev->dev, "failing probe due to qman probe error\n");
		return -ENODEV;
	}

	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
	if (!pcfg) {
		__qman_portals_probed = -1;
		return -ENOMEM;
	}

	pcfg->dev = dev;

	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CE);
	if (!addr_phys[0]) {
		dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
		goto err_ioremap1;
	}

	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CI);
	if (!addr_phys[1]) {
		dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
		goto err_ioremap1;
	}

	err = of_property_read_u32(node, "cell-index", &val);
	if (err) {
		dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
		__qman_portals_probed = -1;
		return err;
	}
	pcfg->channel = val;
	pcfg->cpu = -1;
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		goto err_ioremap1;
	pcfg->irq = irq;

	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
					resource_size(addr_phys[0]),
					QBMAN_MEMREMAP_ATTR);
	if (!pcfg->addr_virt_ce) {
		dev_err(dev, "memremap::CE failed\n");
		goto err_ioremap1;
	}

	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
				resource_size(addr_phys[1]));
	if (!pcfg->addr_virt_ci) {
		dev_err(dev, "ioremap::CI failed\n");
		goto err_ioremap2;
	}

	pcfg->pools = qm_get_pools_sdqcr();

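	/*
	 * Assign this portal to the first CPU that does not have one yet;
	 * once all possible CPUs are covered, remaining portals are left
	 * uninitialised.
	 */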
	spin_lock(&qman_lock);
	cpu = cpumask_next_zero(-1, &portal_cpus);
	if (cpu >= nr_cpu_ids) {
		__qman_portals_probed = 1;
		/* unassigned portal, skip init */
		spin_unlock(&qman_lock);
		return 0;
	}

	cpumask_set_cpu(cpu, &portal_cpus);
	spin_unlock(&qman_lock);
	pcfg->cpu = cpu;

	if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
		dev_err(dev, "dma_set_mask() failed\n");
		goto err_portal_init;
	}

	if (!init_pcfg(pcfg)) {
		dev_err(dev, "portal init failed\n");
		goto err_portal_init;
	}

	/* clear irq affinity if assigned cpu is offline */
	if (!cpu_online(cpu))
		qman_offline_cpu(cpu);

	if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
		/*
		 * QMan wasn't reset prior to boot (Kexec for example)
		 * Empty all the frame queues so they are in reset state
		 */
		for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
			err = qman_shutdown_fq(i);
			if (err) {
				dev_err(dev, "Failed to shutdown frame queue %d\n",
					i);
				goto err_portal_init;
			}
		}
		qman_done_cleanup();
	}

	return 0;

err_portal_init:
	iounmap(pcfg->addr_virt_ci);
err_ioremap2:
	memunmap(pcfg->addr_virt_ce);
err_ioremap1:
	__qman_portals_probed = -1;

	return -ENXIO;
}

static const struct of_device_id qman_portal_ids[] = {
	{
		.compatible = "fsl,qman-portal",
	},
	{}
};
MODULE_DEVICE_TABLE(of, qman_portal_ids);

static struct platform_driver qman_portal_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qman_portal_ids,
	},
	.probe = qman_portal_probe,
};

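/*
 * Module init: register the platform driver, then hook qman_online_cpu() and
 * qman_offline_cpu() into CPU hotplug so portal IRQ affinity and stashing
 * follow CPUs as they go down and come back up.
 */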
static int __init qman_portal_driver_register(struct platform_driver *drv)
{
	int ret;

	ret = platform_driver_register(drv);
	if (ret < 0)
		return ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"soc/qman_portal:online",
					qman_online_cpu, qman_offline_cpu);
	if (ret < 0) {
		pr_err("qman: failed to register hotplug callbacks.\n");
		platform_driver_unregister(drv);
		return ret;
	}
	return 0;
}

module_driver(qman_portal_driver,
	      qman_portal_driver_register, platform_driver_unregister);