// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

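/* Top-level MSI domain info: default chip/domain ops, MSI-X and multi-MSI capable */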
static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/*
 * MSI interrupt handler: scan each MSI controller's status register and
 * dispatch the pending vectors to their mapped Linux IRQs.
 */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

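/*
 * Compose the MSI message for a vector: the DMA address of pp->msi_msg is the
 * doorbell the endpoint writes to, and the hwirq number is the payload.
 */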
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

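/*
 * Bottom-level irq_chip callbacks. Each MSI controller serves
 * MAX_MSI_IRQS_PER_CTRL vectors through its own MASK/STATUS registers in DBI
 * space; the register block for controller N starts at
 * N * MSI_REG_CTRL_BLOCK_SIZE.
 */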
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

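/* Allocate a power-of-two aligned block of vectors from the MSI bitmap */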
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
					       &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

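/* Tear down the chained handler, the IRQ domains and the doorbell mapping */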
void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_data) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct device *dev = pci->dev;

		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	}
}

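/* Program the MSI doorbell (DMA address of pp->msi_msg) into the controller */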
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);

int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res);
		pp->cfg0_base = cfg_res->start;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io_size = resource_size(win->res);
			pp->io_bus_addr = win->res->start - win->offset;
			pp->io_base = pci_pio_to_address(win->res->start);
			break;
		case 0:
			dev_err(dev, "Missing *config* reg space\n");
			pp->cfg0_size = resource_size(win->res);
			pp->cfg0_base = win->res->start;
			if (!pci->dbi_base) {
				pci->dbi_base = devm_pci_remap_cfgspace(dev,
								pp->cfg0_base,
								pp->cfg0_size);
				if (!pci->dbi_base) {
					dev_err(dev, "Error with ioremap\n");
					return -ENOMEM;
				}
			}
			break;
		}
	}

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci->link_gen < 1)
		pci->link_gen = of_pci_get_max_link_speed(np);

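	/*
	 * Set up MSI handling: either the platform's msi_host_init() hook
	 * takes care of it, or the built-in DWC MSI controller is used with
	 * a chained handler and a DMA-mapped doorbell (pp->msi_msg).
	 */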
	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);

			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
						      sizeof(pp->msi_msg),
						      DMA_FROM_DEVICE,
						      DMA_ATTR_SKIP_CPU_SYNC);
			ret = dma_mapping_error(pci->dev, pp->msi_data);
			if (ret) {
				dev_err(pci->dev, "Failed to map MSI data\n");
				pp->msi_data = 0;
				goto err_free_msi;
			}
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (!ret)
		return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

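/*
 * Config accesses to devices below the root port go through outbound iATU
 * index 1, reprogrammed on every access: type CFG0 for the bus immediately
 * below the root port, CFG1 for buses further downstream.
 */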
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	int type;
	u32 busdev;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, pp->cfg0_base,
				  busdev, pp->cfg0_size);

	return pp->va_cfg0_base + where;
}

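/*
 * On controllers with only two viewports, iATU index 1 is shared between
 * config and I/O accesses, so the I/O window is restored after every config
 * transaction.
 */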
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_read(bus, devfn, where, size, val);

	if (!ret && pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_write(bus, devfn, where, size, val);

	if (!ret && pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

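/*
 * Basic Root Complex setup: BARs, bus numbers, command register, class code
 * and, unless the platform supplies its own child config accessors, the
 * default outbound iATU windows.
 */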
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pci_msi_enabled() && !pp->ops->msi_host_init) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize the MASK and ENABLE registers of each MSI controller */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		struct resource_entry *tmp, *entry = NULL;

		/* Get last memory resource entry */
		resource_list_for_each_entry(tmp, &pp->bridge->windows)
			if (resource_type(tmp->res) == IORESOURCE_MEM)
				entry = tmp;

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, entry->res->start,
					  entry->res->start - entry->offset,
					  resource_size(entry->res));
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);