// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Renesas R-Car SoCs
 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 *  Copyright (C) 2009 - 2011  Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "pcie-rcar.h"

struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct msi_controller chip;
	unsigned long pages;
	struct mutex lock;
	int irq1;
	int irq2;
};

static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
{
	return container_of(chip, struct rcar_msi, chip);
}

/* Structure representing the PCIe interface */
struct rcar_pcie_host {
	struct rcar_pcie	pcie;
	struct device		*dev;
	struct phy		*phy;
	void __iomem		*base;
	struct clk		*bus_clk;
	struct rcar_msi		msi;
	int			(*phy_init_fn)(struct rcar_pcie_host *host);
};

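/*
 * Read the aligned 32-bit register containing "where" and shift the value
 * so the addressed byte/word ends up in the low-order bits; callers mask
 * off the width they need.
 */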
static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
	unsigned int shift = BITS_PER_BYTE * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	return val >> shift;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie_host *host,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller-initiated target transfer to its own config space
	 * results in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == RCAR_PCI_ACCESS_READ)
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		else
			rcar_pci_write_reg(pcie, *data, PCICONF(index));

		return PCIBIOS_SUCCESSFUL;
	}

	/* Clear errors */
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the PIO address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/* Enable the configuration access */
	if (pci_is_root_bus(bus->parent))
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for errors */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}

static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct rcar_pcie_host *host = bus->sysdata;
	int ret;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		return ret;
	}

	if (size == 1)
		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
		bus->number, devfn, where, size, *val);

	return ret;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct rcar_pcie_host *host = bus->sysdata;
	unsigned int shift;
	u32 data;
	int ret;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
		bus->number, devfn, where, size, val);

	if (size == 1) {
		shift = BITS_PER_BYTE * (where & 3);
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = BITS_PER_BYTE * (where & 2);
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else
		data = val;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
				      bus, devfn, where, &data);

	return ret;
}

static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};

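/*
 * If the port is capable of 5 GT/s but the link trained at 2.5 GT/s,
 * request a speed change and wait for MACSR to report the result.
 */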
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 1000;
	u32 macsr;

	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
		return;

	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
		dev_err(dev, "Speed change already in progress\n");
		return;
	}

	macsr = rcar_pci_read_reg(pcie, MACSR);
	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
		goto done;

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set speed change reason as intentional factor */
	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);

	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
		rcar_pci_write_reg(pcie, macsr, MACSR);

	/* Start link speed change */
	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);

	while (timeout--) {
		macsr = rcar_pci_read_reg(pcie, MACSR);
		if (macsr & SPCHGFIN) {
			/* Clear the interrupt bits */
			rcar_pci_write_reg(pcie, macsr, MACSR);

			if (macsr & SPCHGFAIL)
				dev_err(dev, "Speed change failed\n");

			goto done;
		}

		msleep(1);
	}

	dev_err(dev, "Speed change timed out\n");

done:
	dev_info(dev, "Current link speed is %s GT/s\n",
		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}

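/*
 * Attempt the optional link speed-up, then program one outbound window for
 * each I/O and memory resource of the host bridge.
 */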
static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
	struct resource_entry *win;
	LIST_HEAD(res);
	int i = 0;

	/* Try setting 5 GT/s link speed */
	rcar_pcie_force_speedup(pcie);

	/* Setup PCI resources */
	resource_list_for_each_entry(win, &bridge->windows) {
		struct resource *res = win->res;

		if (!res->flags)
			continue;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
		case IORESOURCE_MEM:
			rcar_pcie_set_outbound(pcie, i, win);
			i++;
			break;
		}
	}
}

static int rcar_pcie_enable(struct rcar_pcie_host *host)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);

	rcar_pcie_hw_enable(host);

	pci_add_flags(PCI_REASSIGN_ALL_BUS);

	bridge->sysdata = host;
	bridge->ops = &rcar_pcie_ops;
	if (IS_ENABLED(CONFIG_PCI_MSI))
		bridge->msi = &host->msi.chip;

	return pci_host_probe(bridge);
}

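/* Poll for completion of an indirect PHY register access (R-Car H1) */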
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 100;

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
			return 0;

		udelay(100);
	}

	dev_err(dev, "Access to PCIe phy timed out\n");

	return -ETIMEDOUT;
}

static void phy_write_reg(struct rcar_pcie *pcie,
			  unsigned int rate, u32 addr,
			  unsigned int lane, u32 data)
{
	u32 phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);

	/* Set write data */
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	/* Clear command */
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}

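/*
 * One-time controller setup: select the operating mode, populate the root
 * port config space defaults and try to bring up the PCIe link.
 */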
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set mode */
	rcar_pci_write_reg(pcie, 1, PCIEMSR);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err)
		return err;

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
	 * they aren't used, to avoid the bridge being detected as broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);

	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will time out if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	wmb();

	return 0;
}

static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);

	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	return 0;
}

static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/*
	 * These settings come from the R-Car Series, 2nd Generation User's
	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
	 */
	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
	/* The following value is for DC connection, no termination resistor */
	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	return 0;
}

static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
{
	int err;

	err = phy_init(host->phy);
	if (err)
		return err;

	err = phy_power_on(host->phy);
	if (err)
		phy_exit(host->phy);

	return err;
}

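/* Allocate one MSI vector number from the bitmap of hardware vectors */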
static int rcar_msi_alloc(struct rcar_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
{
	int msi;

	mutex_lock(&chip->lock);
	msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
				      order_base_2(no_irqs));
	mutex_unlock(&chip->lock);

	return msi;
}

static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
	mutex_lock(&chip->lock);
	clear_bit(irq, chip->used);
	mutex_unlock(&chip->lock);
}

static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
	struct rcar_pcie_host *host = data;
	struct rcar_pcie *pcie = &host->pcie;
	struct rcar_msi *msi = &host->msi;
	struct device *dev = pcie->dev;
	unsigned long reg;

	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);

	/* MSI & INTx share an interrupt - we only handle MSI here */
	if (!reg)
		return IRQ_NONE;

	while (reg) {
		unsigned int index = find_first_bit(&reg, 32);
		unsigned int msi_irq;

		/* clear the interrupt */
		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);

		msi_irq = irq_find_mapping(msi->domain, index);
		if (msi_irq) {
			if (test_bit(index, msi->used))
				generic_handle_irq(msi_irq);
			else
				dev_info(dev, "unhandled MSI\n");
		} else {
			/* Unknown MSI, just clear it */
			dev_dbg(dev, "unexpected MSI\n");
		}

		/* see if there's any more pending in this vector */
		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
	}

	return IRQ_HANDLED;
}

static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			      struct msi_desc *desc)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
						   msi.chip);
	struct rcar_pcie *pcie = &host->pcie;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = rcar_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq) {
		rcar_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static int rcar_msi_setup_irqs(struct msi_controller *chip,
			       struct pci_dev *pdev, int nvec, int type)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
						   msi.chip);
	struct rcar_pcie *pcie = &host->pcie;
	struct msi_desc *desc;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;
	int i;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	hwirq = rcar_msi_alloc_region(msi, nvec);
	if (hwirq < 0)
		return -ENOSPC;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq)
		return -ENOSPC;

	for (i = 0; i < nvec; i++) {
		/*
		 * irq_create_mapping() called from rcar_pcie_probe() pre-
		 * allocates descs, so there is no need to allocate descs here.
		 * We can therefore assume that if irq_find_mapping() above
		 * returns non-zero, then the descs are also successfully
		 * allocated.
		 */
		if (irq_set_msi_desc_off(irq, i, desc)) {
			/* TODO: clear */
			return -EINVAL;
		}
	}

	desc->nvec_used = nvec;
	desc->msi_attrib.multiple = order_base_2(nvec);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	rcar_msi_free(msi, d->hwirq);
}

static struct irq_chip rcar_msi_irq_chip = {
	.name = "R-Car PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = rcar_msi_map,
};

static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host)
{
	struct rcar_msi *msi = &host->msi;
	int i, irq;

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);
}

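/*
 * Program the address that inbound MSI writes are matched against and
 * unmask all 32 MSI vectors.
 */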
static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct rcar_msi *msi = &host->msi;
	unsigned long base;

	/* setup MSI data target */
	base = virt_to_phys((void *)msi->pages);

	rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);

	/* enable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
}

static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct device *dev = pcie->dev;
	struct rcar_msi *msi = &host->msi;
	int err, i;

	mutex_init(&msi->lock);

	msi->chip.dev = dev;
	msi->chip.setup_irq = rcar_msi_setup_irq;
	msi->chip.setup_irqs = rcar_msi_setup_irqs;
	msi->chip.teardown_irq = rcar_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	for (i = 0; i < INT_PCI_MSI_NR; i++)
		irq_create_mapping(msi->domain, i);

	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup MSI data target */
	msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA32, 0);
	rcar_pcie_hw_enable_msi(host);

	return 0;

err:
	rcar_pcie_unmap_msi(host);
	return err;
}

static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct rcar_msi *msi = &host->msi;

	/* Disable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);

	/* Disable address decoding of the MSI interrupt, MSIFE */
	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);

	free_pages(msi->pages, 0);

	rcar_pcie_unmap_msi(host);
}

static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct device *dev = pcie->dev;
	struct resource res;
	int err, i;

	host->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(host->phy))
		return PTR_ERR(host->phy);

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	host->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(host->bus_clk)) {
		dev_err(dev, "cannot get pcie bus clock\n");
		return PTR_ERR(host->bus_clk);
	}

	i = irq_of_parse_and_map(dev->of_node, 0);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq1;
	}
	host->msi.irq1 = i;

	i = irq_of_parse_and_map(dev->of_node, 1);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq2;
	}
	host->msi.irq2 = i;

	return 0;

err_irq2:
	irq_dispose_mapping(host->msi.irq1);
err_irq1:
	return err;
}

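/*
 * Map a dma-ranges entry through the inbound (PCIe -> CPU) windows. Each
 * window covers a power-of-two, naturally aligned region of at most 4 GiB,
 * so a single range may have to be split across several windows.
 */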
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct resource_entry *entry,
				    int *index)
{
	u64 restype = entry->res->flags;
	u64 cpu_addr = entry->res->start;
	u64 cpu_end = entry->res->end;
	u64 pci_addr = entry->res->start - entry->offset;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size = resource_size(entry->res);
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	while (cpu_addr < cpu_end) {
		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
		/*
		 * If the size of the range is larger than the alignment of
		 * the start address, we have to use multiple entries to
		 * perform the mapping.
		 */
		if (cpu_addr > 0) {
			unsigned long nr_zeros = __ffs64(cpu_addr);
			u64 alignment = 1ULL << nr_zeros;

			size = min(size, alignment);
		}
		/* Hardware supports max 4GiB inbound region */
		size = min(size, 1ULL << 32);

		mask = roundup_pow_of_two(size) - 1;
		mask &= ~0xf;

		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
				      lower_32_bits(mask) | flags, idx, true);

		pci_addr += size;
		cpu_addr += size;
		idx += 2;
	}
	*index = idx;

	return 0;
}

static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
	struct resource_entry *entry;
	int index = 0, err = 0;

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
		if (err)
			break;
	}

	return err;
}

static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779",
	  .data = rcar_pcie_phy_init_h1 },
	{ .compatible = "renesas,pcie-r8a7790",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-rcar-gen2",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795",
	  .data = rcar_pcie_phy_init_gen3 },
	{ .compatible = "renesas,pcie-rcar-gen3",
	  .data = rcar_pcie_phy_init_gen3 },
	{},
};

static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_host *host;
	struct rcar_pcie *pcie;
	u32 data;
	int err;
	struct pci_host_bridge *bridge;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
	if (!bridge)
		return -ENOMEM;

	host = pci_host_bridge_priv(bridge);
	pcie = &host->pcie;
	pcie->dev = dev;
	platform_set_drvdata(pdev, host);

	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
		goto err_pm_put;
	}

	err = rcar_pcie_get_resources(host);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	err = clk_prepare_enable(host->bus_clk);
	if (err) {
		dev_err(dev, "failed to enable bus clock: %d\n", err);
		goto err_unmap_msi_irqs;
	}

	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		goto err_clk_disable;

	host->phy_init_fn = of_device_get_match_data(dev);
	err = host->phy_init_fn(host);
	if (err) {
		dev_err(dev, "failed to init PCIe PHY\n");
		goto err_clk_disable;
	}

	/* Failure to get a link might just be that no cards are inserted */
	if (rcar_pcie_hw_init(pcie)) {
		dev_info(dev, "PCIe link down\n");
		err = -ENODEV;
		goto err_phy_shutdown;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(host);
		if (err < 0) {
			dev_err(dev,
				"failed to enable MSI support: %d\n",
				err);
			goto err_phy_shutdown;
		}
	}

	err = rcar_pcie_enable(host);
	if (err)
		goto err_msi_teardown;

	return 0;

err_msi_teardown:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pcie_teardown_msi(host);

err_phy_shutdown:
	if (host->phy) {
		phy_power_off(host->phy);
		phy_exit(host->phy);
	}

err_clk_disable:
	clk_disable_unprepare(host->bus_clk);

err_unmap_msi_irqs:
	irq_dispose_mapping(host->msi.irq2);
	irq_dispose_mapping(host->msi.irq1);

err_pm_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return err;
}

static int __maybe_unused rcar_pcie_resume(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int data;
	int err;

	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		return 0;

	/* Failure to get a link might just be that no cards are inserted */
	err = host->phy_init_fn(host);
	if (err) {
		dev_info(dev, "PCIe link down\n");
		return 0;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pcie_hw_enable_msi(host);

	rcar_pcie_hw_enable(host);

	return 0;
}

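/* Bring the PCIe link back up early in resume if it is reported down */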
static int rcar_pcie_resume_noirq(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;

	if (rcar_pci_read_reg(pcie, PMSR) &&
	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
		return 0;

	/* Re-establish the PCIe link */
	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
	return rcar_pcie_wait_for_dl(pcie);
}

static const struct dev_pm_ops rcar_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
	.resume_noirq = rcar_pcie_resume_noirq,
};

static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = "rcar-pcie",
		.of_match_table = rcar_pcie_of_match,
		.pm = &rcar_pcie_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
builtin_platform_driver(rcar_pcie_driver);