// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2009 - 2019 Broadcom */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../pci.h"

/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
#define BRCM_PCIE_CAP_REGS				0x00ac

/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1				0x0188
#define  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK	0xc
#define  PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN			0x0

#define PCIE_RC_CFG_PRIV1_ID_VAL3			0x043c
#define  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK	0xffffff

#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY			0x04dc
#define  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK	0xc00

#define PCIE_RC_DL_MDIO_ADDR				0x1100
#define PCIE_RC_DL_MDIO_WR_DATA				0x1104
#define PCIE_RC_DL_MDIO_RD_DATA				0x1108

#define PCIE_MISC_MISC_CTRL				0x4008
#define  PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK		0x1000
#define  PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK	0x2000
#define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK	0x300000

#define  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK		0xf8000000
#define  PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK		0x07c00000
#define  PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK		0x0000001f
#define  SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO		0x400c
#define PCIE_MEM_WIN0_LO(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI		0x4010
#define PCIE_MEM_WIN0_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)

#define PCIE_MISC_RC_BAR1_CONFIG_LO			0x402c
#define  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK		0x1f

#define PCIE_MISC_RC_BAR2_CONFIG_LO			0x4034
#define  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK		0x1f
#define PCIE_MISC_RC_BAR2_CONFIG_HI			0x4038

#define PCIE_MISC_RC_BAR3_CONFIG_LO			0x403c
#define  PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK		0x1f

#define PCIE_MISC_MSI_BAR_CONFIG_LO			0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI			0x4048

#define PCIE_MISC_MSI_DATA_CONFIG			0x404c
#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_32		0xffe06540
#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_8		0xfff86540

#define PCIE_MISC_PCIE_CTRL				0x4064
#define  PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK	0x1
#define  PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK		0x4

#define PCIE_MISC_PCIE_STATUS				0x4068
#define  PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK		0x80
#define  PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK	0x20
#define  PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK	0x10
#define  PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK	0x40

#define PCIE_MISC_REVISION				0x406c
#define  BRCM_PCIE_HW_REV_33				0x0303

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT		0x4070
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK	0xfff00000
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK	0xfff0
#define PCIE_MEM_WIN0_BASE_LIMIT(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI			0x4080
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK	0xff
#define PCIE_MEM_WIN0_BASE_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI			0x4084
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK	0xff
#define PCIE_MEM_WIN0_LIMIT_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)

#define PCIE_MISC_HARD_PCIE_HARD_DEBUG					0x4204
#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK	0x2
#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK		0x08000000

#define PCIE_INTR2_CPU_BASE		0x4300
#define PCIE_MSI_INTR2_BASE		0x4500
/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
#define  MSI_INT_STATUS			0x0
#define  MSI_INT_CLR			0x8
#define  MSI_INT_MASK_SET		0x10
#define  MSI_INT_MASK_CLR		0x14

#define PCIE_EXT_CFG_DATA				0x8000

#define PCIE_EXT_CFG_INDEX				0x9000
#define  PCIE_EXT_BUSNUM_SHIFT				20
#define  PCIE_EXT_SLOT_SHIFT				15
#define  PCIE_EXT_FUNC_SHIFT				12

#define  PCIE_RGR1_SW_INIT_1_PERST_MASK			0x1
#define  PCIE_RGR1_SW_INIT_1_PERST_SHIFT		0x0

#define RGR1_SW_INIT_1_INIT_GENERIC_MASK		0x2
#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT		0x1
#define RGR1_SW_INIT_1_INIT_7278_MASK			0x1
#define RGR1_SW_INIT_1_INIT_7278_SHIFT			0x0

/* PCIe parameters */
#define BRCM_NUM_PCIE_OUT_WINS		0x4
#define BRCM_INT_PCI_MSI_NR		32
#define BRCM_INT_PCI_MSI_LEGACY_NR	8
#define BRCM_INT_PCI_MSI_SHIFT		0

/* MSI target addresses */
#define BRCM_MSI_TARGET_ADDR_LT_4GB	0x0fffffffcULL
#define BRCM_MSI_TARGET_ADDR_GT_4GB	0xffffffffcULL

/* MDIO registers */
#define MDIO_PORT0			0x0
#define MDIO_DATA_MASK			0x7fffffff
#define MDIO_PORT_MASK			0xf0000
#define MDIO_REGAD_MASK			0xffff
#define MDIO_CMD_MASK			0xfff00000
#define MDIO_CMD_READ			0x1
#define MDIO_CMD_WRITE			0x0
#define MDIO_DATA_DONE_MASK		0x80000000
#define MDIO_RD_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
#define MDIO_WT_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
#define SSC_REGS_ADDR			0x1100
#define SET_ADDR_OFFSET			0x1f
#define SSC_CNTL_OFFSET			0x2
#define SSC_CNTL_OVRD_EN_MASK		0x8000
#define SSC_CNTL_OVRD_VAL_MASK		0x4000
#define SSC_STATUS_OFFSET		0x1
#define SSC_STATUS_SSC_MASK		0x400
#define SSC_STATUS_PLL_LOCK_MASK	0x800
#define PCIE_BRCM_MAX_MEMC		3

#define IDX_ADDR(pcie)			(pcie->reg_offsets[EXT_CFG_INDEX])
#define DATA_ADDR(pcie)			(pcie->reg_offsets[EXT_CFG_DATA])
#define PCIE_RGR1_SW_INIT_1(pcie)	(pcie->reg_offsets[RGR1_SW_INIT_1])

/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL				0xc700
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS			0x3
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK		0x4
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT	0x2
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK		0x2
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT		0x1
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK		0x1
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT		0x0

/* Forward declarations */
struct brcm_pcie;
static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);

enum {
	RGR1_SW_INIT_1,
	EXT_CFG_INDEX,
	EXT_CFG_DATA,
};

enum {
	RGR1_SW_INIT_1_INIT_MASK,
	RGR1_SW_INIT_1_INIT_SHIFT,
};

enum pcie_type {
	GENERIC,
	BCM7278,
	BCM2711,
};

struct pcie_cfg_data {
	const int *offsets;
	const enum pcie_type type;
	void (*perst_set)(struct brcm_pcie *pcie, u32 val);
	void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};

static const int pcie_offsets[] = {
	[RGR1_SW_INIT_1] = 0x9210,
	[EXT_CFG_INDEX]  = 0x9000,
	[EXT_CFG_DATA]   = 0x9004,
};

static const struct pcie_cfg_data generic_cfg = {
	.offsets	= pcie_offsets,
	.type		= GENERIC,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

static const int pcie_offset_bcm7278[] = {
	[RGR1_SW_INIT_1] = 0xc010,
	[EXT_CFG_INDEX] = 0x9000,
	[EXT_CFG_DATA] = 0x9004,
};

static const struct pcie_cfg_data bcm7278_cfg = {
	.offsets	= pcie_offset_bcm7278,
	.type		= BCM7278,
	.perst_set	= brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
};

static const struct pcie_cfg_data bcm2711_cfg = {
	.offsets	= pcie_offsets,
	.type		= BCM2711,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

struct brcm_msi {
	struct device		*dev;
	void __iomem		*base;
	struct device_node	*np;
	struct irq_domain	*msi_domain;
	struct irq_domain	*inner_domain;
	struct mutex		lock; /* guards the alloc/free operations */
	u64			target_addr;
	int			irq;
	/* used indicates which MSI interrupts have been alloc'd */
	unsigned long		used;
	bool			legacy;
	/* Some chips have MSIs in bits [31..24] of a shared register. */
	int			legacy_shift;
	int			nr; /* No. of MSI available, depends on chip */
	/* This is the base pointer for interrupt status/set/clr regs */
	void __iomem		*intr_base;
};

/* Internal PCIe Host Controller Information. */
struct brcm_pcie {
	struct device		*dev;
	void __iomem		*base;
	struct clk		*clk;
	struct device_node	*np;
	bool			ssc;
	int			gen;
	u64			msi_target_addr;
	struct brcm_msi		*msi;
	const int		*reg_offsets;
	enum pcie_type		type;
	struct reset_control	*rescal;
	int			num_memc;
	u64			memc_size[PCIE_BRCM_MAX_MEMC];
	u32			hw_rev;
	void			(*perst_set)(struct brcm_pcie *pcie, u32 val);
	void			(*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};

/*
 * This is to convert the size of the inbound "BAR" region to the
 * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
 */
static int brcm_pcie_encode_ibar_size(u64 size)
{
	int log2_in = ilog2(size);

	if (log2_in >= 12 && log2_in <= 15)
		/* Covers 4KB to 32KB (inclusive) */
		return (log2_in - 12) + 0x1c;
	else if (log2_in >= 16 && log2_in <= 35)
		/* Covers 64KB to 32GB, (inclusive) */
		return log2_in - 15;
	/* Something is awry so disable */
	return 0;
}

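/* Pack the MDIO port, register address and command into a single command word */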
static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
	u32 pkt = 0;

	pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
	pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
	pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);

	return pkt;
}

/* negative return value indicates error */
static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
{
	int tries;
	u32 data;

	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
		   base + PCIE_RC_DL_MDIO_ADDR);
	readl(base + PCIE_RC_DL_MDIO_ADDR);

	data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
	for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
		udelay(10);
		data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
	}

	*val = FIELD_GET(MDIO_DATA_MASK, data);
	return MDIO_RD_DONE(data) ? 0 : -EIO;
}

/* negative return value indicates error */
static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
				u8 regad, u16 wrdata)
{
	int tries;
	u32 data;

	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
		   base + PCIE_RC_DL_MDIO_ADDR);
	readl(base + PCIE_RC_DL_MDIO_ADDR);
	writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);

	data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
	for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
		udelay(10);
		data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
	}

	return MDIO_WT_DONE(data) ? 0 : -EIO;
}

/*
 * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
 * return value indicates error.
 */
static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
{
	int pll, ssc;
	int ret;
	u32 tmp;

	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
				   SSC_REGS_ADDR);
	if (ret < 0)
		return ret;

	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_CNTL_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
				   SSC_CNTL_OFFSET, tmp);
	if (ret < 0)
		return ret;

	usleep_range(1000, 2000);
	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_STATUS_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
	pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);

	return ssc && pll ? 0 : -EIO;
}

/* Limits operation to a specific generation (1, 2, or 3) */
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
	u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);

	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
	writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);

	lnkctl2 = (lnkctl2 & ~0xf) | gen;
	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}

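/*
 * Program outbound window 'win' so that CPU accesses in
 * [cpu_addr, cpu_addr + size) are forwarded to pcie_addr on the bus.
 */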
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
				       unsigned int win, u64 cpu_addr,
				       u64 pcie_addr, u64 size)
{
	u32 cpu_addr_mb_high, limit_addr_mb_high;
	phys_addr_t cpu_addr_mb, limit_addr_mb;
	int high_addr_shift;
	u32 tmp;

	/* Set the base of the pcie_addr window */
	writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
	writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));

	/* Write the addr base & limit lower bits (in MBs) */
	cpu_addr_mb = cpu_addr / SZ_1M;
	limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;

	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
	u32p_replace_bits(&tmp, cpu_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
	u32p_replace_bits(&tmp, limit_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));

	/* Write the cpu & limit addr upper bits */
	high_addr_shift =
		HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);

	cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
	u32p_replace_bits(&tmp, cpu_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));

	limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
	u32p_replace_bits(&tmp, limit_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}

static struct irq_chip brcm_msi_irq_chip = {
	.name            = "BRCM STB PCIe MSI",
	.irq_ack         = irq_chip_ack_parent,
	.irq_mask        = pci_msi_mask_irq,
	.irq_unmask      = pci_msi_unmask_irq,
};

static struct msi_domain_info brcm_msi_domain_info = {
	/* Multi MSI is supported by the controller, but not by this driver */
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.chip	= &brcm_msi_irq_chip,
};

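/*
 * Chained handler for the MSI interrupt: reads the status register and
 * dispatches each pending vector to its mapping in the inner domain.
 */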
static void brcm_pcie_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status, virq;
	struct brcm_msi *msi;
	struct device *dev;
	u32 bit;

	chained_irq_enter(chip, desc);
	msi = irq_desc_get_handler_data(desc);
	dev = msi->dev;

	status = readl(msi->intr_base + MSI_INT_STATUS);
	status >>= msi->legacy_shift;

	for_each_set_bit(bit, &status, msi->nr) {
		virq = irq_find_mapping(msi->inner_domain, bit);
		if (virq)
			generic_handle_irq(virq);
		else
			dev_dbg(dev, "unexpected MSI\n");
	}

	chained_irq_exit(chip, desc);
}

static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->target_addr);
	msg->address_hi = upper_32_bits(msi->target_addr);
	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}

static int brcm_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void brcm_msi_ack_irq(struct irq_data *data)
{
	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
	const int shift_amt = data->hwirq + msi->legacy_shift;

	writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
}

static struct irq_chip brcm_msi_bottom_irq_chip = {
	.name			= "BRCM STB MSI",
	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
	.irq_set_affinity	= brcm_msi_set_affinity,
	.irq_ack                = brcm_msi_ack_irq,
};

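/* Allocate a free MSI hwirq from the 'used' bitmap; a negative return means none are left */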
static int brcm_msi_alloc(struct brcm_msi *msi)
{
	int hwirq;

	mutex_lock(&msi->lock);
	hwirq = bitmap_find_free_region(&msi->used, msi->nr, 0);
	mutex_unlock(&msi->lock);

	return hwirq;
}

static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
{
	mutex_lock(&msi->lock);
	bitmap_release_region(&msi->used, hwirq, 0);
	mutex_unlock(&msi->lock);
}

static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct brcm_msi *msi = domain->host_data;
	int hwirq;

	hwirq = brcm_msi_alloc(msi);

	if (hwirq < 0)
		return hwirq;

	irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
			    &brcm_msi_bottom_irq_chip, domain->host_data,
			    handle_edge_irq, NULL, NULL);
	return 0;
}

static void brcm_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct brcm_msi *msi = irq_data_get_irq_chip_data(d);

	brcm_msi_free(msi, d->hwirq);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= brcm_irq_domain_alloc,
	.free	= brcm_irq_domain_free,
};

static int brcm_allocate_domains(struct brcm_msi *msi)
{
	struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
	struct device *dev = msi->dev;

	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
	if (!msi->inner_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &brcm_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void brcm_free_domains(struct brcm_msi *msi)
{
	irq_domain_remove(msi->msi_domain);
	irq_domain_remove(msi->inner_domain);
}

static void brcm_msi_remove(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi = pcie->msi;

	if (!msi)
		return;
	irq_set_chained_handler(msi->irq, NULL);
	irq_set_handler_data(msi->irq, NULL);
	brcm_free_domains(msi);
}

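/*
 * Program the MSI target address and data value and unmask all vectors.
 * Also used on resume to restore the MSI hardware state.
 */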
static void brcm_msi_set_regs(struct brcm_msi *msi)
{
	u32 val = __GENMASK(31, msi->legacy_shift);

	writel(val, msi->intr_base + MSI_INT_MASK_CLR);
	writel(val, msi->intr_base + MSI_INT_CLR);

	/*
	 * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI
	 * enable, which we set to 1.
	 */
	writel(lower_32_bits(msi->target_addr) | 0x1,
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
	writel(upper_32_bits(msi->target_addr),
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);

	val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
	writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
}

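/*
 * Set up the built-in MSI controller: map its interrupt, select the
 * legacy or v3.3+ register layout based on the hardware revision,
 * create the IRQ domains and program the MSI registers.
 */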
static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi;
	int irq, ret;
	struct device *dev = pcie->dev;

	irq = irq_of_parse_and_map(dev->of_node, 1);
	if (irq <= 0) {
		dev_err(dev, "cannot map MSI interrupt\n");
		return -ENODEV;
	}

	msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	mutex_init(&msi->lock);
	msi->dev = dev;
	msi->base = pcie->base;
	msi->np = pcie->np;
	msi->target_addr = pcie->msi_target_addr;
	msi->irq = irq;
	msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;

	if (msi->legacy) {
		msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
		msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
		msi->legacy_shift = 24;
	} else {
		msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
		msi->nr = BRCM_INT_PCI_MSI_NR;
		msi->legacy_shift = 0;
	}

	ret = brcm_allocate_domains(msi);
	if (ret)
		return ret;

	irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);

	brcm_msi_set_regs(msi);
	pcie->msi = msi;

	return 0;
}

/* The controller is capable of serving in both RC and EP roles */
static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	u32 val = readl(base + PCIE_MISC_PCIE_STATUS);

	return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
}

static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
{
	u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
	u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
	u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);

	return dla && plu;
}

/* Configuration space read/write support */
static inline int brcm_pcie_cfg_index(int busnr, int devfn, int reg)
{
	return ((PCI_SLOT(devfn) & 0x1f) << PCIE_EXT_SLOT_SHIFT)
		| ((PCI_FUNC(devfn) & 0x07) << PCIE_EXT_FUNC_SHIFT)
		| (busnr << PCIE_EXT_BUSNUM_SHIFT)
		| (reg & ~3);
}

static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
					int where)
{
	struct brcm_pcie *pcie = bus->sysdata;
	void __iomem *base = pcie->base;
	int idx;

	/* Accesses to the RC go right to the RC registers if slot==0 */
	if (pci_is_root_bus(bus))
		return PCI_SLOT(devfn) ? NULL : base + where;

	/* For devices, write to the config space index register */
	idx = brcm_pcie_cfg_index(bus->number, devfn, 0);
	writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
	return base + PCIE_EXT_CFG_DATA + where;
}

static struct pci_ops brcm_pcie_ops = {
	.map_bus = brcm_pcie_map_conf,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
	u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
	tmp = (tmp & ~mask) | ((val << shift) & mask);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
}

static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
	u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
	tmp = (tmp & ~mask) | ((val << shift) & mask);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
}

static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp;

	/* Perst bit has moved and assert value is 0 */
	tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
	writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
}

static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp;

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
}

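/*
 * Derive the size and PCIe-space offset of the inbound window (RC_BAR2)
 * from the bridge's dma-ranges and the optional "brcm,scb-sizes" property.
 */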
static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
							u64 *rc_bar2_size,
							u64 *rc_bar2_offset)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	struct resource_entry *entry;
	struct device *dev = pcie->dev;
	u64 lowest_pcie_addr = ~(u64)0;
	int ret, i = 0;
	u64 size = 0;

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		u64 pcie_beg = entry->res->start - entry->offset;

		size += entry->res->end - entry->res->start + 1;
		if (pcie_beg < lowest_pcie_addr)
			lowest_pcie_addr = pcie_beg;
	}

	if (lowest_pcie_addr == ~(u64)0) {
		dev_err(dev, "DT node has no dma-ranges\n");
		return -EINVAL;
	}

	ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
						  PCIE_BRCM_MAX_MEMC);

	if (ret <= 0) {
		/* Make an educated guess */
		pcie->num_memc = 1;
		pcie->memc_size[0] = 1ULL << fls64(size - 1);
	} else {
		pcie->num_memc = ret;
	}

	/* Each memc is viewed through a "port" that is a power of 2 */
	for (i = 0, size = 0; i < pcie->num_memc; i++)
		size += pcie->memc_size[i];

	/* System memory starts at this address in PCIe-space */
	*rc_bar2_offset = lowest_pcie_addr;
	/* The sum of all memc views must also be a power of 2 */
	*rc_bar2_size = 1ULL << fls64(size - 1);

	/*
	 * We validate the inbound memory view even though we should trust
	 * whatever the device-tree provides. This is because of an HW issue on
	 * early Raspberry Pi 4's revisions (bcm2711). It turns out its
	 * firmware has to dynamically edit dma-ranges due to a bug on the
	 * PCIe controller integration, which prohibits any access above the
	 * lower 3GB of memory. Given this, we decided to keep the dma-ranges
	 * in check, avoiding hard-to-debug device-tree related issues in the
	 * future:
	 *
	 * The PCIe host controller by design must set the inbound viewport to
	 * be a contiguous arrangement of all of the system's memory.  In
	 * addition, its size must be a power of two.  To further complicate
	 * matters, the viewport must start on a pcie-address that is aligned
	 * on a multiple of its size.  If a portion of the viewport does not
	 * represent system memory -- e.g. 3GB of memory requires a 4GB
	 * viewport -- we can map the outbound memory in or after 3GB and even
	 * though the viewport will overlap the outbound memory the controller
	 * will know to send outbound memory downstream and everything else
	 * upstream.
	 *
	 * For example:
	 *
	 * - The best-case scenario, memory up to 3GB, is to place the inbound
	 *   region in the first 4GB of pcie-space, as some legacy devices can
	 *   only address 32bits. We would also like to put the MSI under 4GB
	 *   as well, since some devices require a 32bit MSI target address.
	 *
	 * - If the system memory is 4GB or larger we cannot start the inbound
	 *   region at location 0 (since we have to allow some space for
	 *   outbound memory @ 3GB). So instead it will start at the 1x
	 *   multiple of its size.
	 */
	if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
	    (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
		dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
			*rc_bar2_size, *rc_bar2_offset);
		return -EINVAL;
	}

	return 0;
}

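/*
 * Bring up the bridge: reset it, enable the SerDes, program the inbound
 * and outbound windows, deassert PERST# and wait for the link to train.
 */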
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u64 rc_bar2_offset, rc_bar2_size;
	void __iomem *base = pcie->base;
	struct device *dev = pcie->dev;
	struct resource_entry *entry;
	bool ssc_good = false;
	struct resource *res;
	int num_out_wins = 0;
	u16 nlw, cls, lnksta;
	int i, ret, memc;
	u32 tmp, burst, aspm_support;

	/* Reset the bridge */
	pcie->bridge_sw_init_set(pcie, 1);
	usleep_range(100, 200);

	/* Take the bridge out of reset */
	pcie->bridge_sw_init_set(pcie, 0);

	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	/* Wait for SerDes to be stable */
	usleep_range(100, 200);

	/*
	 * SCB_MAX_BURST_SIZE is a two bit field.  For GENERIC chips it
	 * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
	 * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
	 */
	if (pcie->type == BCM2711)
		burst = 0x0; /* 128 bytes */
	else if (pcie->type == BCM7278)
		burst = 0x3; /* 512 bytes */
	else
		burst = 0x2; /* 512 bytes */

	/* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
	u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
						    &rc_bar2_offset);
	if (ret)
		return ret;

	tmp = lower_32_bits(rc_bar2_offset);
	u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
			  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
	writel(upper_32_bits(rc_bar2_offset),
	       base + PCIE_MISC_RC_BAR2_CONFIG_HI);

	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	for (memc = 0; memc < pcie->num_memc; memc++) {
		u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;

		if (memc == 0)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
		else if (memc == 1)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
		else if (memc == 2)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
	}
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	/*
	 * We ideally want the MSI target address to be located in the 32bit
	 * addressable memory area. Some devices might depend on it. This is
	 * possible either when the inbound window is located above the lower
	 * 4GB or when the inbound area is smaller than 4GB (taking into
	 * account the rounding-up we're forced to perform).
	 */
	if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
	else
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;

	/* disable the PCIe->GISB memory window (RC_BAR1) */
	tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);

	/* disable the PCIe->SCB memory window (RC_BAR3) */
	tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);

	if (pcie->gen)
		brcm_pcie_set_gen(pcie, pcie->gen);

	/* Unassert the fundamental reset */
	pcie->perst_set(pcie, 0);

	/*
	 * Give the RC/EP time to wake up, before trying to configure RC.
	 * Intermittently check status for link-up, up to a total of 100ms.
	 */
	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
		msleep(5);

	if (!brcm_pcie_link_up(pcie)) {
		dev_err(dev, "link down\n");
		return -ENODEV;
	}

	if (!brcm_pcie_rc_mode(pcie)) {
		dev_err(dev, "PCIe misconfigured; is in EP mode\n");
		return -EINVAL;
	}

	resource_list_for_each_entry(entry, &bridge->windows) {
		res = entry->res;

		if (resource_type(res) != IORESOURCE_MEM)
			continue;

		if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
			dev_err(pcie->dev, "too many outbound wins\n");
			return -EINVAL;
		}

		brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
					   res->start - entry->offset,
					   resource_size(res));
		num_out_wins++;
	}

	/* Don't advertise L0s capability if 'aspm-no-l0s' */
	aspm_support = PCIE_LINK_STATE_L1;
	if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
		aspm_support |= PCIE_LINK_STATE_L0S;
	tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
	u32p_replace_bits(&tmp, aspm_support,
		PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	/*
	 * For config space accesses on the RC, show the right class for
	 * a PCIe-PCIe bridge (the default setting is to be EP mode).
	 */
	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
	u32p_replace_bits(&tmp, 0x060400,
			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);

	if (pcie->ssc) {
		ret = brcm_pcie_set_ssc(pcie);
		if (ret == 0)
			ssc_good = true;
		else
			dev_err(dev, "failed attempt to enter ssc mode\n");
	}

	lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
	cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
	nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	dev_info(dev, "link up, %s x%u %s\n",
		 pci_speed_string(pcie_link_speed[cls]), nlw,
		 ssc_good ? "(SSC)" : "(!SSC)");

	/* PCIe->SCB endian mode for BAR */
	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
		PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);

	/*
	 * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
	 * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
	 */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	return 0;
}

/* L23 is a low-power PCIe link state */
static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	int l23, i;
	u32 tmp;

	/* Assert request for L23 */
	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
	writel(tmp, base + PCIE_MISC_PCIE_CTRL);

	/* Wait up to 36 msec for L23 */
	tmp = readl(base + PCIE_MISC_PCIE_STATUS);
	l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
	for (i = 0; i < 15 && !l23; i++) {
		usleep_range(2000, 2400);
		tmp = readl(base + PCIE_MISC_PCIE_STATUS);
		l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
				tmp);
	}

	if (!l23)
		dev_err(pcie->dev, "failed to enter low-power link state\n");
}

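/*
 * Sequence the PWRDN/RESET/DIG_RESET fields of the rescal PHY control
 * register: they are set one at a time for 'start' and cleared in reverse
 * order for 'stop', then the final state is verified.
 */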
static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
{
	static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,};
	static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,};
	const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
	const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
	u32 tmp, combined_mask = 0;
	u32 val;
	void __iomem *base = pcie->base;
	int i, ret;

	for (i = beg; i != end; start ? i++ : i--) {
		val = start ? BIT_MASK(shifts[i]) : 0;
		tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		tmp = (tmp & ~masks[i]) | (val & masks[i]);
		writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		usleep_range(50, 200);
		combined_mask |= masks[i];
	}

	tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
	val = start ? combined_mask : 0;

	ret = (tmp & combined_mask) == val ? 0 : -EIO;
	if (ret)
		dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));

	return ret;
}

static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
	return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
}

static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
	return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
}

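/*
 * Quiesce the controller: enter L23 if the link is up, assert the
 * fundamental reset, power down the SerDes and hold the bridge in reset.
 */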
static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	int tmp;

	if (brcm_pcie_link_up(pcie))
		brcm_pcie_enter_l23(pcie);
	/* Assert fundamental reset */
	pcie->perst_set(pcie, 1);

	/* Deassert request for L23 in case it was asserted */
	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
	writel(tmp, base + PCIE_MISC_PCIE_CTRL);

	/* Turn off SerDes */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	/* Shutdown PCIe bridge */
	pcie->bridge_sw_init_set(pcie, 1);
}

static int brcm_pcie_suspend(struct device *dev)
{
	struct brcm_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	brcm_pcie_turn_off(pcie);
	ret = brcm_phy_stop(pcie);
	clk_disable_unprepare(pcie->clk);

	return ret;
}

static int brcm_pcie_resume(struct device *dev)
{
	struct brcm_pcie *pcie = dev_get_drvdata(dev);
	void __iomem *base;
	u32 tmp;
	int ret;

	base = pcie->base;
	clk_prepare_enable(pcie->clk);

	ret = brcm_phy_start(pcie);
	if (ret)
		goto err;

	/* Take bridge out of reset so we can access the SERDES reg */
	pcie->bridge_sw_init_set(pcie, 0);

	/* SERDES_IDDQ = 0 */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	/* wait for serdes to be stable */
	udelay(100);

	ret = brcm_pcie_setup(pcie);
	if (ret)
		goto err;

	if (pcie->msi)
		brcm_msi_set_regs(pcie->msi);

	return 0;

err:
	clk_disable_unprepare(pcie->clk);
	return ret;
}

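/* Common teardown shared by the remove path and probe error handling */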
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
	brcm_msi_remove(pcie);
	brcm_pcie_turn_off(pcie);
	brcm_phy_stop(pcie);
	reset_control_assert(pcie->rescal);
	clk_disable_unprepare(pcie->clk);
}

static int brcm_pcie_remove(struct platform_device *pdev)
{
	struct brcm_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	__brcm_pcie_remove(pcie);

	return 0;
}

static const struct of_device_id brcm_pcie_match[] = {
	{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
	{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
	{},
};

static int brcm_pcie_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node, *msi_np;
	struct pci_host_bridge *bridge;
	const struct pcie_cfg_data *data;
	struct brcm_pcie *pcie;
	int ret;

	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	data = of_device_get_match_data(&pdev->dev);
	if (!data) {
		pr_err("failed to look up compatible string\n");
		return -EINVAL;
	}

	pcie = pci_host_bridge_priv(bridge);
	pcie->dev = &pdev->dev;
	pcie->np = np;
	pcie->reg_offsets = data->offsets;
	pcie->type = data->type;
	pcie->perst_set = data->perst_set;
	pcie->bridge_sw_init_set = data->bridge_sw_init_set;

	pcie->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
	if (IS_ERR(pcie->clk))
		return PTR_ERR(pcie->clk);

	ret = of_pci_get_max_link_speed(np);
	pcie->gen = (ret < 0) ? 0 : ret;

	pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");

	ret = clk_prepare_enable(pcie->clk);
	if (ret) {
		dev_err(&pdev->dev, "could not enable clock\n");
		return ret;
	}
	pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
	if (IS_ERR(pcie->rescal)) {
		clk_disable_unprepare(pcie->clk);
		return PTR_ERR(pcie->rescal);
	}

	ret = reset_control_deassert(pcie->rescal);
	if (ret)
		dev_err(&pdev->dev, "failed to deassert 'rescal'\n");

	ret = brcm_phy_start(pcie);
	if (ret) {
		reset_control_assert(pcie->rescal);
		clk_disable_unprepare(pcie->clk);
		return ret;
	}

	ret = brcm_pcie_setup(pcie);
	if (ret)
		goto fail;

	pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);

	msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
	if (pci_msi_enabled() && msi_np == pcie->np) {
		ret = brcm_pcie_enable_msi(pcie);
		if (ret) {
			dev_err(pcie->dev, "probe of internal MSI failed\n");
			goto fail;
		}
	}

	bridge->ops = &brcm_pcie_ops;
	bridge->sysdata = pcie;

	platform_set_drvdata(pdev, pcie);

	return pci_host_probe(bridge);
fail:
	__brcm_pcie_remove(pcie);
	return ret;
}

MODULE_DEVICE_TABLE(of, brcm_pcie_match);

static const struct dev_pm_ops brcm_pcie_pm_ops = {
	.suspend = brcm_pcie_suspend,
	.resume = brcm_pcie_resume,
};

static struct platform_driver brcm_pcie_driver = {
	.probe = brcm_pcie_probe,
	.remove = brcm_pcie_remove,
	.driver = {
		.name = "brcm-pcie",
		.of_match_table = brcm_pcie_match,
		.pm = &brcm_pcie_pm_ops,
	},
};
module_platform_driver(brcm_pcie_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
