// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

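/*
 * Slot Capabilities value written to PCI_EXP_SLTCAP by the 2.3.3 init
 * sequence; the raw value is taken from vendor initialization code and its
 * individual fields are not broken out here.
 */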
#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000
/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS		0x50
#define PHY_RX0_EQ(x)				((x) << 24)

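/* Slave address space size programmed by the 2.3.3 init: 0x10000000 = 256 MiB */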
#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

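/*
 * Each qcom_pcie_resources_* struct below bundles the clocks, resets and
 * regulators needed by one revision of the Qualcomm PCIe IP. The matching
 * qcom_pcie_ops table is selected at probe time via the compatible string
 * (see qcom_pcie_match[] at the bottom of this file).
 */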
#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[6];
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};

struct qcom_pcie;

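/*
 * Per-revision callbacks, invoked from probe and qcom_pcie_host_init():
 * get_resources() runs once at probe; init() runs before the PHY is powered
 * on and post_init() after it; deinit()/post_deinit() undo them on failure;
 * ltssm_enable() starts link training once PERST# has been released.
 */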
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

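/*
 * PERST# is active low. The GPIO is requested with GPIOD_OUT_HIGH in probe,
 * i.e. reset asserted, so writing 1 here asserts reset and 0 releases it
 * (gpiolib applies any active-low inversion described in the devicetree).
 */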
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* reset the PCIe interface as U-Boot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

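/*
 * For the 2.3.2 and later IP revisions, link training is started via the
 * LTSSM enable bit (BIT(8)) in the PARF LTSSM register rather than via the
 * ELBI register used by the 2.1.0/1.0.0 paths above.
 */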
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

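	/*
	 * The next three writes follow the vendor init sequence; per later
	 * upstream naming (an assumption here, these fields are not defined
	 * in this file): SYS_CTRL bit 29 is the MAC PHY_POWERDOWN P2 mux
	 * enable, MHI_CLOCK_RESET_CTRL bit 4 is BYPASS, and
	 * AXI_MSTR_WR_ADDR_HALT_V2 bit 31 is its enable bit. The same
	 * sequence recurs in the 2.4.0 and 2.7.0 init paths below.
	 */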
	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure; the original error is returned in
	 * 'ret' regardless.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "cfg";
	res->clks[2].id = "bus_master";
	res->clks[3].id = "bus_slave";
	res->clks[4].id = "slave_q2a";
	res->clks[5].id = "tbu";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	return clk_prepare_enable(res->pipe_clk);
}

static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_disable_unprepare(res->pipe_clk);
}

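/* The link is up once the Data Link Layer Link Active flag is set in LNKSTA */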
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

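/*
 * Bring-up sequence: hold the endpoint in PERST#, run the per-revision init,
 * power on the PHY (plus post_init), program the DesignWare RC and MSI
 * registers, release PERST#, then enable the LTSSM and wait for link up.
 */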
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);
	dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

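/*
 * A minimal, illustrative devicetree fragment for this driver. The resource
 * names ("parf"/"dbi"/"elbi", "msi", "pciephy", "perst") are the ones
 * requested in probe below; the addresses and interrupt number are
 * placeholders, and the required clocks, resets and supplies depend on the
 * per-SoC binding:
 *
 *	pcie@fc520000 {
 *		compatible = "qcom,pcie-apq8084";
 *		reg = <0xfc520000 0x2000>,
 *		      <0xff000000 0x1000>,
 *		      <0xff001000 0x1000>;
 *		reg-names = "parf", "dbi", "elbi";
 *		interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
 *		interrupt-names = "msi";
 *		phys = <&pciephy>;
 *		phy-names = "pciephy";
 *		perst-gpios = <&tlmm 70 GPIO_ACTIVE_LOW>;
 *		... (clocks, resets and *-supply properties omitted)
 *	};
 */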
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ }
};

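/*
 * The root ports on these SoCs advertise a device class other than
 * PCI-to-PCI bridge, so the PCI core would not treat them as bridges and
 * scan behind them; force the bridge class for the device IDs below.
 */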
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);