// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "phy-qcom-qmp.h"

/* QPHY_SW_RESET bit */
#define SW_RESET				BIT(0)
/* QPHY_POWER_DOWN_CONTROL */
#define SW_PWRDN				BIT(0)
#define REFCLK_DRV_DSBL				BIT(1)
/* QPHY_START_CONTROL bits */
#define SERDES_START				BIT(0)
#define PCS_START				BIT(1)
#define PLL_READY_GATE_EN			BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS				BIT(6)
/* QPHY_COM_PCS_READY_STATUS bit */
#define PCS_READY				BIT(0)

#define PHY_INIT_COMPLETE_TIMEOUT		10000
#define POWER_DOWN_DELAY_US_MIN			10
#define POWER_DOWN_DELAY_US_MAX			20

struct qmp_phy_init_tbl {
	unsigned int offset;
	unsigned int val;
	/*
	 * mask of lanes for which this register is written
	 * for cases when second lane needs different values
	 */
	u8 lane_mask;
};

#define QMP_PHY_INIT_CFG(o, v)		\
	{				\
		.offset = o,		\
		.val = v,		\
		.lane_mask = 0xff,	\
	}

#define QMP_PHY_INIT_CFG_LANE(o, v, l)	\
	{				\
		.offset = o,		\
		.val = v,		\
		.lane_mask = l,		\
	}

/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
	/* Common block control registers */
	QPHY_COM_SW_RESET,
	QPHY_COM_POWER_DOWN_CONTROL,
	QPHY_COM_START_CONTROL,
	QPHY_COM_PCS_READY_STATUS,
	/* PCS registers */
	QPHY_SW_RESET,
	QPHY_START_CTRL,
	QPHY_PCS_STATUS,
	/* Keep last to ensure regs_layout arrays are properly initialized */
	QPHY_LAYOUT_SIZE
};

static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
	[QPHY_COM_SW_RESET]		= 0x400,
	[QPHY_COM_POWER_DOWN_CONTROL]	= 0x404,
	[QPHY_COM_START_CONTROL]	= 0x408,
	[QPHY_COM_PCS_READY_STATUS]	= 0x448,
	[QPHY_SW_RESET]			= QPHY_V2_PCS_SW_RESET,
	[QPHY_START_CTRL]		= QPHY_V2_PCS_START_CONTROL,
	[QPHY_PCS_STATUS]		= QPHY_V2_PCS_PCI_PCS_STATUS,
};

static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
	QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
	QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42),
	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
	QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
	QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09),
	QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a),
	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
	QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
	QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
	QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04),
	QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
	QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
	QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
	QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
};

static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = {
	QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
	QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
};

static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = {
	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
	QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18),
	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04),
	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19),
};

static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_IDLE_DTCT_CNTRL, 0x4c),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),

	QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME, 0x05),

	QMP_PHY_INIT_CFG(QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE, 0x05),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_DOWN_CONTROL, 0x02),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG4, 0x00),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG1, 0xa3),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
};

/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
	/* number of PHYs provided by this block */
	int num_phys;

	/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
	const struct qmp_phy_init_tbl *serdes_tbl;
	int serdes_tbl_num;
	const struct qmp_phy_init_tbl *tx_tbl;
	int tx_tbl_num;
	const struct qmp_phy_init_tbl *rx_tbl;
	int rx_tbl_num;
	const struct qmp_phy_init_tbl *pcs_tbl;
	int pcs_tbl_num;

	/* clock ids to be requested */
	const char * const *clk_list;
	int num_clks;
	/* resets to be requested */
	const char * const *reset_list;
	int num_resets;
	/* regulators to be requested */
	const char * const *vreg_list;
	int num_vregs;

	/* array of registers with different offsets */
	const unsigned int *regs;
};

/**
 * struct qmp_phy - per-lane phy descriptor
 *
 * @phy: generic phy
 * @cfg: phy specific configuration
 * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
 * @tx: iomapped memory space for lane's tx
 * @rx: iomapped memory space for lane's rx
 * @pcs: iomapped memory space for lane's pcs
 * @pipe_clk: pipe clock
 * @index: lane index
 * @qmp: QMP phy to which this lane belongs
 * @lane_rst: lane's reset controller
 */
struct qmp_phy {
	struct phy *phy;
	const struct qmp_phy_cfg *cfg;
	void __iomem *serdes;
	void __iomem *tx;
	void __iomem *rx;
	void __iomem *pcs;
	struct clk *pipe_clk;
	unsigned int index;
	struct qcom_qmp *qmp;
	struct reset_control *lane_rst;
};

/**
 * struct qcom_qmp - structure holding QMP phy block attributes
 *
 * @dev: device
 *
 * @clks: array of clocks required by phy
 * @resets: array of resets required by phy
 * @vregs: regulator supplies bulk data
 *
 * @phys: array of per-lane phy descriptors
 * @phy_mutex: mutex lock for PHY common block initialization
 * @init_count: phy common block initialization count
 */
struct qcom_qmp {
	struct device *dev;

	struct clk_bulk_data *clks;
	struct reset_control_bulk_data *resets;
	struct regulator_bulk_data *vregs;

	struct qmp_phy **phys;

	struct mutex phy_mutex;
	int init_count;
};

static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
{
	u32 reg;

	reg = readl(base + offset);
	reg |= val;
	writel(reg, base + offset);

	/* ensure that the above write goes through */
	readl(base + offset);
}

static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
{
	u32 reg;

	reg = readl(base + offset);
	reg &= ~val;
	writel(reg, base + offset);

	/* ensure that the above write goes through */
	readl(base + offset);
}

/* list of clocks required by phy */
static const char * const msm8996_phy_clk_l[] = {
	"aux", "cfg_ahb", "ref",
};

/* list of resets */
static const char * const msm8996_pciephy_reset_l[] = {
	"phy", "common", "cfg",
};

/* list of regulators */
static const char * const qmp_phy_vreg_l[] = {
	"vdda-phy", "vdda-pll",
};

static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
	.num_phys		= 3,

	.serdes_tbl		= msm8996_pcie_serdes_tbl,
	.serdes_tbl_num		= ARRAY_SIZE(msm8996_pcie_serdes_tbl),
	.tx_tbl			= msm8996_pcie_tx_tbl,
	.tx_tbl_num		= ARRAY_SIZE(msm8996_pcie_tx_tbl),
	.rx_tbl			= msm8996_pcie_rx_tbl,
	.rx_tbl_num		= ARRAY_SIZE(msm8996_pcie_rx_tbl),
	.pcs_tbl		= msm8996_pcie_pcs_tbl,
	.pcs_tbl_num		= ARRAY_SIZE(msm8996_pcie_pcs_tbl),
	.clk_list		= msm8996_phy_clk_l,
	.num_clks		= ARRAY_SIZE(msm8996_phy_clk_l),
	.reset_list		= msm8996_pciephy_reset_l,
	.num_resets		= ARRAY_SIZE(msm8996_pciephy_reset_l),
	.vreg_list		= qmp_phy_vreg_l,
	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
	.regs			= pciephy_regs_layout,
};

static void qmp_pcie_msm8996_configure_lane(void __iomem *base,
					const struct qmp_phy_init_tbl tbl[],
					int num,
					u8 lane_mask)
{
	int i;
	const struct qmp_phy_init_tbl *t = tbl;

	if (!t)
		return;

	for (i = 0; i < num; i++, t++) {
		if (!(t->lane_mask & lane_mask))
			continue;

		writel(t->val, base + t->offset);
	}
}

static void qmp_pcie_msm8996_configure(void __iomem *base,
				   const struct qmp_phy_init_tbl tbl[],
				   int num)
{
	qmp_pcie_msm8996_configure_lane(base, tbl, num, 0xff);
}

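/*
 * Program the common SERDES/PLL block from the serdes table, release its
 * software reset and poll QPHY_COM_PCS_READY_STATUS until PCS_READY is set.
 */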
static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
{
	struct qcom_qmp *qmp = qphy->qmp;
	const struct qmp_phy_cfg *cfg = qphy->cfg;
	void __iomem *serdes = qphy->serdes;
	const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
	int serdes_tbl_num = cfg->serdes_tbl_num;
	void __iomem *status;
	unsigned int val;
	int ret;

	qmp_pcie_msm8996_configure(serdes, serdes_tbl, serdes_tbl_num);

	qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
	qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
		     SERDES_START | PCS_START);

	status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
	ret = readl_poll_timeout(status, val, (val & PCS_READY), 200,
				 PHY_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		dev_err(qmp->dev,
			"phy common block init timed-out\n");
		return ret;
	}

	return 0;
}

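/*
 * Bring up the resources shared by all lanes (regulators, resets, clocks)
 * and power on the common block. Reference-counted so that only the first
 * lane performs the initialization.
 */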
static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy)
{
	struct qcom_qmp *qmp = qphy->qmp;
	const struct qmp_phy_cfg *cfg = qphy->cfg;
	void __iomem *serdes = qphy->serdes;
	int ret;

	mutex_lock(&qmp->phy_mutex);
	if (qmp->init_count++) {
		mutex_unlock(&qmp->phy_mutex);
		return 0;
	}

	ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
	if (ret) {
		dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
		goto err_decrement_count;
	}

	ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
	if (ret) {
		dev_err(qmp->dev, "reset assert failed\n");
		goto err_disable_regulators;
	}

	ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
	if (ret) {
		dev_err(qmp->dev, "reset deassert failed\n");
		goto err_disable_regulators;
	}

	ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
	if (ret)
		goto err_assert_reset;

	qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
		     SW_PWRDN);

	mutex_unlock(&qmp->phy_mutex);

	return 0;

err_assert_reset:
	reset_control_bulk_assert(cfg->num_resets, qmp->resets);
err_disable_regulators:
	regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
err_decrement_count:
	qmp->init_count--;
	mutex_unlock(&qmp->phy_mutex);

	return ret;
}

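/*
 * Tear down the common block once the last initialized lane exits; the
 * reference count mirrors qmp_pcie_msm8996_com_init().
 */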
static int qmp_pcie_msm8996_com_exit(struct qmp_phy *qphy)
{
	struct qcom_qmp *qmp = qphy->qmp;
	const struct qmp_phy_cfg *cfg = qphy->cfg;
	void __iomem *serdes = qphy->serdes;

	mutex_lock(&qmp->phy_mutex);
	if (--qmp->init_count) {
		mutex_unlock(&qmp->phy_mutex);
		return 0;
	}

	qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
		     SERDES_START | PCS_START);
	qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
		     SW_RESET);
	qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
		     SW_PWRDN);

	reset_control_bulk_assert(cfg->num_resets, qmp->resets);

	clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);

	regulator_bulk_disable(cfg->num_vregs, qmp->vregs);

	mutex_unlock(&qmp->phy_mutex);

	return 0;
}

static int qmp_pcie_msm8996_init(struct phy *phy)
{
	struct qmp_phy *qphy = phy_get_drvdata(phy);
	struct qcom_qmp *qmp = qphy->qmp;
	int ret;

	dev_vdbg(qmp->dev, "Initializing QMP phy\n");

	ret = qmp_pcie_msm8996_com_init(qphy);
	if (ret)
		return ret;

	return 0;
}

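/*
 * Per-lane power-on: start the common SERDES, release the lane reset,
 * enable the pipe clock, program the Tx/Rx/PCS tables and then poll the
 * PCS status until the PHY reports ready.
 */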
static int qmp_pcie_msm8996_power_on(struct phy *phy)
{
	struct qmp_phy *qphy = phy_get_drvdata(phy);
	struct qcom_qmp *qmp = qphy->qmp;
	const struct qmp_phy_cfg *cfg = qphy->cfg;
	void __iomem *tx = qphy->tx;
	void __iomem *rx = qphy->rx;
	void __iomem *pcs = qphy->pcs;
	void __iomem *status;
	unsigned int val;
	int ret;

	qmp_pcie_msm8996_serdes_init(qphy);

	ret = reset_control_deassert(qphy->lane_rst);
	if (ret) {
		dev_err(qmp->dev, "lane%d reset deassert failed\n",
			qphy->index);
		return ret;
	}

	ret = clk_prepare_enable(qphy->pipe_clk);
	if (ret) {
		dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
		goto err_reset_lane;
	}

	/* Tx, Rx, and PCS configurations */
	qmp_pcie_msm8996_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
	qmp_pcie_msm8996_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
	qmp_pcie_msm8996_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);

	/*
	 * Pull the PHY out of the POWER DOWN state.
	 * The power-down control is active low, so setting these bits
	 * powers the PHY up.
	 */
	qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
			SW_PWRDN | REFCLK_DRV_DSBL);

	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);

	/* Pull PHY out of reset state */
	qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);

	/* start SerDes and Phy-Coding-Sublayer */
	qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL],
			PCS_START | PLL_READY_GATE_EN);

	status = pcs + cfg->regs[QPHY_PCS_STATUS];
	ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 200,
				 PHY_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		dev_err(qmp->dev, "phy initialization timed-out\n");
		goto err_disable_pipe_clk;
	}

	return 0;

err_disable_pipe_clk:
	clk_disable_unprepare(qphy->pipe_clk);
err_reset_lane:
	reset_control_assert(qphy->lane_rst);

	return ret;
}

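/* Per-lane power-down: reverse of qmp_pcie_msm8996_power_on(). */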
static int qmp_pcie_msm8996_power_off(struct phy *phy)
{
	struct qmp_phy *qphy = phy_get_drvdata(phy);
	const struct qmp_phy_cfg *cfg = qphy->cfg;

	clk_disable_unprepare(qphy->pipe_clk);

	/* PHY reset */
	qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);

	/* stop SerDes and Phy-Coding-Sublayer */
	qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL],
			SERDES_START | PCS_START);

	/* Put PHY into POWER DOWN state: active low */
	qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
			SW_PWRDN | REFCLK_DRV_DSBL);

	return 0;
}

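/* Per-lane exit: assert the lane reset and release the common block. */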
static int qmp_pcie_msm8996_exit(struct phy *phy)
{
	struct qmp_phy *qphy = phy_get_drvdata(phy);

	reset_control_assert(qphy->lane_rst);

	qmp_pcie_msm8996_com_exit(qphy);

	return 0;
}

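/*
 * The enable/disable helpers below fold the common-block init/exit together
 * with the per-lane power sequencing; they are exposed through the generic
 * power_on/power_off callbacks in qmp_pcie_msm8996_ops.
 */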
static int qmp_pcie_msm8996_enable(struct phy *phy)
{
	int ret;

	ret = qmp_pcie_msm8996_init(phy);
	if (ret)
		return ret;

	ret = qmp_pcie_msm8996_power_on(phy);
	if (ret)
		qmp_pcie_msm8996_exit(phy);

	return ret;
}

static int qmp_pcie_msm8996_disable(struct phy *phy)
{
	int ret;

	ret = qmp_pcie_msm8996_power_off(phy);
	if (ret)
		return ret;
	return qmp_pcie_msm8996_exit(phy);
}

static int qmp_pcie_msm8996_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
	struct qcom_qmp *qmp = dev_get_drvdata(dev);
	int num = cfg->num_vregs;
	int i;

	qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
	if (!qmp->vregs)
		return -ENOMEM;

	for (i = 0; i < num; i++)
		qmp->vregs[i].supply = cfg->vreg_list[i];

	return devm_regulator_bulk_get(dev, num, qmp->vregs);
}

static int qmp_pcie_msm8996_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
	struct qcom_qmp *qmp = dev_get_drvdata(dev);
	int i;
	int ret;

	qmp->resets = devm_kcalloc(dev, cfg->num_resets,
				   sizeof(*qmp->resets), GFP_KERNEL);
	if (!qmp->resets)
		return -ENOMEM;

	for (i = 0; i < cfg->num_resets; i++)
		qmp->resets[i].id = cfg->reset_list[i];

	ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get resets\n");

	return 0;
}

static int qmp_pcie_msm8996_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
	struct qcom_qmp *qmp = dev_get_drvdata(dev);
	int num = cfg->num_clks;
	int i;

	qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
	if (!qmp->clks)
		return -ENOMEM;

	for (i = 0; i < num; i++)
		qmp->clks[i].id = cfg->clk_list[i];

	return devm_clk_bulk_get(dev, num, qmp->clks);
}

static void phy_clk_release_provider(void *res)
{
	of_clk_del_provider(res);
}

/*
 * Register a fixed rate pipe clock.
 *
 * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
 * it. The <s>_pipe_clk coming out of the GCC is requested by the PHY
 * driver for its operations.
 * We register the <s>_pipe_clksrc here. The GCC driver takes care of
 * assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
 * The picture below shows this relationship.
 *
 *         +---------------+
 *         |   PHY block   |<<---------------------------------------+
 *         |               |                                         |
 *         |   +-------+   |                   +-----+               |
 *   I/P---^-->|  PLL  |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
 *    clk  |   +-------+   |                   +-----+
 *         +---------------+
 */
static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
{
	struct clk_fixed_rate *fixed;
	struct clk_init_data init = { };
	int ret;

	ret = of_property_read_string(np, "clock-output-names", &init.name);
	if (ret) {
		dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
		return ret;
	}

	fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
	if (!fixed)
		return -ENOMEM;

	init.ops = &clk_fixed_rate_ops;

	/* controllers using QMP phys use 125MHz pipe clock interface */
	fixed->fixed_rate = 125000000;
	fixed->hw.init = &init;

	ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
	if (ret)
		return ret;

	ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
	if (ret)
		return ret;

	/*
	 * Roll a devm action because the clock provider is the child node, but
	 * the child node is not actually a device.
	 */
	return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}

static const struct phy_ops qmp_pcie_msm8996_ops = {
	.power_on	= qmp_pcie_msm8996_enable,
	.power_off	= qmp_pcie_msm8996_disable,
	.owner		= THIS_MODULE,
};

static void qcom_qmp_reset_control_put(void *data)
{
	reset_control_put(data);
}

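/*
 * Allocate and register one per-lane PHY described by a child node: map its
 * tx/rx/pcs regions and take the per-lane pipe clock and reset.
 */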
static int qmp_pcie_msm8996_create(struct device *dev, struct device_node *np, int id,
			void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
	struct qcom_qmp *qmp = dev_get_drvdata(dev);
	struct phy *generic_phy;
	struct qmp_phy *qphy;
	int ret;

	qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
	if (!qphy)
		return -ENOMEM;

	qphy->cfg = cfg;
	qphy->serdes = serdes;
	/*
	 * Get memory resources for each PHY:
	 * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
	 */
	qphy->tx = devm_of_iomap(dev, np, 0, NULL);
	if (IS_ERR(qphy->tx))
		return PTR_ERR(qphy->tx);

	qphy->rx = devm_of_iomap(dev, np, 1, NULL);
	if (IS_ERR(qphy->rx))
		return PTR_ERR(qphy->rx);

	qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
	if (IS_ERR(qphy->pcs))
		return PTR_ERR(qphy->pcs);

	qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
	if (IS_ERR(qphy->pipe_clk)) {
		return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
				     "failed to get lane%d pipe clock\n", id);
	}

	qphy->lane_rst = of_reset_control_get_exclusive_by_index(np, 0);
	if (IS_ERR(qphy->lane_rst)) {
		dev_err(dev, "failed to get lane%d reset\n", id);
		return PTR_ERR(qphy->lane_rst);
	}
	ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
				       qphy->lane_rst);
	if (ret)
		return ret;

	generic_phy = devm_phy_create(dev, np, &qmp_pcie_msm8996_ops);
	if (IS_ERR(generic_phy)) {
		ret = PTR_ERR(generic_phy);
		dev_err(dev, "failed to create qphy %d\n", ret);
		return ret;
	}

	qphy->phy = generic_phy;
	qphy->index = id;
	qphy->qmp = qmp;
	qmp->phys[id] = qphy;
	phy_set_drvdata(generic_phy, qphy);

	return 0;
}

static const struct of_device_id qmp_pcie_msm8996_of_match_table[] = {
	{
		.compatible = "qcom,msm8996-qmp-pcie-phy",
		.data = &msm8996_pciephy_cfg,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, qmp_pcie_msm8996_of_match_table);

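/*
 * Probe: map the common serdes region, acquire the shared clocks, resets and
 * regulators, then create one PHY and one pipe clock provider per child node.
 */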
static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
{
	struct qcom_qmp *qmp;
	struct device *dev = &pdev->dev;
	struct device_node *child;
	struct phy_provider *phy_provider;
	void __iomem *serdes;
	const struct qmp_phy_cfg *cfg = NULL;
	int num, id, expected_phys;
	int ret;

	qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
	if (!qmp)
		return -ENOMEM;

	qmp->dev = dev;
	dev_set_drvdata(dev, qmp);

	cfg = of_device_get_match_data(dev);
	if (!cfg)
		return -EINVAL;

	serdes = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(serdes))
		return PTR_ERR(serdes);

	expected_phys = cfg->num_phys;

	mutex_init(&qmp->phy_mutex);

	ret = qmp_pcie_msm8996_clk_init(dev, cfg);
	if (ret)
		return ret;

	ret = qmp_pcie_msm8996_reset_init(dev, cfg);
	if (ret)
		return ret;

	ret = qmp_pcie_msm8996_vreg_init(dev, cfg);
	if (ret)
		return ret;

	num = of_get_available_child_count(dev->of_node);
	/* do we have a rogue child node? */
	if (num > expected_phys)
		return -EINVAL;

	qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
	if (!qmp->phys)
		return -ENOMEM;

	id = 0;
	for_each_available_child_of_node(dev->of_node, child) {
		/* Create per-lane phy */
		ret = qmp_pcie_msm8996_create(dev, child, id, serdes, cfg);
		if (ret) {
			dev_err(dev, "failed to create lane%d phy, %d\n",
				id, ret);
			goto err_node_put;
		}

		/*
		 * Register the pipe clock provided by the phy.
		 * See phy_pipe_clk_register() for details of this pipe clock.
		 */
		ret = phy_pipe_clk_register(qmp, child);
		if (ret) {
			dev_err(qmp->dev,
				"failed to register pipe clock source\n");
			goto err_node_put;
		}

		id++;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);

	return PTR_ERR_OR_ZERO(phy_provider);

err_node_put:
	of_node_put(child);
	return ret;
}

static struct platform_driver qmp_pcie_msm8996_driver = {
	.probe		= qmp_pcie_msm8996_probe,
	.driver = {
		.name	= "qcom-qmp-msm8996-pcie-phy",
		.of_match_table = qmp_pcie_msm8996_of_match_table,
	},
};

module_platform_driver(qmp_pcie_msm8996_driver);

MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP MSM8996 PCIe PHY driver");
MODULE_LICENSE("GPL v2");