// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"

#define DRV_NAME	"octeontx2-cgx"
#define DRV_STRING	"Marvell OcteonTX2 CGX/MAC Driver"

/**
 * struct lmac - per-LMAC port context
 * @wq_cmd_cmplt:	waitq to keep the process blocked until cmd completion
 * @cmd_lock:		Lock to serialize the command interface
 * @resp:		command response
 * @link_info:		link related information
 * @event_cb:		callback for linkchange events
 * @event_cb_lock:	lock for serializing callback with unregister
 * @cmd_pend:		flag set before new command is started
 *			flag cleared after command response is received
 * @cgx:		parent cgx port
 * @lmac_id:		lmac port id
 * @name:		lmac port name
 */
struct lmac {
	wait_queue_head_t wq_cmd_cmplt;
	struct mutex cmd_lock;
	u64 resp;
	struct cgx_link_user_info link_info;
	struct cgx_event_cb event_cb;
	spinlock_t event_cb_lock;
	bool cmd_pend;
	struct cgx *cgx;
	u8 lmac_id;
	char *name;
};

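/* Per-CGX device context: register base, the LMACs probed on this CGX and
 * the workqueue used to kick off link-up commands after probe.
 */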
struct cgx {
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	u8			cgx_id;
	u8			lmac_count;
	struct lmac		*lmac_idmap[MAX_LMAC_PER_CGX];
	struct work_struct	cgx_cmd_work;
	struct workqueue_struct	*cgx_cmd_workq;
	struct list_head	cgx_list;
};

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];

/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ 0, }	/* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

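/* Per-LMAC register accessors: each LMAC owns a dedicated CSR window inside
 * the CGX BAR, selected by the LMAC id in the address bits above the register
 * offset (hence the "lmac << 18" below).
 */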
static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << 18) + offset);
}

static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << 18) + offset);
}

static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}

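/* Return the number of CGX devices to expect, i.e. the highest CGX id probed
 * so far plus one, or 0 if no CGX device has been probed yet.
 */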
int cgx_get_cgxcnt_max(void)
{
	struct cgx *cgx_dev;
	int idmax = -ENODEV;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
		if (cgx_dev->cgx_id > idmax)
			idmax = cgx_dev->cgx_id;

	if (idmax < 0)
		return 0;

	return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
	struct cgx *cgx_dev;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
		if (cgx_dev->cgx_id == cgx_id)
			return cgx_dev;
	}
	return NULL;
}

int cgx_get_cgxid(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -EINVAL;

	return cgx->cgx_id;
}

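/* Return the P2X interface this LMAC is attached to, as read from the
 * P2X select field of the CMR config register.
 */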
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

/* Ensure the required lock for the event queue (where asynchronous events are
 * posted) is acquired before calling this API. Otherwise an asynchronous
 * event (with the latest link status) can reach the destination before this
 * function returns and make the link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo)
{
	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

	if (!lmac)
		return -ENODEV;

	*linfo = lmac->link_info;
	return 0;
}

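/* Pack a MAC address into a u64 with the first octet in bits 47:40, the
 * layout expected by the DMAC CAM registers written below.
 */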
static u64 mac2u64(u8 *mac_addr)
{
	u64 mac = 0;
	int index;

	for (index = ETH_ALEN - 1; index >= 0; index--)
		mac |= ((u64)*mac_addr++) << (8 * index);
	return mac;
}

int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	/* Install the MAC address in this LMAC's DMAC CAM entry and enable
	 * CAM based DMAC filtering on the LMAC.
	 */
	cfg = mac2u64(mac_addr);

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
	return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
	return 0;
}

static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
	u64 cfg;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u8 lmac_type;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	lmac_type = cgx_get_lmac_type(cgx, lmac_id);
	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
		else
			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
		if (enable)
			cfg |= CGXX_SPUX_CONTROL1_LBK;
		else
			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
	}
	return 0;
}

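/* Enable or disable promiscuous mode: when enabled, CAM based DMAC filtering
 * is turned off for the LMAC so that frames with any destination MAC are
 * accepted; when disabled, CAM based filtering is restored.
 */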
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
	u64 cfg = 0;

	if (!cgx)
		return;

	if (enable) {
		/* Enable promiscuous mode on LMAC */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
		cfg |= CGX_DMAC_BCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	} else {
		/* Disable promiscuous mode */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	}
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (enable) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;
	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
	return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;
	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
	return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	if (enable)
		cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
	else
		cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return 0;
}

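/* Enable or disable packet transmission alone and return the previous state
 * of DATA_PKT_TX_EN (0 or 1) so the caller can restore it later.
 */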
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg, last;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	last = cfg;
	if (enable)
		cfg |= DATA_PKT_TX_EN;
	else
		cfg &= ~DATA_PKT_TX_EN;

	if (cfg != last)
		cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return !!(last & DATA_PKT_TX_EN);
}

int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
			   u8 *tx_pause, u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
	return 0;
}

int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
			   u8 tx_pause, u8 rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	if (tx_pause) {
		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
	} else {
		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	}
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
	return 0;
}

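/* Enable or disable flow control pause frames on an LMAC: reception on both
 * the SMU and GMP paths, pause frame transmission, and (when enabling) the
 * default pause time and interval.
 */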
static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
{
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return;
	if (enable) {
		/* Enable receive pause frames */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Enable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

		/* Set pause time and interval */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));

		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);

		cfg = cgx_read(cgx, lmac_id,
			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));
	} else {
		/* ALL pause frames received are completely ignored */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Disable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
	}
}

void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (enable) {
		/* Enable inbound PTP timestamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		/* Disable inbound PTP stamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

/* CGX Firmware interface low level support */
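/* Command/response handshake with firmware: the driver writes a request to
 * CGX_COMMAND_REG with the ownership field handed to firmware, firmware
 * posts the result in CGX_EVENT_REG and raises the FWI interrupt, and the
 * IRQ handler copies the response and wakes up the waiting thread.
 */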
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd timeout\n",
			cgx->cgx_id, lmac->lmac_id);
		err = -EIO;
		goto unlock;
	}

	/* We have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}

static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
				      struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac;
	int err;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);

	/* Check for valid response */
	if (!err) {
		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
			return -EIO;
		else
			return 0;
	}

	return err;
}

static inline void cgx_link_usertable_init(void)
{
	cgx_speed_mbps[CGX_LINK_NONE] = 0;
	cgx_speed_mbps[CGX_LINK_10M] = 10;
	cgx_speed_mbps[CGX_LINK_100M] = 100;
	cgx_speed_mbps[CGX_LINK_1G] = 1000;
	cgx_speed_mbps[CGX_LINK_2HG] = 2500;
	cgx_speed_mbps[CGX_LINK_5G] = 5000;
	cgx_speed_mbps[CGX_LINK_10G] = 10000;
	cgx_speed_mbps[CGX_LINK_20G] = 20000;
	cgx_speed_mbps[CGX_LINK_25G] = 25000;
	cgx_speed_mbps[CGX_LINK_40G] = 40000;
	cgx_speed_mbps[CGX_LINK_50G] = 50000;
	cgx_speed_mbps[CGX_LINK_100G] = 100000;

	cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
	cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
	cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
	cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
	cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
	cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
	cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
	cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
	cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
	cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}

static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
{
	char *lmac_string;

	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx_link_user_info *linfo;
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;
	int err_type;

	dev = &cgx->pdev->dev;

	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	/* Update the local copy of link status */
	lmac->link_info = event.link_uinfo;
	linfo = &lmac->link_info;

	/* Ensure callback doesn't get unregistered until we finish it */
	spin_lock(&lmac->event_cb_lock);

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null\n",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		goto err;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
err:
	spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);
	if (id == CGX_CMD_LINK_BRING_UP ||
	    id == CGX_CMD_LINK_BRING_DOWN)
		return true;
	else
		return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
		return true;
	else
		return false;
}

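/* Firmware interrupt (FWI) handler: processes both command responses and
 * asynchronous events posted by firmware in CGX_EVENT_REG, then acks the
 * event and the interrupt so firmware can post the next one.
 */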
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	struct lmac *lmac = data;
	struct cgx *cgx;
	u64 event;

	cgx = lmac->cgx;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be separate events for link changes initiated
		 * from software; hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up_interruptible(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);

	return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* Callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	lmac->event_cb = *cb;

	return 0;
}

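/* A minimal usage sketch for a consumer of the event callback API; the
 * callback and private data names below are illustrative, not part of this
 * driver:
 *
 *	struct cgx_event_cb cb = {
 *		.notify_link_chg = my_link_notify,
 *		.data = my_priv,
 *	};
 *
 *	cgx_lmac_evh_register(&cb, cgxd, lmac_id);
 *	...
 *	cgx_lmac_evh_unregister(cgxd, lmac_id);
 */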
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
	struct lmac *lmac;
	unsigned long flags;
	struct cgx *cgx = cgxd;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	spin_lock_irqsave(&lmac->event_cb_lock, flags);
	lmac->event_cb.notify_link_chg = NULL;
	lmac->event_cb.data = NULL;
	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

	return 0;
}

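/* Query firmware (CGX_CMD_GET_FWD_BASE on the first probed CGX device) for
 * the base address of the firmware data region.
 */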
int cgx_get_fwdata_base(u64 *base)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int err;

	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
	if (!cgx)
		return -ENXIO;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
	if (!err)
		*base = FIELD_GET(RESP_FWD_BASE, resp);

	return err;
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
	u64 req = 0;
	u64 resp;

	if (enable)
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
	else
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);

	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
	return cgx_fwi_cmd_generic(req, resp, cgx, 0);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
	struct device *dev = &cgx->pdev->dev;
	int major_ver, minor_ver;
	u64 resp;
	int err;

	if (!cgx->lmac_count)
		return 0;

	err = cgx_fwi_read_version(&resp, cgx);
	if (err)
		return err;

	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
		major_ver, minor_ver);
	if (major_ver != CGX_FIRMWARE_MAJOR_VER)
		return -EIO;
	else
		return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
	struct device *dev = &cgx->pdev->dev;
	int i, err;

	/* Do link up for all the lmacs */
	for (i = 0; i < cgx->lmac_count; i++) {
		err = cgx_fwi_link_change(cgx, i, true);
		if (err)
			dev_info(dev, "cgx port %d:%d Link up command failed\n",
				 cgx->cgx_id, i);
	}
}

int cgx_lmac_linkup_start(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

	return 0;
}

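/* Discover the LMACs on this CGX, allocate per-LMAC context, request and
 * enable the per-LMAC firmware interrupt, then verify that the firmware
 * command interface version is compatible.
 */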
static int cgx_lmac_init(struct cgx *cgx)
{
	struct lmac *lmac;
	int i, err;

	cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
		cgx->lmac_count = MAX_LMAC_PER_CGX;

	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = kzalloc(sizeof(*lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;
		lmac->name = kzalloc(sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
		if (!lmac->name) {
			err = -ENOMEM;
			goto err_lmac_free;
		}
		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
		lmac->lmac_id = i;
		lmac->cgx = cgx;
		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		spin_lock_init(&lmac->event_cb_lock);
		err = request_irq(pci_irq_vector(cgx->pdev,
						 CGX_LMAC_FWI + i * 9),
				  cgx_fwi_event_handler, 0, lmac->name, lmac);
		if (err)
			goto err_irq;

		/* Enable interrupt */
		cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
			  FW_CGX_INT);

		/* Add reference */
		cgx->lmac_idmap[i] = lmac;
		cgx_lmac_pause_frm_config(cgx, i, true);
	}

	return cgx_lmac_verify_fwi_version(cgx);

err_irq:
	kfree(lmac->name);
err_lmac_free:
	kfree(lmac);
	return err;
}

static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	if (cgx->cgx_cmd_workq) {
		flush_workqueue(cgx->cgx_cmd_workq);
		destroy_workqueue(cgx->cgx_cmd_workq);
		cgx->cgx_cmd_workq = NULL;
	}

	/* Free all lmac related resources */
	for (i = 0; i < cgx->lmac_count; i++) {
		cgx_lmac_pause_frm_config(cgx, i, false);
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Map configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nvec = CGX_NVEC;
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		& CGX_ID_MASK;

	/* Init workqueue for processing link-up requests */
	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_link_usertable_init();

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};