1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/types.h>
7#include <linux/pci.h>
8#include <linux/netdevice.h>
9#include <linux/etherdevice.h>
10#include <linux/ethtool.h>
11#include <linux/slab.h>
12#include <linux/device.h>
13#include <linux/skbuff.h>
14#include <linux/if_vlan.h>
15#include <linux/if_bridge.h>
16#include <linux/workqueue.h>
17#include <linux/jiffies.h>
18#include <linux/bitops.h>
19#include <linux/list.h>
20#include <linux/notifier.h>
21#include <linux/dcbnl.h>
22#include <linux/inetdevice.h>
23#include <linux/netlink.h>
24#include <linux/jhash.h>
25#include <linux/log2.h>
26#include <linux/refcount.h>
27#include <linux/rhashtable.h>
28#include <net/switchdev.h>
29#include <net/pkt_cls.h>
30#include <net/netevent.h>
31#include <net/addrconf.h>
32#include <linux/ptp_classify.h>
33
34#include "spectrum.h"
35#include "pci.h"
36#include "core.h"
37#include "core_env.h"
38#include "reg.h"
39#include "port.h"
40#include "trap.h"
41#include "txheader.h"
42#include "spectrum_cnt.h"
43#include "spectrum_dpipe.h"
44#include "spectrum_acl_flex_actions.h"
45#include "spectrum_span.h"
46#include "spectrum_ptp.h"
47#include "spectrum_trap.h"
48
/* Firmware minor/sub-minor numbers are shared across ASIC generations;
 * only the major number (defined per generation below) differs.
 */
#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
/* Minimum running minor number from which a Spectrum-1 firmware reset is
 * possible — NOTE(review): confirm exact semantics against the flashing code.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

/* Firmware revision this driver expects on Spectrum-1. */
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image file name, derived from the expected revision numbers. */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

/* Firmware revision this driver expects on Spectrum-2. */
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

/* Firmware revision this driver expects on Spectrum-3. */
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

/* Line card INI bundle image file name. */
#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

/* Per-generation base MAC masks; the trailing zero bits presumably leave
 * room for per-port address derivation — not referenced in this chunk,
 * verify against their users elsewhere in the driver.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
109
/* Tx header field accessors. Each MLXSW_ITEM32() invocation below generates
 * the set/get helpers used by the Tx header constructors further down
 * (arguments appear to be byte offset, LSB position and bit width — see
 * the item helper definitions in the core headers to confirm).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
177
/* Read the packet and byte values of a flow counter. Either output pointer
 * may be NULL when the caller is not interested in that value.
 * Returns 0 on success, -errno on register query failure.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	/* NOP opcode: query the counter without modifying it. */
	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}
196
/* Zero both the packet and byte counts of a flow counter via the MGPC
 * register's CLEAR opcode.
 */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}
206
207int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
208				unsigned int *p_counter_index)
209{
210	int err;
211
212	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
213				     p_counter_index);
214	if (err)
215		return err;
216	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
217	if (err)
218		goto err_counter_clear;
219	return 0;
220
221err_counter_clear:
222	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
223			      *p_counter_index);
224	return err;
225}
226
227void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
228				unsigned int counter_index)
229{
230	 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
231			       counter_index);
232}
233
/* Build the Tx header for a control packet directed at a specific egress
 * port (see the tx_hdr_* field definitions above). Pushes MLXSW_TXHDR_LEN
 * bytes onto the skb; callers are responsible for having reserved that
 * headroom (see mlxsw_sp_txhdr_handle()).
 */
void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
			      const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Start from an all-zero header; unset fields stay zero. */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
249
/* Build the Tx header for a PTP packet that must be sent as a *data* packet
 * (through the CPU port) rather than as a control packet. The packet is
 * steered towards the intended egress port via the FID value
 * 'max_fid + local_port - 1' — presumably a per-port FID; confirm against
 * the FID allocation code.
 *
 * On any error the skb is counted as a Tx drop and freed, i.e. the skb is
 * always consumed. Returns 0 on success, -errno otherwise.
 */
int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	char *txhdr;
	u16 max_fid;
	int err;

	/* Ensure writable headroom for the Tx header. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		err = -ENOMEM;
		goto err_skb_cow_head;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
		err = -EIO;
		goto err_res_valid;
	}
	max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

	txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
	mlxsw_tx_hdr_fid_valid_set(txhdr, true);
	mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
	return 0;

err_res_valid:
err_skb_cow_head:
	this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
	dev_kfree_skb_any(skb);
	return err;
}
288
289static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
290{
291	unsigned int type;
292
293	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
294		return false;
295
296	type = ptp_classify_raw(skb);
297	return !!ptp_parse_header(skb, type);
298}
299
/* Prepend the appropriate Tx header to an egress skb. PTP packets needing a
 * hardware time stamp are delegated to the per-ASIC constructor; everything
 * else gets a standard control Tx header.
 *
 * On error the skb is counted as dropped and freed (the visible PTP
 * constructor, mlxsw_sp_txhdr_ptp_data_construct(), follows the same
 * convention), so a non-zero return means the skb was consumed.
 */
static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
	 * need special handling and cannot be transmitted as regular control
	 * packets.
	 */
	if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
		return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
							  mlxsw_sp_port, skb,
							  tx_info);

	/* Ensure writable headroom for the Tx header. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}
325
326enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
327{
328	switch (state) {
329	case BR_STATE_FORWARDING:
330		return MLXSW_REG_SPMS_STATE_FORWARDING;
331	case BR_STATE_LEARNING:
332		return MLXSW_REG_SPMS_STATE_LEARNING;
333	case BR_STATE_LISTENING:
334	case BR_STATE_DISABLED:
335	case BR_STATE_BLOCKING:
336		return MLXSW_REG_SPMS_STATE_DISCARDING;
337	default:
338		BUG();
339	}
340}
341
/* Program the STP state of a single VID on a port via the SPMS register. */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	/* The SPMS payload is heap-allocated — presumably too large for the
	 * stack, unlike most register payloads in this file.
	 */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
360
/* Read the switch base MAC address from the SPAD register and cache it in
 * mlxsw_sp->base_mac; per-port addresses are derived from it later (see
 * mlxsw_sp_port_dev_addr_init()).
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
372
/* Set the port's administrative status (up/down) via the PAOS register. */
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
384
/* Program the port's hardware MAC address via the PPAD register. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
395
/* Derive the port's MAC address from the cached switch base MAC and the
 * local port number, assign it to the netdev and program it into the
 * hardware.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}
405
/* Read the maximal MTU supported by the port from the PMTU register
 * (packed with MTU 0, i.e. a pure query).
 */
static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}
420
421static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
422{
423	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
424	char pmtu_pl[MLXSW_REG_PMTU_LEN];
425
426	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
427	if (mtu > mlxsw_sp_port->max_mtu)
428		return -EINVAL;
429
430	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
431	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
432}
433
/* Assign a port to a switch partition (swid) via the PSPA register. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
442
/* Enable or disable virtual port mode on the port via the SVPE register. */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
451
/* Enable or disable MAC learning for a single VID on a port via the
 * SPVMLR register (packed with the single-entry range [vid, vid]).
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	/* SPVMLR payload is heap-allocated — presumably too large for the
	 * stack.
	 */
	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
468
/* Enable or disable port security via the SPFSR register, caching the
 * current state in mlxsw_sp_port->security.
 */
int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spfsr_pl[MLXSW_REG_SPFSR_LEN];
	int err;

	/* Skip the register write when already in the requested state. */
	if (mlxsw_sp_port->security == enable)
		return 0;

	mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
	if (err)
		return err;

	/* Update the cached state only after the device accepted it. */
	mlxsw_sp_port->security = enable;
	return 0;
}
486
487int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
488{
489	switch (ethtype) {
490	case ETH_P_8021Q:
491		*p_sver_type = 0;
492		break;
493	case ETH_P_8021AD:
494		*p_sver_type = 1;
495		break;
496	default:
497		return -EINVAL;
498	}
499
500	return 0;
501}
502
/* Set the port's egress VLAN EtherType (802.1Q or 802.1ad) via the SPEVET
 * register. Returns -EINVAL for an unsupported EtherType.
 */
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}
518
/* Program the port's PVID via the SPVID register; 'ethtype' selects the
 * VLAN EtherType (802.1Q/802.1ad) the PVID applies to.
 */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
536
/* Allow or disallow untagged frames on the port via the SPAFT register. */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
546
547int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
548			   u16 ethtype)
549{
550	int err;
551
552	if (!vid) {
553		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
554		if (err)
555			return err;
556	} else {
557		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
558		if (err)
559			return err;
560		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
561		if (err)
562			goto err_port_allow_untagged_set;
563	}
564
565	mlxsw_sp_port->pvid = vid;
566	return 0;
567
568err_port_allow_untagged_set:
569	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
570	return err;
571}
572
/* Program the port's system port mapping via the SSPR register. */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
582
583static int
584mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
585				u16 local_port, char *pmlp_pl,
586				struct mlxsw_sp_port_mapping *port_mapping)
587{
588	bool separate_rxtx;
589	u8 first_lane;
590	u8 slot_index;
591	u8 module;
592	u8 width;
593	int i;
594
595	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
596	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
597	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
598	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
599	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
600
601	if (width && !is_power_of_2(width)) {
602		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
603			local_port);
604		return -EINVAL;
605	}
606
607	for (i = 0; i < width; i++) {
608		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
609			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
610				local_port);
611			return -EINVAL;
612		}
613		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
614			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
615				local_port);
616			return -EINVAL;
617		}
618		if (separate_rxtx &&
619		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
620		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
621			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
622				local_port);
623			return -EINVAL;
624		}
625		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
626			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
627				local_port);
628			return -EINVAL;
629		}
630	}
631
632	port_mapping->module = module;
633	port_mapping->slot_index = slot_index;
634	port_mapping->width = width;
635	port_mapping->module_width = width;
636	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
637	return 0;
638}
639
/* Query the PMLP register for a local port and parse the result into
 * 'port_mapping' (see mlxsw_sp_port_module_info_parse()).
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}
654
655static int
656mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
657			 const struct mlxsw_sp_port_mapping *port_mapping)
658{
659	char pmlp_pl[MLXSW_REG_PMLP_LEN];
660	int i, err;
661
662	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
663				  port_mapping->module);
664
665	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
666	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
667	for (i = 0; i < port_mapping->width; i++) {
668		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
669					      port_mapping->slot_index);
670		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
671		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
672	}
673
674	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
675	if (err)
676		goto err_pmlp_write;
677	return 0;
678
679err_pmlp_write:
680	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
681				    port_mapping->module);
682	return err;
683}
684
/* Remove a local port's lane mapping (a PMLP write with width 0) and
 * unregister the module port from the environment layer. The write result
 * is ignored — this is a void teardown path with no recovery option.
 */
static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}
695
696static int mlxsw_sp_port_open(struct net_device *dev)
697{
698	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
699	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
700	int err;
701
702	err = mlxsw_env_module_port_up(mlxsw_sp->core,
703				       mlxsw_sp_port->mapping.slot_index,
704				       mlxsw_sp_port->mapping.module);
705	if (err)
706		return err;
707	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
708	if (err)
709		goto err_port_admin_status_set;
710	netif_start_queue(dev);
711	return 0;
712
713err_port_admin_status_set:
714	mlxsw_env_module_port_down(mlxsw_sp->core,
715				   mlxsw_sp_port->mapping.slot_index,
716				   mlxsw_sp_port->mapping.module);
717	return err;
718}
719
/* ndo_stop: reverse of mlxsw_sp_port_open() — stop the Tx queue, bring the
 * port administratively down and release the module port. Errors from the
 * admin status write are ignored on this teardown path.
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}
732
/* ndo_start_xmit: prepend the Tx header and hand the skb to the core for
 * transmission. Except for the NETDEV_TX_BUSY case, the skb is always
 * consumed — on any failure it is freed and accounted as a Tx drop.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	/* Queue full: let the stack retry the same skb later. */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	/* On failure the skb was already freed and counted as dropped by
	 * mlxsw_sp_txhdr_handle().
	 */
	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
783
/* ndo_set_rx_mode: intentionally empty — NOTE(review): presumably Rx
 * filtering is handled by the device pipeline and the callback only exists
 * to satisfy the netdev ops; confirm against the netdev ops registration.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
787
/* ndo_set_mac_address: validate the new address, program it into the
 * hardware first, and only then update the netdev copy so the two cannot
 * go out of sync on failure.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}
803
804static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
805{
806	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
807	struct mlxsw_sp_hdroom orig_hdroom;
808	struct mlxsw_sp_hdroom hdroom;
809	int err;
810
811	orig_hdroom = *mlxsw_sp_port->hdroom;
812
813	hdroom = orig_hdroom;
814	hdroom.mtu = mtu;
815	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
816
817	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
818	if (err) {
819		netdev_err(dev, "Failed to configure port's headroom\n");
820		return err;
821	}
822
823	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
824	if (err)
825		goto err_port_mtu_set;
826	dev->mtu = mtu;
827	return 0;
828
829err_port_mtu_set:
830	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
831	return err;
832}
833
/* Aggregate the port's per-CPU software counters into 'stats'.
 * Always returns 0.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* u64_stats seqcount protocol: retry the snapshot if a
		 * writer updated the counters concurrently.
		 */
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
865
866static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
867{
868	switch (attr_id) {
869	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
870		return true;
871	}
872
873	return false;
874}
875
876static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
877					   void *sp)
878{
879	switch (attr_id) {
880	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
881		return mlxsw_sp_port_get_sw_stats64(dev, sp);
882	}
883
884	return -EINVAL;
885}
886
/* Query one PPCNT counter group ('grp') of the port, at priority/TC
 * 'prio', into the caller-provided payload buffer 'ppcnt_pl'.
 */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
896
897static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
898				      struct rtnl_link_stats64 *stats)
899{
900	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
901	int err;
902
903	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
904					  0, ppcnt_pl);
905	if (err)
906		goto out;
907
908	stats->tx_packets =
909		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
910	stats->rx_packets =
911		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
912	stats->tx_bytes =
913		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
914	stats->rx_bytes =
915		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
916	stats->multicast =
917		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
918
919	stats->rx_crc_errors =
920		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
921	stats->rx_frame_errors =
922		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
923
924	stats->rx_length_errors = (
925		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
926		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
927		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
928
929	stats->rx_errors = (stats->rx_crc_errors +
930		stats->rx_frame_errors + stats->rx_length_errors);
931
932out:
933	return err;
934}
935
/* Collect extended HW statistics (ECN marks, per-TC congestion and queue
 * counters, per-priority Tx counters) into 'xstats'. Individual PPCNT
 * query failures are tolerated and simply leave the corresponding fields
 * untouched.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			/* Skip only the congestion counters; still attempt
			 * the plain per-TC counters below.
			 */
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
981
/* Delayed work that refreshes the cached HW statistics served by
 * mlxsw_sp_port_get_stats64() (which may run in atomic context and thus
 * cannot query the device itself). Reschedules itself every
 * MLXSW_HW_STATS_UPDATE_TIME.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
1003
1004/* Return the stats from a cache that is updated periodically,
1005 * as this function might get called in an atomic context.
1006 */
1007static void
1008mlxsw_sp_port_get_stats64(struct net_device *dev,
1009			  struct rtnl_link_stats64 *stats)
1010{
1011	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1012
1013	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1014}
1015
1016static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1017				    u16 vid_begin, u16 vid_end,
1018				    bool is_member, bool untagged)
1019{
1020	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1021	char *spvm_pl;
1022	int err;
1023
1024	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1025	if (!spvm_pl)
1026		return -ENOMEM;
1027
1028	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port,	vid_begin,
1029			    vid_end, is_member, untagged);
1030	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1031	kfree(spvm_pl);
1032	return err;
1033}
1034
1035int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1036			   u16 vid_end, bool is_member, bool untagged)
1037{
1038	u16 vid, vid_e;
1039	int err;
1040
1041	for (vid = vid_begin; vid <= vid_end;
1042	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1043		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1044			    vid_end);
1045
1046		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1047					       is_member, untagged);
1048		if (err)
1049			return err;
1050	}
1051
1052	return 0;
1053}
1054
1055static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1056				     bool flush_default)
1057{
1058	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1059
1060	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1061				 &mlxsw_sp_port->vlans_list, list) {
1062		if (!flush_default &&
1063		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1064			continue;
1065		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1066	}
1067}
1068
1069static void
1070mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1071{
1072	if (mlxsw_sp_port_vlan->bridge_port)
1073		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1074	else if (mlxsw_sp_port_vlan->fid)
1075		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1076}
1077
/* Create a port VLAN entry for @vid on @mlxsw_sp_port.
 *
 * Adds the VID to the port's hardware VLAN filter (untagged only for the
 * default VID) and links a new entry onto the port's vlans_list.
 *
 * Returns the new entry on success, ERR_PTR(-EEXIST) if the VID already
 * exists on the port, or another ERR_PTR on failure.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	/* Program the hardware filter before allocating, so a failed
	 * allocation only needs to undo the register write.
	 */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the hardware VLAN filter update done above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
1109
/* Destroy a port VLAN entry: detach it from any bridge port or router
 * interface, unlink and free it, and finally remove the VID from the
 * port's hardware VLAN filter.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1120
1121static int mlxsw_sp_port_add_vid(struct net_device *dev,
1122				 __be16 __always_unused proto, u16 vid)
1123{
1124	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1125
1126	/* VLAN 0 is added to HW filter when device goes up, but it is
1127	 * reserved in our case, so simply return.
1128	 */
1129	if (!vid)
1130		return 0;
1131
1132	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1133}
1134
1135int mlxsw_sp_port_kill_vid(struct net_device *dev,
1136			   __be16 __always_unused proto, u16 vid)
1137{
1138	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1139	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1140
1141	/* VLAN 0 is removed from HW filter when device goes down, but
1142	 * it is reserved in our case, so simply return.
1143	 */
1144	if (!vid)
1145		return 0;
1146
1147	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1148	if (!mlxsw_sp_port_vlan)
1149		return 0;
1150	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1151
1152	return 0;
1153}
1154
/* Dispatch a flow block offload request according to the binder point
 * the block is attached to: clsact ingress/egress, or a RED qdisc
 * qevent (early_drop / mark).
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}
1171
/* .ndo_setup_tc handler: route each offload type to the matching
 * qdisc/block setup helper; anything else is not offloadable.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
1194
1195static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1196{
1197	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1198
1199	if (!enable) {
1200		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1201		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1202			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1203			return -EINVAL;
1204		}
1205		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1206		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1207	} else {
1208		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1209		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1210	}
1211	return 0;
1212}
1213
/* Toggle physical-layer loopback (PPLR register). The port is taken
 * administratively down around the register write and brought back up
 * afterwards if it was running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	/* Re-enable even if the write failed, so the port is not left down. */
	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
1232
1233typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1234
1235static int mlxsw_sp_handle_feature(struct net_device *dev,
1236				   netdev_features_t wanted_features,
1237				   netdev_features_t feature,
1238				   mlxsw_sp_feature_handler feature_handler)
1239{
1240	netdev_features_t changes = wanted_features ^ dev->features;
1241	bool enable = !!(wanted_features & feature);
1242	int err;
1243
1244	if (!(changes & feature))
1245		return 0;
1246
1247	err = feature_handler(dev, enable);
1248	if (err) {
1249		netdev_err(dev, "%s feature %pNF failed, err %d\n",
1250			   enable ? "Enable" : "Disable", &feature, err);
1251		return err;
1252	}
1253
1254	if (enable)
1255		dev->features |= feature;
1256	else
1257		dev->features &= ~feature;
1258
1259	return 0;
1260}
1261static int mlxsw_sp_set_features(struct net_device *dev,
1262				 netdev_features_t features)
1263{
1264	netdev_features_t oper_features = dev->features;
1265	int err = 0;
1266
1267	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1268				       mlxsw_sp_feature_hw_tc);
1269	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1270				       mlxsw_sp_feature_loopback);
1271
1272	if (err) {
1273		dev->features = oper_features;
1274		return -EINVAL;
1275	}
1276
1277	return 0;
1278}
1279
/* SIOCSHWTSTAMP handler: copy the requested config from user space,
 * apply it through the ASIC-generation-specific PTP ops and copy the
 * resulting config back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	/* Copy back; the op may have adjusted the config. */
	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
1299
/* SIOCGHWTSTAMP handler: fetch the current hardware timestamping config
 * via the PTP ops and copy it to user space.
 */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
1316
1317static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1318{
1319	struct hwtstamp_config config = {0};
1320
1321	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
1322}
1323
/* .ndo_eth_ioctl handler: only the hardware timestamping ioctls are
 * supported.
 */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
1338
/* Netdev callbacks for a Spectrum front-panel port. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};
1355
/* Advertise on the port exactly the intersection of the speeds the
 * device reports as capable (PTYS) and the speeds the driver supports.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	/* Mask the capability with the speeds the driver supports. */
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1385
/* Read the port's current operational speed via PTYS and translate the
 * operational protocol mask into a speed value in @speed.
 */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}
1406
/* Configure an ETS element in the port's scheduling hierarchy (QEEC).
 * @hr, @index and @next_index select the element and its parent;
 * @dwrr and @dwrr_weight configure DWRR arbitration for the element.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1421
/* Set the maximum shaper (rate @maxrate and burst size @burst_size) on
 * a QEEC scheduling element.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1436
/* Set the minimum shaper rate @minrate on a QEEC scheduling element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1451
/* Map switch priority @switch_prio to traffic class @tclass on the port
 * (QTCT register).
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
1462
/* Build the port's default ETS scheduling configuration: hierarchy,
 * shapers and priority-to-TC mapping.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs 8..15 (i + 8) are configured with DWRR enabled. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1548
/* Enable/disable multicast-aware mode for the port's traffic-class
 * table (QTCTM register).
 */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}
1558
1559static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1560{
1561	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1562	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1563	u8 module = mlxsw_sp_port->mapping.module;
1564	u64 overheat_counter;
1565	int err;
1566
1567	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
1568						    module, &overheat_counter);
1569	if (err)
1570		return err;
1571
1572	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1573	return 0;
1574}
1575
/* Configure which EtherTypes the port classifies as VLAN-tagged (SPVC):
 * @is_8021ad_tagged for 802.1ad (S-tag) frames and @is_8021q_tagged for
 * 802.1q (C-tag) frames.
 */
int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
1588
/* Query the front-panel label information of @local_port via the PLLP
 * register: label port number, split sub-port number and slot index.
 */
static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}
1605
/* Create, initialize and register a netdev for front-panel port
 * @local_port using @port_mapping (module/lane mapping); @split marks a
 * split port. On failure, completed steps are unwound in reverse order
 * through the error labels below. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	/* A port is splittable if it occupies more than one lane and is
	 * not itself already the result of a split.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Start from a clean hardware VLAN filter. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

/* Error unwinding: mirrors the initialization order above, latest
 * successful step first.
 */
err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
1898
/* Tear down a port created by mlxsw_sp_port_create(). The teardown
 * order mirrors the creation order in reverse.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	/* Stop the periodic stats and PTP shaper work before the netdev
	 * goes away.
	 */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
1926
1927static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1928{
1929	struct mlxsw_sp_port *mlxsw_sp_port;
1930	int err;
1931
1932	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1933	if (!mlxsw_sp_port)
1934		return -ENOMEM;
1935
1936	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1937	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1938
1939	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1940				       mlxsw_sp_port,
1941				       mlxsw_sp->base_mac,
1942				       sizeof(mlxsw_sp->base_mac));
1943	if (err) {
1944		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1945		goto err_core_cpu_port_init;
1946	}
1947
1948	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1949	return 0;
1950
1951err_core_cpu_port_init:
1952	kfree(mlxsw_sp_port);
1953	return err;
1954}
1955
1956static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1957{
1958	struct mlxsw_sp_port *mlxsw_sp_port =
1959				mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1960
1961	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1962	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1963	kfree(mlxsw_sp_port);
1964}
1965
1966static bool mlxsw_sp_local_port_valid(u16 local_port)
1967{
1968	return local_port != MLXSW_PORT_CPU_PORT;
1969}
1970
1971static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1972{
1973	if (!mlxsw_sp_local_port_valid(local_port))
1974		return false;
1975	return mlxsw_sp->ports[local_port] != NULL;
1976}
1977
/* Enable or disable generation of port-module mapping change events
 * (PMECR register) for @local_port.
 */
static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
					   u16 local_port, bool enable)
{
	char pmecr_pl[MLXSW_REG_PMECR_LEN];

	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
}
1988
/* A queued copy of a PMLP (port mapping) event payload, consumed by the
 * mapping-events work item.
 */
struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};
1993
/* Work item that drains the queue of port mapping (PMLP) events and
 * creates ports for newly mapped local ports.
 */
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	/* Grab the whole pending queue at once so the spinlock is only
	 * held briefly; new events keep accumulating on events->queue.
	 */
	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		/* Port creation is done under the devlink instance lock. */
		devl_lock(devlink);

		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		/* The event was allocated in the trap handler; free it
		 * whether or not it was acted upon.
		 */
		kfree(event);
	}
}
2039
/* PMLPE event listener. Only copies the payload, queues it and schedules
 * the work item that does the actual processing; GFP_ATOMIC and the plain
 * spin_lock() indicate this may run in atomic context.
 */
static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
				    char *pmlp_pl, void *priv)
{
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping_event *event;
	struct mlxsw_sp *mlxsw_sp = priv;
	u16 local_port;

	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;

	events = &mlxsw_sp->port_mapping_events;
	/* On allocation failure the event is silently dropped. */
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
	spin_lock(&events->queue_lock);
	list_add_tail(&event->list, &events->queue);
	spin_unlock(&events->queue_lock);
	mlxsw_core_schedule_work(&events->work);
}
2063
2064static void
2065__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
2066{
2067	struct mlxsw_sp_port_mapping_event *event, *next_event;
2068	struct mlxsw_sp_port_mapping_events *events;
2069
2070	events = &mlxsw_sp->port_mapping_events;
2071
2072	/* Caller needs to make sure that no new event is going to appear. */
2073	cancel_work_sync(&events->work);
2074	list_for_each_entry_safe(event, next_event, &events->queue, list) {
2075		list_del(&event->list);
2076		kfree(event);
2077	}
2078}
2079
/* Destroy all ports. Mapping event generation is disabled and the pending
 * event queue drained first, so no new port can appear concurrently with
 * the teardown.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = 1; i < max_ports; i++)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}
2097
2098static void
2099mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
2100			       bool (*selector)(void *priv, u16 local_port),
2101			       void *priv)
2102{
2103	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2104	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
2105	int i;
2106
2107	for (i = 1; i < max_ports; i++)
2108		if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
2109			mlxsw_sp_port_remove(mlxsw_sp, i);
2110}
2111
/* Allocate the port table, enable mapping change events for every possible
 * local port, then create the CPU port and all currently mapped front-panel
 * ports. On any failure, everything done so far is undone.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	/* Only ports with a non-zero mapped width exist at this point;
	 * unmapped ones are created later from PMLPE events.
	 */
	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	/* All event generators were enabled by now; reset i so the loop
	 * below disables every one of them.
	 */
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
2167
2168static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2169{
2170	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2171	struct mlxsw_sp_port_mapping *port_mapping;
2172	int i;
2173	int err;
2174
2175	mlxsw_sp->port_mapping = kcalloc(max_ports,
2176					 sizeof(struct mlxsw_sp_port_mapping),
2177					 GFP_KERNEL);
2178	if (!mlxsw_sp->port_mapping)
2179		return -ENOMEM;
2180
2181	for (i = 1; i < max_ports; i++) {
2182		port_mapping = &mlxsw_sp->port_mapping[i];
2183		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2184		if (err)
2185			goto err_port_module_info_get;
2186	}
2187	return 0;
2188
2189err_port_module_info_get:
2190	kfree(mlxsw_sp->port_mapping);
2191	return err;
2192}
2193
2194static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
2195{
2196	kfree(mlxsw_sp->port_mapping);
2197}
2198
/* Create @count split ports out of @port_mapping, each taking an equal
 * share of the original port's lanes. The local port numbers come from the
 * queried PMTDB payload.
 *
 * Returns 0 on success; on failure, every split port created so far is
 * removed again.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	/* Each split port inherits the mapping with 1/count of the width;
	 * the lane offset advances by that width for every created port.
	 */
	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	/* Unwind: remove the split ports created by earlier iterations. */
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}
2233
2234static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2235					 unsigned int count,
2236					 const char *pmtdb_pl)
2237{
2238	struct mlxsw_sp_port_mapping *port_mapping;
2239	int i;
2240
2241	/* Go over original unsplit ports in the gap and recreate them. */
2242	for (i = 0; i < count; i++) {
2243		u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2244
2245		port_mapping = &mlxsw_sp->port_mapping[local_port];
2246		if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2247			continue;
2248		mlxsw_sp_port_create(mlxsw_sp, local_port,
2249				     false, port_mapping);
2250	}
2251}
2252
2253static struct mlxsw_sp_port *
2254mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2255{
2256	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2257		return mlxsw_sp->ports[local_port];
2258	return NULL;
2259}
2260
/* devlink port split operation: validate the request, query the resulting
 * port layout via PMTDB, remove the ports occupying that layout and create
 * the split ports in their place.
 *
 * Returns 0 on success or a negative errno with an extack message set.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Splitting an already-split port is rejected. */
	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	/* Ask the device which local ports the split would produce. */
	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	/* Copy the mapping before the port object is destroyed below. */
	port_mapping = mlxsw_sp_port->mapping;

	/* Remove every port occupying the slots of the new split ports,
	 * including the port being split itself.
	 */
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports that were removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}
2325
/* devlink port unsplit operation: remove all split ports belonging to the
 * module and recreate the original unsplit ports in their place.
 *
 * Returns 0 on success or a negative errno with an extack message set.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Only a port that is the result of a split can be unsplit. */
	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Derive the split count from the module width vs. port width. */
	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	/* Remove all the split ports sharing this module. */
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}
2373
2374static void
2375mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2376{
2377	int i;
2378
2379	for (i = 0; i < TC_MAX_QUEUE; i++)
2380		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2381}
2382
/* PUDE (port up/down event) handler: propagate the device's operational
 * link state to the netdev carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	/* The port may already be gone; silently ignore the event then. */
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		/* Schedule the PTP shaper work immediately. */
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}
2410
/* Unpack every timestamp record carried in an MTPPTR payload and hand each
 * one to the SP1 PTP code. @ingress tells which direction's FIFO fired.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}
2434
2435static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2436					      char *mtpptr_pl, void *priv)
2437{
2438	struct mlxsw_sp *mlxsw_sp = priv;
2439
2440	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2441}
2442
2443static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2444					      char *mtpptr_pl, void *priv)
2445{
2446	struct mlxsw_sp *mlxsw_sp = priv;
2447
2448	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2449}
2450
/* Default RX handler for trapped packets: account the packet in the port's
 * per-CPU stats and inject it into the network stack on the ingress netdev.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		/* NOTE(review): the skb does not appear to be freed on this
		 * early-return path - confirm ownership with the caller.
		 */
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2475
2476static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
2477					   void *priv)
2478{
2479	skb->offload_fwd_mark = 1;
2480	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2481}
2482
2483static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2484					      u16 local_port, void *priv)
2485{
2486	skb->offload_l3_fwd_mark = 1;
2487	skb->offload_fwd_mark = 1;
2488	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2489}
2490
/* Dispatch a received PTP packet to the ASIC-generation-specific PTP
 * implementation.
 */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2496
/* Convenience wrappers for the listener tables below. The three RXL
 * flavors differ only in the RX handler used, and hence in which offload
 * marks the handler sets on the received skb. MLXSW_SP_EVENTL declares an
 * event listener in the SP_EVENT trap group.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		_is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		_is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2511
/* Traps and events registered for all Spectrum generations, grouped by
 * category.
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2537
/* Listeners registered only for the SP1 flavor: PTP timestamp FIFO events
 * delivered through the MTPPTR register.
 */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2543
/* Listeners registered only for the SP2 flavor: port mapping change
 * (PMLPE) events.
 */
static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
2548
/* Program CPU policers (rate limiters) for the trap groups handled in this
 * file. Only the groups listed in the switch statement get a policer; all
 * other policer indices are skipped.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		/* Record this policer as in use in the usage bitmap. */
		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2589
/* Configure the CPU trap groups handled in this file: priority, traffic
 * class and attached policer. Trap groups not listed in the switch
 * statement are left untouched.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By convention the policer index equals the trap group
		 * index, unless overridden below.
		 */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			/* Events are not rate limited. */
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A real policer id must fit within the device's policer
		 * range.
		 */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2636
/* Set up trap handling: allocate the trap bookkeeping structure (including
 * the policer usage bitmap sized to the device's policer count), program
 * policers and trap groups, then register the common and the ASIC-specific
 * listener tables.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* The policers_usage bitmap is a flexible array member, hence
	 * struct_size().
	 */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	/* ASIC-generation specific listeners (set up by the init path). */
	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2684
/* Unregister listener tables in reverse order of their registration in
 * mlxsw_sp_traps_init() and free the trap bookkeeping structure.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}
2694
/* Initial value mixed into the LAG hash seed computation. */
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash fields and seed (derived from the switch base
 * MAC), then allocate the per-LAG upper device tracking array.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u16 max_lag;
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}
2733
2734static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
2735{
2736	kfree(mlxsw_sp->lags);
2737}
2738
/* PTP callbacks for the Spectrum-1 generation. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2755
/* PTP callbacks for the Spectrum-2 generation; note the SP2-specific
 * txhdr_construct callback.
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
};
2772
/* PTP callbacks for the Spectrum-4 generation. Identical to the SP2 set
 * except txhdr_construct, which uses the common
 * mlxsw_sp_ptp_txhdr_construct() (as SP1 does).
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2789
/* Node of sample_trigger_ht, binding a sampling trigger to its sampling
 * parameters. Reference-counted; freed via RCU so RCU-only readers of
 * params remain safe.
 */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;	/* hash table key */
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;
	refcount_t refcount;
};
2797
/* Hash table parameters: nodes are keyed by the whole embedded trigger
 * structure, which is why lookup keys are normalized by
 * mlxsw_sp_sample_trigger_key_init() first.
 */
static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
2804
2805static void
2806mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
2807				 const struct mlxsw_sp_sample_trigger *trigger)
2808{
2809	memset(key, 0, sizeof(*key));
2810	key->type = trigger->type;
2811	key->local_port = trigger->local_port;
2812}
2813
/* RCU read lock must be held.
 *
 * Return the sampling parameters installed for @trigger, or NULL when no
 * matching trigger exists. The returned pointer is only valid within the
 * RCU read-side critical section, since nodes are freed with kfree_rcu().
 */
struct mlxsw_sp_sample_params *
mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	mlxsw_sp_sample_trigger_key_init(&key, trigger);
	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
					 mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return NULL;

	return &trigger_node->params;
}
2830
2831static int
2832mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2833				  const struct mlxsw_sp_sample_trigger *trigger,
2834				  const struct mlxsw_sp_sample_params *params)
2835{
2836	struct mlxsw_sp_sample_trigger_node *trigger_node;
2837	int err;
2838
2839	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2840	if (!trigger_node)
2841		return -ENOMEM;
2842
2843	trigger_node->trigger = *trigger;
2844	trigger_node->params = *params;
2845	refcount_set(&trigger_node->refcount, 1);
2846
2847	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2848				     &trigger_node->ht_node,
2849				     mlxsw_sp_sample_trigger_ht_params);
2850	if (err)
2851		goto err_rhashtable_insert;
2852
2853	return 0;
2854
2855err_rhashtable_insert:
2856	kfree(trigger_node);
2857	return err;
2858}
2859
/* Remove a trigger node from the hash table and free it. kfree_rcu() is
 * used because mlxsw_sp_sample_trigger_params_lookup() runs under RCU
 * protection only.
 */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}
2869
/* Install sampling parameters for @trigger, or take another reference on an
 * existing trigger whose parameters match. Must be called under RTNL.
 *
 * Returns 0 on success, -EINVAL (with extack set) when the trigger is
 * already bound to a port or the parameters conflict, or the errno of the
 * node initialization.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	/* A trigger tied to a specific port cannot be shared. */
	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	/* Sharing is only allowed when all parameters are identical. */
	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
2907
/* Drop one reference on the trigger's parameters and remove the trigger
 * once the last reference is gone. Must be called under RTNL. A lookup
 * miss is silently ignored.
 */
void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return;

	if (!refcount_dec_and_test(&trigger_node->refcount))
		return;

	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
}
2930
/* Forward declaration; the definition appears later in this file. */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);
2933
/* Default and increased packet parsing depths, and the default VxLAN UDP
 * destination port (the IANA-assigned 4789).
 */
#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
2937
/* Initialize the packet parsing configuration state to its defaults and
 * set up the mutex protecting it.
 */
static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	/* No user of an increased parsing depth yet. */
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}
2945
/* Destroy the parsing state; warn if a parsing depth reference is still
 * held at teardown time.
 */
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}
2951
/* Node of ipv6_addr_ht, mapping an IPv6 address to the KVDL entry that
 * holds it in the device. Reference-counted so an address is stored only
 * once.
 */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;	/* hash table key: the IPv6 address */
	struct rhash_head ht_node;
	u32 kvdl_index;	/* index of the device KVDL entry */
	refcount_t refcount;
};
2958
/* Hash table parameters: nodes are keyed by the full IPv6 address. */
static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
2965
/* Allocate a KVDL entry for @addr6, program the address into it via RIPS
 * and insert a tracking node into the hash table. On success the KVDL
 * index is returned through @p_kvdl_index.
 *
 * Returns 0 on success or a negative errno; on failure nothing is left
 * allocated.
 */
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	/* Write the address into the allocated KVDL entry. */
	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

	/* All error paths below release the KVDL entry allocated above. */
err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}
3011
3012static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
3013				    struct mlxsw_sp_ipv6_addr_node *node)
3014{
3015	u32 kvdl_index = node->kvdl_index;
3016
3017	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
3018			       mlxsw_sp_ipv6_addr_ht_params);
3019	kfree(node);
3020	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
3021			   kvdl_index);
3022}
3023
3024int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
3025				      const struct in6_addr *addr6,
3026				      u32 *p_kvdl_index)
3027{
3028	struct mlxsw_sp_ipv6_addr_node *node;
3029	int err = 0;
3030
3031	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
3032	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
3033				      mlxsw_sp_ipv6_addr_ht_params);
3034	if (node) {
3035		refcount_inc(&node->refcount);
3036		*p_kvdl_index = node->kvdl_index;
3037		goto out_unlock;
3038	}
3039
3040	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
3041
3042out_unlock:
3043	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3044	return err;
3045}
3046
3047void
3048mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
3049{
3050	struct mlxsw_sp_ipv6_addr_node *node;
3051
3052	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
3053	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
3054				      mlxsw_sp_ipv6_addr_ht_params);
3055	if (WARN_ON(!node))
3056		goto out_unlock;
3057
3058	if (!refcount_dec_and_test(&node->refcount))
3059		goto out_unlock;
3060
3061	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
3062
3063out_unlock:
3064	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3065}
3066
3067static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
3068{
3069	int err;
3070
3071	err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
3072			      &mlxsw_sp_ipv6_addr_ht_params);
3073	if (err)
3074		return err;
3075
3076	mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
3077	return 0;
3078}
3079
/* Destroy the IPv6 address tracking hash table and its lock. */
static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
3085
/* Common ASIC initialization shared by all Spectrum generations. Called
 * from the per-generation init functions (mlxsw_sp1_init() etc.) after the
 * generation-specific ops pointers were assigned. Sub-systems are brought
 * up in dependency order; on failure they are torn down in reverse order
 * through the error labels at the bottom.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_port_range_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
		goto err_port_range_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* PTP is only set up when the bus can read the hardware clock. */
	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Error unwind - tear down in reverse order of initialization. */
err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_port_range_fini(mlxsw_sp);
err_port_range_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}
3321
/* Assign Spectrum-1 specific operations and tables, then run the common
 * initialization in mlxsw_sp_init().
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3354
/* Assign Spectrum-2 specific operations and tables, then run the common
 * initialization in mlxsw_sp_init().
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3388
/* Assign Spectrum-3 specific operations; most ops are shared with
 * Spectrum-2, only shared buffers, SPAN and the shaper burst size differ.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3422
/* Assign Spectrum-4 specific operations; based on Spectrum-2/3 ops with
 * dedicated flex-key, Bloom filter, PTP ops and shaper burst size.
 */
static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3456
/* Common ASIC teardown for all Spectrum generations. Sub-systems are
 * brought down in the reverse order of their initialization in
 * mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state and clock exist only if a clock was created in init. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_port_range_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
3490
/* Spectrum-1 config profile. Unlike later generations, the KVD is
 * partitioned by the driver into linear/hash-single/hash-double parts
 * using the ratios below.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode                = 1,
	.flood_mode                     = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3511
/* Spectrum-2/3 config profile. No KVD size fields - the KVD is not
 * partitioned by the driver on these generations - and CQE time stamps
 * are requested in UTC format.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode                = 1,
	.flood_mode                     = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
3530
3531/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
3532 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
3533 * table.
3534 */
3535#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128
3536
/* Spectrum-4 config profile. Same as Spectrum-2/3 except for the reduced
 * maximum number of LAGs (see comment above the define).
 */
static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode                = 1,
	.flood_mode                     = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
3557
3558static void
3559mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3560				      struct devlink_resource_size_params *kvd_size_params,
3561				      struct devlink_resource_size_params *linear_size_params,
3562				      struct devlink_resource_size_params *hash_double_size_params,
3563				      struct devlink_resource_size_params *hash_single_size_params)
3564{
3565	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3566						 KVD_SINGLE_MIN_SIZE);
3567	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3568						 KVD_DOUBLE_MIN_SIZE);
3569	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3570	u32 linear_size_min = 0;
3571
3572	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3573					  MLXSW_SP_KVD_GRANULARITY,
3574					  DEVLINK_RESOURCE_UNIT_ENTRY);
3575	devlink_resource_size_params_init(linear_size_params, linear_size_min,
3576					  kvd_size - single_size_min -
3577					  double_size_min,
3578					  MLXSW_SP_KVD_GRANULARITY,
3579					  DEVLINK_RESOURCE_UNIT_ENTRY);
3580	devlink_resource_size_params_init(hash_double_size_params,
3581					  double_size_min,
3582					  kvd_size - single_size_min -
3583					  linear_size_min,
3584					  MLXSW_SP_KVD_GRANULARITY,
3585					  DEVLINK_RESOURCE_UNIT_ENTRY);
3586	devlink_resource_size_params_init(hash_single_size_params,
3587					  single_size_min,
3588					  kvd_size - double_size_min -
3589					  linear_size_min,
3590					  MLXSW_SP_KVD_GRANULARITY,
3591					  DEVLINK_RESOURCE_UNIT_ENTRY);
3592}
3593
/* Register the KVD and its three parts (linear, hash-double, hash-single)
 * as devlink resources for Spectrum-1. The hash part sizes are derived from
 * the parts ratio in the config profile, rounded down to the KVD
 * granularity.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if  (err)
		return err;

	/* Split the hash part (KVD minus linear) between double and single
	 * entries according to the profile's parts ratio.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}
3659
3660static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3661{
3662	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3663	struct devlink_resource_size_params kvd_size_params;
3664	u32 kvd_size;
3665
3666	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3667		return -EIO;
3668
3669	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3670	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3671					  MLXSW_SP_KVD_GRANULARITY,
3672					  DEVLINK_RESOURCE_UNIT_ENTRY);
3673
3674	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3675				      kvd_size, MLXSW_SP_RESOURCE_KVD,
3676				      DEVLINK_RESOURCE_ID_PARENT_TOP,
3677				      &kvd_size_params);
3678}
3679
3680static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3681{
3682	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3683	struct devlink_resource_size_params span_size_params;
3684	u32 max_span;
3685
3686	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3687		return -EIO;
3688
3689	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3690	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3691					  1, DEVLINK_RESOURCE_UNIT_ENTRY);
3692
3693	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3694				      max_span, MLXSW_SP_RESOURCE_SPAN,
3695				      DEVLINK_RESOURCE_ID_PARENT_TOP,
3696				      &span_size_params);
3697}
3698
3699static int
3700mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
3701{
3702	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3703	struct devlink_resource_size_params size_params;
3704	u8 max_rif_mac_profiles;
3705
3706	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
3707		max_rif_mac_profiles = 1;
3708	else
3709		max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
3710							  MAX_RIF_MAC_PROFILES);
3711	devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
3712					  max_rif_mac_profiles, 1,
3713					  DEVLINK_RESOURCE_UNIT_ENTRY);
3714
3715	return devl_resource_register(devlink,
3716				      "rif_mac_profiles",
3717				      max_rif_mac_profiles,
3718				      MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
3719				      DEVLINK_RESOURCE_ID_PARENT_TOP,
3720				      &size_params);
3721}
3722
3723static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
3724{
3725	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3726	struct devlink_resource_size_params size_params;
3727	u64 max_rifs;
3728
3729	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
3730		return -EIO;
3731
3732	max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
3733	devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3734					  1, DEVLINK_RESOURCE_UNIT_ENTRY);
3735
3736	return devl_resource_register(devlink, "rifs", max_rifs,
3737				      MLXSW_SP_RESOURCE_RIFS,
3738				      DEVLINK_RESOURCE_ID_PARENT_TOP,
3739				      &size_params);
3740}
3741
3742static int
3743mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
3744{
3745	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3746	struct devlink_resource_size_params size_params;
3747	u64 max;
3748
3749	if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
3750		return -EIO;
3751
3752	max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
3753	devlink_resource_size_params_init(&size_params, max, max, 1,
3754					  DEVLINK_RESOURCE_UNIT_ENTRY);
3755
3756	return devl_resource_register(devlink, "port_range_registers", max,
3757				      MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
3758				      DEVLINK_RESOURCE_ID_PARENT_TOP,
3759				      &size_params);
3760}
3761
/* Register all Spectrum-1 devlink resources. On any failure past the first
 * registration, everything registered so far is unregistered.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3805
/* Register all Spectrum-2+ devlink resources. Identical to the Spectrum-1
 * variant except for the monolithic KVD registration.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3849
/* Resolve the sizes of the three KVD parts, preferring user-provided sizes
 * exposed through devlink resources and falling back to the values derived
 * from the config profile. Returns -EIO when the resulting sizes violate
 * the device-reported minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	/* Single gets whatever remains after linear and double. */
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3904
/* Core callback for transmitted skbs: strip the TX header and hand the skb
 * to the generation-specific PTP transmitted handler.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u16 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3913
/* mlxsw core driver operations for Spectrum-1 ASICs. Unlike the later
 * generations below, this entry registers Spectrum-1 specific resources,
 * provides a KVD sizing callback, and its SDQs do not support CQE v2.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp1_fw_rev,
	.fw_filename			= MLXSW_SP1_FW_FILENAME,
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.sdq_supports_cqe_v2		= false,
};
3950
/* mlxsw core driver operations for Spectrum-2 ASICs. */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp2_fw_rev,
	.fw_filename			= MLXSW_SP2_FW_FILENAME,
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};
3987
/* mlxsw core driver operations for Spectrum-3 ASICs. Shares the
 * Spectrum-2 resource registration and config profile; only init and
 * firmware requirements are generation-specific.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};
4024
/* mlxsw core driver operations for Spectrum-4 ASICs. Note that no
 * .fw_req_rev/.fw_filename are set here, unlike the earlier generations
 * (presumably firmware requirements are handled elsewhere — see the
 * mlxsw core / SP4 init path).
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind				= mlxsw_sp4_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp4_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp4_config_profile,
	.sdq_supports_cqe_v2		= true,
};
4059
4060bool mlxsw_sp_port_dev_check(const struct net_device *dev)
4061{
4062	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
4063}
4064
4065static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
4066				   struct netdev_nested_priv *priv)
4067{
4068	int ret = 0;
4069
4070	if (mlxsw_sp_port_dev_check(lower_dev)) {
4071		priv->data = (void *)netdev_priv(lower_dev);
4072		ret = 1;
4073	}
4074
4075	return ret;
4076}
4077
4078struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4079{
4080	struct netdev_nested_priv priv = {
4081		.data = NULL,
4082	};
4083
4084	if (mlxsw_sp_port_dev_check(dev))
4085		return netdev_priv(dev);
4086
4087	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
4088
4089	return (struct mlxsw_sp_port *)priv.data;
4090}
4091
4092struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4093{
4094	struct mlxsw_sp_port *mlxsw_sp_port;
4095
4096	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4097	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4098}
4099
4100struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4101{
4102	struct netdev_nested_priv priv = {
4103		.data = NULL,
4104	};
4105
4106	if (mlxsw_sp_port_dev_check(dev))
4107		return netdev_priv(dev);
4108
4109	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4110				      &priv);
4111
4112	return (struct mlxsw_sp_port *)priv.data;
4113}
4114
/* Take a reference on the increased parsing depth. On the 0 -> 1
 * transition, program the ASIC (MPRS register) to parse deeper into the
 * packet, preserving the currently configured VxLAN UDP port. Returns 0
 * on success or the MPRS write error (in which case no reference is
 * taken).
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* Depth already increased by another user; just take a reference. */
	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	/* Cache the new depth and publish the first reference only after
	 * the hardware write succeeded.
	 */
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4138
/* Drop a reference on the increased parsing depth. On the 1 -> 0
 * transition, restore the default depth in hardware. The MPRS write
 * result is deliberately not checked — this is a best-effort restore
 * and the function cannot report failure.
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}
4156
/* Program the UDP destination port the ASIC parser treats as VxLAN,
 * keeping the currently configured parsing depth. The port is cached in
 * CPU byte order for later MPRS updates; the cache is only updated
 * after the hardware write succeeded.
 */
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4177
/* Make @mlxsw_sp_port leave the bridge @lag_dev is enslaved to (if any)
 * as well as the bridges that @lag_dev's direct uppers are enslaved to.
 * Used when the port leaves its LAG, since those bridge memberships were
 * inherited through the LAG.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
4196
4197static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4198{
4199	char sldr_pl[MLXSW_REG_SLDR_LEN];
4200
4201	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4202	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4203}
4204
4205static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4206{
4207	char sldr_pl[MLXSW_REG_SLDR_LEN];
4208
4209	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
4210	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4211}
4212
4213static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4214				     u16 lag_id, u8 port_index)
4215{
4216	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4217	char slcor_pl[MLXSW_REG_SLCOR_LEN];
4218
4219	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4220				      lag_id, port_index);
4221	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4222}
4223
4224static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4225					u16 lag_id)
4226{
4227	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4228	char slcor_pl[MLXSW_REG_SLCOR_LEN];
4229
4230	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4231					 lag_id);
4232	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4233}
4234
4235static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4236					u16 lag_id)
4237{
4238	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4239	char slcor_pl[MLXSW_REG_SLCOR_LEN];
4240
4241	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4242					lag_id);
4243	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4244}
4245
4246static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4247					 u16 lag_id)
4248{
4249	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4250	char slcor_pl[MLXSW_REG_SLCOR_LEN];
4251
4252	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4253					 lag_id);
4254	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4255}
4256
4257static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4258				  struct net_device *lag_dev,
4259				  u16 *p_lag_id)
4260{
4261	struct mlxsw_sp_upper *lag;
4262	int free_lag_id = -1;
4263	u16 max_lag;
4264	int err, i;
4265
4266	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
4267	if (err)
4268		return err;
4269
4270	for (i = 0; i < max_lag; i++) {
4271		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4272		if (lag->ref_count) {
4273			if (lag->dev == lag_dev) {
4274				*p_lag_id = i;
4275				return 0;
4276			}
4277		} else if (free_lag_id < 0) {
4278			free_lag_id = i;
4279		}
4280	}
4281	if (free_lag_id < 0)
4282		return -EBUSY;
4283	*p_lag_id = free_lag_id;
4284	return 0;
4285}
4286
4287static bool
4288mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4289			  struct net_device *lag_dev,
4290			  struct netdev_lag_upper_info *lag_upper_info,
4291			  struct netlink_ext_ack *extack)
4292{
4293	u16 lag_id;
4294
4295	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4296		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4297		return false;
4298	}
4299	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4300		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4301		return false;
4302	}
4303	return true;
4304}
4305
4306static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4307				       u16 lag_id, u8 *p_port_index)
4308{
4309	u64 max_lag_members;
4310	int i;
4311
4312	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4313					     MAX_LAG_MEMBERS);
4314	for (i = 0; i < max_lag_members; i++) {
4315		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4316			*p_port_index = i;
4317			return 0;
4318		}
4319	}
4320	return -EBUSY;
4321}
4322
4323static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
4324					   struct net_device *lag_dev,
4325					   struct netlink_ext_ack *extack)
4326{
4327	struct net_device *upper_dev;
4328	struct net_device *master;
4329	struct list_head *iter;
4330	int done = 0;
4331	int err;
4332
4333	master = netdev_master_upper_dev_get(lag_dev);
4334	if (master && netif_is_bridge_master(master)) {
4335		err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
4336						extack);
4337		if (err)
4338			return err;
4339	}
4340
4341	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4342		if (!is_vlan_dev(upper_dev))
4343			continue;
4344
4345		master = netdev_master_upper_dev_get(upper_dev);
4346		if (master && netif_is_bridge_master(master)) {
4347			err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4348							upper_dev, master,
4349							extack);
4350			if (err)
4351				goto err_port_bridge_join;
4352		}
4353
4354		++done;
4355	}
4356
4357	return 0;
4358
4359err_port_bridge_join:
4360	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4361		if (!is_vlan_dev(upper_dev))
4362			continue;
4363
4364		master = netdev_master_upper_dev_get(upper_dev);
4365		if (!master || !netif_is_bridge_master(master))
4366			continue;
4367
4368		if (!done--)
4369			break;
4370
4371		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
4372	}
4373
4374	master = netdev_master_upper_dev_get(lag_dev);
4375	if (master && netif_is_bridge_master(master))
4376		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
4377
4378	return err;
4379}
4380
/* Inverse of mlxsw_sp_lag_uppers_bridge_join(): make @mlxsw_sp_port
 * leave the bridges above @lag_dev's VLAN uppers and then the bridge
 * above @lag_dev itself (if any).
 */
static void
mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *upper_dev;
	struct net_device *master;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (!master)
			continue;

		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
	}

	master = netdev_master_upper_dev_get(lag_dev);
	if (master)
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
}
4404
/* Enslave @mlxsw_sp_port to LAG device @lag_dev: allocate (or reuse) a
 * hardware LAG, join the LAG's bridge uppers, add the port to the LAG's
 * collector, and hook the port up to any router interface configured on
 * the LAG. On failure, everything done so far is rolled back in reverse
 * order.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First member: create the LAG in hardware and bind it to the
	 * netdev.
	 */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;

	err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
					      extack);
	if (err)
		goto err_lag_uppers_bridge_join;

	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if exists */
	err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
					    extack);
	if (err)
		goto err_router_join;

	err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
err_lag_uppers_bridge_join:
	/* If we were the would-be first member, destroy the LAG we just
	 * created.
	 */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
4476
/* Release @mlxsw_sp_port from LAG device @lag_dev, undoing
 * mlxsw_sp_port_lag_join(): remove the port from the collector, flush
 * its VLANs, detach inherited bridge memberships, and destroy the
 * hardware LAG when this was the last member.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	/* Last member leaving: tear down the hardware LAG. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
4511
4512static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4513				      u16 lag_id)
4514{
4515	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4516	char sldr_pl[MLXSW_REG_SLDR_LEN];
4517
4518	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4519					 mlxsw_sp_port->local_port);
4520	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4521}
4522
4523static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4524					 u16 lag_id)
4525{
4526	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4527	char sldr_pl[MLXSW_REG_SLDR_LEN];
4528
4529	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4530					    mlxsw_sp_port->local_port);
4531	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4532}
4533
4534static int
4535mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4536{
4537	int err;
4538
4539	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4540					   mlxsw_sp_port->lag_id);
4541	if (err)
4542		return err;
4543
4544	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4545	if (err)
4546		goto err_dist_port_add;
4547
4548	return 0;
4549
4550err_dist_port_add:
4551	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4552	return err;
4553}
4554
4555static int
4556mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4557{
4558	int err;
4559
4560	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4561					    mlxsw_sp_port->lag_id);
4562	if (err)
4563		return err;
4564
4565	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4566					    mlxsw_sp_port->lag_id);
4567	if (err)
4568		goto err_col_port_disable;
4569
4570	return 0;
4571
4572err_col_port_disable:
4573	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4574	return err;
4575}
4576
4577static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4578				     struct netdev_lag_lower_state_info *info)
4579{
4580	if (info->tx_enabled)
4581		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4582	else
4583		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4584}
4585
4586static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4587				 bool enable)
4588{
4589	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4590	enum mlxsw_reg_spms_state spms_state;
4591	char *spms_pl;
4592	u16 vid;
4593	int err;
4594
4595	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4596			      MLXSW_REG_SPMS_STATE_DISCARDING;
4597
4598	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4599	if (!spms_pl)
4600		return -ENOMEM;
4601	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4602
4603	for (vid = 0; vid < VLAN_N_VID; vid++)
4604		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4605
4606	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4607	kfree(spms_pl);
4608	return err;
4609}
4610
/* Prepare the port for enslavement to an OVS master: enter virtual port
 * mode, set all VIDs to forwarding, make the port a member of VIDs
 * 1..4094, and disable learning on them (OVS handles forwarding
 * decisions itself). Rolls back in reverse order on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only on the VIDs already processed. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4645
4646static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4647{
4648	u16 vid;
4649
4650	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4651		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4652					       vid, true);
4653
4654	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4655			       false, false);
4656	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4657	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4658}
4659
4660static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4661{
4662	unsigned int num_vxlans = 0;
4663	struct net_device *dev;
4664	struct list_head *iter;
4665
4666	netdev_for_each_lower_dev(br_dev, dev, iter) {
4667		if (netif_is_vxlan(dev))
4668			num_vxlans++;
4669	}
4670
4671	return num_vxlans > 1;
4672}
4673
4674static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4675{
4676	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4677	struct net_device *dev;
4678	struct list_head *iter;
4679
4680	netdev_for_each_lower_dev(br_dev, dev, iter) {
4681		u16 pvid;
4682		int err;
4683
4684		if (!netif_is_vxlan(dev))
4685			continue;
4686
4687		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4688		if (err || !pvid)
4689			continue;
4690
4691		if (test_and_set_bit(pvid, vlans))
4692			return false;
4693	}
4694
4695	return true;
4696}
4697
4698static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4699					   struct netlink_ext_ack *extack)
4700{
4701	if (br_multicast_enabled(br_dev)) {
4702		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4703		return false;
4704	}
4705
4706	if (!br_vlan_enabled(br_dev) &&
4707	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4708		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4709		return false;
4710	}
4711
4712	if (br_vlan_enabled(br_dev) &&
4713	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4714		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4715		return false;
4716	}
4717
4718	return true;
4719}
4720
4721static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
4722				      struct net_device *dev)
4723{
4724	return upper_dev == netdev_master_upper_dev_get(dev);
4725}
4726
4727static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
4728				      unsigned long event, void *ptr,
4729				      bool process_foreign);
4730
/* Recursively validate the existing upper hierarchy of @dev by replaying
 * a synthetic NETDEV_PRECHANGEUPPER event for each upper, exactly as if
 * it were being linked now. Returns the first validation error, with
 * details in @extack. Used when an already-stacked device becomes
 * relevant to the driver (e.g. a port joins it) so unsupported
 * topologies are rejected up front.
 */
static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
					      struct net_device *dev,
					      struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
		struct netdev_notifier_changeupper_info info = {
			.info = {
				.dev = dev,
				.extack = extack,
			},
			.master = mlxsw_sp_netdev_is_master(upper_dev, dev),
			.upper_dev = upper_dev,
			.linking = true,

			/* upper_info is relevant for LAG devices. But we would
			 * only need this if LAG were a valid upper above
			 * another upper (e.g. a bridge that is a member of a
			 * LAG), and that is never a valid configuration. So we
			 * can keep this as NULL.
			 */
			.upper_info = NULL,
		};

		err = __mlxsw_sp_netdevice_event(mlxsw_sp,
						 NETDEV_PRECHANGEUPPER,
						 &info, true);
		if (err)
			return err;

		/* Validate this upper's own uppers as well. */
		err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
							 extack);
		if (err)
			return err;
	}

	return 0;
}
4772
/* Handle (PRE)CHANGEUPPER netdev events for a port netdev. @dev is the
 * port whose upper is changing; @lower_dev is the device the event was
 * originally reported for (may be a VLAN upper of @dev). On
 * NETDEV_PRECHANGEUPPER, unsupported configurations are vetoed with an
 * extack message; on NETDEV_CHANGEUPPER, the already-committed change is
 * applied (join/leave bridge, LAG or OVS). @replay_deslavement controls
 * whether deslavement from a bridge is replayed for @lower_dev.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr,
					       bool replay_deslavement)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		/* Validation only; nothing is changed here. */
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* A not-yet-offloaded upper with its own uppers: make sure
		 * the whole hierarchy above it would be acceptable.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		/* Only 802.1Q and 802.1AD VLAN-aware bridges are offloaded. */
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		/* The change already happened in the stack; mirror it into
		 * the device.
		 */
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      lower_dev);
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4928
4929static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4930					       unsigned long event, void *ptr)
4931{
4932	struct netdev_notifier_changelowerstate_info *info;
4933	struct mlxsw_sp_port *mlxsw_sp_port;
4934	int err;
4935
4936	mlxsw_sp_port = netdev_priv(dev);
4937	info = ptr;
4938
4939	switch (event) {
4940	case NETDEV_CHANGELOWERSTATE:
4941		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4942			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4943							info->lower_state_info);
4944			if (err)
4945				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4946		}
4947		break;
4948	}
4949
4950	return 0;
4951}
4952
4953static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4954					 struct net_device *port_dev,
4955					 unsigned long event, void *ptr,
4956					 bool replay_deslavement)
4957{
4958	switch (event) {
4959	case NETDEV_PRECHANGEUPPER:
4960	case NETDEV_CHANGEUPPER:
4961		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4962							   event, ptr,
4963							   replay_deslavement);
4964	case NETDEV_CHANGELOWERSTATE:
4965		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4966							   ptr);
4967	}
4968
4969	return 0;
4970}
4971
4972/* Called for LAG or its upper VLAN after the per-LAG-lower processing was done,
4973 * to do any per-LAG / per-LAG-upper processing.
4974 */
4975static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
4976					     unsigned long event,
4977					     void *ptr)
4978{
4979	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
4980	struct netdev_notifier_changeupper_info *info = ptr;
4981
4982	if (!mlxsw_sp)
4983		return 0;
4984
4985	switch (event) {
4986	case NETDEV_CHANGEUPPER:
4987		if (info->linking)
4988			break;
4989		if (netif_is_bridge_master(info->upper_dev))
4990			mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
4991		break;
4992	}
4993	return 0;
4994}
4995
4996static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4997					unsigned long event, void *ptr)
4998{
4999	struct net_device *dev;
5000	struct list_head *iter;
5001	int ret;
5002
5003	netdev_for_each_lower_dev(lag_dev, dev, iter) {
5004		if (mlxsw_sp_port_dev_check(dev)) {
5005			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5006							    ptr, false);
5007			if (ret)
5008				return ret;
5009		}
5010	}
5011
5012	return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
5013}
5014
/* Handle PRECHANGEUPPER / CHANGEUPPER notifications for a VLAN device whose
 * real device is the mlxsw port @dev. When the port is a LAG member, the
 * caller passes @replay_deslavement == false so deslavement is replayed
 * once per LAG rather than once per port.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid, bool replay_deslavement)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Veto upper types this driver does not handle: only
		 * bridge, macvlan and VRF uppers are accepted.
		 */
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		/* When enslaving to a not-yet-offloaded bridge that holds a
		 * VxLAN device, the bridge's VxLAN configuration must be one
		 * the driver can offload.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Validate the upper's own upper hierarchy, except when the
		 * upper is a bridge already offloaded by this driver.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
				/* LAG callers replay deslavement themselves,
				 * once for the whole LAG.
				 */
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      vlan_dev);
			}
		} else if (netif_is_macvlan(upper_dev)) {
			/* Unlinking a macvlan upper: drop the driver state
			 * kept for it.
			 */
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}
5082
5083static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5084						  struct net_device *lag_dev,
5085						  unsigned long event,
5086						  void *ptr, u16 vid)
5087{
5088	struct net_device *dev;
5089	struct list_head *iter;
5090	int ret;
5091
5092	netdev_for_each_lower_dev(lag_dev, dev, iter) {
5093		if (mlxsw_sp_port_dev_check(dev)) {
5094			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5095								 event, ptr,
5096								 vid, false);
5097			if (ret)
5098				return ret;
5099		}
5100	}
5101
5102	return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
5103}
5104
5105static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
5106						struct net_device *vlan_dev,
5107						struct net_device *br_dev,
5108						unsigned long event, void *ptr,
5109						u16 vid, bool process_foreign)
5110{
5111	struct netdev_notifier_changeupper_info *info = ptr;
5112	struct netlink_ext_ack *extack;
5113	struct net_device *upper_dev;
5114
5115	if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
5116		return 0;
5117
5118	extack = netdev_notifier_info_to_extack(&info->info);
5119
5120	switch (event) {
5121	case NETDEV_PRECHANGEUPPER:
5122		upper_dev = info->upper_dev;
5123		if (!netif_is_macvlan(upper_dev) &&
5124		    !netif_is_l3_master(upper_dev)) {
5125			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5126			return -EOPNOTSUPP;
5127		}
5128		break;
5129	case NETDEV_CHANGEUPPER:
5130		upper_dev = info->upper_dev;
5131		if (info->linking)
5132			break;
5133		if (netif_is_macvlan(upper_dev))
5134			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5135		break;
5136	}
5137
5138	return 0;
5139}
5140
5141static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
5142					 struct net_device *vlan_dev,
5143					 unsigned long event, void *ptr,
5144					 bool process_foreign)
5145{
5146	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5147	u16 vid = vlan_dev_vlan_id(vlan_dev);
5148
5149	if (mlxsw_sp_port_dev_check(real_dev))
5150		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5151							  event, ptr, vid,
5152							  true);
5153	else if (netif_is_lag_master(real_dev))
5154		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5155							      real_dev, event,
5156							      ptr, vid);
5157	else if (netif_is_bridge_master(real_dev))
5158		return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
5159							    real_dev, event,
5160							    ptr, vid,
5161							    process_foreign);
5162
5163	return 0;
5164}
5165
/* Handle PRECHANGEUPPER / CHANGEUPPER notifications for a bridge device.
 * Unless @process_foreign is set, bridges without an mlxsw lower are
 * ignored.
 */
static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *br_dev,
					   unsigned long event, void *ptr,
					   bool process_foreign)
{
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, macvlan and VRF uppers of a bridge are
		 * accepted.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		/* No uppers may be put on top of an 802.1ad bridge. */
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		/* VLAN uppers are restricted to the 802.1q protocol. */
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Unlinking: tear down the driver state kept for the upper. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
5218
5219static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5220					    unsigned long event, void *ptr)
5221{
5222	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5223	struct netdev_notifier_changeupper_info *info = ptr;
5224	struct netlink_ext_ack *extack;
5225	struct net_device *upper_dev;
5226
5227	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5228		return 0;
5229
5230	extack = netdev_notifier_info_to_extack(&info->info);
5231	upper_dev = info->upper_dev;
5232
5233	if (!netif_is_l3_master(upper_dev)) {
5234		NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5235		return -EOPNOTSUPP;
5236	}
5237
5238	return 0;
5239}
5240
/* Handle notifications for a VxLAN device: join the hardware representation
 * of the bridge when the device is linked to an offloaded bridge or brought
 * up under one, and leave it on unlinking or NETDEV_DOWN.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Bridges without an mlxsw lower are not our concern. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* A device that is down will join on NETDEV_PRE_UP
			 * instead.
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
5308
5309static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
5310				      unsigned long event, void *ptr,
5311				      bool process_foreign)
5312{
5313	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5314	struct mlxsw_sp_span_entry *span_entry;
5315	int err = 0;
5316
5317	if (event == NETDEV_UNREGISTER) {
5318		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5319		if (span_entry)
5320			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5321	}
5322
5323	if (netif_is_vxlan(dev))
5324		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5325	else if (mlxsw_sp_port_dev_check(dev))
5326		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
5327	else if (netif_is_lag_master(dev))
5328		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5329	else if (is_vlan_dev(dev))
5330		err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
5331						    process_foreign);
5332	else if (netif_is_bridge_master(dev))
5333		err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
5334						      process_foreign);
5335	else if (netif_is_macvlan(dev))
5336		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5337
5338	return err;
5339}
5340
5341static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5342				    unsigned long event, void *ptr)
5343{
5344	struct mlxsw_sp *mlxsw_sp;
5345	int err;
5346
5347	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5348	mlxsw_sp_span_respin(mlxsw_sp);
5349	err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);
5350
5351	return notifier_from_errno(err);
5352}
5353
/* PCI device ID tables and PCI drivers, one pair per Spectrum generation.
 * The structs carry no probe/remove callbacks here; they are registered
 * through mlxsw_pci_driver_register() in mlxsw_sp_module_init() below —
 * presumably the mlxsw PCI core fills in the common callbacks (verify in
 * pci.c).
 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
5393
/* Module entry point: register the four core drivers, then the four PCI
 * drivers. On any failure, unwind every registration done so far via the
 * goto ladder — the labels run in exact reverse order of registration.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		return err;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	return err;
}
5448
/* Module exit: unregister everything in exact reverse order of the
 * registrations performed in mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
}
5460
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Export the PCI ID tables so userspace can autoload the module. */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
/* Firmware files referenced by this driver. */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);
5475