1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/types.h>
7#include <linux/pci.h>
8#include <linux/netdevice.h>
9#include <linux/etherdevice.h>
10#include <linux/ethtool.h>
11#include <linux/slab.h>
12#include <linux/device.h>
13#include <linux/skbuff.h>
14#include <linux/if_vlan.h>
15#include <linux/if_bridge.h>
16#include <linux/workqueue.h>
17#include <linux/jiffies.h>
18#include <linux/bitops.h>
19#include <linux/list.h>
20#include <linux/notifier.h>
21#include <linux/dcbnl.h>
22#include <linux/inetdevice.h>
23#include <linux/netlink.h>
24#include <linux/jhash.h>
25#include <linux/log2.h>
26#include <net/switchdev.h>
27#include <net/pkt_cls.h>
28#include <net/netevent.h>
29#include <net/addrconf.h>
30
31#include "spectrum.h"
32#include "pci.h"
33#include "core.h"
34#include "core_env.h"
35#include "reg.h"
36#include "port.h"
37#include "trap.h"
38#include "txheader.h"
39#include "spectrum_cnt.h"
40#include "spectrum_dpipe.h"
41#include "spectrum_acl_flex_actions.h"
42#include "spectrum_span.h"
43#include "spectrum_ptp.h"
44#include "spectrum_trap.h"
45
/* Minimum firmware version the driver supports, per ASIC generation. If
 * the device runs an older version, the driver requests flashing of the
 * corresponding .mfa2 file defined below.
 */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 1310
/* NOTE(review): presumably the oldest minor version from which the device
 * can be reset into new firmware without a power cycle - confirm against
 * the mlxsw_fw_rev consumers.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware file name, built from the Spectrum-1 version numbers above. */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

/* Spectrum-2 counterparts of the definitions above. */
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 1310

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

/* Spectrum-3 counterparts of the definitions above. */
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 1310

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

/* Per-generation driver names, registered with the mlxsw core. */
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

/* Per-generation MAC masks. NOTE(review): presumably the bits cleared in
 * the mask form the per-port offset space on top of the base MAC (see
 * mlxsw_sp_port_dev_addr_init()) - confirm against the users of these
 * tables.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
103
/* Fields of the Tx header that is prepended to every transmitted packet
 * by mlxsw_sp_txhdr_construct() below and consumed by the device.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
171
172int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
173			      unsigned int counter_index, u64 *packets,
174			      u64 *bytes)
175{
176	char mgpc_pl[MLXSW_REG_MGPC_LEN];
177	int err;
178
179	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
180			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
181	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
182	if (err)
183		return err;
184	if (packets)
185		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
186	if (bytes)
187		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
188	return 0;
189}
190
191static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
192				       unsigned int counter_index)
193{
194	char mgpc_pl[MLXSW_REG_MGPC_LEN];
195
196	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
197			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
198	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
199}
200
201int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
202				unsigned int *p_counter_index)
203{
204	int err;
205
206	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
207				     p_counter_index);
208	if (err)
209		return err;
210	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
211	if (err)
212		goto err_counter_clear;
213	return 0;
214
215err_counter_clear:
216	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
217			      *p_counter_index);
218	return err;
219}
220
221void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
222				unsigned int counter_index)
223{
224	 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
225			       counter_index);
226}
227
/* Prepend and fill the Tx header on an skb about to be handed to the
 * device. The caller must have ensured MLXSW_TXHDR_LEN bytes of headroom
 * (see skb_cow_head() in mlxsw_sp_port_xmit()).
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	/* Control packets are directed to a specific egress port (see the
	 * tx_hdr_port_mid field description above).
	 */
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
243
244enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
245{
246	switch (state) {
247	case BR_STATE_FORWARDING:
248		return MLXSW_REG_SPMS_STATE_FORWARDING;
249	case BR_STATE_LEARNING:
250		return MLXSW_REG_SPMS_STATE_LEARNING;
251	case BR_STATE_LISTENING:
252	case BR_STATE_DISABLED:
253	case BR_STATE_BLOCKING:
254		return MLXSW_REG_SPMS_STATE_DISCARDING;
255	default:
256		BUG();
257	}
258}
259
260int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
261			      u8 state)
262{
263	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
264	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
265	char *spms_pl;
266	int err;
267
268	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
269	if (!spms_pl)
270		return -ENOMEM;
271	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
272	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
273
274	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
275	kfree(spms_pl);
276	return err;
277}
278
279static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
280{
281	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
282	int err;
283
284	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
285	if (err)
286		return err;
287	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
288	return 0;
289}
290
291int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
292				   bool is_up)
293{
294	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
295	char paos_pl[MLXSW_REG_PAOS_LEN];
296
297	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
298			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
299			    MLXSW_PORT_ADMIN_STATUS_DOWN);
300	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
301}
302
303static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
304				      unsigned char *addr)
305{
306	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
307	char ppad_pl[MLXSW_REG_PPAD_LEN];
308
309	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
310	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
311	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
312}
313
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last byte, then program it to the hardware.
 * NOTE(review): the addition can wrap within the last byte for large
 * local port numbers; presumably the base MAC leaves enough zero bits
 * (see the mlxsw_sp*_mac_mask tables above) - confirm.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
323
324static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
325{
326	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
327	char pmtu_pl[MLXSW_REG_PMTU_LEN];
328	int err;
329
330	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
331	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
332	if (err)
333		return err;
334
335	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
336	return 0;
337}
338
339static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
340{
341	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
342	char pmtu_pl[MLXSW_REG_PMTU_LEN];
343
344	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
345	if (mtu > mlxsw_sp_port->max_mtu)
346		return -EINVAL;
347
348	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
349	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
350}
351
352static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
353{
354	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
355	char pspa_pl[MLXSW_REG_PSPA_LEN];
356
357	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
358	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
359}
360
361int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
362{
363	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
364	char svpe_pl[MLXSW_REG_SVPE_LEN];
365
366	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
367	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
368}
369
370int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
371				   bool learn_enable)
372{
373	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
374	char *spvmlr_pl;
375	int err;
376
377	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
378	if (!spvmlr_pl)
379		return -ENOMEM;
380	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
381			      learn_enable);
382	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
383	kfree(spvmlr_pl);
384	return err;
385}
386
387static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
388				    u16 vid)
389{
390	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
391	char spvid_pl[MLXSW_REG_SPVID_LEN];
392
393	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
394	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
395}
396
397static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
398					    bool allow)
399{
400	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
401	char spaft_pl[MLXSW_REG_SPAFT_LEN];
402
403	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
404	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
405}
406
/* Set the port's PVID and cache it in mlxsw_sp_port->pvid. A VID of 0
 * means "no PVID": untagged reception is disabled instead of programming
 * SPVID. On failure to allow untagged traffic the old PVID is restored.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	/* Only update the cached PVID once hardware accepted the change. */
	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	/* Roll back to the previously cached PVID. */
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
431
432static int
433mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
434{
435	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
436	char sspr_pl[MLXSW_REG_SSPR_LEN];
437
438	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
439	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
440}
441
/* Read the PMLP register for @local_port and validate that the lane to
 * module mapping is one the driver supports: a single module, a
 * power-of-2 width, and identical, sequential Rx/Tx lane numbers. On
 * success, fill @port_mapping with the module, width and base lane.
 * Returns -EINVAL for unsupported configurations.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	/* Lane 0's module serves as the reference all lanes must match. */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	/* Width 0 (unmapped port) is tolerated; the loops below are no-ops
	 * in that case.
	 */
	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		/* Rx/Tx lanes only differ when separate_rxtx is set. */
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
492
493static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
494{
495	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
496	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
497	char pmlp_pl[MLXSW_REG_PMLP_LEN];
498	int i;
499
500	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
501	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
502	for (i = 0; i < port_mapping->width; i++) {
503		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
504		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
505	}
506
507	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
508}
509
510static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
511{
512	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
513	char pmlp_pl[MLXSW_REG_PMLP_LEN];
514
515	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
516	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
517	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
518}
519
520static int mlxsw_sp_port_open(struct net_device *dev)
521{
522	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
523	int err;
524
525	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
526	if (err)
527		return err;
528	netif_start_queue(dev);
529	return 0;
530}
531
532static int mlxsw_sp_port_stop(struct net_device *dev)
533{
534	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
535
536	netif_stop_queue(dev);
537	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
538}
539
/* ndo_start_xmit handler: prepend the Tx header and hand the skb to the
 * mlxsw core for transmission. All NETDEV_TX_OK paths either transmit or
 * free the skb; NETDEV_TX_BUSY leaves it with the stack for requeueing.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Ensure headroom for the Tx header (may reallocate the head). */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb on failure, so only account the
	 * drop here.
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
592
/* ndo_set_rx_mode handler. Intentionally empty - NOTE(review): presumably
 * the device needs no action here and the stub only satisfies the ndo
 * interface; confirm against the netdev_ops definition.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
596
597static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
598{
599	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
600	struct sockaddr *addr = p;
601	int err;
602
603	if (!is_valid_ether_addr(addr->sa_data))
604		return -EADDRNOTAVAIL;
605
606	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
607	if (err)
608		return err;
609	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
610	return 0;
611}
612
/* ndo_change_mtu handler. The headroom (port buffer) configuration
 * depends on the MTU, so it is reconfigured first; if the subsequent MTU
 * write fails, the original headroom configuration is restored.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	/* Keep a copy for rollback. */
	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Best-effort rollback to the headroom matching the old MTU. */
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
642
/* Sum the port's per-CPU software counters into @stats. Always returns 0
 * (the int return matches the offload-stats callback signature below).
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Retry until a consistent snapshot of this CPU's 64-bit
		 * counters is read.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
674
675static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
676{
677	switch (attr_id) {
678	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
679		return true;
680	}
681
682	return false;
683}
684
685static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
686					   void *sp)
687{
688	switch (attr_id) {
689	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
690		return mlxsw_sp_port_get_sw_stats64(dev, sp);
691	}
692
693	return -EINVAL;
694}
695
696int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
697				int prio, char *ppcnt_pl)
698{
699	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
700	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
701
702	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
703	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
704}
705
706static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
707				      struct rtnl_link_stats64 *stats)
708{
709	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
710	int err;
711
712	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
713					  0, ppcnt_pl);
714	if (err)
715		goto out;
716
717	stats->tx_packets =
718		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
719	stats->rx_packets =
720		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
721	stats->tx_bytes =
722		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
723	stats->rx_bytes =
724		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
725	stats->multicast =
726		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
727
728	stats->rx_crc_errors =
729		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
730	stats->rx_frame_errors =
731		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
732
733	stats->rx_length_errors = (
734		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
735		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
736		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
737
738	stats->rx_errors = (stats->rx_crc_errors +
739		stats->rx_frame_errors + stats->rx_length_errors);
740
741out:
742	return err;
743}
744
/* Collect extended hardware counters (ECN marks, per-TC congestion and
 * queue counters, per-priority Tx counters) into @xstats. Each counter
 * group is queried independently; on a query failure the corresponding
 * @xstats fields are simply left unchanged.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
786
/* Delayed work that periodically refreshes the cached hardware statistics
 * so mlxsw_sp_port_get_stats64() can serve them from atomic context. The
 * work always reschedules itself every MLXSW_HW_STATS_UPDATE_TIME.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
808
809/* Return the stats from a cache that is updated periodically,
810 * as this function might get called in an atomic context.
811 */
812static void
813mlxsw_sp_port_get_stats64(struct net_device *dev,
814			  struct rtnl_link_stats64 *stats)
815{
816	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
817
818	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
819}
820
821static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
822				    u16 vid_begin, u16 vid_end,
823				    bool is_member, bool untagged)
824{
825	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
826	char *spvm_pl;
827	int err;
828
829	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
830	if (!spvm_pl)
831		return -ENOMEM;
832
833	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port,	vid_begin,
834			    vid_end, is_member, untagged);
835	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
836	kfree(spvm_pl);
837	return err;
838}
839
840int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
841			   u16 vid_end, bool is_member, bool untagged)
842{
843	u16 vid, vid_e;
844	int err;
845
846	for (vid = vid_begin; vid <= vid_end;
847	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
848		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
849			    vid_end);
850
851		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
852					       is_member, untagged);
853		if (err)
854			return err;
855	}
856
857	return 0;
858}
859
860static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
861				     bool flush_default)
862{
863	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
864
865	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
866				 &mlxsw_sp_port->vlans_list, list) {
867		if (!flush_default &&
868		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
869			continue;
870		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
871	}
872}
873
874static void
875mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
876{
877	if (mlxsw_sp_port_vlan->bridge_port)
878		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
879	else if (mlxsw_sp_port_vlan->fid)
880		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
881}
882
/* Create a {port, VLAN} entry for @vid: program hardware membership (the
 * default VID joins untagged), allocate the bookkeeping structure and
 * link it on the port's VLAN list. Returns ERR_PTR(-EEXIST) if the entry
 * already exists; on allocation failure the hardware membership is rolled
 * back.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the hardware VLAN membership programmed above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
914
/* Tear down a {port, VLAN} entry: detach it from any upper, unlink and
 * free it, then remove the hardware VLAN membership. The port pointer and
 * VID are saved up front since the structure is freed before the final
 * hardware update.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
925
926static int mlxsw_sp_port_add_vid(struct net_device *dev,
927				 __be16 __always_unused proto, u16 vid)
928{
929	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
930
931	/* VLAN 0 is added to HW filter when device goes up, but it is
932	 * reserved in our case, so simply return.
933	 */
934	if (!vid)
935		return 0;
936
937	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
938}
939
940static int mlxsw_sp_port_kill_vid(struct net_device *dev,
941				  __be16 __always_unused proto, u16 vid)
942{
943	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
944	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
945
946	/* VLAN 0 is removed from HW filter when device goes down, but
947	 * it is reserved in our case, so simply return.
948	 */
949	if (!vid)
950		return 0;
951
952	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
953	if (!mlxsw_sp_port_vlan)
954		return 0;
955	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
956
957	return 0;
958}
959
960static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
961				   struct flow_block_offload *f)
962{
963	switch (f->binder_type) {
964	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
965		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
966	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
967		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
968	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
969		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
970	default:
971		return -EOPNOTSUPP;
972	}
973}
974
975static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
976			     void *type_data)
977{
978	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
979
980	switch (type) {
981	case TC_SETUP_BLOCK:
982		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
983	case TC_SETUP_QDISC_RED:
984		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
985	case TC_SETUP_QDISC_PRIO:
986		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
987	case TC_SETUP_QDISC_ETS:
988		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
989	case TC_SETUP_QDISC_TBF:
990		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
991	case TC_SETUP_QDISC_FIFO:
992		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
993	default:
994		return -EOPNOTSUPP;
995	}
996}
997
/* Feature handler for NETIF_F_HW_TC. Disabling is refused while any tc
 * rules are installed on the port's ingress or egress flow blocks;
 * otherwise the blocks' disable count is incremented (disable) or
 * decremented (enable).
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}
1016
/* Feature handler for NETIF_F_LOOPBACK, programmed via the PPLR register.
 * If the interface is running, the port is taken administratively down
 * around the change and brought back up afterwards; errors from those
 * admin status toggles are ignored (best effort), only the PPLR write
 * result is returned.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
1035
1036typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1037
1038static int mlxsw_sp_handle_feature(struct net_device *dev,
1039				   netdev_features_t wanted_features,
1040				   netdev_features_t feature,
1041				   mlxsw_sp_feature_handler feature_handler)
1042{
1043	netdev_features_t changes = wanted_features ^ dev->features;
1044	bool enable = !!(wanted_features & feature);
1045	int err;
1046
1047	if (!(changes & feature))
1048		return 0;
1049
1050	err = feature_handler(dev, enable);
1051	if (err) {
1052		netdev_err(dev, "%s feature %pNF failed, err %d\n",
1053			   enable ? "Enable" : "Disable", &feature, err);
1054		return err;
1055	}
1056
1057	if (enable)
1058		dev->features |= feature;
1059	else
1060		dev->features &= ~feature;
1061
1062	return 0;
1063}
1064static int mlxsw_sp_set_features(struct net_device *dev,
1065				 netdev_features_t features)
1066{
1067	netdev_features_t oper_features = dev->features;
1068	int err = 0;
1069
1070	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1071				       mlxsw_sp_feature_hw_tc);
1072	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1073				       mlxsw_sp_feature_loopback);
1074
1075	if (err) {
1076		dev->features = oper_features;
1077		return -EINVAL;
1078	}
1079
1080	return 0;
1081}
1082
1083static struct devlink_port *
1084mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1085{
1086	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1087	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1088
1089	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1090						mlxsw_sp_port->local_port);
1091}
1092
/* SIOCSHWTSTAMP handler: copy the hwtstamp config from user space, apply
 * it through the ASIC-generation-specific PTP ops and, on success, copy
 * the (possibly adjusted by the op) config back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	/* Report back what was actually configured. */
	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
1112
/* SIOCGHWTSTAMP handler: read the current hwtstamp config via the PTP ops
 * and copy it to user space.
 */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
1129
/* Reset the port's hardware timestamping to the all-zero (disabled)
 * configuration. The return value of hwtstamp_set is deliberately
 * ignored: this is best-effort cleanup on the port-removal path.
 */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
1136
1137static int
1138mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1139{
1140	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1141
1142	switch (cmd) {
1143	case SIOCSHWTSTAMP:
1144		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1145	case SIOCGHWTSTAMP:
1146		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1147	default:
1148		return -EOPNOTSUPP;
1149	}
1150}
1151
/* net_device operations for front-panel Spectrum ports; installed on each
 * netdev in mlxsw_sp_port_create().
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};
1169
/* Program the advertised speed mask of the port: query the PTYS register
 * for the device's capability mask, intersect it with the speeds the
 * driver supports, and write the result back as the admin mask.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	/* Mask the capability bits down to what the driver knows about. */
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1199
/* Read the current operational speed of the port (in the units returned
 * by the generation-specific from_ptys_speed op) from the PTYS register.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	/* Only the operational mask is of interest here. */
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}
1220
/* Configure one element of the port's ETS scheduling hierarchy (QEEC
 * register): link element @index at hierarchy level @hr to @next_index at
 * the next level, optionally in DWRR mode with the given weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* "de" selects the DWRR-configuration part of the register. */
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1235
/* Configure the max shaper of one ETS hierarchy element (QEEC register):
 * limit element @index at level @hr to @maxrate with @burst_size.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* "mase" selects the max-shaper part of the register. */
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1250
/* Configure the min shaper (guaranteed bandwidth) of one ETS hierarchy
 * element via the QEEC register.
 */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* "mise" selects the min-shaper part of the register. */
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1265
/* Map a switch priority to a traffic class on this port (QTCT register). */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
1276
/* Bring the port's ETS scheduling hierarchy to a known initial state:
 * build the element hierarchy, disable all max shapers, give the
 * multicast TCs a minimal guaranteed bandwidth and map every priority to
 * traffic class 0. TCs i + 8 appear to be the multicast companions of the
 * unicast TCs i (NOTE(review): inferred from the "multicast TCs" comment
 * below — confirm against the QEEC documentation).
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1362
/* Enable/disable multicast-aware traffic class mode on the port via the
 * QTCTM register.
 */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}
1372
/* Snapshot the module's overheat counter at port-creation time so that
 * later readings can be reported relative to this baseline.
 */
static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
						    &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}
1388
/* Create, fully initialize and register the netdev for one front-panel
 * port. @split_base_local_port is non-zero iff the port is the result of
 * a split, in which case it names the first port of the split cluster.
 *
 * The function is a long strictly-ordered init sequence; every stage has
 * a matching unwind label below, executed in reverse order on failure
 * (standard goto-cleanup ladder).
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	struct net_device *dev;
	bool splittable;
	int err;

	/* Only an unsplit port that owns more than one lane can be split
	 * further.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / lanes,
				   splittable, lanes,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	/* NOTE(review): the message below hard-codes "VID 1", relying on
	 * MLXSW_SP_DEFAULT_VID being 1.
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	/* Publish the port before registering the netdev so event handlers
	 * can look it up by local port.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	/* Kick off periodic HW-stats collection immediately. */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}
1657
/* Tear down a front-panel port created by mlxsw_sp_port_create(). The
 * steps mirror the creation sequence in reverse; the ordering (cancel
 * deferred work first, unregister the netdev before freeing resources it
 * may still use) must be preserved.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLAN entries should have been flushed above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
1682
/* Create the (netdev-less) CPU port object and register it with the core.
 * Unlike front-panel ports it is a plain kzalloc'ed structure, stored in
 * the ports array at MLXSW_PORT_CPU_PORT.
 */
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}
1711
1712static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1713{
1714	struct mlxsw_sp_port *mlxsw_sp_port =
1715				mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1716
1717	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1718	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1719	kfree(mlxsw_sp_port);
1720}
1721
1722static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1723{
1724	return mlxsw_sp->ports[local_port] != NULL;
1725}
1726
/* Remove all front-panel ports, then the CPU port, and release the ports
 * array. Port 0 is skipped: it is the CPU port's slot.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}
1738
/* Allocate the ports array, create the CPU port and then every
 * front-panel port that has a module mapping. On failure, ports created
 * so far are removed in reverse order (goto-cleanup ladder).
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		/* Ports with no module mapping are not instantiated. */
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		/* split_base_local_port == 0 means "not a split port". */
		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
1776
/* Build the per-local-port module mapping table: query each port's
 * mapping from firmware and keep a heap-allocated copy for every port
 * whose width is non-zero (i.e. that has a module attached).
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if (err)
			goto err_port_module_info_get;
		/* Width 0: no module behind this local port. */
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	/* Free the copies made so far; slots never filled are NULL and
	 * were never reached (i counts down from the failing index).
	 */
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}
1814
1815static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
1816{
1817	int i;
1818
1819	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1820		kfree(mlxsw_sp->port_mapping[i]);
1821	kfree(mlxsw_sp->port_mapping);
1822}
1823
/* Return the first local port of the width-@max_width cluster that
 * contains @local_port. Local ports are 1-based, hence the -1 bias.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
{
	return local_port - (u8)((local_port - 1) % max_width);
}
1830
/* Create @count split ports starting at @base_port, @offset local ports
 * apart. Each child inherits the parent's mapping with 1/count of its
 * width, and consecutive children take consecutive lane ranges. On
 * failure, the children created so far are removed.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Next child starts where this one's lanes end. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}
1857
/* Recreate the original unsplit ports in the local-port range previously
 * occupied by split children. Creation errors are deliberately ignored
 * (best effort): failing to bring one port back should not prevent
 * recreating the others.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}
1873
1874static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
1875				       unsigned int count,
1876				       unsigned int max_width)
1877{
1878	enum mlxsw_res_id local_ports_in_x_res_id;
1879	int split_width = max_width / count;
1880
1881	if (split_width == 1)
1882		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
1883	else if (split_width == 2)
1884		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
1885	else if (split_width == 4)
1886		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
1887	else
1888		return -EINVAL;
1889
1890	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
1891		return -EINVAL;
1892	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
1893}
1894
1895static struct mlxsw_sp_port *
1896mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1897{
1898	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
1899		return mlxsw_sp->ports[local_port];
1900	return NULL;
1901}
1902
/* devlink port-split handler: validate that @local_port can be split into
 * @count ports, remove the ports occupying the target local-port range
 * and create the split children. On failure the original unsplit ports
 * are restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	/* Verify the target range is free of unexpected ports before
	 * tearing anything down.
	 */
	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before removing the port that owns it. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}
1986
/* devlink port-unsplit handler: remove the split children of the cluster
 * that @local_port belongs to and recreate the original unsplit port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Recover the original split count from the current port width. */
	count = max_width / mlxsw_sp_port->mapping.width;

	/* The offset was valid at split time, so failure here indicates an
	 * internal inconsistency - hence the WARN_ON.
	 */
	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}
2039
2040static void
2041mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2042{
2043	int i;
2044
2045	for (i = 0; i < TC_MAX_QUEUE; i++)
2046		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2047}
2048
2049static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2050				     char *pude_pl, void *priv)
2051{
2052	struct mlxsw_sp *mlxsw_sp = priv;
2053	struct mlxsw_sp_port *mlxsw_sp_port;
2054	enum mlxsw_reg_pude_oper_status status;
2055	unsigned int max_ports;
2056	u8 local_port;
2057
2058	max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2059	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2060
2061	if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
2062		return;
2063	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2064	if (!mlxsw_sp_port)
2065		return;
2066
2067	status = mlxsw_reg_pude_oper_status_get(pude_pl);
2068	if (status == MLXSW_PORT_OPER_STATUS_UP) {
2069		netdev_info(mlxsw_sp_port->dev, "link up\n");
2070		netif_carrier_on(mlxsw_sp_port->dev);
2071		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2072	} else {
2073		netdev_info(mlxsw_sp_port->dev, "link down\n");
2074		netif_carrier_off(mlxsw_sp_port->dev);
2075		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2076	}
2077}
2078
2079static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2080					  char *mtpptr_pl, bool ingress)
2081{
2082	u8 local_port;
2083	u8 num_rec;
2084	int i;
2085
2086	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2087	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2088	for (i = 0; i < num_rec; i++) {
2089		u8 domain_number;
2090		u8 message_type;
2091		u16 sequence_id;
2092		u64 timestamp;
2093
2094		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2095					&domain_number, &sequence_id,
2096					&timestamp);
2097		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2098					    message_type, domain_number,
2099					    sequence_id, timestamp);
2100	}
2101}
2102
2103static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2104					      char *mtpptr_pl, void *priv)
2105{
2106	struct mlxsw_sp *mlxsw_sp = priv;
2107
2108	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2109}
2110
2111static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2112					      char *mtpptr_pl, void *priv)
2113{
2114	struct mlxsw_sp *mlxsw_sp = priv;
2115
2116	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2117}
2118
/* Default RX handler for trapped packets: attribute the packet to the
 * ingress port, account it in the port's per-CPU statistics and inject
 * it into the networking stack. Non-static so listener tables in other
 * Spectrum modules can reference it.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* syncp protects the 64-bit counters against torn reads on
	 * 32-bit architectures.
	 */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2143
2144static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2145					   void *priv)
2146{
2147	skb->offload_fwd_mark = 1;
2148	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2149}
2150
2151static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2152					      u8 local_port, void *priv)
2153{
2154	skb->offload_l3_fwd_mark = 1;
2155	skb->offload_fwd_mark = 1;
2156	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2157}
2158
/* Dispatch a trapped PTP packet to the ASIC-generation specific PTP
 * receive implementation.
 */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u8 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2164
/* Deliver a sampled packet to the psample module. Consumes @skb in all
 * cases, including the error paths.
 */
void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			     u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_sample *sample;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}

	/* The port's sampling parameters are RCU-protected: they may be
	 * freed concurrently when sampling is disabled on the port.
	 */
	rcu_read_lock();
	sample = rcu_dereference(mlxsw_sp_port->sample);
	if (!sample)
		goto out_unlock;
	size = sample->truncate ? sample->trunc_size : skb->len;
	psample_sample_packet(sample->psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
2190
/* Helpers for building the listener tables below. All Spectrum RX traps
 * use the SP_-prefixed trap groups and drop the packet (DISCARD) while
 * the listener is disabled.
 */

/* RX trap whose packets enter the stack without any offload marking. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX trap whose packets are marked as already L2-forwarded in HW. */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		_is_ctrl, SP_##_trap_group, DISCARD)

/* RX trap whose packets are marked as already L2- and L3-forwarded in
 * HW.
 */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener; events always belong to the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2205
/* Listeners common to all Spectrum generations, registered during
 * traps initialization.
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2231
/* Spectrum-1 only listeners: PTP timestamps are read from FIFOs via
 * MTPPTR events on this generation.
 */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2237
/* Configure the CPU policers (QPCR register) that rate-limit the trap
 * groups used by this file, and record each used policer in the
 * policers_usage bitmap so devlink-trap does not hand it out again.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		/* Policer index equals the trap group it serves; only the
		 * groups below are policed here, the rest are skipped.
		 */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2278
/* Configure the trap groups (HTGT register) used by this file: assign
 * each group its priority, traffic class and policer. Policer index is
 * assumed equal to the trap group index, matching the assignment made
 * in mlxsw_sp_cpu_policers_set().
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events are not policed. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* Sanity: the derived policer index must actually exist. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2325
2326static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2327				   const struct mlxsw_listener listeners[],
2328				   size_t listeners_count)
2329{
2330	int i;
2331	int err;
2332
2333	for (i = 0; i < listeners_count; i++) {
2334		err = mlxsw_core_trap_register(mlxsw_sp->core,
2335					       &listeners[i],
2336					       mlxsw_sp);
2337		if (err)
2338			goto err_listener_register;
2339
2340	}
2341	return 0;
2342
2343err_listener_register:
2344	for (i--; i >= 0; i--) {
2345		mlxsw_core_trap_unregister(mlxsw_sp->core,
2346					   &listeners[i],
2347					   mlxsw_sp);
2348	}
2349	return err;
2350}
2351
2352static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2353				      const struct mlxsw_listener listeners[],
2354				      size_t listeners_count)
2355{
2356	int i;
2357
2358	for (i = 0; i < listeners_count; i++) {
2359		mlxsw_core_trap_unregister(mlxsw_sp->core,
2360					   &listeners[i],
2361					   mlxsw_sp);
2362	}
2363}
2364
/* Initialize trap handling: allocate the trap state (including the
 * policer usage bitmap), configure CPU policers and trap groups, and
 * register the common plus the generation-specific listener tables.
 *
 * Returns 0 on success or a negative errno; on failure everything that
 * was set up is unwound via the goto chain.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* policers_usage is a flexible bitmap sized by the device's
	 * policer count.
	 */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Generation-specific listeners (may be an empty set). */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2410
/* Tear down trap handling; listeners are unregistered in reverse order
 * of their registration in mlxsw_sp_traps_init().
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	kfree(mlxsw_sp->trap);
}
2419
/* Initial value mixed into the LAG hash seed derivation. */
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Initialize LAG support: program the device's LAG hash configuration
 * (SLCR register) with a per-system seed derived from the base MAC, and
 * allocate the per-LAG bookkeeping array.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	/* Hash the base MAC so different systems spread flows over LAG
	 * members differently.
	 */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}
2455
/* Free the per-LAG array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2460
2461static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
2462{
2463	char htgt_pl[MLXSW_REG_HTGT_LEN];
2464	int err;
2465
2466	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
2467			    MLXSW_REG_HTGT_INVALID_POLICER,
2468			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2469			    MLXSW_REG_HTGT_DEFAULT_TC);
2470	err =  mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2471	if (err)
2472		return err;
2473
2474	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
2475			    MLXSW_REG_HTGT_INVALID_POLICER,
2476			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2477			    MLXSW_REG_HTGT_DEFAULT_TC);
2478	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2479	if (err)
2480		return err;
2481
2482	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
2483			    MLXSW_REG_HTGT_INVALID_POLICER,
2484			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2485			    MLXSW_REG_HTGT_DEFAULT_TC);
2486	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2487	if (err)
2488		return err;
2489
2490	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
2491			    MLXSW_REG_HTGT_INVALID_POLICER,
2492			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2493			    MLXSW_REG_HTGT_DEFAULT_TC);
2494	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2495}
2496
/* PTP operations for Spectrum-1, where timestamps are retrieved from
 * dedicated FIFOs via MTPPTR events.
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};
2512
/* PTP operations shared by Spectrum-2 and later generations. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};
2528
2529static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2530				    unsigned long event, void *ptr);
2531
/* Common initialization flow for all Spectrum generations. Called after
 * the generation-specific mlxsw_sp[123]_init() hook has wired up the
 * per-ASIC ops. Sub-system init order matters (see inline comments);
 * on failure everything is unwound in reverse order via the goto chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* PTP is only supported when the bus can read the free-running
	 * clock.
	 */
	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	/* Ports are created last so every sub-system they rely on is
	 * already up.
	 */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
2734
2735static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
2736			  const struct mlxsw_bus_info *mlxsw_bus_info,
2737			  struct netlink_ext_ack *extack)
2738{
2739	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2740
2741	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
2742	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
2743	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
2744	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
2745	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
2746	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
2747	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
2748	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
2749	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
2750	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
2751	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
2752	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
2753	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
2754	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
2755	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
2756	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
2757	mlxsw_sp->listeners = mlxsw_sp1_listener;
2758	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
2759	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
2760
2761	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2762}
2763
2764static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
2765			  const struct mlxsw_bus_info *mlxsw_bus_info,
2766			  struct netlink_ext_ack *extack)
2767{
2768	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2769
2770	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
2771	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
2772	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
2773	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
2774	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
2775	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
2776	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
2777	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
2778	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
2779	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
2780	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
2781	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
2782	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
2783	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
2784	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
2785	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
2786	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
2787
2788	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2789}
2790
2791static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
2792			  const struct mlxsw_bus_info *mlxsw_bus_info,
2793			  struct netlink_ext_ack *extack)
2794{
2795	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2796
2797	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
2798	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
2799	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
2800	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
2801	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
2802	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
2803	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
2804	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
2805	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
2806	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
2807	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
2808	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
2809	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
2810	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
2811	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
2812	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
2813	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
2814
2815	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2816}
2817
/* Common teardown flow; sub-systems are shut down in the exact reverse
 * order of mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP was only initialized when a clock exists; see
	 * mlxsw_sp_init().
	 */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}
2846
/* Size of one per-FID flood table. Per-FID flood tables are used for
 * both "true" 802.1D FIDs and emulated 802.1Q FIDs, so the table must
 * accommodate both ranges.
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)
2852
/* Device configuration profile applied to Spectrum-1 during core init.
 * Unlike later generations, SP1 requires explicit KVD partition sizes.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2876
/* Device configuration profile applied to Spectrum-2 and later; these
 * generations manage KVD sizing internally, so no KVD fields are set.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2896
/* Fill in the devlink resource size parameters for the KVD memory and
 * its three partitions (linear, hash-single, hash-double). Each
 * partition's maximum is bounded by the total KVD size minus the
 * minimums of the other partitions.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* The total KVD size is fixed: min == max == kvd_size. */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
2932
/* Register the Spectrum-1 KVD memory and its partitions as devlink
 * resources: the KVD root, the linear partition (and its sub-resources)
 * and the hash-single/hash-double partitions. The hash partitions split
 * the remainder of the KVD after the linear part according to the
 * single/double ratio from the config profile.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if  (err)
		return err;

	/* Split what is left after the linear part by the profile's
	 * double/single ratio, rounded down to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}
2998
2999static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3000{
3001	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3002	struct devlink_resource_size_params kvd_size_params;
3003	u32 kvd_size;
3004
3005	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3006		return -EIO;
3007
3008	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3009	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3010					  MLXSW_SP_KVD_GRANULARITY,
3011					  DEVLINK_RESOURCE_UNIT_ENTRY);
3012
3013	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3014					 kvd_size, MLXSW_SP_RESOURCE_KVD,
3015					 DEVLINK_RESOURCE_ID_PARENT_TOP,
3016					 &kvd_size_params);
3017}
3018
3019static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3020{
3021	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3022	struct devlink_resource_size_params span_size_params;
3023	u32 max_span;
3024
3025	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3026		return -EIO;
3027
3028	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3029	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3030					  1, DEVLINK_RESOURCE_UNIT_ENTRY);
3031
3032	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3033					 max_span, MLXSW_SP_RESOURCE_SPAN,
3034					 DEVLINK_RESOURCE_ID_PARENT_TOP,
3035					 &span_size_params);
3036}
3037
3038static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3039{
3040	int err;
3041
3042	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3043	if (err)
3044		return err;
3045
3046	err = mlxsw_sp_resources_span_register(mlxsw_core);
3047	if (err)
3048		goto err_resources_span_register;
3049
3050	err = mlxsw_sp_counter_resources_register(mlxsw_core);
3051	if (err)
3052		goto err_resources_counter_register;
3053
3054	err = mlxsw_sp_policer_resources_register(mlxsw_core);
3055	if (err)
3056		goto err_resources_counter_register;
3057
3058	return 0;
3059
3060err_resources_counter_register:
3061err_resources_span_register:
3062	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3063	return err;
3064}
3065
3066static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3067{
3068	int err;
3069
3070	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3071	if (err)
3072		return err;
3073
3074	err = mlxsw_sp_resources_span_register(mlxsw_core);
3075	if (err)
3076		goto err_resources_span_register;
3077
3078	err = mlxsw_sp_counter_resources_register(mlxsw_core);
3079	if (err)
3080		goto err_resources_counter_register;
3081
3082	err = mlxsw_sp_policer_resources_register(mlxsw_core);
3083	if (err)
3084		goto err_resources_counter_register;
3085
3086	return 0;
3087
3088err_resources_counter_register:
3089err_resources_span_register:
3090	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3091	return err;
3092}
3093
/* Determine the KVD linear / hash-double / hash-single partition sizes.
 * User-configured sizes (set via devlink resources) take precedence; when a
 * devlink size is not available the corresponding profile-derived default is
 * computed instead. Returns 0 on success, or -EIO when device resources are
 * missing or the resulting split violates the device minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		/* No user-provided size; fall back to the profile default. */
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		/* Split the remaining (non-linear) area by the profile's
		 * double:single parts ratio, rounded to the granularity.
		 */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		/* Single gets whatever is left of the KVD. */
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3148
3149static int
3150mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3151					     struct devlink_param_gset_ctx *ctx)
3152{
3153	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3154	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3155
3156	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3157	return 0;
3158}
3159
3160static int
3161mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3162					     struct devlink_param_gset_ctx *ctx)
3163{
3164	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3165	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3166
3167	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3168}
3169
/* devlink parameters exposed on Spectrum-2 and later; currently only the
 * runtime-configurable ACL region rehash interval.
 */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};
3179
3180static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
3181{
3182	struct devlink *devlink = priv_to_devlink(mlxsw_core);
3183	union devlink_param_value value;
3184	int err;
3185
3186	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
3187				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
3188	if (err)
3189		return err;
3190
3191	value.vu32 = 0;
3192	devlink_param_driverinit_value_set(devlink,
3193					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3194					   value);
3195	return 0;
3196}
3197
3198static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3199{
3200	devlink_params_unregister(priv_to_devlink(mlxsw_core),
3201				  mlxsw_sp2_devlink_params,
3202				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
3203}
3204
3205static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
3206				     struct sk_buff *skb, u8 local_port)
3207{
3208	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3209
3210	skb_pull(skb, MLXSW_TXHDR_LEN);
3211	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
3212}
3213
/* mlxsw core driver callbacks and configuration for Spectrum-1 ASICs. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp1_fw_rev,
	.fw_filename			= MLXSW_SP1_FW_FILENAME,
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
	.temp_warn_enabled		= true,
};
3253
/* mlxsw core driver callbacks and configuration for Spectrum-2 ASICs.
 * Unlike Spectrum-1, no kvd_sizes_get is provided; devlink params are
 * registered instead.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp2_fw_rev,
	.fw_filename			= MLXSW_SP2_FW_FILENAME,
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
	.temp_warn_enabled		= true,
};
3294
/* mlxsw core driver callbacks and configuration for Spectrum-3 ASICs.
 * Shares the Spectrum-2 resources/params callbacks and config profile.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
	.temp_warn_enabled		= true,
};
3335
3336bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3337{
3338	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3339}
3340
3341static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
3342				   struct netdev_nested_priv *priv)
3343{
3344	int ret = 0;
3345
3346	if (mlxsw_sp_port_dev_check(lower_dev)) {
3347		priv->data = (void *)netdev_priv(lower_dev);
3348		ret = 1;
3349	}
3350
3351	return ret;
3352}
3353
3354struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3355{
3356	struct netdev_nested_priv priv = {
3357		.data = NULL,
3358	};
3359
3360	if (mlxsw_sp_port_dev_check(dev))
3361		return netdev_priv(dev);
3362
3363	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
3364
3365	return (struct mlxsw_sp_port *)priv.data;
3366}
3367
3368struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3369{
3370	struct mlxsw_sp_port *mlxsw_sp_port;
3371
3372	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3373	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3374}
3375
3376struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3377{
3378	struct netdev_nested_priv priv = {
3379		.data = NULL,
3380	};
3381
3382	if (mlxsw_sp_port_dev_check(dev))
3383		return netdev_priv(dev);
3384
3385	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3386				      &priv);
3387
3388	return (struct mlxsw_sp_port *)priv.data;
3389}
3390
3391struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3392{
3393	struct mlxsw_sp_port *mlxsw_sp_port;
3394
3395	rcu_read_lock();
3396	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3397	if (mlxsw_sp_port)
3398		dev_hold(mlxsw_sp_port->dev);
3399	rcu_read_unlock();
3400	return mlxsw_sp_port;
3401}
3402
3403void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3404{
3405	dev_put(mlxsw_sp_port->dev);
3406}
3407
3408static void
3409mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
3410				 struct net_device *lag_dev)
3411{
3412	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
3413	struct net_device *upper_dev;
3414	struct list_head *iter;
3415
3416	if (netif_is_bridge_port(lag_dev))
3417		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
3418
3419	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
3420		if (!netif_is_bridge_port(upper_dev))
3421			continue;
3422		br_dev = netdev_master_upper_dev_get(upper_dev);
3423		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
3424	}
3425}
3426
3427static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3428{
3429	char sldr_pl[MLXSW_REG_SLDR_LEN];
3430
3431	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3432	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3433}
3434
3435static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3436{
3437	char sldr_pl[MLXSW_REG_SLDR_LEN];
3438
3439	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3440	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3441}
3442
3443static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3444				     u16 lag_id, u8 port_index)
3445{
3446	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3447	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3448
3449	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3450				      lag_id, port_index);
3451	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3452}
3453
3454static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3455					u16 lag_id)
3456{
3457	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3458	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3459
3460	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3461					 lag_id);
3462	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3463}
3464
3465static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3466					u16 lag_id)
3467{
3468	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3469	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3470
3471	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3472					lag_id);
3473	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3474}
3475
3476static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3477					 u16 lag_id)
3478{
3479	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3480	char slcor_pl[MLXSW_REG_SLCOR_LEN];
3481
3482	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3483					 lag_id);
3484	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3485}
3486
3487static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3488				  struct net_device *lag_dev,
3489				  u16 *p_lag_id)
3490{
3491	struct mlxsw_sp_upper *lag;
3492	int free_lag_id = -1;
3493	u64 max_lag;
3494	int i;
3495
3496	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3497	for (i = 0; i < max_lag; i++) {
3498		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3499		if (lag->ref_count) {
3500			if (lag->dev == lag_dev) {
3501				*p_lag_id = i;
3502				return 0;
3503			}
3504		} else if (free_lag_id < 0) {
3505			free_lag_id = i;
3506		}
3507	}
3508	if (free_lag_id < 0)
3509		return -EBUSY;
3510	*p_lag_id = free_lag_id;
3511	return 0;
3512}
3513
3514static bool
3515mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3516			  struct net_device *lag_dev,
3517			  struct netdev_lag_upper_info *lag_upper_info,
3518			  struct netlink_ext_ack *extack)
3519{
3520	u16 lag_id;
3521
3522	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
3523		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
3524		return false;
3525	}
3526	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
3527		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
3528		return false;
3529	}
3530	return true;
3531}
3532
3533static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3534				       u16 lag_id, u8 *p_port_index)
3535{
3536	u64 max_lag_members;
3537	int i;
3538
3539	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3540					     MAX_LAG_MEMBERS);
3541	for (i = 0; i < max_lag_members; i++) {
3542		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3543			*p_port_index = i;
3544			return 0;
3545		}
3546	}
3547	return -EBUSY;
3548}
3549
3550static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3551				  struct net_device *lag_dev)
3552{
3553	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3554	struct mlxsw_sp_upper *lag;
3555	u16 lag_id;
3556	u8 port_index;
3557	int err;
3558
3559	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3560	if (err)
3561		return err;
3562	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3563	if (!lag->ref_count) {
3564		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3565		if (err)
3566			return err;
3567		lag->dev = lag_dev;
3568	}
3569
3570	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3571	if (err)
3572		return err;
3573	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3574	if (err)
3575		goto err_col_port_add;
3576
3577	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3578				   mlxsw_sp_port->local_port);
3579	mlxsw_sp_port->lag_id = lag_id;
3580	mlxsw_sp_port->lagged = 1;
3581	lag->ref_count++;
3582
3583	/* Port is no longer usable as a router interface */
3584	if (mlxsw_sp_port->default_vlan->fid)
3585		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
3586
3587	return 0;
3588
3589err_col_port_add:
3590	if (!lag->ref_count)
3591		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3592	return err;
3593}
3594
/* Remove @mlxsw_sp_port from its LAG: remove the collector port, flush the
 * port's VLAN state, detach the LAG (and its bridge-port uppers) from any
 * bridges, destroy the hardware LAG when the last member leaves, and clear
 * the core mapping. No-op if the port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	/* This port is the last member; tear down the hardware LAG. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
3628
3629static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3630				      u16 lag_id)
3631{
3632	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3633	char sldr_pl[MLXSW_REG_SLDR_LEN];
3634
3635	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3636					 mlxsw_sp_port->local_port);
3637	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3638}
3639
3640static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3641					 u16 lag_id)
3642{
3643	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3644	char sldr_pl[MLXSW_REG_SLDR_LEN];
3645
3646	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3647					    mlxsw_sp_port->local_port);
3648	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3649}
3650
3651static int
3652mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
3653{
3654	int err;
3655
3656	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
3657					   mlxsw_sp_port->lag_id);
3658	if (err)
3659		return err;
3660
3661	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3662	if (err)
3663		goto err_dist_port_add;
3664
3665	return 0;
3666
3667err_dist_port_add:
3668	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3669	return err;
3670}
3671
3672static int
3673mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
3674{
3675	int err;
3676
3677	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3678					    mlxsw_sp_port->lag_id);
3679	if (err)
3680		return err;
3681
3682	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
3683					    mlxsw_sp_port->lag_id);
3684	if (err)
3685		goto err_col_port_disable;
3686
3687	return 0;
3688
3689err_col_port_disable:
3690	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3691	return err;
3692}
3693
3694static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3695				     struct netdev_lag_lower_state_info *info)
3696{
3697	if (info->tx_enabled)
3698		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
3699	else
3700		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
3701}
3702
3703static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
3704				 bool enable)
3705{
3706	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3707	enum mlxsw_reg_spms_state spms_state;
3708	char *spms_pl;
3709	u16 vid;
3710	int err;
3711
3712	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
3713			      MLXSW_REG_SPMS_STATE_DISCARDING;
3714
3715	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
3716	if (!spms_pl)
3717		return -ENOMEM;
3718	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
3719
3720	for (vid = 0; vid < VLAN_N_VID; vid++)
3721		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
3722
3723	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
3724	kfree(spms_pl);
3725	return err;
3726}
3727
/* Prepare a port for enslavement to an OVS master: enable VP mode, set all
 * VLANs to forwarding, add the port to VLANs 1..VLAN_N_VID-2 and disable
 * learning on VLANs 1..VLAN_N_VID-1. Each step is rolled back in reverse
 * order on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning on the VIDs already disabled; the failing VID
	 * itself was not changed, hence the initial decrement.
	 */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
3762
3763static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3764{
3765	u16 vid;
3766
3767	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
3768		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
3769					       vid, true);
3770
3771	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
3772			       false, false);
3773	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
3774	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
3775}
3776
3777static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
3778{
3779	unsigned int num_vxlans = 0;
3780	struct net_device *dev;
3781	struct list_head *iter;
3782
3783	netdev_for_each_lower_dev(br_dev, dev, iter) {
3784		if (netif_is_vxlan(dev))
3785			num_vxlans++;
3786	}
3787
3788	return num_vxlans > 1;
3789}
3790
3791static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
3792{
3793	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
3794	struct net_device *dev;
3795	struct list_head *iter;
3796
3797	netdev_for_each_lower_dev(br_dev, dev, iter) {
3798		u16 pvid;
3799		int err;
3800
3801		if (!netif_is_vxlan(dev))
3802			continue;
3803
3804		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
3805		if (err || !pvid)
3806			continue;
3807
3808		if (test_and_set_bit(pvid, vlans))
3809			return false;
3810	}
3811
3812	return true;
3813}
3814
3815static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
3816					   struct netlink_ext_ack *extack)
3817{
3818	if (br_multicast_enabled(br_dev)) {
3819		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
3820		return false;
3821	}
3822
3823	if (!br_vlan_enabled(br_dev) &&
3824	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
3825		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
3826		return false;
3827	}
3828
3829	if (br_vlan_enabled(br_dev) &&
3830	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
3831		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
3832		return false;
3833	}
3834
3835	return true;
3836}
3837
/* Handle upper-device notifier events for a mlxsw port netdevice.
 * NETDEV_PRECHANGEUPPER validates the requested topology (bridge, LAG, OVS,
 * macvlan or VLAN uppers) and can veto it with an extack message;
 * NETDEV_CHANGEUPPER commits the join/leave.
 * NOTE(review): @lower_dev appears to be the mlxsw device the event
 * propagated from and may differ from @dev for stacked uppers — confirm
 * against the callers outside this chunk.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Veto any upper type the driver cannot offload. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking needs no validation. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		/* Commit the (already validated) topology change. */
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only a VLAN upper leaving a bridge needs handling
			 * here; joins are handled via the VLAN device's own
			 * events.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
3951
3952static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3953					       unsigned long event, void *ptr)
3954{
3955	struct netdev_notifier_changelowerstate_info *info;
3956	struct mlxsw_sp_port *mlxsw_sp_port;
3957	int err;
3958
3959	mlxsw_sp_port = netdev_priv(dev);
3960	info = ptr;
3961
3962	switch (event) {
3963	case NETDEV_CHANGELOWERSTATE:
3964		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3965			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3966							info->lower_state_info);
3967			if (err)
3968				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3969		}
3970		break;
3971	}
3972
3973	return 0;
3974}
3975
3976static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
3977					 struct net_device *port_dev,
3978					 unsigned long event, void *ptr)
3979{
3980	switch (event) {
3981	case NETDEV_PRECHANGEUPPER:
3982	case NETDEV_CHANGEUPPER:
3983		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
3984							   event, ptr);
3985	case NETDEV_CHANGELOWERSTATE:
3986		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
3987							   ptr);
3988	}
3989
3990	return 0;
3991}
3992
3993static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3994					unsigned long event, void *ptr)
3995{
3996	struct net_device *dev;
3997	struct list_head *iter;
3998	int ret;
3999
4000	netdev_for_each_lower_dev(lag_dev, dev, iter) {
4001		if (mlxsw_sp_port_dev_check(dev)) {
4002			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4003							    ptr);
4004			if (ret)
4005				return ret;
4006		}
4007	}
4008
4009	return 0;
4010}
4011
/* Handle (PRE)CHANGEUPPER events for a VLAN device whose real device is
 * the mlxsw port @dev: validate a prospective upper at PRECHANGEUPPER
 * (vetoing unsupported topologies via extack) and reflect the actual
 * linking / unlinking into the device at CHANGEUPPER.
 * NOTE(review): @vid is not referenced in this body - presumably kept for
 * signature symmetry with the LAG/bridge VLAN handlers; confirm before
 * removing.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only bridge and macvlan uppers can be offloaded */
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking needs no validation */
		if (!info->linking)
			break;
		/* Joining a not-yet-offloaded bridge with VxLAN devices is
		 * only allowed when the VxLAN configuration is valid
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Uppers stacked on top of the new upper are not supported,
		 * except on bridges we already offload
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		/* macvlan offload requires a router interface (RIF) on the
		 * VLAN device
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			/* Only teardown is handled here; macvlan setup goes
			 * through the router code
			 */
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* Should have been vetoed at PRECHANGEUPPER */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4078
4079static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4080						  struct net_device *lag_dev,
4081						  unsigned long event,
4082						  void *ptr, u16 vid)
4083{
4084	struct net_device *dev;
4085	struct list_head *iter;
4086	int ret;
4087
4088	netdev_for_each_lower_dev(lag_dev, dev, iter) {
4089		if (mlxsw_sp_port_dev_check(dev)) {
4090			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4091								 event, ptr,
4092								 vid);
4093			if (ret)
4094				return ret;
4095		}
4096	}
4097
4098	return 0;
4099}
4100
4101static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4102						struct net_device *br_dev,
4103						unsigned long event, void *ptr,
4104						u16 vid)
4105{
4106	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4107	struct netdev_notifier_changeupper_info *info = ptr;
4108	struct netlink_ext_ack *extack;
4109	struct net_device *upper_dev;
4110
4111	if (!mlxsw_sp)
4112		return 0;
4113
4114	extack = netdev_notifier_info_to_extack(&info->info);
4115
4116	switch (event) {
4117	case NETDEV_PRECHANGEUPPER:
4118		upper_dev = info->upper_dev;
4119		if (!netif_is_macvlan(upper_dev)) {
4120			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4121			return -EOPNOTSUPP;
4122		}
4123		if (!info->linking)
4124			break;
4125		if (netif_is_macvlan(upper_dev) &&
4126		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
4127			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4128			return -EOPNOTSUPP;
4129		}
4130		break;
4131	case NETDEV_CHANGEUPPER:
4132		upper_dev = info->upper_dev;
4133		if (info->linking)
4134			break;
4135		if (netif_is_macvlan(upper_dev))
4136			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4137		break;
4138	}
4139
4140	return 0;
4141}
4142
4143static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4144					 unsigned long event, void *ptr)
4145{
4146	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4147	u16 vid = vlan_dev_vlan_id(vlan_dev);
4148
4149	if (mlxsw_sp_port_dev_check(real_dev))
4150		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4151							  event, ptr, vid);
4152	else if (netif_is_lag_master(real_dev))
4153		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4154							      real_dev, event,
4155							      ptr, vid);
4156	else if (netif_is_bridge_master(real_dev))
4157		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
4158							    event, ptr, vid);
4159
4160	return 0;
4161}
4162
4163static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4164					   unsigned long event, void *ptr)
4165{
4166	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4167	struct netdev_notifier_changeupper_info *info = ptr;
4168	struct netlink_ext_ack *extack;
4169	struct net_device *upper_dev;
4170
4171	if (!mlxsw_sp)
4172		return 0;
4173
4174	extack = netdev_notifier_info_to_extack(&info->info);
4175
4176	switch (event) {
4177	case NETDEV_PRECHANGEUPPER:
4178		upper_dev = info->upper_dev;
4179		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
4180			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4181			return -EOPNOTSUPP;
4182		}
4183		if (!info->linking)
4184			break;
4185		if (netif_is_macvlan(upper_dev) &&
4186		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
4187			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4188			return -EOPNOTSUPP;
4189		}
4190		break;
4191	case NETDEV_CHANGEUPPER:
4192		upper_dev = info->upper_dev;
4193		if (info->linking)
4194			break;
4195		if (is_vlan_dev(upper_dev))
4196			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
4197		if (netif_is_macvlan(upper_dev))
4198			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4199		break;
4200	}
4201
4202	return 0;
4203}
4204
4205static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4206					    unsigned long event, void *ptr)
4207{
4208	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4209	struct netdev_notifier_changeupper_info *info = ptr;
4210	struct netlink_ext_ack *extack;
4211
4212	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4213		return 0;
4214
4215	extack = netdev_notifier_info_to_extack(&info->info);
4216
4217	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
4218	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4219
4220	return -EOPNOTSUPP;
4221}
4222
4223static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4224{
4225	struct netdev_notifier_changeupper_info *info = ptr;
4226
4227	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4228		return false;
4229	return netif_is_l3_master(info->upper_dev);
4230}
4231
/* Handle events on a VxLAN netdev: join / leave the hardware NVE
 * configuration as the device is linked under (or unlinked from) a
 * bridge known to mlxsw, and as it goes administratively up / down
 * while enslaved. Note @ptr is only a changeupper info struct for
 * NETDEV_CHANGEUPPER; the other cases must not cast it.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		/* Only bridges above an mlxsw device are of interest */
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* Join only once the device is up; NETDEV_PRE_UP
			 * handles the case where it comes up later
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Device coming up while already enslaved to an offloaded
		 * bridge: perform the join that CHANGEUPPER skipped
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		/* Device going down while enslaved: undo the join */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
4299
/* Top-level netdev notifier callback: invalidate SPAN entries for
 * unregistering ports, re-resolve SPAN state, and then dispatch the
 * event to the handler matching the device type. The VxLAN check is
 * deliberately separate from the else-if chain below it - a VxLAN
 * device may also need the generic handling that follows.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		/* A mirror target going away must stop being mirrored to */
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
4343
/* Validator notifiers: veto IPv4/IPv6 address additions that the device
 * cannot support, before they are committed
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};
4351
/* Spectrum-1 PCI binding; remaining pci_driver callbacks are filled in
 * by mlxsw_pci_driver_register()
 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};
4361
/* Spectrum-2 PCI binding; remaining pci_driver callbacks are filled in
 * by mlxsw_pci_driver_register()
 */
static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};
4371
/* Spectrum-3 PCI binding; remaining pci_driver callbacks are filled in
 * by mlxsw_pci_driver_register()
 */
static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};
4381
/* Module entry point: register the address validator notifiers, the
 * three per-ASIC core drivers and the three PCI drivers. On any failure
 * the goto chain unwinds everything registered so far, in strict
 * reverse order of registration.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

	/* Each label undoes the registrations that succeeded before the
	 * step it is named after failed
	 */
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
4430
/* Module exit: tear everything down in exact reverse order of
 * mlxsw_sp_module_init()
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
4442
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata: license, device tables for hotplug auto-loading, and
 * the firmware images the driver may request at probe time
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);

4455