// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "ib.h"

static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";

struct mlxsw_sx_port;

struct mlxsw_sx {
	struct mlxsw_sx_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	u8 hw_id[ETH_ALEN];
};

struct mlxsw_sx_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};

struct mlxsw_sx_port {
	struct net_device *dev;
	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sx *mlxsw_sx;
	u8 local_port;
	struct {
		u8 module;
	} mapping;
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 * The MSB is specified in the 'ctclass3' field.
 * Range is 0-15, where 15 is the highest priority.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);

/* tx_hdr_swid
 * Switch partition ID.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_ctclass3
 * See field 'etclass'.
 */
MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);

/* tx_hdr_rdq
 * RDQ for control packets sent to remote CPU.
 * Must be set to 0x1F for EMADs, otherwise 0.
 */
MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);

/* tx_hdr_cpu_sig
 * Signature control for packets going to CPU. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);

/* tx_hdr_sig
 * Stacking protocol signature. Must be set to 0xE0E0.
 */
MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);

/* tx_hdr_stclass
 * Stacking TClass.
 */
MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);

/* tx_hdr_emad
 * EMAD bit. Must be set for EMADs.
 */
MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	bool is_emad = tx_info->is_emad;

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	/* We currently set default values for the egress tclass (QoS). */
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
						  MLXSW_TXHDR_ETCLASS_5);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
					      MLXSW_TXHDR_RDQ_OTHER);
	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
					       MLXSW_TXHDR_NOT_EMAD);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
					  bool is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
				    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
					 bool *p_is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
	return 0;
}

static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
				   u16 mtu)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
				     u16 mtu)
{
	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
}

static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    u16 mtu)
{
	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
}

static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
				     u8 ib_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
	int err;

	mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
	return err;
}

static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
}

static int
mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
					 u8 local_port, u8 *p_module,
					 u8 *p_width)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	return 0;
}

static int mlxsw_sx_port_open(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sx_port_stop(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
}

static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
		return NETDEV_TX_BUSY;

	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}

static void
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
}

static struct devlink_port *
mlxsw_sx_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	return mlxsw_core_port_devlink_port_get(mlxsw_sx->core,
						mlxsw_sx_port->local_port);
}

static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open = mlxsw_sx_port_open,
	.ndo_stop = mlxsw_sx_port_stop,
	.ndo_start_xmit = mlxsw_sx_port_xmit,
	.ndo_change_mtu = mlxsw_sx_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
	.ndo_get_devlink_port = mlxsw_sx_port_get_devlink_port,
};

static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)

static void mlxsw_sx_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static void mlxsw_sx_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SX_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sx_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported = SUPPORTED_1000baseKX_Full,
		.advertised = ADVERTISED_1000baseKX_Full,
		.speed = 1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported = SUPPORTED_10000baseKX4_Full,
		.advertised = ADVERTISED_10000baseKX4_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported = SUPPORTED_10000baseKR_Full,
		.advertised = ADVERTISED_10000baseKR_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported = SUPPORTED_40000baseCR4_Full,
		.advertised = ADVERTISED_40000baseCR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported = SUPPORTED_40000baseKR4_Full,
		.advertised = ADVERTISED_40000baseKR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported = SUPPORTED_40000baseSR4_Full,
		.advertised = ADVERTISED_40000baseSR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported = SUPPORTED_40000baseLR4_Full,
		.advertised = ADVERTISED_40000baseLR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed = 25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed = 50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed = 100000,
	},
};

#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */

static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
			modes |= mlxsw_sx_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
			modes |= mlxsw_sx_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
			speed = mlxsw_sx_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int
mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	u32 supported, advertising, lp_advertising;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
				  &eth_proto_admin, &eth_proto_oper);

	supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
		    mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
		    SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
	lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	return 0;
}

static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sx_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sx_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int
mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 advertising;
	bool is_up;
	int err;

	speed = cmd->base.speed;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
		mlxsw_sx_to_ptys_advert_link(advertising) :
		mlxsw_sx_to_ptys_speed(speed);

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_new, true);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sx_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlxsw_sx_port_get_strings,
	.get_ethtool_stats = mlxsw_sx_port_get_stats,
	.get_sset_count = mlxsw_sx_port_get_sset_count,
	.get_link_ksettings = mlxsw_sx_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sx_port_set_link_ksettings,
};

static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
	return 0;
}

static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct net_device *dev = mlxsw_sx_port->dev;
	char ppad_pl[MLXSW_REG_PPAD_LEN];
	int err;

	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
	if (err)
		return err;
	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
	/* The last byte value in the base mac address is guaranteed
	 * to be such that it does not overflow when adding the
	 * local_port value.
	 */
	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
	return 0;
}

static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
				      u16 speed, u16 width)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];

	mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
			       width);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}

static int
mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_admin, true);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}

static int
mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    enum mlxsw_reg_spmlr_learn_mode mode)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char spmlr_pl[MLXSW_REG_SPMLR_LEN];

	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
}

static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				      u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
	dev_net_set(dev, mlxsw_core_net(mlxsw_sx->core));
	mlxsw_sx_port = netdev_priv(dev);
	mlxsw_sx_port->dev = dev;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	mlxsw_sx_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
	if (!mlxsw_sx_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;

	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
			mlxsw_sx_port->local_port);
		goto err_dev_addr_get;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_VLAN_CHALLENGED;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
					  MLXSW_PORT_DEFAULT_VID,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
			mlxsw_sx_port->local_port);
		goto err_port_stp_state_set;
	}

	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
			mlxsw_sx_port->local_port);
		goto err_port_mac_learning_mode_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sx_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
				mlxsw_sx_port, dev);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_get:
	free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}

static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				    u8 module, u8 width)
{
	int err;

	err = mlxsw_core_port_init(mlxsw_sx->core, local_port,
				   module + 1, false, 0, false, 0,
				   mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id));
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}
	err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
	if (err)
		goto err_port_create;

	return 0;

err_port_create:
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
	return err;
}

static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	free_percpu(mlxsw_sx_port->pcpu_stats);
	free_netdev(mlxsw_sx_port->dev);
}

static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	return mlxsw_sx->ports[local_port] != NULL;
}

static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				     u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	int err;

	mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
	if (!mlxsw_sx_port)
		return -ENOMEM;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	/* Adding port to InfiniBand swid (1) */
	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	/* Expose the IB port number as its front panel name */
	err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
			mlxsw_sx_port->local_port);
		goto err_port_ib_set;
	}

	/* Supports all speeds from SDR to FDR (bitmask) and supports bus
	 * widths of 1x, 2x and 4x (3-bit bitmask)
	 */
	err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
					 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
					 BIT(3) - 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	/* Change to the maximum MTU the device supports; the SMA will take
	 * care of the active MTU
	 */
	err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
			mlxsw_sx_port->local_port);
		goto err_port_admin_set;
	}

	mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
			       mlxsw_sx_port);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_port_admin_set:
err_port_mtu_set:
err_port_speed_set:
err_port_ib_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
	kfree(mlxsw_sx_port);
	return err;
}

static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	kfree(mlxsw_sx_port);
}

static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	enum devlink_port_type port_type =
		mlxsw_core_port_type_get(mlxsw_sx->core, local_port);

	if (port_type == DEVLINK_PORT_TYPE_ETH)
		__mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
	else if (port_type == DEVLINK_PORT_TYPE_IB)
		__mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
}

static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
}

static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
		if (mlxsw_sx_port_created(mlxsw_sx, i))
			mlxsw_sx_port_remove(mlxsw_sx, i);
	kfree(mlxsw_sx->ports);
	mlxsw_sx->ports = NULL;
}

static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
	size_t alloc_size;
	u8 module, width;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sx->ports)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
						    &width);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sx_port_created(mlxsw_sx, i))
			mlxsw_sx_port_remove(mlxsw_sx, i);
	kfree(mlxsw_sx->ports);
	mlxsw_sx->ports = NULL;
	return err;
}

static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
					 enum mlxsw_reg_pude_oper_status status)
{
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sx_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sx_port->dev);
	} else {
		netdev_info(mlxsw_sx_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sx_port->dev);
	}
}

static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
					enum mlxsw_reg_pude_oper_status status)
{
	if (status == MLXSW_PORT_OPER_STATUS_UP)
		pr_info("ib link for port %d - up\n",
			mlxsw_sx_port->mapping.module + 1);
	else
		pr_info("ib link for port %d - down\n",
			mlxsw_sx_port->mapping.module + 1);
}

static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port;
	enum mlxsw_reg_pude_oper_status status;
	enum devlink_port_type port_type;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sx_port = mlxsw_sx->ports[local_port];
	if (!mlxsw_sx_port) {
		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
	if (port_type == DEVLINK_PORT_TYPE_ETH)
		mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
	else if (port_type == DEVLINK_PORT_TYPE_IB)
		mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
}

static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sx_port)) {
		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sx_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
				  enum devlink_port_type new_type)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	u8 module, width;
	int err;

	if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
		dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (new_type == DEVLINK_PORT_TYPE_AUTO)
		return -EOPNOTSUPP;

	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
					    &width);
	if (err)
		goto err_port_module_info_get;

	if (new_type == DEVLINK_PORT_TYPE_ETH)
		err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
						 width);
	else if (new_type == DEVLINK_PORT_TYPE_IB)
		err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
						width);

err_port_module_info_get:
	return err;
}

enum {
	MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX = 1,
	MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL = 2,
};

#define MLXSW_SX_RXL(_trap_id) \
	MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU,	\
		  false, SX2_RX, FORWARD)

static const struct mlxsw_listener mlxsw_sx_listener[] = {
	MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
	MLXSW_SX_RXL(FDB_MC),
	MLXSW_SX_RXL(STP),
	MLXSW_SX_RXL(LACP),
	MLXSW_SX_RXL(EAPOL),
	MLXSW_SX_RXL(LLDP),
	MLXSW_SX_RXL(MMRP),
	MLXSW_SX_RXL(MVRP),
	MLXSW_SX_RXL(RPVST),
	MLXSW_SX_RXL(DHCP),
	MLXSW_SX_RXL(IGMP_QUERY),
	MLXSW_SX_RXL(IGMP_V1_REPORT),
	MLXSW_SX_RXL(IGMP_V2_REPORT),
	MLXSW_SX_RXL(IGMP_V2_LEAVE),
	MLXSW_SX_RXL(IGMP_V3_REPORT),
};

static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);

	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);

	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sx->core,
					       &mlxsw_sx_listener[i],
					       mlxsw_sx);
		if (err)
			goto err_listener_register;
1447 } 1448 return 0; 1449 1450err_listener_register: 1451 for (i--; i >= 0; i--) { 1452 mlxsw_core_trap_unregister(mlxsw_sx->core, 1453 &mlxsw_sx_listener[i], 1454 mlxsw_sx); 1455 } 1456 return err; 1457} 1458 1459static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx) 1460{ 1461 int i; 1462 1463 for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) { 1464 mlxsw_core_trap_unregister(mlxsw_sx->core, 1465 &mlxsw_sx_listener[i], 1466 mlxsw_sx); 1467 } 1468} 1469 1470static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx) 1471{ 1472 char sfgc_pl[MLXSW_REG_SFGC_LEN]; 1473 char sgcr_pl[MLXSW_REG_SGCR_LEN]; 1474 char *sftr_pl; 1475 int err; 1476 1477 /* Configure a flooding table, which includes only CPU port. */ 1478 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); 1479 if (!sftr_pl) 1480 return -ENOMEM; 1481 mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0, 1482 MLXSW_PORT_CPU_PORT, true); 1483 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl); 1484 kfree(sftr_pl); 1485 if (err) 1486 return err; 1487 1488 /* Flood different packet types using the flooding table. */ 1489 mlxsw_reg_sfgc_pack(sfgc_pl, 1490 MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST, 1491 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, 1492 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 1493 0); 1494 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); 1495 if (err) 1496 return err; 1497 1498 mlxsw_reg_sfgc_pack(sfgc_pl, 1499 MLXSW_REG_SFGC_TYPE_BROADCAST, 1500 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, 1501 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 1502 0); 1503 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); 1504 if (err) 1505 return err; 1506 1507 mlxsw_reg_sfgc_pack(sfgc_pl, 1508 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP, 1509 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, 1510 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 1511 0); 1512 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); 1513 if (err) 1514 return err; 1515 1516 mlxsw_reg_sfgc_pack(sfgc_pl, 1517 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6, 1518 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, 1519 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 1520 0); 1521 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); 1522 if (err) 1523 return err; 1524 1525 mlxsw_reg_sfgc_pack(sfgc_pl, 1526 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4, 1527 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, 1528 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 1529 0); 1530 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); 1531 if (err) 1532 return err; 1533 1534 mlxsw_reg_sgcr_pack(sgcr_pl, true); 1535 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl); 1536} 1537 1538static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 1539{ 1540 char htgt_pl[MLXSW_REG_HTGT_LEN]; 1541 1542 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 1543 MLXSW_REG_HTGT_INVALID_POLICER, 1544 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 1545 MLXSW_REG_HTGT_DEFAULT_TC); 1546 mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS); 1547 mlxsw_reg_htgt_local_path_rdq_set(htgt_pl, 1548 MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD); 1549 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 1550} 1551 1552static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core, 1553 const struct mlxsw_bus_info *mlxsw_bus_info, 1554 struct netlink_ext_ack *extack) 1555{ 1556 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); 1557 int err; 1558 1559 mlxsw_sx->core = mlxsw_core; 1560 mlxsw_sx->bus_info = mlxsw_bus_info; 1561 1562 err = mlxsw_sx_hw_id_get(mlxsw_sx); 1563 if (err) { 1564 
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
		goto err_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_listener_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}

static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_ports_remove(mlxsw_sx);
}

static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_mid = 1,
	.max_mid = 7000,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_max_system_port = 1,
	.max_system_port = 48000,
	.used_max_vlan_groups = 1,
	.max_vlan_groups = 127,
	.used_max_regions = 1,
	.max_regions = 400,
	.used_flood_tables = 1,
	.max_flood_tables = 2,
	.max_vid_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.used_max_ib_mc = 1,
	.max_ib_mc = 6,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		},
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_IB,
		}
	},
};

static struct mlxsw_driver mlxsw_sx_driver = {
	.kind = mlxsw_sx_driver_name,
	.priv_size = sizeof(struct mlxsw_sx),
	.init = mlxsw_sx_init,
	.fini = mlxsw_sx_fini,
	.basic_trap_groups_set = mlxsw_sx_basic_trap_groups_set,
	.txhdr_construct = mlxsw_sx_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sx_config_profile,
	.port_type_set = mlxsw_sx_port_type_set,
};

static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
	{0, },
};

static struct pci_driver mlxsw_sx_pci_driver = {
	.name = mlxsw_sx_driver_name,
	.id_table = mlxsw_sx_pci_id_table,
};

static int __init mlxsw_sx_module_init(void)
{
	int err;

	err = mlxsw_core_driver_register(&mlxsw_sx_driver);
	if (err)
		return err;

	err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
	return err;
}

static void __exit mlxsw_sx_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}

module_init(mlxsw_sx_module_init);
module_exit(mlxsw_sx_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);