1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2/* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2020 Marvell International Ltd. 5 */ 6 7#include <linux/stddef.h> 8#include <linux/pci.h> 9#include <linux/kernel.h> 10#include <linux/slab.h> 11#include <linux/delay.h> 12#include <asm/byteorder.h> 13#include <linux/dma-mapping.h> 14#include <linux/string.h> 15#include <linux/module.h> 16#include <linux/interrupt.h> 17#include <linux/workqueue.h> 18#include <linux/ethtool.h> 19#include <linux/etherdevice.h> 20#include <linux/vmalloc.h> 21#include <linux/crash_dump.h> 22#include <linux/crc32.h> 23#include <linux/qed/qed_if.h> 24#include <linux/qed/qed_ll2_if.h> 25#include <net/devlink.h> 26#include <linux/aer.h> 27#include <linux/phylink.h> 28 29#include "qed.h" 30#include "qed_sriov.h" 31#include "qed_sp.h" 32#include "qed_dev_api.h" 33#include "qed_ll2.h" 34#include "qed_fcoe.h" 35#include "qed_iscsi.h" 36 37#include "qed_mcp.h" 38#include "qed_reg_addr.h" 39#include "qed_hw.h" 40#include "qed_selftest.h" 41#include "qed_debug.h" 42#include "qed_devlink.h" 43 44#define QED_ROCE_QPS (8192) 45#define QED_ROCE_DPIS (8) 46#define QED_RDMA_SRQS QED_ROCE_QPS 47#define QED_NVM_CFG_GET_FLAGS 0xA 48#define QED_NVM_CFG_GET_PF_FLAGS 0x1A 49#define QED_NVM_CFG_MAX_ATTRS 50 50 51static char version[] = 52 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 53 54MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); 55MODULE_LICENSE("GPL"); 56MODULE_VERSION(DRV_MODULE_VERSION); 57 58#define FW_FILE_VERSION \ 59 __stringify(FW_MAJOR_VERSION) "." \ 60 __stringify(FW_MINOR_VERSION) "." \ 61 __stringify(FW_REVISION_VERSION) "." \ 62 __stringify(FW_ENGINEERING_VERSION) 63 64#define QED_FW_FILE_NAME \ 65 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" 66 67MODULE_FIRMWARE(QED_FW_FILE_NAME); 68 69/* MFW speed capabilities maps */ 70 71struct qed_mfw_speed_map { 72 u32 mfw_val; 73 __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); 74 75 const u32 *cap_arr; 76 u32 arr_size; 77}; 78 79#define QED_MFW_SPEED_MAP(type, arr) \ 80{ \ 81 .mfw_val = (type), \ 82 .cap_arr = (arr), \ 83 .arr_size = ARRAY_SIZE(arr), \ 84} 85 86static const u32 qed_mfw_ext_1g[] __initconst = { 87 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 88 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 89 ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 90}; 91 92static const u32 qed_mfw_ext_10g[] __initconst = { 93 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 94 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 95 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 96 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 97 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 98 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 99 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 100 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, 101}; 102 103static const u32 qed_mfw_ext_20g[] __initconst = { 104 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 105}; 106 107static const u32 qed_mfw_ext_25g[] __initconst = { 108 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 109 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 110 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 111}; 112 113static const u32 qed_mfw_ext_40g[] __initconst = { 114 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 115 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 116 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 117 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 118}; 119 120static const u32 qed_mfw_ext_50g_base_r[] __initconst = { 121 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 122 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 123 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 124 
ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 125 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, 126}; 127 128static const u32 qed_mfw_ext_50g_base_r2[] __initconst = { 129 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 130 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 131 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 132}; 133 134static const u32 qed_mfw_ext_100g_base_r2[] __initconst = { 135 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 136 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 137 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 138 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 139 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 140}; 141 142static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { 143 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 144 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 145 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 146 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 147}; 148 149static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { 150 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), 151 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), 152 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g), 153 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), 154 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), 155 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, 156 qed_mfw_ext_50g_base_r), 157 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2, 158 qed_mfw_ext_50g_base_r2), 159 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2, 160 qed_mfw_ext_100g_base_r2), 161 QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4, 162 qed_mfw_ext_100g_base_r4), 163}; 164 165static const u32 qed_mfw_legacy_1g[] __initconst = { 166 ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 167 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 168 ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 169}; 170 171static const u32 qed_mfw_legacy_10g[] __initconst = { 172 ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 173 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 174 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 175 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 176 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 177 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 178 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 179 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, 180}; 181 182static const u32 qed_mfw_legacy_20g[] __initconst = { 183 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 184}; 185 186static const u32 qed_mfw_legacy_25g[] __initconst = { 187 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 188 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 189 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 190}; 191 192static const u32 qed_mfw_legacy_40g[] __initconst = { 193 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 194 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 195 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 196 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 197}; 198 199static const u32 qed_mfw_legacy_50g[] __initconst = { 200 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 201 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 202 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 203}; 204 205static const u32 qed_mfw_legacy_bb_100g[] __initconst = { 206 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 207 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 208 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 209 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 210}; 211 212static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = { 213 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G, 214 qed_mfw_legacy_1g), 215 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G, 216 qed_mfw_legacy_10g), 217 
QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G, 218 qed_mfw_legacy_20g), 219 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G, 220 qed_mfw_legacy_25g), 221 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G, 222 qed_mfw_legacy_40g), 223 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G, 224 qed_mfw_legacy_50g), 225 QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G, 226 qed_mfw_legacy_bb_100g), 227}; 228 229static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map) 230{ 231 linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); 232 233 map->cap_arr = NULL; 234 map->arr_size = 0; 235} 236 237static void __init qed_mfw_speed_maps_init(void) 238{ 239 u32 i; 240 241 for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) 242 qed_mfw_speed_map_populate(qed_mfw_ext_maps + i); 243 244 for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) 245 qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i); 246} 247 248static int __init qed_init(void) 249{ 250 pr_info("%s", version); 251 252 qed_mfw_speed_maps_init(); 253 254 return 0; 255} 256module_init(qed_init); 257 258static void __exit qed_exit(void) 259{ 260 /* To prevent marking this module as "permanent" */ 261} 262module_exit(qed_exit); 263 264/* Check if the DMA controller on the machine can properly handle the DMA 265 * addressing required by the device. 266*/ 267static int qed_set_coherency_mask(struct qed_dev *cdev) 268{ 269 struct device *dev = &cdev->pdev->dev; 270 271 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 272 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 273 DP_NOTICE(cdev, 274 "Can't request 64-bit consistent allocations\n"); 275 return -EIO; 276 } 277 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { 278 DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n"); 279 return -EIO; 280 } 281 282 return 0; 283} 284 285static void qed_free_pci(struct qed_dev *cdev) 286{ 287 struct pci_dev *pdev = cdev->pdev; 288 289 pci_disable_pcie_error_reporting(pdev); 290 291 if (cdev->doorbells && cdev->db_size) 292 iounmap(cdev->doorbells); 293 if (cdev->regview) 294 iounmap(cdev->regview); 295 if (atomic_read(&pdev->enable_cnt) == 1) 296 pci_release_regions(pdev); 297 298 pci_disable_device(pdev); 299} 300 301#define PCI_REVISION_ID_ERROR_VAL 0xff 302 303/* Performs PCI initializations as well as initializing PCI-related parameters 304 * in the device structrue. Returns 0 in case of success. 305 */ 306static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) 307{ 308 u8 rev_id; 309 int rc; 310 311 cdev->pdev = pdev; 312 313 rc = pci_enable_device(pdev); 314 if (rc) { 315 DP_NOTICE(cdev, "Cannot enable PCI device\n"); 316 goto err0; 317 } 318 319 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 320 DP_NOTICE(cdev, "No memory region found in bar #0\n"); 321 rc = -EIO; 322 goto err1; 323 } 324 325 if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 326 DP_NOTICE(cdev, "No memory region found in bar #2\n"); 327 rc = -EIO; 328 goto err1; 329 } 330 331 if (atomic_read(&pdev->enable_cnt) == 1) { 332 rc = pci_request_regions(pdev, "qed"); 333 if (rc) { 334 DP_NOTICE(cdev, 335 "Failed to request PCI memory resources\n"); 336 goto err1; 337 } 338 pci_set_master(pdev); 339 pci_save_state(pdev); 340 } 341 342 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); 343 if (rev_id == PCI_REVISION_ID_ERROR_VAL) { 344 DP_NOTICE(cdev, 345 "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. 
Aborting.\n", 346 rev_id); 347 rc = -ENODEV; 348 goto err2; 349 } 350 if (!pci_is_pcie(pdev)) { 351 DP_NOTICE(cdev, "The bus is not PCI Express\n"); 352 rc = -EIO; 353 goto err2; 354 } 355 356 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 357 if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 358 DP_NOTICE(cdev, "Cannot find power management capability\n"); 359 360 rc = qed_set_coherency_mask(cdev); 361 if (rc) 362 goto err2; 363 364 cdev->pci_params.mem_start = pci_resource_start(pdev, 0); 365 cdev->pci_params.mem_end = pci_resource_end(pdev, 0); 366 cdev->pci_params.irq = pdev->irq; 367 368 cdev->regview = pci_ioremap_bar(pdev, 0); 369 if (!cdev->regview) { 370 DP_NOTICE(cdev, "Cannot map register space, aborting\n"); 371 rc = -ENOMEM; 372 goto err2; 373 } 374 375 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); 376 cdev->db_size = pci_resource_len(cdev->pdev, 2); 377 if (!cdev->db_size) { 378 if (IS_PF(cdev)) { 379 DP_NOTICE(cdev, "No Doorbell bar available\n"); 380 return -EINVAL; 381 } else { 382 return 0; 383 } 384 } 385 386 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); 387 388 if (!cdev->doorbells) { 389 DP_NOTICE(cdev, "Cannot map doorbell space\n"); 390 return -ENOMEM; 391 } 392 393 /* AER (Advanced Error reporting) configuration */ 394 rc = pci_enable_pcie_error_reporting(pdev); 395 if (rc) 396 DP_VERBOSE(cdev, NETIF_MSG_DRV, 397 "Failed to configure PCIe AER [%d]\n", rc); 398 399 return 0; 400 401err2: 402 pci_release_regions(pdev); 403err1: 404 pci_disable_device(pdev); 405err0: 406 return rc; 407} 408 409int qed_fill_dev_info(struct qed_dev *cdev, 410 struct qed_dev_info *dev_info) 411{ 412 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 413 struct qed_hw_info *hw_info = &p_hwfn->hw_info; 414 struct qed_tunnel_info *tun = &cdev->tunnel; 415 struct qed_ptt *ptt; 416 417 memset(dev_info, 0, sizeof(struct qed_dev_info)); 418 419 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 420 tun->vxlan.b_mode_enabled) 421 dev_info->vxlan_enable = true; 422 423 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 424 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 425 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 426 dev_info->gre_enable = true; 427 428 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 429 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 430 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 431 dev_info->geneve_enable = true; 432 433 dev_info->num_hwfns = cdev->num_hwfns; 434 dev_info->pci_mem_start = cdev->pci_params.mem_start; 435 dev_info->pci_mem_end = cdev->pci_params.mem_end; 436 dev_info->pci_irq = cdev->pci_params.irq; 437 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); 438 dev_info->dev_type = cdev->type; 439 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); 440 441 if (IS_PF(cdev)) { 442 dev_info->fw_major = FW_MAJOR_VERSION; 443 dev_info->fw_minor = FW_MINOR_VERSION; 444 dev_info->fw_rev = FW_REVISION_VERSION; 445 dev_info->fw_eng = FW_ENGINEERING_VERSION; 446 dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, 447 &cdev->mf_bits); 448 if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits)) 449 dev_info->b_arfs_capable = true; 450 dev_info->tx_switching = true; 451 452 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) 453 dev_info->wol_support = true; 454 455 dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); 456 457 dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; 458 } else { 459 qed_vf_get_fw_version(&cdev->hwfns[0], 
&dev_info->fw_major, 460 &dev_info->fw_minor, &dev_info->fw_rev, 461 &dev_info->fw_eng); 462 } 463 464 if (IS_PF(cdev)) { 465 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 466 if (ptt) { 467 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, 468 &dev_info->mfw_rev, NULL); 469 470 qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, 471 &dev_info->mbi_version); 472 473 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 474 &dev_info->flash_size); 475 476 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 477 } 478 } else { 479 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 480 &dev_info->mfw_rev, NULL); 481 } 482 483 dev_info->mtu = hw_info->mtu; 484 cdev->common_dev_info = *dev_info; 485 486 return 0; 487} 488 489static void qed_free_cdev(struct qed_dev *cdev) 490{ 491 kfree((void *)cdev); 492} 493 494static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 495{ 496 struct qed_dev *cdev; 497 498 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 499 if (!cdev) 500 return cdev; 501 502 qed_init_struct(cdev); 503 504 return cdev; 505} 506 507/* Sets the requested power state */ 508static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 509{ 510 if (!cdev) 511 return -ENODEV; 512 513 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 514 return 0; 515} 516 517/* probing */ 518static struct qed_dev *qed_probe(struct pci_dev *pdev, 519 struct qed_probe_params *params) 520{ 521 struct qed_dev *cdev; 522 int rc; 523 524 cdev = qed_alloc_cdev(pdev); 525 if (!cdev) 526 goto err0; 527 528 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 529 cdev->protocol = params->protocol; 530 531 if (params->is_vf) 532 cdev->b_is_vf = true; 533 534 qed_init_dp(cdev, params->dp_module, params->dp_level); 535 536 cdev->recov_in_prog = params->recov_in_prog; 537 538 rc = qed_init_pci(cdev, pdev); 539 if (rc) { 540 DP_ERR(cdev, "init pci failed\n"); 541 goto err1; 542 } 543 DP_INFO(cdev, "PCI init completed successfully\n"); 544 545 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 546 if (rc) { 547 DP_ERR(cdev, "hw prepare failed\n"); 548 goto err2; 549 } 550 551 DP_INFO(cdev, "qed_probe completed successfully\n"); 552 553 return cdev; 554 555err2: 556 qed_free_pci(cdev); 557err1: 558 qed_free_cdev(cdev); 559err0: 560 return NULL; 561} 562 563static void qed_remove(struct qed_dev *cdev) 564{ 565 if (!cdev) 566 return; 567 568 qed_hw_remove(cdev); 569 570 qed_free_pci(cdev); 571 572 qed_set_power_state(cdev, PCI_D3hot); 573 574 qed_free_cdev(cdev); 575} 576 577static void qed_disable_msix(struct qed_dev *cdev) 578{ 579 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 580 pci_disable_msix(cdev->pdev); 581 kfree(cdev->int_params.msix_table); 582 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 583 pci_disable_msi(cdev->pdev); 584 } 585 586 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 587} 588 589static int qed_enable_msix(struct qed_dev *cdev, 590 struct qed_int_params *int_params) 591{ 592 int i, rc, cnt; 593 594 cnt = int_params->in.num_vectors; 595 596 for (i = 0; i < cnt; i++) 597 int_params->msix_table[i].entry = i; 598 599 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 600 int_params->in.min_msix_cnt, cnt); 601 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 602 (rc % cdev->num_hwfns)) { 603 pci_disable_msix(cdev->pdev); 604 605 /* If fastpath is initialized, we need at least one interrupt 606 * per hwfn [and the slow path interrupts]. New requested number 607 * should be a multiple of the number of hwfns. 
608 */ 609 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; 610 DP_NOTICE(cdev, 611 "Trying to enable MSI-X with less vectors (%d out of %d)\n", 612 cnt, int_params->in.num_vectors); 613 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, 614 cnt); 615 if (!rc) 616 rc = cnt; 617 } 618 619 /* For VFs, we should return with an error in case we didn't get the 620 * exact number of msix vectors as we requested. 621 * Not doing that will lead to a crash when starting queues for 622 * this VF. 623 */ 624 if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { 625 /* MSI-x configuration was achieved */ 626 int_params->out.int_mode = QED_INT_MODE_MSIX; 627 int_params->out.num_vectors = rc; 628 rc = 0; 629 } else { 630 DP_NOTICE(cdev, 631 "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", 632 cnt, rc); 633 } 634 635 return rc; 636} 637 638/* This function outputs the int mode and the number of enabled msix vector */ 639static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) 640{ 641 struct qed_int_params *int_params = &cdev->int_params; 642 struct msix_entry *tbl; 643 int rc = 0, cnt; 644 645 switch (int_params->in.int_mode) { 646 case QED_INT_MODE_MSIX: 647 /* Allocate MSIX table */ 648 cnt = int_params->in.num_vectors; 649 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); 650 if (!int_params->msix_table) { 651 rc = -ENOMEM; 652 goto out; 653 } 654 655 /* Enable MSIX */ 656 rc = qed_enable_msix(cdev, int_params); 657 if (!rc) 658 goto out; 659 660 DP_NOTICE(cdev, "Failed to enable MSI-X\n"); 661 kfree(int_params->msix_table); 662 if (force_mode) 663 goto out; 664 fallthrough; 665 666 case QED_INT_MODE_MSI: 667 if (cdev->num_hwfns == 1) { 668 rc = pci_enable_msi(cdev->pdev); 669 if (!rc) { 670 int_params->out.int_mode = QED_INT_MODE_MSI; 671 goto out; 672 } 673 674 DP_NOTICE(cdev, "Failed to enable MSI\n"); 675 if (force_mode) 676 goto out; 677 } 678 fallthrough; 679 680 case QED_INT_MODE_INTA: 681 int_params->out.int_mode = QED_INT_MODE_INTA; 682 rc = 0; 683 goto out; 684 default: 685 DP_NOTICE(cdev, "Unknown int_mode value %d\n", 686 int_params->in.int_mode); 687 rc = -EINVAL; 688 } 689 690out: 691 if (!rc) 692 DP_INFO(cdev, "Using %s interrupts\n", 693 int_params->out.int_mode == QED_INT_MODE_INTA ? 694 "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
695 "MSI" : "MSIX"); 696 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; 697 698 return rc; 699} 700 701static void qed_simd_handler_config(struct qed_dev *cdev, void *token, 702 int index, void(*handler)(void *)) 703{ 704 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 705 int relative_idx = index / cdev->num_hwfns; 706 707 hwfn->simd_proto_handler[relative_idx].func = handler; 708 hwfn->simd_proto_handler[relative_idx].token = token; 709} 710 711static void qed_simd_handler_clean(struct qed_dev *cdev, int index) 712{ 713 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 714 int relative_idx = index / cdev->num_hwfns; 715 716 memset(&hwfn->simd_proto_handler[relative_idx], 0, 717 sizeof(struct qed_simd_fp_handler)); 718} 719 720static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) 721{ 722 tasklet_schedule((struct tasklet_struct *)tasklet); 723 return IRQ_HANDLED; 724} 725 726static irqreturn_t qed_single_int(int irq, void *dev_instance) 727{ 728 struct qed_dev *cdev = (struct qed_dev *)dev_instance; 729 struct qed_hwfn *hwfn; 730 irqreturn_t rc = IRQ_NONE; 731 u64 status; 732 int i, j; 733 734 for (i = 0; i < cdev->num_hwfns; i++) { 735 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); 736 737 if (!status) 738 continue; 739 740 hwfn = &cdev->hwfns[i]; 741 742 /* Slowpath interrupt */ 743 if (unlikely(status & 0x1)) { 744 tasklet_schedule(&hwfn->sp_dpc); 745 status &= ~0x1; 746 rc = IRQ_HANDLED; 747 } 748 749 /* Fastpath interrupts */ 750 for (j = 0; j < 64; j++) { 751 if ((0x2ULL << j) & status) { 752 struct qed_simd_fp_handler *p_handler = 753 &hwfn->simd_proto_handler[j]; 754 755 if (p_handler->func) 756 p_handler->func(p_handler->token); 757 else 758 DP_NOTICE(hwfn, 759 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", 760 j, status); 761 762 status &= ~(0x2ULL << j); 763 rc = IRQ_HANDLED; 764 } 765 } 766 767 if (unlikely(status)) 768 DP_VERBOSE(hwfn, NETIF_MSG_INTR, 769 "got an unknown interrupt status 0x%llx\n", 770 status); 771 } 772 773 return rc; 774} 775 776int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 777{ 778 struct qed_dev *cdev = hwfn->cdev; 779 u32 int_mode; 780 int rc = 0; 781 u8 id; 782 783 int_mode = cdev->int_params.out.int_mode; 784 if (int_mode == QED_INT_MODE_MSIX) { 785 id = hwfn->my_id; 786 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 787 id, cdev->pdev->bus->number, 788 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 789 rc = request_irq(cdev->int_params.msix_table[id].vector, 790 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc); 791 } else { 792 unsigned long flags = 0; 793 794 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", 795 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), 796 PCI_FUNC(cdev->pdev->devfn)); 797 798 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) 799 flags |= IRQF_SHARED; 800 801 rc = request_irq(cdev->pdev->irq, qed_single_int, 802 flags, cdev->name, cdev); 803 } 804 805 if (rc) 806 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); 807 else 808 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 809 "Requested slowpath %s\n", 810 (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); 811 812 return rc; 813} 814 815static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) 816{ 817 /* Calling the disable function will make sure that any 818 * currently-running function is completed. The following call to the 819 * enable function makes this sequence a flush-like operation. 
820 */ 821 if (p_hwfn->b_sp_dpc_enabled) { 822 tasklet_disable(&p_hwfn->sp_dpc); 823 tasklet_enable(&p_hwfn->sp_dpc); 824 } 825} 826 827void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 828{ 829 struct qed_dev *cdev = p_hwfn->cdev; 830 u8 id = p_hwfn->my_id; 831 u32 int_mode; 832 833 int_mode = cdev->int_params.out.int_mode; 834 if (int_mode == QED_INT_MODE_MSIX) 835 synchronize_irq(cdev->int_params.msix_table[id].vector); 836 else 837 synchronize_irq(cdev->pdev->irq); 838 839 qed_slowpath_tasklet_flush(p_hwfn); 840} 841 842static void qed_slowpath_irq_free(struct qed_dev *cdev) 843{ 844 int i; 845 846 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 847 for_each_hwfn(cdev, i) { 848 if (!cdev->hwfns[i].b_int_requested) 849 break; 850 synchronize_irq(cdev->int_params.msix_table[i].vector); 851 free_irq(cdev->int_params.msix_table[i].vector, 852 &cdev->hwfns[i].sp_dpc); 853 } 854 } else { 855 if (QED_LEADING_HWFN(cdev)->b_int_requested) 856 free_irq(cdev->pdev->irq, cdev); 857 } 858 qed_int_disable_post_isr_release(cdev); 859} 860 861static int qed_nic_stop(struct qed_dev *cdev) 862{ 863 int i, rc; 864 865 rc = qed_hw_stop(cdev); 866 867 for (i = 0; i < cdev->num_hwfns; i++) { 868 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 869 870 if (p_hwfn->b_sp_dpc_enabled) { 871 tasklet_disable(&p_hwfn->sp_dpc); 872 p_hwfn->b_sp_dpc_enabled = false; 873 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 874 "Disabled sp tasklet [hwfn %d] at %p\n", 875 i, &p_hwfn->sp_dpc); 876 } 877 } 878 879 qed_dbg_pf_exit(cdev); 880 881 return rc; 882} 883 884static int qed_nic_setup(struct qed_dev *cdev) 885{ 886 int rc, i; 887 888 /* Determine if interface is going to require LL2 */ 889 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 890 for (i = 0; i < cdev->num_hwfns; i++) { 891 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 892 893 p_hwfn->using_ll2 = true; 894 } 895 } 896 897 rc = qed_resc_alloc(cdev); 898 if (rc) 899 return rc; 900 901 DP_INFO(cdev, "Allocated qed resources\n"); 902 903 qed_resc_setup(cdev); 904 905 return rc; 906} 907 908static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 909{ 910 int limit = 0; 911 912 /* Mark the fastpath as free/used */ 913 cdev->int_params.fp_initialized = cnt ? true : false; 914 915 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 916 limit = cdev->num_hwfns * 63; 917 else if (cdev->int_params.fp_msix_cnt) 918 limit = cdev->int_params.fp_msix_cnt; 919 920 if (!limit) 921 return -ENOMEM; 922 923 return min_t(int, cnt, limit); 924} 925 926static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 927{ 928 memset(info, 0, sizeof(struct qed_int_info)); 929 930 if (!cdev->int_params.fp_initialized) { 931 DP_INFO(cdev, 932 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 933 return -EINVAL; 934 } 935 936 /* Need to expose only MSI-X information; Single IRQ is handled solely 937 * by qed. 
938 */ 939 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 940 int msix_base = cdev->int_params.fp_msix_base; 941 942 info->msix_cnt = cdev->int_params.fp_msix_cnt; 943 info->msix = &cdev->int_params.msix_table[msix_base]; 944 } 945 946 return 0; 947} 948 949static int qed_slowpath_setup_int(struct qed_dev *cdev, 950 enum qed_int_mode int_mode) 951{ 952 struct qed_sb_cnt_info sb_cnt_info; 953 int num_l2_queues = 0; 954 int rc; 955 int i; 956 957 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 958 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 959 return -EINVAL; 960 } 961 962 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 963 cdev->int_params.in.int_mode = int_mode; 964 for_each_hwfn(cdev, i) { 965 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 966 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); 967 cdev->int_params.in.num_vectors += sb_cnt_info.cnt; 968 cdev->int_params.in.num_vectors++; /* slowpath */ 969 } 970 971 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 972 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 973 974 if (is_kdump_kernel()) { 975 DP_INFO(cdev, 976 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", 977 cdev->int_params.in.min_msix_cnt); 978 cdev->int_params.in.num_vectors = 979 cdev->int_params.in.min_msix_cnt; 980 } 981 982 rc = qed_set_int_mode(cdev, false); 983 if (rc) { 984 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 985 return rc; 986 } 987 988 cdev->int_params.fp_msix_base = cdev->num_hwfns; 989 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 990 cdev->num_hwfns; 991 992 if (!IS_ENABLED(CONFIG_QED_RDMA) || 993 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) 994 return 0; 995 996 for_each_hwfn(cdev, i) 997 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 998 999 DP_VERBOSE(cdev, QED_MSG_RDMA, 1000 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 1001 cdev->int_params.fp_msix_cnt, num_l2_queues); 1002 1003 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 1004 cdev->int_params.rdma_msix_cnt = 1005 (cdev->int_params.fp_msix_cnt - num_l2_queues) 1006 / cdev->num_hwfns; 1007 cdev->int_params.rdma_msix_base = 1008 cdev->int_params.fp_msix_base + num_l2_queues; 1009 cdev->int_params.fp_msix_cnt = num_l2_queues; 1010 } else { 1011 cdev->int_params.rdma_msix_cnt = 0; 1012 } 1013 1014 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 1015 cdev->int_params.rdma_msix_cnt, 1016 cdev->int_params.rdma_msix_base); 1017 1018 return 0; 1019} 1020 1021static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 1022{ 1023 int rc; 1024 1025 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 1026 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 1027 1028 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 1029 &cdev->int_params.in.num_vectors); 1030 if (cdev->num_hwfns > 1) { 1031 u8 vectors = 0; 1032 1033 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 1034 cdev->int_params.in.num_vectors += vectors; 1035 } 1036 1037 /* We want a minimum of one fastpath vector per vf hwfn */ 1038 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 1039 1040 rc = qed_set_int_mode(cdev, true); 1041 if (rc) 1042 return rc; 1043 1044 cdev->int_params.fp_msix_base = 0; 1045 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 1046 1047 return 0; 1048} 1049 1050u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 1051 u8 *input_buf, u32 max_size, u8 *unzip_buf) 1052{ 1053 int rc; 1054 1055 p_hwfn->stream->next_in = 
input_buf; 1056 p_hwfn->stream->avail_in = input_len; 1057 p_hwfn->stream->next_out = unzip_buf; 1058 p_hwfn->stream->avail_out = max_size; 1059 1060 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 1061 1062 if (rc != Z_OK) { 1063 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", 1064 rc); 1065 return 0; 1066 } 1067 1068 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 1069 zlib_inflateEnd(p_hwfn->stream); 1070 1071 if (rc != Z_OK && rc != Z_STREAM_END) { 1072 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 1073 p_hwfn->stream->msg, rc); 1074 return 0; 1075 } 1076 1077 return p_hwfn->stream->total_out / 4; 1078} 1079 1080static int qed_alloc_stream_mem(struct qed_dev *cdev) 1081{ 1082 int i; 1083 void *workspace; 1084 1085 for_each_hwfn(cdev, i) { 1086 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1087 1088 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 1089 if (!p_hwfn->stream) 1090 return -ENOMEM; 1091 1092 workspace = vzalloc(zlib_inflate_workspacesize()); 1093 if (!workspace) 1094 return -ENOMEM; 1095 p_hwfn->stream->workspace = workspace; 1096 } 1097 1098 return 0; 1099} 1100 1101static void qed_free_stream_mem(struct qed_dev *cdev) 1102{ 1103 int i; 1104 1105 for_each_hwfn(cdev, i) { 1106 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1107 1108 if (!p_hwfn->stream) 1109 return; 1110 1111 vfree(p_hwfn->stream->workspace); 1112 kfree(p_hwfn->stream); 1113 } 1114} 1115 1116static void qed_update_pf_params(struct qed_dev *cdev, 1117 struct qed_pf_params *params) 1118{ 1119 int i; 1120 1121 if (IS_ENABLED(CONFIG_QED_RDMA)) { 1122 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 1123 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 1124 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; 1125 /* divide by 3 the MRs to avoid MF ILT overflow */ 1126 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 1127 } 1128 1129 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 1130 params->eth_pf_params.num_arfs_filters = 0; 1131 1132 /* In case we might support RDMA, don't allow qede to be greedy 1133 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] 1134 * per hwfn. 
1135 */ 1136 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { 1137 u16 *num_cons; 1138 1139 num_cons = ¶ms->eth_pf_params.num_cons; 1140 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); 1141 } 1142 1143 for (i = 0; i < cdev->num_hwfns; i++) { 1144 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1145 1146 p_hwfn->pf_params = *params; 1147 } 1148} 1149 1150#define QED_PERIODIC_DB_REC_COUNT 10 1151#define QED_PERIODIC_DB_REC_INTERVAL_MS 100 1152#define QED_PERIODIC_DB_REC_INTERVAL \ 1153 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) 1154 1155static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, 1156 enum qed_slowpath_wq_flag wq_flag, 1157 unsigned long delay) 1158{ 1159 if (!hwfn->slowpath_wq_active) 1160 return -EINVAL; 1161 1162 /* Memory barrier for setting atomic bit */ 1163 smp_mb__before_atomic(); 1164 set_bit(wq_flag, &hwfn->slowpath_task_flags); 1165 smp_mb__after_atomic(); 1166 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); 1167 1168 return 0; 1169} 1170 1171void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) 1172{ 1173 /* Reset periodic Doorbell Recovery counter */ 1174 p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; 1175 1176 /* Don't schedule periodic Doorbell Recovery if already scheduled */ 1177 if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1178 &p_hwfn->slowpath_task_flags)) 1179 return; 1180 1181 qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, 1182 QED_PERIODIC_DB_REC_INTERVAL); 1183} 1184 1185static void qed_slowpath_wq_stop(struct qed_dev *cdev) 1186{ 1187 int i; 1188 1189 if (IS_VF(cdev)) 1190 return; 1191 1192 for_each_hwfn(cdev, i) { 1193 if (!cdev->hwfns[i].slowpath_wq) 1194 continue; 1195 1196 /* Stop queuing new delayed works */ 1197 cdev->hwfns[i].slowpath_wq_active = false; 1198 1199 cancel_delayed_work(&cdev->hwfns[i].slowpath_task); 1200 destroy_workqueue(cdev->hwfns[i].slowpath_wq); 1201 } 1202} 1203 1204static void qed_slowpath_task(struct work_struct *work) 1205{ 1206 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 1207 slowpath_task.work); 1208 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 1209 1210 if (!ptt) { 1211 if (hwfn->slowpath_wq_active) 1212 queue_delayed_work(hwfn->slowpath_wq, 1213 &hwfn->slowpath_task, 0); 1214 1215 return; 1216 } 1217 1218 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, 1219 &hwfn->slowpath_task_flags)) 1220 qed_mfw_process_tlv_req(hwfn, ptt); 1221 1222 if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1223 &hwfn->slowpath_task_flags)) { 1224 qed_db_rec_handler(hwfn, ptt); 1225 if (hwfn->periodic_db_rec_count--) 1226 qed_slowpath_delayed_work(hwfn, 1227 QED_SLOWPATH_PERIODIC_DB_REC, 1228 QED_PERIODIC_DB_REC_INTERVAL); 1229 } 1230 1231 qed_ptt_release(hwfn, ptt); 1232} 1233 1234static int qed_slowpath_wq_start(struct qed_dev *cdev) 1235{ 1236 struct qed_hwfn *hwfn; 1237 char name[NAME_SIZE]; 1238 int i; 1239 1240 if (IS_VF(cdev)) 1241 return 0; 1242 1243 for_each_hwfn(cdev, i) { 1244 hwfn = &cdev->hwfns[i]; 1245 1246 snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", 1247 cdev->pdev->bus->number, 1248 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 1249 1250 hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); 1251 if (!hwfn->slowpath_wq) { 1252 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); 1253 return -ENOMEM; 1254 } 1255 1256 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); 1257 hwfn->slowpath_wq_active = true; 1258 } 1259 1260 return 0; 1261} 1262 1263static int qed_slowpath_start(struct qed_dev *cdev, 1264 struct qed_slowpath_params 
*params) 1265{ 1266 struct qed_drv_load_params drv_load_params; 1267 struct qed_hw_init_params hw_init_params; 1268 struct qed_mcp_drv_version drv_version; 1269 struct qed_tunnel_info tunn_info; 1270 const u8 *data = NULL; 1271 struct qed_hwfn *hwfn; 1272 struct qed_ptt *p_ptt; 1273 int rc = -EINVAL; 1274 1275 if (qed_iov_wq_start(cdev)) 1276 goto err; 1277 1278 if (qed_slowpath_wq_start(cdev)) 1279 goto err; 1280 1281 if (IS_PF(cdev)) { 1282 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 1283 &cdev->pdev->dev); 1284 if (rc) { 1285 DP_NOTICE(cdev, 1286 "Failed to find fw file - /lib/firmware/%s\n", 1287 QED_FW_FILE_NAME); 1288 goto err; 1289 } 1290 1291 if (cdev->num_hwfns == 1) { 1292 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 1293 if (p_ptt) { 1294 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 1295 } else { 1296 DP_NOTICE(cdev, 1297 "Failed to acquire PTT for aRFS\n"); 1298 rc = -EINVAL; 1299 goto err; 1300 } 1301 } 1302 } 1303 1304 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 1305 rc = qed_nic_setup(cdev); 1306 if (rc) 1307 goto err; 1308 1309 if (IS_PF(cdev)) 1310 rc = qed_slowpath_setup_int(cdev, params->int_mode); 1311 else 1312 rc = qed_slowpath_vf_setup_int(cdev); 1313 if (rc) 1314 goto err1; 1315 1316 if (IS_PF(cdev)) { 1317 /* Allocate stream for unzipping */ 1318 rc = qed_alloc_stream_mem(cdev); 1319 if (rc) 1320 goto err2; 1321 1322 /* First Dword used to differentiate between various sources */ 1323 data = cdev->firmware->data + sizeof(u32); 1324 1325 qed_dbg_pf_init(cdev); 1326 } 1327 1328 /* Start the slowpath */ 1329 memset(&hw_init_params, 0, sizeof(hw_init_params)); 1330 memset(&tunn_info, 0, sizeof(tunn_info)); 1331 tunn_info.vxlan.b_mode_enabled = true; 1332 tunn_info.l2_gre.b_mode_enabled = true; 1333 tunn_info.ip_gre.b_mode_enabled = true; 1334 tunn_info.l2_geneve.b_mode_enabled = true; 1335 tunn_info.ip_geneve.b_mode_enabled = true; 1336 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1337 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1338 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1339 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1340 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1341 hw_init_params.p_tunn = &tunn_info; 1342 hw_init_params.b_hw_start = true; 1343 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1344 hw_init_params.allow_npar_tx_switch = true; 1345 hw_init_params.bin_fw_data = data; 1346 1347 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1348 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1349 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1350 drv_load_params.avoid_eng_reset = false; 1351 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1352 hw_init_params.p_drv_load_params = &drv_load_params; 1353 1354 rc = qed_hw_init(cdev, &hw_init_params); 1355 if (rc) 1356 goto err2; 1357 1358 DP_INFO(cdev, 1359 "HW initialization and function start completed successfully\n"); 1360 1361 if (IS_PF(cdev)) { 1362 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1363 BIT(QED_MODE_L2GENEVE_TUNN) | 1364 BIT(QED_MODE_IPGENEVE_TUNN) | 1365 BIT(QED_MODE_L2GRE_TUNN) | 1366 BIT(QED_MODE_IPGRE_TUNN)); 1367 } 1368 1369 /* Allocate LL2 interface if needed */ 1370 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1371 rc = qed_ll2_alloc_if(cdev); 1372 if (rc) 1373 goto err3; 1374 } 1375 if (IS_PF(cdev)) { 1376 hwfn = QED_LEADING_HWFN(cdev); 1377 drv_version.version = (params->drv_major << 24) | 1378 (params->drv_minor << 16) | 1379 (params->drv_rev << 8) | 1380 
(params->drv_eng); 1381 strlcpy(drv_version.name, params->name, 1382 MCP_DRV_VER_STR_SIZE - 4); 1383 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1384 &drv_version); 1385 if (rc) { 1386 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1387 goto err4; 1388 } 1389 } 1390 1391 qed_reset_vport_stats(cdev); 1392 1393 return 0; 1394 1395err4: 1396 qed_ll2_dealloc_if(cdev); 1397err3: 1398 qed_hw_stop(cdev); 1399err2: 1400 qed_hw_timers_stop_all(cdev); 1401 if (IS_PF(cdev)) 1402 qed_slowpath_irq_free(cdev); 1403 qed_free_stream_mem(cdev); 1404 qed_disable_msix(cdev); 1405err1: 1406 qed_resc_free(cdev); 1407err: 1408 if (IS_PF(cdev)) 1409 release_firmware(cdev->firmware); 1410 1411 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1412 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1413 qed_ptt_release(QED_LEADING_HWFN(cdev), 1414 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1415 1416 qed_iov_wq_stop(cdev, false); 1417 1418 qed_slowpath_wq_stop(cdev); 1419 1420 return rc; 1421} 1422 1423static int qed_slowpath_stop(struct qed_dev *cdev) 1424{ 1425 if (!cdev) 1426 return -ENODEV; 1427 1428 qed_slowpath_wq_stop(cdev); 1429 1430 qed_ll2_dealloc_if(cdev); 1431 1432 if (IS_PF(cdev)) { 1433 if (cdev->num_hwfns == 1) 1434 qed_ptt_release(QED_LEADING_HWFN(cdev), 1435 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1436 qed_free_stream_mem(cdev); 1437 if (IS_QED_ETH_IF(cdev)) 1438 qed_sriov_disable(cdev, true); 1439 } 1440 1441 qed_nic_stop(cdev); 1442 1443 if (IS_PF(cdev)) 1444 qed_slowpath_irq_free(cdev); 1445 1446 qed_disable_msix(cdev); 1447 1448 qed_resc_free(cdev); 1449 1450 qed_iov_wq_stop(cdev, true); 1451 1452 if (IS_PF(cdev)) 1453 release_firmware(cdev->firmware); 1454 1455 return 0; 1456} 1457 1458static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) 1459{ 1460 int i; 1461 1462 memcpy(cdev->name, name, NAME_SIZE); 1463 for_each_hwfn(cdev, i) 1464 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1465} 1466 1467static u32 qed_sb_init(struct qed_dev *cdev, 1468 struct qed_sb_info *sb_info, 1469 void *sb_virt_addr, 1470 dma_addr_t sb_phy_addr, u16 sb_id, 1471 enum qed_sb_type type) 1472{ 1473 struct qed_hwfn *p_hwfn; 1474 struct qed_ptt *p_ptt; 1475 u16 rel_sb_id; 1476 u32 rc; 1477 1478 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1479 if (type == QED_SB_TYPE_L2_QUEUE) { 1480 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1481 rel_sb_id = sb_id / cdev->num_hwfns; 1482 } else { 1483 p_hwfn = QED_AFFIN_HWFN(cdev); 1484 rel_sb_id = sb_id; 1485 } 1486 1487 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1488 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1489 IS_LEAD_HWFN(p_hwfn) ? 
0 : 1, rel_sb_id, sb_id); 1490 1491 if (IS_PF(p_hwfn->cdev)) { 1492 p_ptt = qed_ptt_acquire(p_hwfn); 1493 if (!p_ptt) 1494 return -EBUSY; 1495 1496 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, 1497 sb_phy_addr, rel_sb_id); 1498 qed_ptt_release(p_hwfn, p_ptt); 1499 } else { 1500 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, 1501 sb_phy_addr, rel_sb_id); 1502 } 1503 1504 return rc; 1505} 1506 1507static u32 qed_sb_release(struct qed_dev *cdev, 1508 struct qed_sb_info *sb_info, 1509 u16 sb_id, 1510 enum qed_sb_type type) 1511{ 1512 struct qed_hwfn *p_hwfn; 1513 u16 rel_sb_id; 1514 u32 rc; 1515 1516 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1517 if (type == QED_SB_TYPE_L2_QUEUE) { 1518 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1519 rel_sb_id = sb_id / cdev->num_hwfns; 1520 } else { 1521 p_hwfn = QED_AFFIN_HWFN(cdev); 1522 rel_sb_id = sb_id; 1523 } 1524 1525 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1526 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1527 IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); 1528 1529 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); 1530 1531 return rc; 1532} 1533 1534static bool qed_can_link_change(struct qed_dev *cdev) 1535{ 1536 return true; 1537} 1538 1539static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params, 1540 const struct qed_link_params *params) 1541{ 1542 struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed; 1543 const struct qed_mfw_speed_map *map; 1544 u32 i; 1545 1546 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1547 ext_speed->autoneg = !!params->autoneg; 1548 1549 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1550 ext_speed->advertised_speeds = 0; 1551 1552 for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) { 1553 map = qed_mfw_ext_maps + i; 1554 1555 if (linkmode_intersects(params->adv_speeds, map->caps)) 1556 ext_speed->advertised_speeds |= map->mfw_val; 1557 } 1558 } 1559 1560 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) { 1561 switch (params->forced_speed) { 1562 case SPEED_1000: 1563 ext_speed->forced_speed = QED_EXT_SPEED_1G; 1564 break; 1565 case SPEED_10000: 1566 ext_speed->forced_speed = QED_EXT_SPEED_10G; 1567 break; 1568 case SPEED_20000: 1569 ext_speed->forced_speed = QED_EXT_SPEED_20G; 1570 break; 1571 case SPEED_25000: 1572 ext_speed->forced_speed = QED_EXT_SPEED_25G; 1573 break; 1574 case SPEED_40000: 1575 ext_speed->forced_speed = QED_EXT_SPEED_40G; 1576 break; 1577 case SPEED_50000: 1578 ext_speed->forced_speed = QED_EXT_SPEED_50G_R | 1579 QED_EXT_SPEED_50G_R2; 1580 break; 1581 case SPEED_100000: 1582 ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 | 1583 QED_EXT_SPEED_100G_R4 | 1584 QED_EXT_SPEED_100G_P4; 1585 break; 1586 default: 1587 break; 1588 } 1589 } 1590 1591 if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)) 1592 return; 1593 1594 switch (params->forced_speed) { 1595 case SPEED_25000: 1596 switch (params->fec) { 1597 case FEC_FORCE_MODE_NONE: 1598 link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE; 1599 break; 1600 case FEC_FORCE_MODE_FIRECODE: 1601 link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R; 1602 break; 1603 case FEC_FORCE_MODE_RS: 1604 link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528; 1605 break; 1606 case FEC_FORCE_MODE_AUTO: 1607 link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 | 1608 ETH_EXT_FEC_25G_BASE_R | 1609 ETH_EXT_FEC_25G_NONE; 1610 break; 1611 default: 1612 break; 1613 } 1614 1615 break; 1616 case SPEED_40000: 1617 switch (params->fec) { 
1618 case FEC_FORCE_MODE_NONE: 1619 link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE; 1620 break; 1621 case FEC_FORCE_MODE_FIRECODE: 1622 link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R; 1623 break; 1624 case FEC_FORCE_MODE_AUTO: 1625 link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R | 1626 ETH_EXT_FEC_40G_NONE; 1627 break; 1628 default: 1629 break; 1630 } 1631 1632 break; 1633 case SPEED_50000: 1634 switch (params->fec) { 1635 case FEC_FORCE_MODE_NONE: 1636 link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE; 1637 break; 1638 case FEC_FORCE_MODE_FIRECODE: 1639 link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R; 1640 break; 1641 case FEC_FORCE_MODE_RS: 1642 link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528; 1643 break; 1644 case FEC_FORCE_MODE_AUTO: 1645 link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 | 1646 ETH_EXT_FEC_50G_BASE_R | 1647 ETH_EXT_FEC_50G_NONE; 1648 break; 1649 default: 1650 break; 1651 } 1652 1653 break; 1654 case SPEED_100000: 1655 switch (params->fec) { 1656 case FEC_FORCE_MODE_NONE: 1657 link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE; 1658 break; 1659 case FEC_FORCE_MODE_FIRECODE: 1660 link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R; 1661 break; 1662 case FEC_FORCE_MODE_RS: 1663 link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528; 1664 break; 1665 case FEC_FORCE_MODE_AUTO: 1666 link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 | 1667 ETH_EXT_FEC_100G_BASE_R | 1668 ETH_EXT_FEC_100G_NONE; 1669 break; 1670 default: 1671 break; 1672 } 1673 1674 break; 1675 default: 1676 break; 1677 } 1678} 1679 1680static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) 1681{ 1682 struct qed_mcp_link_params *link_params; 1683 struct qed_mcp_link_speed_params *speed; 1684 const struct qed_mfw_speed_map *map; 1685 struct qed_hwfn *hwfn; 1686 struct qed_ptt *ptt; 1687 int rc; 1688 u32 i; 1689 1690 if (!cdev) 1691 return -ENODEV; 1692 1693 /* The link should be set only once per PF */ 1694 hwfn = &cdev->hwfns[0]; 1695 1696 /* When VF wants to set link, force it to read the bulletin instead. 1697 * This mimics the PF behavior, where a noitification [both immediate 1698 * and possible later] would be generated when changing properties. 
1699 */ 1700 if (IS_VF(cdev)) { 1701 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1702 return 0; 1703 } 1704 1705 ptt = qed_ptt_acquire(hwfn); 1706 if (!ptt) 1707 return -EBUSY; 1708 1709 link_params = qed_mcp_get_link_params(hwfn); 1710 if (!link_params) 1711 return -ENODATA; 1712 1713 speed = &link_params->speed; 1714 1715 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1716 speed->autoneg = !!params->autoneg; 1717 1718 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1719 speed->advertised_speeds = 0; 1720 1721 for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) { 1722 map = qed_mfw_legacy_maps + i; 1723 1724 if (linkmode_intersects(params->adv_speeds, map->caps)) 1725 speed->advertised_speeds |= map->mfw_val; 1726 } 1727 } 1728 1729 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1730 speed->forced_speed = params->forced_speed; 1731 1732 if (qed_mcp_is_ext_speed_supported(hwfn)) 1733 qed_set_ext_speed_params(link_params, params); 1734 1735 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1736 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1737 link_params->pause.autoneg = true; 1738 else 1739 link_params->pause.autoneg = false; 1740 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 1741 link_params->pause.forced_rx = true; 1742 else 1743 link_params->pause.forced_rx = false; 1744 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1745 link_params->pause.forced_tx = true; 1746 else 1747 link_params->pause.forced_tx = false; 1748 } 1749 1750 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1751 switch (params->loopback_mode) { 1752 case QED_LINK_LOOPBACK_INT_PHY: 1753 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1754 break; 1755 case QED_LINK_LOOPBACK_EXT_PHY: 1756 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1757 break; 1758 case QED_LINK_LOOPBACK_EXT: 1759 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1760 break; 1761 case QED_LINK_LOOPBACK_MAC: 1762 link_params->loopback_mode = ETH_LOOPBACK_MAC; 1763 break; 1764 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123: 1765 link_params->loopback_mode = 1766 ETH_LOOPBACK_CNIG_AH_ONLY_0123; 1767 break; 1768 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301: 1769 link_params->loopback_mode = 1770 ETH_LOOPBACK_CNIG_AH_ONLY_2301; 1771 break; 1772 case QED_LINK_LOOPBACK_PCS_AH_ONLY: 1773 link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY; 1774 break; 1775 case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY: 1776 link_params->loopback_mode = 1777 ETH_LOOPBACK_REVERSE_MAC_AH_ONLY; 1778 break; 1779 case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY: 1780 link_params->loopback_mode = 1781 ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY; 1782 break; 1783 default: 1784 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1785 break; 1786 } 1787 } 1788 1789 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) 1790 memcpy(&link_params->eee, ¶ms->eee, 1791 sizeof(link_params->eee)); 1792 1793 if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG) 1794 link_params->fec = params->fec; 1795 1796 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1797 1798 qed_ptt_release(hwfn, ptt); 1799 1800 return rc; 1801} 1802 1803static int qed_get_port_type(u32 media_type) 1804{ 1805 int port_type; 1806 1807 switch (media_type) { 1808 case MEDIA_SFPP_10G_FIBER: 1809 case MEDIA_SFP_1G_FIBER: 1810 case MEDIA_XFP_FIBER: 1811 case MEDIA_MODULE_FIBER: 1812 port_type = PORT_FIBRE; 1813 break; 1814 case MEDIA_DA_TWINAX: 1815 port_type = PORT_DA; 1816 break; 1817 case MEDIA_BASE_T: 1818 
port_type = PORT_TP; 1819 break; 1820 case MEDIA_KR: 1821 case MEDIA_NOT_PRESENT: 1822 port_type = PORT_NONE; 1823 break; 1824 case MEDIA_UNSPECIFIED: 1825 default: 1826 port_type = PORT_OTHER; 1827 break; 1828 } 1829 return port_type; 1830} 1831 1832static int qed_get_link_data(struct qed_hwfn *hwfn, 1833 struct qed_mcp_link_params *params, 1834 struct qed_mcp_link_state *link, 1835 struct qed_mcp_link_capabilities *link_caps) 1836{ 1837 void *p; 1838 1839 if (!IS_PF(hwfn->cdev)) { 1840 qed_vf_get_link_params(hwfn, params); 1841 qed_vf_get_link_state(hwfn, link); 1842 qed_vf_get_link_caps(hwfn, link_caps); 1843 1844 return 0; 1845 } 1846 1847 p = qed_mcp_get_link_params(hwfn); 1848 if (!p) 1849 return -ENXIO; 1850 memcpy(params, p, sizeof(*params)); 1851 1852 p = qed_mcp_get_link_state(hwfn); 1853 if (!p) 1854 return -ENXIO; 1855 memcpy(link, p, sizeof(*link)); 1856 1857 p = qed_mcp_get_link_capabilities(hwfn); 1858 if (!p) 1859 return -ENXIO; 1860 memcpy(link_caps, p, sizeof(*link_caps)); 1861 1862 return 0; 1863} 1864 1865static void qed_fill_link_capability(struct qed_hwfn *hwfn, 1866 struct qed_ptt *ptt, u32 capability, 1867 unsigned long *if_caps) 1868{ 1869 u32 media_type, tcvr_state, tcvr_type; 1870 u32 speed_mask, board_cfg; 1871 1872 if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) 1873 media_type = MEDIA_UNSPECIFIED; 1874 1875 if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) 1876 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; 1877 1878 if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) 1879 speed_mask = 0xFFFFFFFF; 1880 1881 if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) 1882 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 1883 1884 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 1885 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", 1886 media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); 1887 1888 switch (media_type) { 1889 case MEDIA_DA_TWINAX: 1890 phylink_set(if_caps, FIBRE); 1891 1892 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1893 phylink_set(if_caps, 20000baseKR2_Full); 1894 1895 /* For DAC media multiple speed capabilities are supported */ 1896 capability |= speed_mask; 1897 1898 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1899 phylink_set(if_caps, 1000baseKX_Full); 1900 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1901 phylink_set(if_caps, 10000baseCR_Full); 1902 1903 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1904 switch (tcvr_type) { 1905 case ETH_TRANSCEIVER_TYPE_40G_CR4: 1906 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: 1907 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 1908 phylink_set(if_caps, 40000baseCR4_Full); 1909 break; 1910 default: 1911 break; 1912 } 1913 1914 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1915 phylink_set(if_caps, 25000baseCR_Full); 1916 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1917 phylink_set(if_caps, 50000baseCR2_Full); 1918 1919 if (capability & 1920 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1921 switch (tcvr_type) { 1922 case ETH_TRANSCEIVER_TYPE_100G_CR4: 1923 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: 1924 phylink_set(if_caps, 100000baseCR4_Full); 1925 break; 1926 default: 1927 break; 1928 } 1929 1930 break; 1931 case MEDIA_BASE_T: 1932 phylink_set(if_caps, TP); 1933 1934 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { 1935 if (capability & 1936 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1937 phylink_set(if_caps, 1000baseT_Full); 
1938 if (capability & 1939 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1940 phylink_set(if_caps, 10000baseT_Full); 1941 } 1942 1943 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { 1944 phylink_set(if_caps, FIBRE); 1945 1946 switch (tcvr_type) { 1947 case ETH_TRANSCEIVER_TYPE_1000BASET: 1948 phylink_set(if_caps, 1000baseT_Full); 1949 break; 1950 case ETH_TRANSCEIVER_TYPE_10G_BASET: 1951 phylink_set(if_caps, 10000baseT_Full); 1952 break; 1953 default: 1954 break; 1955 } 1956 } 1957 1958 break; 1959 case MEDIA_SFP_1G_FIBER: 1960 case MEDIA_SFPP_10G_FIBER: 1961 case MEDIA_XFP_FIBER: 1962 case MEDIA_MODULE_FIBER: 1963 phylink_set(if_caps, FIBRE); 1964 capability |= speed_mask; 1965 1966 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1967 switch (tcvr_type) { 1968 case ETH_TRANSCEIVER_TYPE_1G_LX: 1969 case ETH_TRANSCEIVER_TYPE_1G_SX: 1970 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: 1971 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: 1972 phylink_set(if_caps, 1000baseKX_Full); 1973 break; 1974 default: 1975 break; 1976 } 1977 1978 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1979 switch (tcvr_type) { 1980 case ETH_TRANSCEIVER_TYPE_10G_SR: 1981 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 1982 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: 1983 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: 1984 phylink_set(if_caps, 10000baseSR_Full); 1985 break; 1986 case ETH_TRANSCEIVER_TYPE_10G_LR: 1987 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 1988 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: 1989 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: 1990 phylink_set(if_caps, 10000baseLR_Full); 1991 break; 1992 case ETH_TRANSCEIVER_TYPE_10G_LRM: 1993 phylink_set(if_caps, 10000baseLRM_Full); 1994 break; 1995 case ETH_TRANSCEIVER_TYPE_10G_ER: 1996 phylink_set(if_caps, 10000baseR_FEC); 1997 break; 1998 default: 1999 break; 2000 } 2001 2002 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 2003 phylink_set(if_caps, 20000baseKR2_Full); 2004 2005 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 2006 switch (tcvr_type) { 2007 case ETH_TRANSCEIVER_TYPE_25G_SR: 2008 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: 2009 phylink_set(if_caps, 25000baseSR_Full); 2010 break; 2011 default: 2012 break; 2013 } 2014 2015 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 2016 switch (tcvr_type) { 2017 case ETH_TRANSCEIVER_TYPE_40G_LR4: 2018 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: 2019 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 2020 phylink_set(if_caps, 40000baseLR4_Full); 2021 break; 2022 case ETH_TRANSCEIVER_TYPE_40G_SR4: 2023 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 2024 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: 2025 phylink_set(if_caps, 40000baseSR4_Full); 2026 break; 2027 default: 2028 break; 2029 } 2030 2031 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 2032 phylink_set(if_caps, 50000baseKR2_Full); 2033 2034 if (capability & 2035 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 2036 switch (tcvr_type) { 2037 case ETH_TRANSCEIVER_TYPE_100G_SR4: 2038 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: 2039 phylink_set(if_caps, 100000baseSR4_Full); 2040 break; 2041 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: 2042 phylink_set(if_caps, 100000baseLR4_ER4_Full); 2043 break; 2044 default: 2045 break; 2046 } 2047 2048 break; 2049 case MEDIA_KR: 2050 phylink_set(if_caps, Backplane); 2051 2052 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 2053 phylink_set(if_caps, 
			    20000baseKR2_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			phylink_set(if_caps, 40000baseKR4_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			phylink_set(if_caps, 100000baseKR4_Full);

		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
	default:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}

static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
{
	*speed_mask = 0;

	if (caps &
	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
	if (caps & QED_LINK_PARTNER_SPEED_10G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
	if (caps & QED_LINK_PARTNER_SPEED_20G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
	if (caps & QED_LINK_PARTNER_SPEED_25G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
	if (caps & QED_LINK_PARTNER_SPEED_40G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
	if (caps & QED_LINK_PARTNER_SPEED_50G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
	if (caps & QED_LINK_PARTNER_SPEED_100G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type, speed_mask;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
		if (link_caps.default_ext_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.ext_speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);

		qed_fill_link_capability(hwfn, ptt,
					 params.ext_speed.advertised_speeds,
					 if_link->advertised_caps);
	} else {
		if (link_caps.default_speed_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);
	}

	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		phylink_set(if_link->supported_caps, Asym_Pause);
	if (params.pause.autoneg || params.pause.forced_rx ||
params.pause.forced_tx) 2151 phylink_set(if_link->supported_caps, Pause); 2152 2153 if_link->sup_fec = link_caps.fec_default; 2154 if_link->active_fec = params.fec; 2155 2156 /* Fill link advertised capability */ 2157 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 2158 if_link->advertised_caps); 2159 2160 /* Fill link supported capability */ 2161 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 2162 if_link->supported_caps); 2163 2164 /* Fill partner advertised capability */ 2165 qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask); 2166 qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps); 2167 2168 if (link.link_up) 2169 if_link->speed = link.speed; 2170 2171 /* TODO - fill duplex properly */ 2172 if_link->duplex = DUPLEX_FULL; 2173 qed_mcp_get_media_type(hwfn, ptt, &media_type); 2174 if_link->port = qed_get_port_type(media_type); 2175 2176 if_link->autoneg = params.speed.autoneg; 2177 2178 if (params.pause.autoneg) 2179 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 2180 if (params.pause.forced_rx) 2181 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 2182 if (params.pause.forced_tx) 2183 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 2184 2185 if (link.an_complete) 2186 phylink_set(if_link->lp_caps, Autoneg); 2187 if (link.partner_adv_pause) 2188 phylink_set(if_link->lp_caps, Pause); 2189 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 2190 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 2191 phylink_set(if_link->lp_caps, Asym_Pause); 2192 2193 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 2194 if_link->eee_supported = false; 2195 } else { 2196 if_link->eee_supported = true; 2197 if_link->eee_active = link.eee_active; 2198 if_link->sup_caps = link_caps.eee_speed_caps; 2199 /* MFW clears adv_caps on eee disable; use configured value */ 2200 if_link->eee.adv_caps = link.eee_adv_caps ? 
link.eee_adv_caps : 2201 params.eee.adv_caps; 2202 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 2203 if_link->eee.enable = params.eee.enable; 2204 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 2205 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 2206 } 2207} 2208 2209static void qed_get_current_link(struct qed_dev *cdev, 2210 struct qed_link_output *if_link) 2211{ 2212 struct qed_hwfn *hwfn; 2213 struct qed_ptt *ptt; 2214 int i; 2215 2216 hwfn = &cdev->hwfns[0]; 2217 if (IS_PF(cdev)) { 2218 ptt = qed_ptt_acquire(hwfn); 2219 if (ptt) { 2220 qed_fill_link(hwfn, ptt, if_link); 2221 qed_ptt_release(hwfn, ptt); 2222 } else { 2223 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 2224 } 2225 } else { 2226 qed_fill_link(hwfn, NULL, if_link); 2227 } 2228 2229 for_each_hwfn(cdev, i) 2230 qed_inform_vf_link_state(&cdev->hwfns[i]); 2231} 2232 2233void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2234{ 2235 void *cookie = hwfn->cdev->ops_cookie; 2236 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2237 struct qed_link_output if_link; 2238 2239 qed_fill_link(hwfn, ptt, &if_link); 2240 qed_inform_vf_link_state(hwfn); 2241 2242 if (IS_LEAD_HWFN(hwfn) && cookie) 2243 op->link_update(cookie, &if_link); 2244} 2245 2246void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2247{ 2248 void *cookie = hwfn->cdev->ops_cookie; 2249 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2250 2251 if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) 2252 op->bw_update(cookie); 2253} 2254 2255static int qed_drain(struct qed_dev *cdev) 2256{ 2257 struct qed_hwfn *hwfn; 2258 struct qed_ptt *ptt; 2259 int i, rc; 2260 2261 if (IS_VF(cdev)) 2262 return 0; 2263 2264 for_each_hwfn(cdev, i) { 2265 hwfn = &cdev->hwfns[i]; 2266 ptt = qed_ptt_acquire(hwfn); 2267 if (!ptt) { 2268 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 2269 return -EBUSY; 2270 } 2271 rc = qed_mcp_drain(hwfn, ptt); 2272 qed_ptt_release(hwfn, ptt); 2273 if (rc) 2274 return rc; 2275 } 2276 2277 return 0; 2278} 2279 2280static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 2281 struct qed_nvm_image_att *nvm_image, 2282 u32 *crc) 2283{ 2284 u8 *buf = NULL; 2285 int rc; 2286 2287 /* Allocate a buffer for holding the nvram image */ 2288 buf = kzalloc(nvm_image->length, GFP_KERNEL); 2289 if (!buf) 2290 return -ENOMEM; 2291 2292 /* Read image into buffer */ 2293 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 2294 buf, nvm_image->length); 2295 if (rc) { 2296 DP_ERR(cdev, "Failed reading image from nvm\n"); 2297 goto out; 2298 } 2299 2300 /* Convert the buffer into big-endian format (excluding the 2301 * closing 4 bytes of CRC). 2302 */ 2303 cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, 2304 DIV_ROUND_UP(nvm_image->length - 4, 4)); 2305 2306 /* Calc CRC for the "actual" image buffer, i.e. not including 2307 * the last 4 CRC bytes. 2308 */ 2309 *crc = ~crc32(~0U, buf, nvm_image->length - 4); 2310 *crc = (__force u32)cpu_to_be32p(crc); 2311 2312out: 2313 kfree(buf); 2314 2315 return rc; 2316} 2317 2318/* Binary file format - 2319 * /----------------------------------------------------------------------\ 2320 * 0B | 0x4 [command index] | 2321 * 4B | image_type | Options | Number of register settings | 2322 * 8B | Value | 2323 * 12B | Mask | 2324 * 16B | Offset | 2325 * \----------------------------------------------------------------------/ 2326 * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 
2327 * Options - 0'b - Calculate & Update CRC for image 2328 */ 2329static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, 2330 bool *check_resp) 2331{ 2332 struct qed_nvm_image_att nvm_image; 2333 struct qed_hwfn *p_hwfn; 2334 bool is_crc = false; 2335 u32 image_type; 2336 int rc = 0, i; 2337 u16 len; 2338 2339 *data += 4; 2340 image_type = **data; 2341 p_hwfn = QED_LEADING_HWFN(cdev); 2342 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 2343 if (image_type == p_hwfn->nvm_info.image_att[i].image_type) 2344 break; 2345 if (i == p_hwfn->nvm_info.num_images) { 2346 DP_ERR(cdev, "Failed to find nvram image of type %08x\n", 2347 image_type); 2348 return -ENOENT; 2349 } 2350 2351 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 2352 nvm_image.length = p_hwfn->nvm_info.image_att[i].len; 2353 2354 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2355 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", 2356 **data, image_type, nvm_image.start_addr, 2357 nvm_image.start_addr + nvm_image.length - 1); 2358 (*data)++; 2359 is_crc = !!(**data & BIT(0)); 2360 (*data)++; 2361 len = *((u16 *)*data); 2362 *data += 2; 2363 if (is_crc) { 2364 u32 crc = 0; 2365 2366 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); 2367 if (rc) { 2368 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); 2369 goto exit; 2370 } 2371 2372 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2373 (nvm_image.start_addr + 2374 nvm_image.length - 4), (u8 *)&crc, 4); 2375 if (rc) 2376 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", 2377 nvm_image.start_addr + nvm_image.length - 4, rc); 2378 goto exit; 2379 } 2380 2381 /* Iterate over the values for setting */ 2382 while (len) { 2383 u32 offset, mask, value, cur_value; 2384 u8 buf[4]; 2385 2386 value = *((u32 *)*data); 2387 *data += 4; 2388 mask = *((u32 *)*data); 2389 *data += 4; 2390 offset = *((u32 *)*data); 2391 *data += 4; 2392 2393 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 2394 4); 2395 if (rc) { 2396 DP_ERR(cdev, "Failed reading from %08x\n", 2397 nvm_image.start_addr + offset); 2398 goto exit; 2399 } 2400 2401 cur_value = le32_to_cpu(*((__le32 *)buf)); 2402 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2403 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", 2404 nvm_image.start_addr + offset, cur_value, 2405 (cur_value & ~mask) | (value & mask), value, mask); 2406 value = (value & mask) | (cur_value & ~mask); 2407 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2408 nvm_image.start_addr + offset, 2409 (u8 *)&value, 4); 2410 if (rc) { 2411 DP_ERR(cdev, "Failed writing to %08x\n", 2412 nvm_image.start_addr + offset); 2413 goto exit; 2414 } 2415 2416 len--; 2417 } 2418exit: 2419 return rc; 2420} 2421 2422/* Binary file format - 2423 * /----------------------------------------------------------------------\ 2424 * 0B | 0x3 [command index] | 2425 * 4B | b'0: check_response? 
| b'1-31 reserved | 2426 * 8B | File-type | reserved | 2427 * 12B | Image length in bytes | 2428 * \----------------------------------------------------------------------/ 2429 * Start a new file of the provided type 2430 */ 2431static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, 2432 const u8 **data, bool *check_resp) 2433{ 2434 u32 file_type, file_size = 0; 2435 int rc; 2436 2437 *data += 4; 2438 *check_resp = !!(**data & BIT(0)); 2439 *data += 4; 2440 file_type = **data; 2441 2442 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2443 "About to start a new file of type %02x\n", file_type); 2444 if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { 2445 *data += 4; 2446 file_size = *((u32 *)(*data)); 2447 } 2448 2449 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, 2450 (u8 *)(&file_size), 4); 2451 *data += 4; 2452 2453 return rc; 2454} 2455 2456/* Binary file format - 2457 * /----------------------------------------------------------------------\ 2458 * 0B | 0x2 [command index] | 2459 * 4B | Length in bytes | 2460 * 8B | b'0: check_response? | b'1-31 reserved | 2461 * 12B | Offset in bytes | 2462 * 16B | Data ... | 2463 * \----------------------------------------------------------------------/ 2464 * Write data as part of a file that was previously started. Data should be 2465 * of length equal to that provided in the message 2466 */ 2467static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, 2468 const u8 **data, bool *check_resp) 2469{ 2470 u32 offset, len; 2471 int rc; 2472 2473 *data += 4; 2474 len = *((u32 *)(*data)); 2475 *data += 4; 2476 *check_resp = !!(**data & BIT(0)); 2477 *data += 4; 2478 offset = *((u32 *)(*data)); 2479 *data += 4; 2480 2481 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2482 "About to write File-data: %08x bytes to offset %08x\n", 2483 len, offset); 2484 2485 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, 2486 (char *)(*data), len); 2487 *data += len; 2488 2489 return rc; 2490} 2491 2492/* Binary file format [General header] - 2493 * /----------------------------------------------------------------------\ 2494 * 0B | QED_NVM_SIGNATURE | 2495 * 4B | Length in bytes | 2496 * 8B | Highest command in this batchfile | Reserved | 2497 * \----------------------------------------------------------------------/ 2498 */ 2499static int qed_nvm_flash_image_validate(struct qed_dev *cdev, 2500 const struct firmware *image, 2501 const u8 **data) 2502{ 2503 u32 signature, len; 2504 2505 /* Check minimum size */ 2506 if (image->size < 12) { 2507 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); 2508 return -EINVAL; 2509 } 2510 2511 /* Check signature */ 2512 signature = *((u32 *)(*data)); 2513 if (signature != QED_NVM_SIGNATURE) { 2514 DP_ERR(cdev, "Wrong signature '%08x'\n", signature); 2515 return -EINVAL; 2516 } 2517 2518 *data += 4; 2519 /* Validate internal size equals the image-size */ 2520 len = *((u32 *)(*data)); 2521 if (len != image->size) { 2522 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", 2523 len, (u32)image->size); 2524 return -EINVAL; 2525 } 2526 2527 *data += 4; 2528 /* Make sure driver familiar with all commands necessary for this */ 2529 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { 2530 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", 2531 *((u16 *)(*data))); 2532 return -EINVAL; 2533 } 2534 2535 *data += 4; 2536 2537 return 0; 2538} 2539 2540/* Binary file format - 2541 * /----------------------------------------------------------------------\ 2542 * 0B | 0x5 [command index] | 2543 * 4B | 
Number of config attributes     | Reserved                         |
 * 4B  | Config ID                       | Entity ID | Length               |
 * 4B  | Value                                                              |
 *     |                                                                    |
 * \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}

#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}

static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ?
QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; 2660 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); 2661 if (rc) 2662 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2663 2664 qed_ptt_release(hwfn, ptt); 2665 2666 return rc; 2667} 2668 2669static int qed_nvm_flash(struct qed_dev *cdev, const char *name) 2670{ 2671 const struct firmware *image; 2672 const u8 *data, *data_end; 2673 u32 cmd_type; 2674 int rc; 2675 2676 rc = request_firmware(&image, name, &cdev->pdev->dev); 2677 if (rc) { 2678 DP_ERR(cdev, "Failed to find '%s'\n", name); 2679 return rc; 2680 } 2681 2682 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2683 "Flashing '%s' - firmware's data at %p, size is %08x\n", 2684 name, image->data, (u32)image->size); 2685 data = image->data; 2686 data_end = data + image->size; 2687 2688 rc = qed_nvm_flash_image_validate(cdev, image, &data); 2689 if (rc) 2690 goto exit; 2691 2692 while (data < data_end) { 2693 bool check_resp = false; 2694 2695 /* Parse the actual command */ 2696 cmd_type = *((u32 *)data); 2697 switch (cmd_type) { 2698 case QED_NVM_FLASH_CMD_FILE_DATA: 2699 rc = qed_nvm_flash_image_file_data(cdev, &data, 2700 &check_resp); 2701 break; 2702 case QED_NVM_FLASH_CMD_FILE_START: 2703 rc = qed_nvm_flash_image_file_start(cdev, &data, 2704 &check_resp); 2705 break; 2706 case QED_NVM_FLASH_CMD_NVM_CHANGE: 2707 rc = qed_nvm_flash_image_access(cdev, &data, 2708 &check_resp); 2709 break; 2710 case QED_NVM_FLASH_CMD_NVM_CFG_ID: 2711 rc = qed_nvm_flash_cfg_write(cdev, &data); 2712 break; 2713 default: 2714 DP_ERR(cdev, "Unknown command %08x\n", cmd_type); 2715 rc = -EINVAL; 2716 goto exit; 2717 } 2718 2719 if (rc) { 2720 DP_ERR(cdev, "Command %08x failed\n", cmd_type); 2721 goto exit; 2722 } 2723 2724 /* Check response if needed */ 2725 if (check_resp) { 2726 u32 mcp_response = 0; 2727 2728 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { 2729 DP_ERR(cdev, "Failed getting MCP response\n"); 2730 rc = -EINVAL; 2731 goto exit; 2732 } 2733 2734 switch (mcp_response & FW_MSG_CODE_MASK) { 2735 case FW_MSG_CODE_OK: 2736 case FW_MSG_CODE_NVM_OK: 2737 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: 2738 case FW_MSG_CODE_PHY_OK: 2739 break; 2740 default: 2741 DP_ERR(cdev, "MFW returns error: %08x\n", 2742 mcp_response); 2743 rc = -EINVAL; 2744 goto exit; 2745 } 2746 } 2747 } 2748 2749exit: 2750 release_firmware(image); 2751 2752 return rc; 2753} 2754 2755static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, 2756 u8 *buf, u16 len) 2757{ 2758 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2759 2760 return qed_mcp_get_nvm_image(hwfn, type, buf, len); 2761} 2762 2763void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) 2764{ 2765 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2766 void *cookie = p_hwfn->cdev->ops_cookie; 2767 2768 if (ops && ops->schedule_recovery_handler) 2769 ops->schedule_recovery_handler(cookie); 2770} 2771 2772static const char * const qed_hw_err_type_descr[] = { 2773 [QED_HW_ERR_FAN_FAIL] = "Fan Failure", 2774 [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", 2775 [QED_HW_ERR_HW_ATTN] = "HW Attention", 2776 [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure", 2777 [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure", 2778 [QED_HW_ERR_FW_ASSERT] = "FW Assertion", 2779 [QED_HW_ERR_LAST] = "Unknown", 2780}; 2781 2782void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, 2783 enum qed_hw_err_type err_type) 2784{ 2785 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2786 void *cookie = p_hwfn->cdev->ops_cookie; 2787 const char 
*err_str; 2788 2789 if (err_type > QED_HW_ERR_LAST) 2790 err_type = QED_HW_ERR_LAST; 2791 err_str = qed_hw_err_type_descr[err_type]; 2792 2793 DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str); 2794 2795 /* Call the HW error handler of the protocol driver. 2796 * If it is not available - perform a minimal handling of preventing 2797 * HW attentions from being reasserted. 2798 */ 2799 if (ops && ops->schedule_hw_err_handler) 2800 ops->schedule_hw_err_handler(cookie, err_type); 2801 else 2802 qed_int_attn_clr_enable(p_hwfn->cdev, true); 2803} 2804 2805static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 2806 void *handle) 2807{ 2808 return qed_set_queue_coalesce(rx_coal, tx_coal, handle); 2809} 2810 2811static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 2812{ 2813 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2814 struct qed_ptt *ptt; 2815 int status = 0; 2816 2817 ptt = qed_ptt_acquire(hwfn); 2818 if (!ptt) 2819 return -EAGAIN; 2820 2821 status = qed_mcp_set_led(hwfn, ptt, mode); 2822 2823 qed_ptt_release(hwfn, ptt); 2824 2825 return status; 2826} 2827 2828int qed_recovery_process(struct qed_dev *cdev) 2829{ 2830 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2831 struct qed_ptt *p_ptt; 2832 int rc = 0; 2833 2834 p_ptt = qed_ptt_acquire(p_hwfn); 2835 if (!p_ptt) 2836 return -EAGAIN; 2837 2838 rc = qed_start_recovery_process(p_hwfn, p_ptt); 2839 2840 qed_ptt_release(p_hwfn, p_ptt); 2841 2842 return rc; 2843} 2844 2845static int qed_update_wol(struct qed_dev *cdev, bool enabled) 2846{ 2847 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2848 struct qed_ptt *ptt; 2849 int rc = 0; 2850 2851 if (IS_VF(cdev)) 2852 return 0; 2853 2854 ptt = qed_ptt_acquire(hwfn); 2855 if (!ptt) 2856 return -EAGAIN; 2857 2858 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED 2859 : QED_OV_WOL_DISABLED); 2860 if (rc) 2861 goto out; 2862 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2863 2864out: 2865 qed_ptt_release(hwfn, ptt); 2866 return rc; 2867} 2868 2869static int qed_update_drv_state(struct qed_dev *cdev, bool active) 2870{ 2871 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2872 struct qed_ptt *ptt; 2873 int status = 0; 2874 2875 if (IS_VF(cdev)) 2876 return 0; 2877 2878 ptt = qed_ptt_acquire(hwfn); 2879 if (!ptt) 2880 return -EAGAIN; 2881 2882 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
2883 QED_OV_DRIVER_STATE_ACTIVE : 2884 QED_OV_DRIVER_STATE_DISABLED); 2885 2886 qed_ptt_release(hwfn, ptt); 2887 2888 return status; 2889} 2890 2891static int qed_update_mac(struct qed_dev *cdev, u8 *mac) 2892{ 2893 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2894 struct qed_ptt *ptt; 2895 int status = 0; 2896 2897 if (IS_VF(cdev)) 2898 return 0; 2899 2900 ptt = qed_ptt_acquire(hwfn); 2901 if (!ptt) 2902 return -EAGAIN; 2903 2904 status = qed_mcp_ov_update_mac(hwfn, ptt, mac); 2905 if (status) 2906 goto out; 2907 2908 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2909 2910out: 2911 qed_ptt_release(hwfn, ptt); 2912 return status; 2913} 2914 2915static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) 2916{ 2917 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2918 struct qed_ptt *ptt; 2919 int status = 0; 2920 2921 if (IS_VF(cdev)) 2922 return 0; 2923 2924 ptt = qed_ptt_acquire(hwfn); 2925 if (!ptt) 2926 return -EAGAIN; 2927 2928 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); 2929 if (status) 2930 goto out; 2931 2932 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2933 2934out: 2935 qed_ptt_release(hwfn, ptt); 2936 return status; 2937} 2938 2939static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, 2940 u8 dev_addr, u32 offset, u32 len) 2941{ 2942 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2943 struct qed_ptt *ptt; 2944 int rc = 0; 2945 2946 if (IS_VF(cdev)) 2947 return 0; 2948 2949 ptt = qed_ptt_acquire(hwfn); 2950 if (!ptt) 2951 return -EAGAIN; 2952 2953 rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, 2954 offset, len, buf); 2955 2956 qed_ptt_release(hwfn, ptt); 2957 2958 return rc; 2959} 2960 2961static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) 2962{ 2963 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2964 struct qed_ptt *ptt; 2965 int rc = 0; 2966 2967 if (IS_VF(cdev)) 2968 return 0; 2969 2970 ptt = qed_ptt_acquire(hwfn); 2971 if (!ptt) 2972 return -EAGAIN; 2973 2974 rc = qed_dbg_grc_config(hwfn, cfg_id, val); 2975 2976 qed_ptt_release(hwfn, ptt); 2977 2978 return rc; 2979} 2980 2981static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) 2982{ 2983 return QED_AFFIN_HWFN_IDX(cdev); 2984} 2985 2986static struct qed_selftest_ops qed_selftest_ops_pass = { 2987 .selftest_memory = &qed_selftest_memory, 2988 .selftest_interrupt = &qed_selftest_interrupt, 2989 .selftest_register = &qed_selftest_register, 2990 .selftest_clock = &qed_selftest_clock, 2991 .selftest_nvram = &qed_selftest_nvram, 2992}; 2993 2994const struct qed_common_ops qed_common_ops_pass = { 2995 .selftest = &qed_selftest_ops_pass, 2996 .probe = &qed_probe, 2997 .remove = &qed_remove, 2998 .set_power_state = &qed_set_power_state, 2999 .set_name = &qed_set_name, 3000 .update_pf_params = &qed_update_pf_params, 3001 .slowpath_start = &qed_slowpath_start, 3002 .slowpath_stop = &qed_slowpath_stop, 3003 .set_fp_int = &qed_set_int_fp, 3004 .get_fp_int = &qed_get_int_fp, 3005 .sb_init = &qed_sb_init, 3006 .sb_release = &qed_sb_release, 3007 .simd_handler_config = &qed_simd_handler_config, 3008 .simd_handler_clean = &qed_simd_handler_clean, 3009 .dbg_grc = &qed_dbg_grc, 3010 .dbg_grc_size = &qed_dbg_grc_size, 3011 .can_link_change = &qed_can_link_change, 3012 .set_link = &qed_set_link, 3013 .get_link = &qed_get_current_link, 3014 .drain = &qed_drain, 3015 .update_msglvl = &qed_init_dp, 3016 .devlink_register = qed_devlink_register, 3017 .devlink_unregister = qed_devlink_unregister, 3018 .report_fatal_error = 
qed_report_fatal_error,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.attn_clr_enable = &qed_int_attn_clr_enable,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
			eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
			eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes =
p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}