1/* bnx2x_main.c: QLogic Everest network driver. 2 * 3 * Copyright (c) 2007-2013 Broadcom Corporation 4 * Copyright (c) 2014 QLogic Corporation 5 * All rights reserved 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation. 10 * 11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com> 12 * Written by: Eliezer Tamir 13 * Based on code from Michael Chan's bnx2 driver 14 * UDP CSUM errata workaround by Arik Gendelman 15 * Slowpath and fastpath rework by Vladislav Zolotarov 16 * Statistics and Link management by Yitchak Gertner 17 * 18 */ 19 20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 21 22#include <linux/module.h> 23#include <linux/moduleparam.h> 24#include <linux/kernel.h> 25#include <linux/device.h> /* for dev_info() */ 26#include <linux/timer.h> 27#include <linux/errno.h> 28#include <linux/ioport.h> 29#include <linux/slab.h> 30#include <linux/interrupt.h> 31#include <linux/pci.h> 32#include <linux/aer.h> 33#include <linux/init.h> 34#include <linux/netdevice.h> 35#include <linux/etherdevice.h> 36#include <linux/skbuff.h> 37#include <linux/dma-mapping.h> 38#include <linux/bitops.h> 39#include <linux/irq.h> 40#include <linux/delay.h> 41#include <asm/byteorder.h> 42#include <linux/time.h> 43#include <linux/ethtool.h> 44#include <linux/mii.h> 45#include <linux/if_vlan.h> 46#include <linux/crash_dump.h> 47#include <net/ip.h> 48#include <net/ipv6.h> 49#include <net/tcp.h> 50#include <net/vxlan.h> 51#include <net/checksum.h> 52#include <net/ip6_checksum.h> 53#include <linux/workqueue.h> 54#include <linux/crc32.h> 55#include <linux/crc32c.h> 56#include <linux/prefetch.h> 57#include <linux/zlib.h> 58#include <linux/io.h> 59#include <linux/semaphore.h> 60#include <linux/stringify.h> 61#include <linux/vmalloc.h> 62#include "bnx2x.h" 63#include "bnx2x_init.h" 64#include "bnx2x_init_ops.h" 65#include "bnx2x_cmn.h" 66#include "bnx2x_vfpf.h" 67#include "bnx2x_dcb.h" 68#include "bnx2x_sp.h" 69#include <linux/firmware.h> 70#include "bnx2x_fw_file_hdr.h" 71/* FW files */ 72#define FW_FILE_VERSION \ 73 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ 74 __stringify(BCM_5710_FW_MINOR_VERSION) "." \ 75 __stringify(BCM_5710_FW_REVISION_VERSION) "." \ 76 __stringify(BCM_5710_FW_ENGINEERING_VERSION) 77 78#define FW_FILE_VERSION_V15 \ 79 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ 80 __stringify(BCM_5710_FW_MINOR_VERSION) "." \ 81 __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." 
\ 82 __stringify(BCM_5710_FW_ENGINEERING_VERSION) 83 84#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" 85#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" 86#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" 87#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw" 88#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw" 89#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw" 90 91/* Time in jiffies before concluding the transmitter is hung */ 92#define TX_TIMEOUT (5*HZ) 93 94MODULE_AUTHOR("Eliezer Tamir"); 95MODULE_DESCRIPTION("QLogic " 96 "BCM57710/57711/57711E/" 97 "57712/57712_MF/57800/57800_MF/57810/57810_MF/" 98 "57840/57840_MF Driver"); 99MODULE_LICENSE("GPL"); 100MODULE_FIRMWARE(FW_FILE_NAME_E1); 101MODULE_FIRMWARE(FW_FILE_NAME_E1H); 102MODULE_FIRMWARE(FW_FILE_NAME_E2); 103MODULE_FIRMWARE(FW_FILE_NAME_E1_V15); 104MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15); 105MODULE_FIRMWARE(FW_FILE_NAME_E2_V15); 106 107int bnx2x_num_queues; 108module_param_named(num_queues, bnx2x_num_queues, int, 0444); 109MODULE_PARM_DESC(num_queues, 110 " Set number of queues (default is as a number of CPUs)"); 111 112static int disable_tpa; 113module_param(disable_tpa, int, 0444); 114MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); 115 116static int int_mode; 117module_param(int_mode, int, 0444); 118MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " 119 "(1 INT#x; 2 MSI)"); 120 121static int dropless_fc; 122module_param(dropless_fc, int, 0444); 123MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); 124 125static int mrrs = -1; 126module_param(mrrs, int, 0444); 127MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); 128 129static int debug; 130module_param(debug, int, 0444); 131MODULE_PARM_DESC(debug, " Default debug msglevel"); 132 133static struct workqueue_struct *bnx2x_wq; 134struct workqueue_struct *bnx2x_iov_wq; 135 136struct bnx2x_mac_vals { 137 u32 xmac_addr; 138 u32 xmac_val; 139 u32 emac_addr; 140 u32 emac_val; 141 u32 umac_addr[2]; 142 u32 umac_val[2]; 143 u32 bmac_addr; 144 u32 bmac_val[2]; 145}; 146 147enum bnx2x_board_type { 148 BCM57710 = 0, 149 BCM57711, 150 BCM57711E, 151 BCM57712, 152 BCM57712_MF, 153 BCM57712_VF, 154 BCM57800, 155 BCM57800_MF, 156 BCM57800_VF, 157 BCM57810, 158 BCM57810_MF, 159 BCM57810_VF, 160 BCM57840_4_10, 161 BCM57840_2_20, 162 BCM57840_MF, 163 BCM57840_VF, 164 BCM57811, 165 BCM57811_MF, 166 BCM57840_O, 167 BCM57840_MFO, 168 BCM57811_VF 169}; 170 171/* indexed by board_type, above */ 172static struct { 173 char *name; 174} board_info[] = { 175 [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" }, 176 [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" }, 177 [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" }, 178 [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" }, 179 [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" }, 180 [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" }, 181 [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" }, 182 [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" }, 183 [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" }, 184 [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" }, 185 [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" }, 186 [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" }, 187 [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" 
}, 188 [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" }, 189 [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" }, 190 [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }, 191 [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" }, 192 [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" }, 193 [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" }, 194 [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" }, 195 [BCM57811_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" } 196}; 197 198#ifndef PCI_DEVICE_ID_NX2_57710 199#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 200#endif 201#ifndef PCI_DEVICE_ID_NX2_57711 202#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 203#endif 204#ifndef PCI_DEVICE_ID_NX2_57711E 205#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E 206#endif 207#ifndef PCI_DEVICE_ID_NX2_57712 208#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 209#endif 210#ifndef PCI_DEVICE_ID_NX2_57712_MF 211#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF 212#endif 213#ifndef PCI_DEVICE_ID_NX2_57712_VF 214#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF 215#endif 216#ifndef PCI_DEVICE_ID_NX2_57800 217#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 218#endif 219#ifndef PCI_DEVICE_ID_NX2_57800_MF 220#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF 221#endif 222#ifndef PCI_DEVICE_ID_NX2_57800_VF 223#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF 224#endif 225#ifndef PCI_DEVICE_ID_NX2_57810 226#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 227#endif 228#ifndef PCI_DEVICE_ID_NX2_57810_MF 229#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF 230#endif 231#ifndef PCI_DEVICE_ID_NX2_57840_O 232#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE 233#endif 234#ifndef PCI_DEVICE_ID_NX2_57810_VF 235#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF 236#endif 237#ifndef PCI_DEVICE_ID_NX2_57840_4_10 238#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10 239#endif 240#ifndef PCI_DEVICE_ID_NX2_57840_2_20 241#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20 242#endif 243#ifndef PCI_DEVICE_ID_NX2_57840_MFO 244#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE 245#endif 246#ifndef PCI_DEVICE_ID_NX2_57840_MF 247#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF 248#endif 249#ifndef PCI_DEVICE_ID_NX2_57840_VF 250#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF 251#endif 252#ifndef PCI_DEVICE_ID_NX2_57811 253#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811 254#endif 255#ifndef PCI_DEVICE_ID_NX2_57811_MF 256#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF 257#endif 258#ifndef PCI_DEVICE_ID_NX2_57811_VF 259#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF 260#endif 261 262static const struct pci_device_id bnx2x_pci_tbl[] = { 263 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 264 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 265 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 266 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, 267 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, 268 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF }, 269 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, 270 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, 271 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF }, 272 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, 273 { PCI_VDEVICE(BROADCOM, 
PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, 274 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O }, 275 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 }, 276 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 }, 277 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 }, 278 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF }, 279 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO }, 280 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, 281 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, 282 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF }, 283 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF }, 284 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 }, 285 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF }, 286 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF }, 287 { 0 } 288}; 289 290MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); 291 292const u32 dmae_reg_go_c[] = { 293 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, 294 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, 295 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, 296 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 297}; 298 299/* Global resources for unloading a previously loaded device */ 300#define BNX2X_PREV_WAIT_NEEDED 1 301static DEFINE_SEMAPHORE(bnx2x_prev_sem); 302static LIST_HEAD(bnx2x_prev_list); 303 304/* Forward declaration */ 305static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev); 306static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp); 307static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); 308 309/**************************************************************************** 310* General service functions 311****************************************************************************/ 312 313static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr); 314 315static void __storm_memset_dma_mapping(struct bnx2x *bp, 316 u32 addr, dma_addr_t mapping) 317{ 318 REG_WR(bp, addr, U64_LO(mapping)); 319 REG_WR(bp, addr + 4, U64_HI(mapping)); 320} 321 322static void storm_memset_spq_addr(struct bnx2x *bp, 323 dma_addr_t mapping, u16 abs_fid) 324{ 325 u32 addr = XSEM_REG_FAST_MEMORY + 326 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); 327 328 __storm_memset_dma_mapping(bp, addr, mapping); 329} 330 331static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 332 u16 pf_id) 333{ 334 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 335 pf_id); 336 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), 337 pf_id); 338 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), 339 pf_id); 340 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), 341 pf_id); 342} 343 344static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 345 u8 enable) 346{ 347 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 348 enable); 349 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), 350 enable); 351 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), 352 enable); 353 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), 354 enable); 355} 356 357static void storm_memset_eq_data(struct bnx2x *bp, 358 struct event_ring_data *eq_data, 359 u16 pfid) 360{ 361 size_t size = sizeof(struct event_ring_data); 362 363 u32 addr = BAR_CSTRORM_INTMEM + 
CSTORM_EVENT_RING_DATA_OFFSET(pfid); 364 365 __storm_memset_struct(bp, addr, size, (u32 *)eq_data); 366} 367 368static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, 369 u16 pfid) 370{ 371 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); 372 REG_WR16(bp, addr, eq_prod); 373} 374 375/* used only at init 376 * locking is done by mcp 377 */ 378static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) 379{ 380 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); 381 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); 382 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 383 PCICFG_VENDOR_ID_OFFSET); 384} 385 386static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) 387{ 388 u32 val; 389 390 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); 391 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); 392 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 393 PCICFG_VENDOR_ID_OFFSET); 394 395 return val; 396} 397 398#define DMAE_DP_SRC_GRC "grc src_addr [%08x]" 399#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]" 400#define DMAE_DP_DST_GRC "grc dst_addr [%08x]" 401#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" 402#define DMAE_DP_DST_NONE "dst_addr [none]" 403 404static void bnx2x_dp_dmae(struct bnx2x *bp, 405 struct dmae_command *dmae, int msglvl) 406{ 407 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; 408 int i; 409 410 switch (dmae->opcode & DMAE_COMMAND_DST) { 411 case DMAE_CMD_DST_PCI: 412 if (src_type == DMAE_CMD_SRC_PCI) 413 DP(msglvl, "DMAE: opcode 0x%08x\n" 414 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" 415 "comp_addr [%x:%08x], comp_val 0x%08x\n", 416 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, 417 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, 418 dmae->comp_addr_hi, dmae->comp_addr_lo, 419 dmae->comp_val); 420 else 421 DP(msglvl, "DMAE: opcode 0x%08x\n" 422 "src [%08x], len [%d*4], dst [%x:%08x]\n" 423 "comp_addr [%x:%08x], comp_val 0x%08x\n", 424 dmae->opcode, dmae->src_addr_lo >> 2, 425 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, 426 dmae->comp_addr_hi, dmae->comp_addr_lo, 427 dmae->comp_val); 428 break; 429 case DMAE_CMD_DST_GRC: 430 if (src_type == DMAE_CMD_SRC_PCI) 431 DP(msglvl, "DMAE: opcode 0x%08x\n" 432 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" 433 "comp_addr [%x:%08x], comp_val 0x%08x\n", 434 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, 435 dmae->len, dmae->dst_addr_lo >> 2, 436 dmae->comp_addr_hi, dmae->comp_addr_lo, 437 dmae->comp_val); 438 else 439 DP(msglvl, "DMAE: opcode 0x%08x\n" 440 "src [%08x], len [%d*4], dst [%08x]\n" 441 "comp_addr [%x:%08x], comp_val 0x%08x\n", 442 dmae->opcode, dmae->src_addr_lo >> 2, 443 dmae->len, dmae->dst_addr_lo >> 2, 444 dmae->comp_addr_hi, dmae->comp_addr_lo, 445 dmae->comp_val); 446 break; 447 default: 448 if (src_type == DMAE_CMD_SRC_PCI) 449 DP(msglvl, "DMAE: opcode 0x%08x\n" 450 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" 451 "comp_addr [%x:%08x] comp_val 0x%08x\n", 452 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, 453 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, 454 dmae->comp_val); 455 else 456 DP(msglvl, "DMAE: opcode 0x%08x\n" 457 "src_addr [%08x] len [%d * 4] dst_addr [none]\n" 458 "comp_addr [%x:%08x] comp_val 0x%08x\n", 459 dmae->opcode, dmae->src_addr_lo >> 2, 460 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, 461 dmae->comp_val); 462 break; 463 } 464 465 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) 466 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", 467 i, *(((u32 *)dmae) + i)); 468} 469 470/* copy command 
into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
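/* The helpers above are normally used together: a caller builds a command
 * with bnx2x_prep_dmae_with_comp(), fills in the addresses and length, and
 * hands it to bnx2x_issue_dmae_with_comp() below.  Illustrative sketch only
 * ('mapping', 'grc_addr', 'len32' and 'rc' are assumed locals, this is not
 * driver code -- bnx2x_write_dmae() further down is the in-tree example):
 *
 *	struct dmae_command dmae;
 *
 *	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *	dmae.src_addr_lo = U64_LO(mapping);
 *	dmae.src_addr_hi = U64_HI(mapping);
 *	dmae.dst_addr_lo = grc_addr >> 2;
 *	dmae.len = len32;
 *	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 */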
/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * as long as this code is called both from syscall context and
	 * from ndo_set_rx_mode() flow that may be called from BH.
	 */

	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:

	spin_unlock_bh(&bp->dmae_lock);

	return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
	XSTORM,
	TSTORM,
	CSTORM,
	USTORM,
	MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
					      enum storms storm,
					      int entry)
{
	switch (storm) {
	case XSTORM:
		return XSTORM_ASSERT_LIST_OFFSET(entry);
	case TSTORM:
		return TSTORM_ASSERT_LIST_OFFSET(entry);
	case CSTORM:
		return CSTORM_ASSERT_LIST_OFFSET(entry);
	case USTORM:
		return USTORM_ASSERT_LIST_OFFSET(entry);
	case MAX_STORMS:
	default:
		BNX2X_ERR("unknown storm\n");
	}
	return -EINVAL;
}
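/**
 * bnx2x_mc_assert - scan the STORM assert lists for firmware asserts
 *
 * @bp:	driver handle
 *
 * Walks the XSTORM/TSTORM/CSTORM/USTORM assert lists in internal memory,
 * logs every entry that still holds a valid assert opcode, and returns the
 * number of asserts found (0 means the firmware has not asserted).
 */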
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	enum storms storm;
	u32 regs[REGS_IN_ENTRY];
	u32 bar_storm_intmem[STORMS_NUM] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};
	u32 storm_assert_list_index[STORMS_NUM] = {
		XSTORM_ASSERT_LIST_INDEX_OFFSET,
		TSTORM_ASSERT_LIST_INDEX_OFFSET,
		CSTORM_ASSERT_LIST_INDEX_OFFSET,
		USTORM_ASSERT_LIST_INDEX_OFFSET
	};
	char *storms_string[STORMS_NUM] = {
		"XSTORM",
		"TSTORM",
		"CSTORM",
		"USTORM"
	};

	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
				   storm_assert_list_index[storm]);
		if (last_idx)
			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
				  storms_string[storm], last_idx);

		/* print the asserts */
		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
			/* read a single assert entry */
			for (j = 0; j < REGS_IN_ENTRY; j++)
				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
					  bnx2x_get_assert_list_entry(bp,
								      storm,
								      i) +
					  sizeof(u32) * j);

			/* log entry if it contains a valid assert */
			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storms_string[storm], i, regs[3],
					  regs[2], regs[1], regs[0]);
				rc++;
			} else {
				break;
			}
		}
	}

	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
		  CHIP_IS_E1(bp) ? "everest1" :
		  CHIP_IS_E1H(bp) ? "everest1h" :
		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
		  bp->fw_major, bp->fw_minor, bp->fw_rev);

	return rc;
}

#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ?
0x20000 : 0x28000)) 771 772void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) 773{ 774 u32 addr, val; 775 u32 mark, offset; 776 __be32 data[9]; 777 int word; 778 u32 trace_shmem_base; 779 if (BP_NOMCP(bp)) { 780 BNX2X_ERR("NO MCP - can not dump\n"); 781 return; 782 } 783 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", 784 (bp->common.bc_ver & 0xff0000) >> 16, 785 (bp->common.bc_ver & 0xff00) >> 8, 786 (bp->common.bc_ver & 0xff)); 787 788 if (pci_channel_offline(bp->pdev)) { 789 BNX2X_ERR("Cannot dump MCP info while in PCI error\n"); 790 return; 791 } 792 793 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); 794 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) 795 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val); 796 797 if (BP_PATH(bp) == 0) 798 trace_shmem_base = bp->common.shmem_base; 799 else 800 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); 801 802 /* sanity */ 803 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || 804 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + 805 SCRATCH_BUFFER_SIZE(bp)) { 806 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n", 807 trace_shmem_base); 808 return; 809 } 810 811 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; 812 813 /* validate TRCB signature */ 814 mark = REG_RD(bp, addr); 815 if (mark != MFW_TRACE_SIGNATURE) { 816 BNX2X_ERR("Trace buffer signature is missing."); 817 return ; 818 } 819 820 /* read cyclic buffer pointer */ 821 addr += 4; 822 mark = REG_RD(bp, addr); 823 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; 824 if (mark >= trace_shmem_base || mark < addr + 4) { 825 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n"); 826 return; 827 } 828 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); 829 830 printk("%s", lvl); 831 832 /* dump buffer after the mark */ 833 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) { 834 for (word = 0; word < 8; word++) 835 data[word] = htonl(REG_RD(bp, offset + 4*word)); 836 data[8] = 0x0; 837 pr_cont("%s", (char *)data); 838 } 839 840 /* dump buffer before the mark */ 841 for (offset = addr + 4; offset <= mark; offset += 0x8*4) { 842 for (word = 0; word < 8; word++) 843 data[word] = htonl(REG_RD(bp, offset + 4*word)); 844 data[8] = 0x0; 845 pr_cont("%s", (char *)data); 846 } 847 printk("%s" "end of fw dump\n", lvl); 848} 849 850static void bnx2x_fw_dump(struct bnx2x *bp) 851{ 852 bnx2x_fw_dump_lvl(bp, KERN_ERR); 853} 854 855static void bnx2x_hc_int_disable(struct bnx2x *bp) 856{ 857 int port = BP_PORT(bp); 858 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 859 u32 val = REG_RD(bp, addr); 860 861 /* in E1 we must use only PCI configuration space to disable 862 * MSI/MSIX capability 863 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block 864 */ 865 if (CHIP_IS_E1(bp)) { 866 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on 867 * Use mask register to prevent from HC sending interrupts 868 * after we exit the function 869 */ 870 REG_WR(bp, HC_REG_INT_MASK + port*4, 0); 871 872 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 873 HC_CONFIG_0_REG_INT_LINE_EN_0 | 874 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 875 } else 876 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 877 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 878 HC_CONFIG_0_REG_INT_LINE_EN_0 | 879 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 880 881 DP(NETIF_MSG_IFDOWN, 882 "write %x to HC %d (addr 0x%x)\n", 883 val, port, addr); 884 885 REG_WR(bp, addr, val); 886 if (REG_RD(bp, addr) != val) 887 BNX2X_ERR("BUG! 
Proper val not read from IGU!\n"); 888} 889 890static void bnx2x_igu_int_disable(struct bnx2x *bp) 891{ 892 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 893 894 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 895 IGU_PF_CONF_INT_LINE_EN | 896 IGU_PF_CONF_ATTN_BIT_EN); 897 898 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); 899 900 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 901 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) 902 BNX2X_ERR("BUG! Proper val not read from IGU!\n"); 903} 904 905static void bnx2x_int_disable(struct bnx2x *bp) 906{ 907 if (bp->common.int_block == INT_BLOCK_HC) 908 bnx2x_hc_int_disable(bp); 909 else 910 bnx2x_igu_int_disable(bp); 911} 912 913void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) 914{ 915 int i; 916 u16 j; 917 struct hc_sp_status_block_data sp_sb_data; 918 int func = BP_FUNC(bp); 919#ifdef BNX2X_STOP_ON_ERROR 920 u16 start = 0, end = 0; 921 u8 cos; 922#endif 923 if (IS_PF(bp) && disable_int) 924 bnx2x_int_disable(bp); 925 926 bp->stats_state = STATS_STATE_DISABLED; 927 bp->eth_stats.unrecoverable_error++; 928 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); 929 930 BNX2X_ERR("begin crash dump -----------------\n"); 931 932 /* Indices */ 933 /* Common */ 934 if (IS_PF(bp)) { 935 struct host_sp_status_block *def_sb = bp->def_status_blk; 936 int data_size, cstorm_offset; 937 938 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", 939 bp->def_idx, bp->def_att_idx, bp->attn_state, 940 bp->spq_prod_idx, bp->stats_counter); 941 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", 942 def_sb->atten_status_block.attn_bits, 943 def_sb->atten_status_block.attn_bits_ack, 944 def_sb->atten_status_block.status_block_id, 945 def_sb->atten_status_block.attn_bits_index); 946 BNX2X_ERR(" def ("); 947 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) 948 pr_cont("0x%x%s", 949 def_sb->sp_sb.index_values[i], 950 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); 951 952 data_size = sizeof(struct hc_sp_status_block_data) / 953 sizeof(u32); 954 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func); 955 for (i = 0; i < data_size; i++) 956 *((u32 *)&sp_sb_data + i) = 957 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset + 958 i * sizeof(u32)); 959 960 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", 961 sp_sb_data.igu_sb_id, 962 sp_sb_data.igu_seg_id, 963 sp_sb_data.p_func.pf_id, 964 sp_sb_data.p_func.vnic_id, 965 sp_sb_data.p_func.vf_id, 966 sp_sb_data.p_func.vf_valid, 967 sp_sb_data.state); 968 } 969 970 for_each_eth_queue(bp, i) { 971 struct bnx2x_fastpath *fp = &bp->fp[i]; 972 int loop; 973 struct hc_status_block_data_e2 sb_data_e2; 974 struct hc_status_block_data_e1x sb_data_e1x; 975 struct hc_status_block_sm *hc_sm_p = 976 CHIP_IS_E1x(bp) ? 977 sb_data_e1x.common.state_machine : 978 sb_data_e2.common.state_machine; 979 struct hc_index_data *hc_index_p = 980 CHIP_IS_E1x(bp) ? 
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos)
		{
			if (!fp->txdata_ptr[cos])
				break;

			txdata = *fp->txdata_ptr[cos];

			if (!txdata.tx_cons_sb)
				continue;

			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");

		/* VF cannot access FW reflection for status block */
		if (IS_VF(bp))
			continue;

		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
1050 (u32 *)&sb_data_e1x : 1051 (u32 *)&sb_data_e2; 1052 /* copy sb data in here */ 1053 for (j = 0; j < data_size; j++) 1054 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + 1055 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + 1056 j * sizeof(u32)); 1057 1058 if (!CHIP_IS_E1x(bp)) { 1059 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", 1060 sb_data_e2.common.p_func.pf_id, 1061 sb_data_e2.common.p_func.vf_id, 1062 sb_data_e2.common.p_func.vf_valid, 1063 sb_data_e2.common.p_func.vnic_id, 1064 sb_data_e2.common.same_igu_sb_1b, 1065 sb_data_e2.common.state); 1066 } else { 1067 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", 1068 sb_data_e1x.common.p_func.pf_id, 1069 sb_data_e1x.common.p_func.vf_id, 1070 sb_data_e1x.common.p_func.vf_valid, 1071 sb_data_e1x.common.p_func.vnic_id, 1072 sb_data_e1x.common.same_igu_sb_1b, 1073 sb_data_e1x.common.state); 1074 } 1075 1076 /* SB_SMs data */ 1077 for (j = 0; j < HC_SB_MAX_SM; j++) { 1078 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n", 1079 j, hc_sm_p[j].__flags, 1080 hc_sm_p[j].igu_sb_id, 1081 hc_sm_p[j].igu_seg_id, 1082 hc_sm_p[j].time_to_expire, 1083 hc_sm_p[j].timer_value); 1084 } 1085 1086 /* Indices data */ 1087 for (j = 0; j < loop; j++) { 1088 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, 1089 hc_index_p[j].flags, 1090 hc_index_p[j].timeout); 1091 } 1092 } 1093 1094#ifdef BNX2X_STOP_ON_ERROR 1095 if (IS_PF(bp)) { 1096 /* event queue */ 1097 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); 1098 for (i = 0; i < NUM_EQ_DESC; i++) { 1099 u32 *data = (u32 *)&bp->eq_ring[i].message.data; 1100 1101 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", 1102 i, bp->eq_ring[i].message.opcode, 1103 bp->eq_ring[i].message.error); 1104 BNX2X_ERR("data: %x %x %x\n", 1105 data[0], data[1], data[2]); 1106 } 1107 } 1108 1109 /* Rings */ 1110 /* Rx */ 1111 for_each_valid_rx_queue(bp, i) { 1112 struct bnx2x_fastpath *fp = &bp->fp[i]; 1113 1114 if (!bp->fp) 1115 break; 1116 1117 if (!fp->rx_cons_sb) 1118 continue; 1119 1120 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 1121 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); 1122 for (j = start; j != end; j = RX_BD(j + 1)) { 1123 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; 1124 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; 1125 1126 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", 1127 i, j, rx_bd[1], rx_bd[0], sw_bd->data); 1128 } 1129 1130 start = RX_SGE(fp->rx_sge_prod); 1131 end = RX_SGE(fp->last_max_sge); 1132 for (j = start; j != end; j = RX_SGE(j + 1)) { 1133 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; 1134 struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; 1135 1136 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n", 1137 i, j, rx_sge[1], rx_sge[0], sw_page->page); 1138 } 1139 1140 start = RCQ_BD(fp->rx_comp_cons - 10); 1141 end = RCQ_BD(fp->rx_comp_cons + 503); 1142 for (j = start; j != end; j = RCQ_BD(j + 1)) { 1143 u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; 1144 1145 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n", 1146 i, j, cqe[0], cqe[1], cqe[2], cqe[3]); 1147 } 1148 } 1149 1150 /* Tx */ 1151 for_each_valid_tx_queue(bp, i) { 1152 struct bnx2x_fastpath *fp = &bp->fp[i]; 1153 1154 if (!bp->fp) 1155 break; 1156 1157 for_each_cos_in_tx_queue(fp, cos) { 1158 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; 1159 1160 if (!fp->txdata_ptr[cos]) 1161 break; 1162 1163 if (!txdata->tx_cons_sb) 1164 continue; 1165 1166 start = 
TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); 1167 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); 1168 for (j = start; j != end; j = TX_BD(j + 1)) { 1169 struct sw_tx_bd *sw_bd = 1170 &txdata->tx_buf_ring[j]; 1171 1172 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n", 1173 i, cos, j, sw_bd->skb, 1174 sw_bd->first_bd); 1175 } 1176 1177 start = TX_BD(txdata->tx_bd_cons - 10); 1178 end = TX_BD(txdata->tx_bd_cons + 254); 1179 for (j = start; j != end; j = TX_BD(j + 1)) { 1180 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; 1181 1182 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n", 1183 i, cos, j, tx_bd[0], tx_bd[1], 1184 tx_bd[2], tx_bd[3]); 1185 } 1186 } 1187 } 1188#endif 1189 if (IS_PF(bp)) { 1190 int tmp_msg_en = bp->msg_enable; 1191 1192 bnx2x_fw_dump(bp); 1193 bp->msg_enable |= NETIF_MSG_HW; 1194 BNX2X_ERR("Idle check (1st round) ----------\n"); 1195 bnx2x_idle_chk(bp); 1196 BNX2X_ERR("Idle check (2nd round) ----------\n"); 1197 bnx2x_idle_chk(bp); 1198 bp->msg_enable = tmp_msg_en; 1199 bnx2x_mc_assert(bp); 1200 } 1201 1202 BNX2X_ERR("end crash dump -----------------\n"); 1203} 1204 1205/* 1206 * FLR Support for E2 1207 * 1208 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW 1209 * initialization. 1210 */ 1211#define FLR_WAIT_USEC 10000 /* 10 milliseconds */ 1212#define FLR_WAIT_INTERVAL 50 /* usec */ 1213#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ 1214 1215struct pbf_pN_buf_regs { 1216 int pN; 1217 u32 init_crd; 1218 u32 crd; 1219 u32 crd_freed; 1220}; 1221 1222struct pbf_pN_cmd_regs { 1223 int pN; 1224 u32 lines_occup; 1225 u32 lines_freed; 1226}; 1227 1228static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, 1229 struct pbf_pN_buf_regs *regs, 1230 u32 poll_count) 1231{ 1232 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; 1233 u32 cur_cnt = poll_count; 1234 1235 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); 1236 crd = crd_start = REG_RD(bp, regs->crd); 1237 init_crd = REG_RD(bp, regs->init_crd); 1238 1239 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); 1240 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); 1241 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); 1242 1243 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < 1244 (init_crd - crd_start))) { 1245 if (cur_cnt--) { 1246 udelay(FLR_WAIT_INTERVAL); 1247 crd = REG_RD(bp, regs->crd); 1248 crd_freed = REG_RD(bp, regs->crd_freed); 1249 } else { 1250 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", 1251 regs->pN); 1252 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", 1253 regs->pN, crd); 1254 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", 1255 regs->pN, crd_freed); 1256 break; 1257 } 1258 } 1259 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", 1260 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 1261} 1262 1263static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, 1264 struct pbf_pN_cmd_regs *regs, 1265 u32 poll_count) 1266{ 1267 u32 occup, to_free, freed, freed_start; 1268 u32 cur_cnt = poll_count; 1269 1270 occup = to_free = REG_RD(bp, regs->lines_occup); 1271 freed = freed_start = REG_RD(bp, regs->lines_freed); 1272 1273 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 1274 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); 1275 1276 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { 1277 if (cur_cnt--) { 1278 udelay(FLR_WAIT_INTERVAL); 1279 occup = REG_RD(bp, regs->lines_occup); 1280 freed = REG_RD(bp, regs->lines_freed); 1281 } else { 1282 
DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", 1283 regs->pN); 1284 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", 1285 regs->pN, occup); 1286 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", 1287 regs->pN, freed); 1288 break; 1289 } 1290 } 1291 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", 1292 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 1293} 1294 1295static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, 1296 u32 expected, u32 poll_count) 1297{ 1298 u32 cur_cnt = poll_count; 1299 u32 val; 1300 1301 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) 1302 udelay(FLR_WAIT_INTERVAL); 1303 1304 return val; 1305} 1306 1307int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, 1308 char *msg, u32 poll_cnt) 1309{ 1310 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); 1311 if (val != 0) { 1312 BNX2X_ERR("%s usage count=%d\n", msg, val); 1313 return 1; 1314 } 1315 return 0; 1316} 1317 1318/* Common routines with VF FLR cleanup */ 1319u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) 1320{ 1321 /* adjust polling timeout */ 1322 if (CHIP_REV_IS_EMUL(bp)) 1323 return FLR_POLL_CNT * 2000; 1324 1325 if (CHIP_REV_IS_FPGA(bp)) 1326 return FLR_POLL_CNT * 120; 1327 1328 return FLR_POLL_CNT; 1329} 1330 1331void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) 1332{ 1333 struct pbf_pN_cmd_regs cmd_regs[] = { 1334 {0, (CHIP_IS_E3B0(bp)) ? 1335 PBF_REG_TQ_OCCUPANCY_Q0 : 1336 PBF_REG_P0_TQ_OCCUPANCY, 1337 (CHIP_IS_E3B0(bp)) ? 1338 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 1339 PBF_REG_P0_TQ_LINES_FREED_CNT}, 1340 {1, (CHIP_IS_E3B0(bp)) ? 1341 PBF_REG_TQ_OCCUPANCY_Q1 : 1342 PBF_REG_P1_TQ_OCCUPANCY, 1343 (CHIP_IS_E3B0(bp)) ? 1344 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 1345 PBF_REG_P1_TQ_LINES_FREED_CNT}, 1346 {4, (CHIP_IS_E3B0(bp)) ? 1347 PBF_REG_TQ_OCCUPANCY_LB_Q : 1348 PBF_REG_P4_TQ_OCCUPANCY, 1349 (CHIP_IS_E3B0(bp)) ? 1350 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 1351 PBF_REG_P4_TQ_LINES_FREED_CNT} 1352 }; 1353 1354 struct pbf_pN_buf_regs buf_regs[] = { 1355 {0, (CHIP_IS_E3B0(bp)) ? 1356 PBF_REG_INIT_CRD_Q0 : 1357 PBF_REG_P0_INIT_CRD , 1358 (CHIP_IS_E3B0(bp)) ? 1359 PBF_REG_CREDIT_Q0 : 1360 PBF_REG_P0_CREDIT, 1361 (CHIP_IS_E3B0(bp)) ? 1362 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 1363 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 1364 {1, (CHIP_IS_E3B0(bp)) ? 1365 PBF_REG_INIT_CRD_Q1 : 1366 PBF_REG_P1_INIT_CRD, 1367 (CHIP_IS_E3B0(bp)) ? 1368 PBF_REG_CREDIT_Q1 : 1369 PBF_REG_P1_CREDIT, 1370 (CHIP_IS_E3B0(bp)) ? 1371 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 1372 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 1373 {4, (CHIP_IS_E3B0(bp)) ? 1374 PBF_REG_INIT_CRD_LB_Q : 1375 PBF_REG_P4_INIT_CRD, 1376 (CHIP_IS_E3B0(bp)) ? 1377 PBF_REG_CREDIT_LB_Q : 1378 PBF_REG_P4_CREDIT, 1379 (CHIP_IS_E3B0(bp)) ? 
1380 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 1381 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 1382 }; 1383 1384 int i; 1385 1386 /* Verify the command queues are flushed P0, P1, P4 */ 1387 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) 1388 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); 1389 1390 /* Verify the transmission buffers are flushed P0, P1, P4 */ 1391 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) 1392 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); 1393} 1394 1395#define OP_GEN_PARAM(param) \ 1396 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 1397 1398#define OP_GEN_TYPE(type) \ 1399 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 1400 1401#define OP_GEN_AGG_VECT(index) \ 1402 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 1403 1404int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) 1405{ 1406 u32 op_gen_command = 0; 1407 u32 comp_addr = BAR_CSTRORM_INTMEM + 1408 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); 1409 int ret = 0; 1410 1411 if (REG_RD(bp, comp_addr)) { 1412 BNX2X_ERR("Cleanup complete was not 0 before sending\n"); 1413 return 1; 1414 } 1415 1416 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 1417 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 1418 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 1419 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 1420 1421 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n"); 1422 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command); 1423 1424 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { 1425 BNX2X_ERR("FW final cleanup did not succeed\n"); 1426 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", 1427 (REG_RD(bp, comp_addr))); 1428 bnx2x_panic(); 1429 return 1; 1430 } 1431 /* Zero completion for next FLR */ 1432 REG_WR(bp, comp_addr, 0); 1433 1434 return ret; 1435} 1436 1437u8 bnx2x_is_pcie_pending(struct pci_dev *dev) 1438{ 1439 u16 status; 1440 1441 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 1442 return status & PCI_EXP_DEVSTA_TRPND; 1443} 1444 1445/* PF FLR specific routines 1446*/ 1447static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) 1448{ 1449 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 1450 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1451 CFC_REG_NUM_LCIDS_INSIDE_PF, 1452 "CFC PF usage counter timed out", 1453 poll_cnt)) 1454 return 1; 1455 1456 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 1457 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1458 DORQ_REG_PF_USAGE_CNT, 1459 "DQ PF usage counter timed out", 1460 poll_cnt)) 1461 return 1; 1462 1463 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 1464 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1465 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), 1466 "QM PF usage counter timed out", 1467 poll_cnt)) 1468 return 1; 1469 1470 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 1471 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1472 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), 1473 "Timers VNIC usage counter timed out", 1474 poll_cnt)) 1475 return 1; 1476 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1477 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), 1478 "Timers NUM_SCANS usage counter timed out", 1479 poll_cnt)) 1480 return 1; 1481 1482 /* Wait DMAE PF usage counter to zero */ 1483 if (bnx2x_flr_clnup_poll_hw_counter(bp, 1484 dmae_reg_go_c[INIT_DMAE_C(bp)], 1485 "DMAE command register timed out", 1486 poll_cnt)) 1487 return 1; 1488 1489 return 0; 1490} 1491 1492static 
void bnx2x_hw_enable_status(struct bnx2x *bp) 1493{ 1494 u32 val; 1495 1496 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); 1497 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 1498 1499 val = REG_RD(bp, PBF_REG_DISABLE_PF); 1500 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); 1501 1502 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); 1503 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 1504 1505 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); 1506 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 1507 1508 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 1509 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 1510 1511 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 1512 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 1513 1514 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 1515 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 1516 1517 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 1518 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", 1519 val); 1520} 1521 1522static int bnx2x_pf_flr_clnup(struct bnx2x *bp) 1523{ 1524 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); 1525 1526 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); 1527 1528 /* Re-enable PF target read access */ 1529 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 1530 1531 /* Poll HW usage counters */ 1532 DP(BNX2X_MSG_SP, "Polling usage counters\n"); 1533 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) 1534 return -EBUSY; 1535 1536 /* Zero the igu 'trailing edge' and 'leading edge' */ 1537 1538 /* Send the FW cleanup command */ 1539 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) 1540 return -EBUSY; 1541 1542 /* ATC cleanup */ 1543 1544 /* Verify TX hw is flushed */ 1545 bnx2x_tx_hw_flushed(bp, poll_cnt); 1546 1547 /* Wait 100ms (not adjusted according to platform) */ 1548 msleep(100); 1549 1550 /* Verify no pending pci transactions */ 1551 if (bnx2x_is_pcie_pending(bp->pdev)) 1552 BNX2X_ERR("PCIE Transactions still pending\n"); 1553 1554 /* Debug */ 1555 bnx2x_hw_enable_status(bp); 1556 1557 /* 1558 * Master enable - Due to WB DMAE writes performed before this 1559 * register is re-initialized as part of the regular function init 1560 */ 1561 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 1562 1563 return 0; 1564} 1565 1566static void bnx2x_hc_int_enable(struct bnx2x *bp) 1567{ 1568 int port = BP_PORT(bp); 1569 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1570 u32 val = REG_RD(bp, addr); 1571 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; 1572 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; 1573 bool msi = (bp->flags & USING_MSI_FLAG) ? 
true : false; 1574 1575 if (msix) { 1576 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1577 HC_CONFIG_0_REG_INT_LINE_EN_0); 1578 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1579 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1580 if (single_msix) 1581 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 1582 } else if (msi) { 1583 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 1584 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1585 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1586 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1587 } else { 1588 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1589 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1590 HC_CONFIG_0_REG_INT_LINE_EN_0 | 1591 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1592 1593 if (!CHIP_IS_E1(bp)) { 1594 DP(NETIF_MSG_IFUP, 1595 "write %x to HC %d (addr 0x%x)\n", val, port, addr); 1596 1597 REG_WR(bp, addr, val); 1598 1599 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 1600 } 1601 } 1602 1603 if (CHIP_IS_E1(bp)) 1604 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); 1605 1606 DP(NETIF_MSG_IFUP, 1607 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr, 1608 (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); 1609 1610 REG_WR(bp, addr, val); 1611 /* 1612 * Ensure that HC_CONFIG is written before leading/trailing edge config 1613 */ 1614 barrier(); 1615 1616 if (!CHIP_IS_E1(bp)) { 1617 /* init leading/trailing edge */ 1618 if (IS_MF(bp)) { 1619 val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1620 if (bp->port.pmf) 1621 /* enable nig and gpio3 attention */ 1622 val |= 0x1100; 1623 } else 1624 val = 0xffff; 1625 1626 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 1627 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 1628 } 1629} 1630 1631static void bnx2x_igu_int_enable(struct bnx2x *bp) 1632{ 1633 u32 val; 1634 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; 1635 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; 1636 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; 1637 1638 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 1639 1640 if (msix) { 1641 val &= ~(IGU_PF_CONF_INT_LINE_EN | 1642 IGU_PF_CONF_SINGLE_ISR_EN); 1643 val |= (IGU_PF_CONF_MSI_MSIX_EN | 1644 IGU_PF_CONF_ATTN_BIT_EN); 1645 1646 if (single_msix) 1647 val |= IGU_PF_CONF_SINGLE_ISR_EN; 1648 } else if (msi) { 1649 val &= ~IGU_PF_CONF_INT_LINE_EN; 1650 val |= (IGU_PF_CONF_MSI_MSIX_EN | 1651 IGU_PF_CONF_ATTN_BIT_EN | 1652 IGU_PF_CONF_SINGLE_ISR_EN); 1653 } else { 1654 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 1655 val |= (IGU_PF_CONF_INT_LINE_EN | 1656 IGU_PF_CONF_ATTN_BIT_EN | 1657 IGU_PF_CONF_SINGLE_ISR_EN); 1658 } 1659 1660 /* Clean previous status - need to configure igu prior to ack*/ 1661 if ((!msix) || single_msix) { 1662 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 1663 bnx2x_ack_int(bp); 1664 } 1665 1666 val |= IGU_PF_CONF_FUNC_EN; 1667 1668 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n", 1669 val, (msix ? "MSI-X" : (msi ? 
"MSI" : "INTx"))); 1670 1671 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 1672 1673 if (val & IGU_PF_CONF_INT_LINE_EN) 1674 pci_intx(bp->pdev, true); 1675 1676 barrier(); 1677 1678 /* init leading/trailing edge */ 1679 if (IS_MF(bp)) { 1680 val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1681 if (bp->port.pmf) 1682 /* enable nig and gpio3 attention */ 1683 val |= 0x1100; 1684 } else 1685 val = 0xffff; 1686 1687 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); 1688 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); 1689} 1690 1691void bnx2x_int_enable(struct bnx2x *bp) 1692{ 1693 if (bp->common.int_block == INT_BLOCK_HC) 1694 bnx2x_hc_int_enable(bp); 1695 else 1696 bnx2x_igu_int_enable(bp); 1697} 1698 1699void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 1700{ 1701 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1702 int i, offset; 1703 1704 if (disable_hw) 1705 /* prevent the HW from sending interrupts */ 1706 bnx2x_int_disable(bp); 1707 1708 /* make sure all ISRs are done */ 1709 if (msix) { 1710 synchronize_irq(bp->msix_table[0].vector); 1711 offset = 1; 1712 if (CNIC_SUPPORT(bp)) 1713 offset++; 1714 for_each_eth_queue(bp, i) 1715 synchronize_irq(bp->msix_table[offset++].vector); 1716 } else 1717 synchronize_irq(bp->pdev->irq); 1718 1719 /* make sure sp_task is not running */ 1720 cancel_delayed_work(&bp->sp_task); 1721 cancel_delayed_work(&bp->period_task); 1722 flush_workqueue(bnx2x_wq); 1723} 1724 1725/* fast path */ 1726 1727/* 1728 * General service functions 1729 */ 1730 1731/* Return true if succeeded to acquire the lock */ 1732static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) 1733{ 1734 u32 lock_status; 1735 u32 resource_bit = (1 << resource); 1736 int func = BP_FUNC(bp); 1737 u32 hw_lock_control_reg; 1738 1739 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1740 "Trying to take a lock on resource %d\n", resource); 1741 1742 /* Validating that the resource is within range */ 1743 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1744 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1745 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 1746 resource, HW_LOCK_MAX_RESOURCE_VALUE); 1747 return false; 1748 } 1749 1750 if (func <= 5) 1751 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 1752 else 1753 hw_lock_control_reg = 1754 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 1755 1756 /* Try to acquire the lock */ 1757 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); 1758 lock_status = REG_RD(bp, hw_lock_control_reg); 1759 if (lock_status & resource_bit) 1760 return true; 1761 1762 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, 1763 "Failed to get a lock on resource %d\n", resource); 1764 return false; 1765} 1766 1767/** 1768 * bnx2x_get_leader_lock_resource - get the recovery leader resource id 1769 * 1770 * @bp: driver handle 1771 * 1772 * Returns the recovery leader resource id according to the engine this function 1773 * belongs to. Currently only only 2 engines is supported. 1774 */ 1775static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) 1776{ 1777 if (BP_PATH(bp)) 1778 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; 1779 else 1780 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; 1781} 1782 1783/** 1784 * bnx2x_trylock_leader_lock- try to acquire a leader lock. 1785 * 1786 * @bp: driver handle 1787 * 1788 * Tries to acquire a leader lock for current engine. 
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);

/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* Set the interrupt occurred bit for the sp-task to recognize it
	 * must ack the interrupt and transition according to the IGU
	 * state machine.
	 */
	atomic_set(&bp->interrupt_occurred, 1);

	/* The sp_task must execute only after this bit
	 * is set, otherwise we will get out of sync and miss all
	 * further interrupts. Hence, the barrier.
	 */
	smp_wmb();

	/* schedule sp_task to workqueue */
	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slowpath object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command in the
		 * first place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end
		 *
		 * mark pending ACK to MCP bit.
		 * prevent case that both bits are cleared.
		 * At the end of load/unload driver checks that
		 * sp_state is cleared, and this order prevents
		 * races
		 */
		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		/* schedule the sp task as mcp ack is required */
		bnx2x_schedule_sp_task(bp);
	}

	return;
}
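/* Interrupt handler used for the single-vector (INTx/MSI) modes.  Roughly:
 * bnx2x_ack_int() returns a status bit mask in which bit 0 stands for the
 * default (slowpath) status block and each fastpath status block owns one
 * further bit (see the "0x2 << (fp->index + CNIC_SUPPORT(bp))" mask below).
 * Fastpath bits are dispatched to NAPI, the CNIC bit to the registered
 * cnic_ops handler, and the slowpath bit to the sp task.
 */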
1886 */ 1887#ifdef BNX2X_STOP_ON_ERROR 1888 bnx2x_panic(); 1889#else 1890 return; 1891#endif 1892 1893 smp_mb__before_atomic(); 1894 atomic_inc(&bp->cq_spq_left); 1895 /* push the change in bp->spq_left and towards the memory */ 1896 smp_mb__after_atomic(); 1897 1898 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); 1899 1900 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 1901 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { 1902 /* if Q update ramrod is completed for last Q in AFEX vif set 1903 * flow, then ACK MCP at the end 1904 * 1905 * mark pending ACK to MCP bit. 1906 * prevent case that both bits are cleared. 1907 * At the end of load/unload driver checks that 1908 * sp_state is cleared, and this order prevents 1909 * races 1910 */ 1911 smp_mb__before_atomic(); 1912 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); 1913 wmb(); 1914 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 1915 smp_mb__after_atomic(); 1916 1917 /* schedule the sp task as mcp ack is required */ 1918 bnx2x_schedule_sp_task(bp); 1919 } 1920 1921 return; 1922} 1923 1924irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) 1925{ 1926 struct bnx2x *bp = netdev_priv(dev_instance); 1927 u16 status = bnx2x_ack_int(bp); 1928 u16 mask; 1929 int i; 1930 u8 cos; 1931 1932 /* Return here if interrupt is shared and it's not for us */ 1933 if (unlikely(status == 0)) { 1934 DP(NETIF_MSG_INTR, "not our interrupt!\n"); 1935 return IRQ_NONE; 1936 } 1937 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); 1938 1939#ifdef BNX2X_STOP_ON_ERROR 1940 if (unlikely(bp->panic)) 1941 return IRQ_HANDLED; 1942#endif 1943 1944 for_each_eth_queue(bp, i) { 1945 struct bnx2x_fastpath *fp = &bp->fp[i]; 1946 1947 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); 1948 if (status & mask) { 1949 /* Handle Rx or Tx according to SB id */ 1950 for_each_cos_in_tx_queue(fp, cos) 1951 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); 1952 prefetch(&fp->sb_running_index[SM_RX_ID]); 1953 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); 1954 status &= ~mask; 1955 } 1956 } 1957 1958 if (CNIC_SUPPORT(bp)) { 1959 mask = 0x2; 1960 if (status & (mask | 0x1)) { 1961 struct cnic_ops *c_ops = NULL; 1962 1963 rcu_read_lock(); 1964 c_ops = rcu_dereference(bp->cnic_ops); 1965 if (c_ops && (bp->cnic_eth_dev.drv_state & 1966 CNIC_DRV_STATE_HANDLES_IRQ)) 1967 c_ops->cnic_handler(bp->cnic_data, NULL); 1968 rcu_read_unlock(); 1969 1970 status &= ~mask; 1971 } 1972 } 1973 1974 if (unlikely(status & 0x1)) { 1975 1976 /* schedule sp task to perform default status block work, ack 1977 * attentions and enable interrupts. 1978 */ 1979 bnx2x_schedule_sp_task(bp); 1980 1981 status &= ~0x1; 1982 if (!status) 1983 return IRQ_HANDLED; 1984 } 1985 1986 if (unlikely(status)) 1987 DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", 1988 status); 1989 1990 return IRQ_HANDLED; 1991} 1992 1993/* Link */ 1994 1995/* 1996 * General service functions 1997 */ 1998 1999int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) 2000{ 2001 u32 lock_status; 2002 u32 resource_bit = (1 << resource); 2003 int func = BP_FUNC(bp); 2004 u32 hw_lock_control_reg; 2005 int cnt; 2006 2007 /* Validating that the resource is within range */ 2008 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 2009 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 2010 resource, HW_LOCK_MAX_RESOURCE_VALUE); 2011 return -EINVAL; 2012 } 2013 2014 if (func <= 5) { 2015 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 2016 } else { 2017 hw_lock_control_reg = 2018 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 2019 } 2020 2021 /* Validating that the resource is not already taken */ 2022 lock_status = REG_RD(bp, hw_lock_control_reg); 2023 if (lock_status & resource_bit) { 2024 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n", 2025 lock_status, resource_bit); 2026 return -EEXIST; 2027 } 2028 2029 /* Try for 5 second every 5ms */ 2030 for (cnt = 0; cnt < 1000; cnt++) { 2031 /* Try to acquire the lock */ 2032 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); 2033 lock_status = REG_RD(bp, hw_lock_control_reg); 2034 if (lock_status & resource_bit) 2035 return 0; 2036 2037 usleep_range(5000, 10000); 2038 } 2039 BNX2X_ERR("Timeout\n"); 2040 return -EAGAIN; 2041} 2042 2043int bnx2x_release_leader_lock(struct bnx2x *bp) 2044{ 2045 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 2046} 2047 2048int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) 2049{ 2050 u32 lock_status; 2051 u32 resource_bit = (1 << resource); 2052 int func = BP_FUNC(bp); 2053 u32 hw_lock_control_reg; 2054 2055 /* Validating that the resource is within range */ 2056 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 2057 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 2058 resource, HW_LOCK_MAX_RESOURCE_VALUE); 2059 return -EINVAL; 2060 } 2061 2062 if (func <= 5) { 2063 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 2064 } else { 2065 hw_lock_control_reg = 2066 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 2067 } 2068 2069 /* Validating that the resource is currently taken */ 2070 lock_status = REG_RD(bp, hw_lock_control_reg); 2071 if (!(lock_status & resource_bit)) { 2072 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n", 2073 lock_status, resource_bit); 2074 return -EFAULT; 2075 } 2076 2077 REG_WR(bp, hw_lock_control_reg, resource_bit); 2078 return 0; 2079} 2080 2081int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) 2082{ 2083 /* The GPIO should be swapped if swap register is set and active */ 2084 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2085 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2086 int gpio_shift = gpio_num + 2087 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2088 u32 gpio_mask = (1 << gpio_shift); 2089 u32 gpio_reg; 2090 int value; 2091 2092 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2093 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2094 return -EINVAL; 2095 } 2096 2097 /* read GPIO value */ 2098 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 2099 2100 /* get the requested pin value */ 2101 if ((gpio_reg & gpio_mask) == gpio_mask) 2102 value = 1; 2103 else 2104 value = 0; 2105 2106 return value; 2107} 2108 2109int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 2110{ 2111 /* The GPIO should be swapped if swap register is set and active */ 2112 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2113 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2114 int gpio_shift = gpio_num + 2115 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2116 u32 gpio_mask = (1 << gpio_shift); 2117 u32 gpio_reg; 2118 2119 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2120 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2121 return -EINVAL; 2122 } 2123 2124 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2125 /* read GPIO and mask except the float bits */ 2126 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2127 2128 switch (mode) { 2129 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2130 DP(NETIF_MSG_LINK, 2131 "Set GPIO %d (shift %d) -> output low\n", 2132 gpio_num, gpio_shift); 2133 /* clear FLOAT and set CLR */ 2134 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2135 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2136 break; 2137 2138 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2139 DP(NETIF_MSG_LINK, 2140 "Set GPIO %d (shift %d) -> output high\n", 2141 gpio_num, gpio_shift); 2142 /* clear FLOAT and set SET */ 2143 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2144 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2145 break; 2146 2147 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2148 DP(NETIF_MSG_LINK, 2149 "Set GPIO %d (shift %d) -> input\n", 2150 gpio_num, gpio_shift); 2151 /* set FLOAT */ 2152 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2153 break; 2154 2155 default: 2156 break; 2157 } 2158 2159 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 2160 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2161 2162 return 0; 2163} 2164 2165int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) 2166{ 2167 u32 gpio_reg = 0; 2168 int rc = 0; 2169 2170 /* Any port swapping should be handled by caller. 
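	 * Unlike bnx2x_set_gpio(), which derives the pin shift from the NIG
	 * port-swap/strap-override registers, this variant operates directly
	 * on the pin mask supplied by the caller.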
*/ 2171 2172 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2173 /* read GPIO and mask except the float bits */ 2174 gpio_reg = REG_RD(bp, MISC_REG_GPIO); 2175 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2176 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2177 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2178 2179 switch (mode) { 2180 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2181 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); 2182 /* set CLR */ 2183 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2184 break; 2185 2186 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2187 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); 2188 /* set SET */ 2189 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2190 break; 2191 2192 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2193 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); 2194 /* set FLOAT */ 2195 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2196 break; 2197 2198 default: 2199 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); 2200 rc = -EINVAL; 2201 break; 2202 } 2203 2204 if (rc == 0) 2205 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 2206 2207 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2208 2209 return rc; 2210} 2211 2212int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) 2213{ 2214 /* The GPIO should be swapped if swap register is set and active */ 2215 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 2216 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; 2217 int gpio_shift = gpio_num + 2218 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 2219 u32 gpio_mask = (1 << gpio_shift); 2220 u32 gpio_reg; 2221 2222 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2223 BNX2X_ERR("Invalid GPIO %d\n", gpio_num); 2224 return -EINVAL; 2225 } 2226 2227 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2228 /* read GPIO int */ 2229 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); 2230 2231 switch (mode) { 2232 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2233 DP(NETIF_MSG_LINK, 2234 "Clear GPIO INT %d (shift %d) -> output low\n", 2235 gpio_num, gpio_shift); 2236 /* clear SET and set CLR */ 2237 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2238 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2239 break; 2240 2241 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2242 DP(NETIF_MSG_LINK, 2243 "Set GPIO INT %d (shift %d) -> output high\n", 2244 gpio_num, gpio_shift); 2245 /* clear CLR and set SET */ 2246 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2247 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2248 break; 2249 2250 default: 2251 break; 2252 } 2253 2254 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); 2255 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 2256 2257 return 0; 2258} 2259 2260static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode) 2261{ 2262 u32 spio_reg; 2263 2264 /* Only 2 SPIOs are configurable */ 2265 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 2266 BNX2X_ERR("Invalid SPIO 0x%x\n", spio); 2267 return -EINVAL; 2268 } 2269 2270 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2271 /* read SPIO and mask except the float bits */ 2272 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 2273 2274 switch (mode) { 2275 case MISC_SPIO_OUTPUT_LOW: 2276 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio); 2277 /* clear FLOAT and set CLR */ 2278 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 2279 spio_reg |= (spio << MISC_SPIO_CLR_POS); 2280 break; 2281 2282 case MISC_SPIO_OUTPUT_HIGH: 2283 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", 
spio); 2284 /* clear FLOAT and set SET */ 2285 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 2286 spio_reg |= (spio << MISC_SPIO_SET_POS); 2287 break; 2288 2289 case MISC_SPIO_INPUT_HI_Z: 2290 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); 2291 /* set FLOAT */ 2292 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 2293 break; 2294 2295 default: 2296 break; 2297 } 2298 2299 REG_WR(bp, MISC_REG_SPIO, spio_reg); 2300 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2301 2302 return 0; 2303} 2304 2305void bnx2x_calc_fc_adv(struct bnx2x *bp) 2306{ 2307 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); 2308 2309 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2310 ADVERTISED_Pause); 2311 switch (bp->link_vars.ieee_fc & 2312 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 2313 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 2314 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 2315 ADVERTISED_Pause); 2316 break; 2317 2318 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 2319 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 2320 break; 2321 2322 default: 2323 break; 2324 } 2325} 2326 2327static void bnx2x_set_requested_fc(struct bnx2x *bp) 2328{ 2329 /* Initialize link parameters structure variables 2330 * It is recommended to turn off RX FC for jumbo frames 2331 * for better performance 2332 */ 2333 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) 2334 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 2335 else 2336 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2337} 2338 2339static void bnx2x_init_dropless_fc(struct bnx2x *bp) 2340{ 2341 u32 pause_enabled = 0; 2342 2343 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { 2344 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) 2345 pause_enabled = 1; 2346 2347 REG_WR(bp, BAR_USTRORM_INTMEM + 2348 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), 2349 pause_enabled); 2350 } 2351 2352 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", 2353 pause_enabled ? 
"enabled" : "disabled"); 2354} 2355 2356int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2357{ 2358 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); 2359 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; 2360 2361 if (!BP_NOMCP(bp)) { 2362 bnx2x_set_requested_fc(bp); 2363 bnx2x_acquire_phy_lock(bp); 2364 2365 if (load_mode == LOAD_DIAG) { 2366 struct link_params *lp = &bp->link_params; 2367 lp->loopback_mode = LOOPBACK_XGXS; 2368 /* Prefer doing PHY loopback at highest speed */ 2369 if (lp->req_line_speed[cfx_idx] < SPEED_20000) { 2370 if (lp->speed_cap_mask[cfx_idx] & 2371 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) 2372 lp->req_line_speed[cfx_idx] = 2373 SPEED_20000; 2374 else if (lp->speed_cap_mask[cfx_idx] & 2375 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2376 lp->req_line_speed[cfx_idx] = 2377 SPEED_10000; 2378 else 2379 lp->req_line_speed[cfx_idx] = 2380 SPEED_1000; 2381 } 2382 } 2383 2384 if (load_mode == LOAD_LOOPBACK_EXT) { 2385 struct link_params *lp = &bp->link_params; 2386 lp->loopback_mode = LOOPBACK_EXT; 2387 } 2388 2389 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2390 2391 bnx2x_release_phy_lock(bp); 2392 2393 bnx2x_init_dropless_fc(bp); 2394 2395 bnx2x_calc_fc_adv(bp); 2396 2397 if (bp->link_vars.link_up) { 2398 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2399 bnx2x_link_report(bp); 2400 } 2401 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2402 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; 2403 return rc; 2404 } 2405 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 2406 return -EINVAL; 2407} 2408 2409void bnx2x_link_set(struct bnx2x *bp) 2410{ 2411 if (!BP_NOMCP(bp)) { 2412 bnx2x_acquire_phy_lock(bp); 2413 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2414 bnx2x_release_phy_lock(bp); 2415 2416 bnx2x_init_dropless_fc(bp); 2417 2418 bnx2x_calc_fc_adv(bp); 2419 } else 2420 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2421} 2422 2423static void bnx2x__link_reset(struct bnx2x *bp) 2424{ 2425 if (!BP_NOMCP(bp)) { 2426 bnx2x_acquire_phy_lock(bp); 2427 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); 2428 bnx2x_release_phy_lock(bp); 2429 } else 2430 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 2431} 2432 2433void bnx2x_force_link_reset(struct bnx2x *bp) 2434{ 2435 bnx2x_acquire_phy_lock(bp); 2436 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); 2437 bnx2x_release_phy_lock(bp); 2438} 2439 2440u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) 2441{ 2442 u8 rc = 0; 2443 2444 if (!BP_NOMCP(bp)) { 2445 bnx2x_acquire_phy_lock(bp); 2446 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, 2447 is_serdes); 2448 bnx2x_release_phy_lock(bp); 2449 } else 2450 BNX2X_ERR("Bootcode is missing - can not test link\n"); 2451 2452 return rc; 2453} 2454 2455/* Calculates the sum of vn_min_rates. 2456 It's needed for further normalizing of the min_rates. 2457 Returns: 2458 sum of vn_min_rates. 2459 or 2460 0 - if all the min_rates are 0. 2461 In the later case fairness algorithm should be deactivated. 2462 If not all min_rates are zero then those that are zeroes will be set to 1. 
2463 */ 2464static void bnx2x_calc_vn_min(struct bnx2x *bp, 2465 struct cmng_init_input *input) 2466{ 2467 int all_zero = 1; 2468 int vn; 2469 2470 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2471 u32 vn_cfg = bp->mf_config[vn]; 2472 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2473 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2474 2475 /* Skip hidden vns */ 2476 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) 2477 vn_min_rate = 0; 2478 /* If min rate is zero - set it to 1 */ 2479 else if (!vn_min_rate) 2480 vn_min_rate = DEF_MIN_RATE; 2481 else 2482 all_zero = 0; 2483 2484 input->vnic_min_rate[vn] = vn_min_rate; 2485 } 2486 2487 /* if ETS or all min rates are zeros - disable fairness */ 2488 if (BNX2X_IS_ETS_ENABLED(bp)) { 2489 input->flags.cmng_enables &= 2490 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2491 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); 2492 } else if (all_zero) { 2493 input->flags.cmng_enables &= 2494 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2495 DP(NETIF_MSG_IFUP, 2496 "All MIN values are zeroes fairness will be disabled\n"); 2497 } else 2498 input->flags.cmng_enables |= 2499 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2500} 2501 2502static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, 2503 struct cmng_init_input *input) 2504{ 2505 u16 vn_max_rate; 2506 u32 vn_cfg = bp->mf_config[vn]; 2507 2508 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) 2509 vn_max_rate = 0; 2510 else { 2511 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); 2512 2513 if (IS_MF_PERCENT_BW(bp)) { 2514 /* maxCfg in percents of linkspeed */ 2515 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; 2516 } else /* SD modes */ 2517 /* maxCfg is absolute in 100Mb units */ 2518 vn_max_rate = maxCfg * 100; 2519 } 2520 2521 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); 2522 2523 input->vnic_max_rate[vn] = vn_max_rate; 2524} 2525 2526static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) 2527{ 2528 if (CHIP_REV_IS_SLOW(bp)) 2529 return CMNG_FNS_NONE; 2530 if (IS_MF(bp)) 2531 return CMNG_FNS_MINMAX; 2532 2533 return CMNG_FNS_NONE; 2534} 2535 2536void bnx2x_read_mf_cfg(struct bnx2x *bp) 2537{ 2538 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1); 2539 2540 if (BP_NOMCP(bp)) 2541 return; /* what should be the default value in this case */ 2542 2543 /* For 2 port configuration the absolute function number formula 2544 * is: 2545 * abs_func = 2 * vn + BP_PORT + BP_PATH 2546 * 2547 * and there are 4 functions per port 2548 * 2549 * For 4 port configuration it is 2550 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH 2551 * 2552 * and there are 2 functions per port 2553 */ 2554 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2555 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); 2556 2557 if (func >= E1H_FUNC_MAX) 2558 break; 2559 2560 bp->mf_config[vn] = 2561 MF_CFG_RD(bp, func_mf_config[func].config); 2562 } 2563 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 2564 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 2565 bp->flags |= MF_FUNC_DIS; 2566 } else { 2567 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); 2568 bp->flags &= ~MF_FUNC_DIS; 2569 } 2570} 2571 2572static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) 2573{ 2574 struct cmng_init_input input; 2575 memset(&input, 0, sizeof(struct cmng_init_input)); 2576 2577 input.port_rate = bp->link_vars.line_speed; 2578 2579 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { 2580 int vn; 2581 2582 /* read mf conf from shmem */ 2583 if (read_cfg) 2584 bnx2x_read_mf_cfg(bp); 2585 2586 /* vn_weight_sum and enable fairness if not 0 */ 2587 bnx2x_calc_vn_min(bp, &input); 2588 2589 /* calculate and set min-max rate for each vn */ 2590 if (bp->port.pmf) 2591 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) 2592 bnx2x_calc_vn_max(bp, vn, &input); 2593 2594 /* always enable rate shaping and fairness */ 2595 input.flags.cmng_enables |= 2596 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 2597 2598 bnx2x_init_cmng(&input, &bp->cmng); 2599 return; 2600 } 2601 2602 /* rate shaping and fairness are disabled */ 2603 DP(NETIF_MSG_IFUP, 2604 "rate shaping and fairness are disabled\n"); 2605} 2606 2607static void storm_memset_cmng(struct bnx2x *bp, 2608 struct cmng_init *cmng, 2609 u8 port) 2610{ 2611 int vn; 2612 size_t size = sizeof(struct cmng_struct_per_port); 2613 2614 u32 addr = BAR_XSTRORM_INTMEM + 2615 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); 2616 2617 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); 2618 2619 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2620 int func = func_by_vn(bp, vn); 2621 2622 addr = BAR_XSTRORM_INTMEM + 2623 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func); 2624 size = sizeof(struct rate_shaping_vars_per_vn); 2625 __storm_memset_struct(bp, addr, size, 2626 (u32 *)&cmng->vnic.vnic_max_rate[vn]); 2627 2628 addr = BAR_XSTRORM_INTMEM + 2629 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func); 2630 size = sizeof(struct fairness_vars_per_vn); 2631 __storm_memset_struct(bp, addr, size, 2632 (u32 *)&cmng->vnic.vnic_min_rate[vn]); 2633 } 2634} 2635 2636/* init cmng mode in HW according to local configuration */ 2637void bnx2x_set_local_cmng(struct bnx2x *bp) 2638{ 2639 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2640 2641 if (cmng_fns != CMNG_FNS_NONE) { 2642 bnx2x_cmng_fns_init(bp, false, cmng_fns); 2643 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 2644 } else { 2645 /* rate shaping and fairness are disabled */ 2646 DP(NETIF_MSG_IFUP, 2647 "single function mode without fairness\n"); 2648 } 2649} 2650 2651/* This function is called upon link interrupt */ 2652static void bnx2x_link_attn(struct bnx2x *bp) 2653{ 2654 /* Make sure that we are synced with the current statistics */ 2655 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2656 2657 
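	/* Let the link layer re-evaluate the link; bnx2x_link_update()
	 * refreshes bp->link_vars (link_up, line_speed, flow control) used
	 * below.
	 */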
bnx2x_link_update(&bp->link_params, &bp->link_vars); 2658 2659 bnx2x_init_dropless_fc(bp); 2660 2661 if (bp->link_vars.link_up) { 2662 2663 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { 2664 struct host_port_stats *pstats; 2665 2666 pstats = bnx2x_sp(bp, port_stats); 2667 /* reset old mac stats */ 2668 memset(&(pstats->mac_stx[0]), 0, 2669 sizeof(struct mac_stx)); 2670 } 2671 if (bp->state == BNX2X_STATE_OPEN) 2672 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2673 } 2674 2675 if (bp->link_vars.link_up && bp->link_vars.line_speed) 2676 bnx2x_set_local_cmng(bp); 2677 2678 __bnx2x_link_report(bp); 2679 2680 if (IS_MF(bp)) 2681 bnx2x_link_sync_notify(bp); 2682} 2683 2684void bnx2x__link_status_update(struct bnx2x *bp) 2685{ 2686 if (bp->state != BNX2X_STATE_OPEN) 2687 return; 2688 2689 /* read updated dcb configuration */ 2690 if (IS_PF(bp)) { 2691 bnx2x_dcbx_pmf_update(bp); 2692 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); 2693 if (bp->link_vars.link_up) 2694 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2695 else 2696 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2697 /* indicate link status */ 2698 bnx2x_link_report(bp); 2699 2700 } else { /* VF */ 2701 bp->port.supported[0] |= (SUPPORTED_10baseT_Half | 2702 SUPPORTED_10baseT_Full | 2703 SUPPORTED_100baseT_Half | 2704 SUPPORTED_100baseT_Full | 2705 SUPPORTED_1000baseT_Full | 2706 SUPPORTED_2500baseX_Full | 2707 SUPPORTED_10000baseT_Full | 2708 SUPPORTED_TP | 2709 SUPPORTED_FIBRE | 2710 SUPPORTED_Autoneg | 2711 SUPPORTED_Pause | 2712 SUPPORTED_Asym_Pause); 2713 bp->port.advertising[0] = bp->port.supported[0]; 2714 2715 bp->link_params.bp = bp; 2716 bp->link_params.port = BP_PORT(bp); 2717 bp->link_params.req_duplex[0] = DUPLEX_FULL; 2718 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; 2719 bp->link_params.req_line_speed[0] = SPEED_10000; 2720 bp->link_params.speed_cap_mask[0] = 0x7f0000; 2721 bp->link_params.switch_cfg = SWITCH_CFG_10G; 2722 bp->link_vars.mac_type = MAC_TYPE_BMAC; 2723 bp->link_vars.line_speed = SPEED_10000; 2724 bp->link_vars.link_status = 2725 (LINK_STATUS_LINK_UP | 2726 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 2727 bp->link_vars.link_up = 1; 2728 bp->link_vars.duplex = DUPLEX_FULL; 2729 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; 2730 __bnx2x_link_report(bp); 2731 2732 bnx2x_sample_bulletin(bp); 2733 2734 /* if bulletin board did not have an update for link status 2735 * __bnx2x_link_report will report current status 2736 * but it will NOT duplicate report in case of already reported 2737 * during sampling bulletin board. 
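		 *
		 * (the STATS_EVENT_LINK_UP below is still posted
		 * unconditionally for the VF)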
		 */
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}
}

static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
				  u16 vlan_val, u8 allowed_prio)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_update_params *f_update_params =
		&func_params.params.afex_update;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;

	/* no need to wait for RAMROD completion, so don't
	 * set RAMROD_COMP_WAIT flag
	 */

	f_update_params->vif_id = vifid;
	f_update_params->afex_default_vlan = vlan_val;
	f_update_params->allowed_priorities = allowed_prio;

	/* if ramrod cannot be sent, respond to MCP immediately */
	if (bnx2x_func_state_change(bp, &func_params) < 0)
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);

	return 0;
}

static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
					  u16 vif_index, u8 func_bit_map)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_viflists_params *update_params =
		&func_params.params.afex_viflists;
	int rc;
	u32 drv_msg_code;

	/* validate only LIST_SET and LIST_GET are received from switch */
	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
			  cmd_type);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;

	/* set parameters according to cmd_type */
	update_params->afex_vif_list_command = cmd_type;
	update_params->vif_list_index = vif_index;
	update_params->func_bit_map =
		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
	update_params->func_to_clear = 0;
	drv_msg_code =
		(cmd_type == VIF_LIST_RULE_GET) ?
2793 DRV_MSG_CODE_AFEX_LISTGET_ACK : 2794 DRV_MSG_CODE_AFEX_LISTSET_ACK; 2795 2796 /* if ramrod can not be sent, respond to MCP immediately for 2797 * SET and GET requests (other are not triggered from MCP) 2798 */ 2799 rc = bnx2x_func_state_change(bp, &func_params); 2800 if (rc < 0) 2801 bnx2x_fw_command(bp, drv_msg_code, 0); 2802 2803 return 0; 2804} 2805 2806static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) 2807{ 2808 struct afex_stats afex_stats; 2809 u32 func = BP_ABS_FUNC(bp); 2810 u32 mf_config; 2811 u16 vlan_val; 2812 u32 vlan_prio; 2813 u16 vif_id; 2814 u8 allowed_prio; 2815 u8 vlan_mode; 2816 u32 addr_to_write, vifid, addrs, stats_type, i; 2817 2818 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) { 2819 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2820 DP(BNX2X_MSG_MCP, 2821 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid); 2822 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); 2823 } 2824 2825 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) { 2826 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2827 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); 2828 DP(BNX2X_MSG_MCP, 2829 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n", 2830 vifid, addrs); 2831 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, 2832 addrs); 2833 } 2834 2835 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) { 2836 addr_to_write = SHMEM2_RD(bp, 2837 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); 2838 stats_type = SHMEM2_RD(bp, 2839 afex_param1_to_driver[BP_FW_MB_IDX(bp)]); 2840 2841 DP(BNX2X_MSG_MCP, 2842 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n", 2843 addr_to_write); 2844 2845 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); 2846 2847 /* write response to scratchpad, for MCP */ 2848 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++) 2849 REG_WR(bp, addr_to_write + i*sizeof(u32), 2850 *(((u32 *)(&afex_stats))+i)); 2851 2852 /* send ack message to MCP */ 2853 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); 2854 } 2855 2856 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) { 2857 mf_config = MF_CFG_RD(bp, func_mf_config[func].config); 2858 bp->mf_config[BP_VN(bp)] = mf_config; 2859 DP(BNX2X_MSG_MCP, 2860 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n", 2861 mf_config); 2862 2863 /* if VIF_SET is "enabled" */ 2864 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) { 2865 /* set rate limit directly to internal RAM */ 2866 struct cmng_init_input cmng_input; 2867 struct rate_shaping_vars_per_vn m_rs_vn; 2868 size_t size = sizeof(struct rate_shaping_vars_per_vn); 2869 u32 addr = BAR_XSTRORM_INTMEM + 2870 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); 2871 2872 bp->mf_config[BP_VN(bp)] = mf_config; 2873 2874 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); 2875 m_rs_vn.vn_counter.rate = 2876 cmng_input.vnic_max_rate[BP_VN(bp)]; 2877 m_rs_vn.vn_counter.quota = 2878 (m_rs_vn.vn_counter.rate * 2879 RS_PERIODIC_TIMEOUT_USEC) / 8; 2880 2881 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); 2882 2883 /* read relevant values from mf_cfg struct in shmem */ 2884 vif_id = 2885 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2886 FUNC_MF_CFG_E1HOV_TAG_MASK) >> 2887 FUNC_MF_CFG_E1HOV_TAG_SHIFT; 2888 vlan_val = 2889 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2890 FUNC_MF_CFG_AFEX_VLAN_MASK) >> 2891 FUNC_MF_CFG_AFEX_VLAN_SHIFT; 2892 vlan_prio = (mf_config & 2893 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 2894 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT; 2895 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT); 2896 vlan_mode = 
2897 (MF_CFG_RD(bp, 2898 func_mf_config[func].afex_config) & 2899 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 2900 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT; 2901 allowed_prio = 2902 (MF_CFG_RD(bp, 2903 func_mf_config[func].afex_config) & 2904 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 2905 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT; 2906 2907 /* send ramrod to FW, return in case of failure */ 2908 if (bnx2x_afex_func_update(bp, vif_id, vlan_val, 2909 allowed_prio)) 2910 return; 2911 2912 bp->afex_def_vlan_tag = vlan_val; 2913 bp->afex_vlan_mode = vlan_mode; 2914 } else { 2915 /* notify link down because BP->flags is disabled */ 2916 bnx2x_link_report(bp); 2917 2918 /* send INVALID VIF ramrod to FW */ 2919 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0); 2920 2921 /* Reset the default afex VLAN */ 2922 bp->afex_def_vlan_tag = -1; 2923 } 2924 } 2925} 2926 2927static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) 2928{ 2929 struct bnx2x_func_switch_update_params *switch_update_params; 2930 struct bnx2x_func_state_params func_params; 2931 2932 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params)); 2933 switch_update_params = &func_params.params.switch_update; 2934 func_params.f_obj = &bp->func_obj; 2935 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 2936 2937 /* Prepare parameters for function state transitions */ 2938 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 2939 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 2940 2941 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) { 2942 int func = BP_ABS_FUNC(bp); 2943 u32 val; 2944 2945 /* Re-learn the S-tag from shmem */ 2946 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 2947 FUNC_MF_CFG_E1HOV_TAG_MASK; 2948 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 2949 bp->mf_ov = val; 2950 } else { 2951 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n"); 2952 goto fail; 2953 } 2954 2955 /* Configure new S-tag in LLH */ 2956 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8, 2957 bp->mf_ov); 2958 2959 /* Send Ramrod to update FW of change */ 2960 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, 2961 &switch_update_params->changes); 2962 switch_update_params->vlan = bp->mf_ov; 2963 2964 if (bnx2x_func_state_change(bp, &func_params) < 0) { 2965 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", 2966 bp->mf_ov); 2967 goto fail; 2968 } else { 2969 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", 2970 bp->mf_ov); 2971 } 2972 } else { 2973 goto fail; 2974 } 2975 2976 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); 2977 return; 2978fail: 2979 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); 2980} 2981 2982static void bnx2x_pmf_update(struct bnx2x *bp) 2983{ 2984 int port = BP_PORT(bp); 2985 u32 val; 2986 2987 bp->port.pmf = 1; 2988 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); 2989 2990 /* 2991 * We need the mb() to ensure the ordering between the writing to 2992 * bp->port.pmf here and reading it from the bnx2x_periodic_task(). 
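	 * (hence the smp_mb() below, issued before the periodic task is
	 * queued)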
2993 */ 2994 smp_mb(); 2995 2996 /* queue a periodic task */ 2997 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2998 2999 bnx2x_dcbx_pmf_update(bp); 3000 3001 /* enable nig attention */ 3002 val = (0xff0f | (1 << (BP_VN(bp) + 4))); 3003 if (bp->common.int_block == INT_BLOCK_HC) { 3004 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 3005 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 3006 } else if (!CHIP_IS_E1x(bp)) { 3007 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); 3008 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); 3009 } 3010 3011 bnx2x_stats_handle(bp, STATS_EVENT_PMF); 3012} 3013 3014/* end of Link */ 3015 3016/* slow path */ 3017 3018/* 3019 * General service functions 3020 */ 3021 3022/* send the MCP a request, block until there is a reply */ 3023u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) 3024{ 3025 int mb_idx = BP_FW_MB_IDX(bp); 3026 u32 seq; 3027 u32 rc = 0; 3028 u32 cnt = 1; 3029 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 3030 3031 mutex_lock(&bp->fw_mb_mutex); 3032 seq = ++bp->fw_seq; 3033 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); 3034 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); 3035 3036 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", 3037 (command | seq), param); 3038 3039 do { 3040 /* let the FW do it's magic ... */ 3041 msleep(delay); 3042 3043 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header); 3044 3045 /* Give the FW up to 5 second (500*10ms) */ 3046 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 3047 3048 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", 3049 cnt*delay, rc, seq); 3050 3051 /* is this a reply to our command? */ 3052 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) 3053 rc &= FW_MSG_CODE_MASK; 3054 else { 3055 /* FW BUG! */ 3056 BNX2X_ERR("FW failed to respond!\n"); 3057 bnx2x_fw_dump(bp); 3058 rc = 0; 3059 } 3060 mutex_unlock(&bp->fw_mb_mutex); 3061 3062 return rc; 3063} 3064 3065static void storm_memset_func_cfg(struct bnx2x *bp, 3066 struct tstorm_eth_function_common_config *tcfg, 3067 u16 abs_fid) 3068{ 3069 size_t size = sizeof(struct tstorm_eth_function_common_config); 3070 3071 u32 addr = BAR_TSTRORM_INTMEM + 3072 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); 3073 3074 __storm_memset_struct(bp, addr, size, (u32 *)tcfg); 3075} 3076 3077void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) 3078{ 3079 if (CHIP_IS_E1x(bp)) { 3080 struct tstorm_eth_function_common_config tcfg = {0}; 3081 3082 storm_memset_func_cfg(bp, &tcfg, p->func_id); 3083 } 3084 3085 /* Enable the function in the FW */ 3086 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); 3087 storm_memset_func_en(bp, p->func_id, 1); 3088 3089 /* spq */ 3090 if (p->spq_active) { 3091 storm_memset_spq_addr(bp, p->spq_map, p->func_id); 3092 REG_WR(bp, XSEM_REG_FAST_MEMORY + 3093 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); 3094 } 3095} 3096 3097/** 3098 * bnx2x_get_common_flags - Return common flags 3099 * 3100 * @bp: device handle 3101 * @fp: queue handle 3102 * @zero_stats: TRUE if statistics zeroing is needed 3103 * 3104 * Return the flags that are common for the Tx-only and not normal connections. 
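 *
 * Return: BNX2X_Q_FLG_* bitmap shared by regular and Tx-only connections.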
3105 */ 3106static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, 3107 struct bnx2x_fastpath *fp, 3108 bool zero_stats) 3109{ 3110 unsigned long flags = 0; 3111 3112 /* PF driver will always initialize the Queue to an ACTIVE state */ 3113 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); 3114 3115 /* tx only connections collect statistics (on the same index as the 3116 * parent connection). The statistics are zeroed when the parent 3117 * connection is initialized. 3118 */ 3119 3120 __set_bit(BNX2X_Q_FLG_STATS, &flags); 3121 if (zero_stats) 3122 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); 3123 3124 if (bp->flags & TX_SWITCHING) 3125 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags); 3126 3127 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags); 3128 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags); 3129 3130#ifdef BNX2X_STOP_ON_ERROR 3131 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); 3132#endif 3133 3134 return flags; 3135} 3136 3137static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, 3138 struct bnx2x_fastpath *fp, 3139 bool leading) 3140{ 3141 unsigned long flags = 0; 3142 3143 /* calculate other queue flags */ 3144 if (IS_MF_SD(bp)) 3145 __set_bit(BNX2X_Q_FLG_OV, &flags); 3146 3147 if (IS_FCOE_FP(fp)) { 3148 __set_bit(BNX2X_Q_FLG_FCOE, &flags); 3149 /* For FCoE - force usage of default priority (for afex) */ 3150 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); 3151 } 3152 3153 if (fp->mode != TPA_MODE_DISABLED) { 3154 __set_bit(BNX2X_Q_FLG_TPA, &flags); 3155 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); 3156 if (fp->mode == TPA_MODE_GRO) 3157 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags); 3158 } 3159 3160 if (leading) { 3161 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); 3162 __set_bit(BNX2X_Q_FLG_MCAST, &flags); 3163 } 3164 3165 /* Always set HW VLAN stripping */ 3166 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 3167 3168 /* configure silent vlan removal */ 3169 if (IS_MF_AFEX(bp)) 3170 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); 3171 3172 return flags | bnx2x_get_common_flags(bp, fp, true); 3173} 3174 3175static void bnx2x_pf_q_prep_general(struct bnx2x *bp, 3176 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, 3177 u8 cos) 3178{ 3179 gen_init->stat_id = bnx2x_stats_id(fp); 3180 gen_init->spcl_id = fp->cl_id; 3181 3182 /* Always use mini-jumbo MTU for FCoE L2 ring */ 3183 if (IS_FCOE_FP(fp)) 3184 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 3185 else 3186 gen_init->mtu = bp->dev->mtu; 3187 3188 gen_init->cos = cos; 3189 3190 gen_init->fp_hsi = ETH_FP_HSI_VERSION; 3191} 3192 3193static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, 3194 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, 3195 struct bnx2x_rxq_setup_params *rxq_init) 3196{ 3197 u8 max_sge = 0; 3198 u16 sge_sz = 0; 3199 u16 tpa_agg_size = 0; 3200 3201 if (fp->mode != TPA_MODE_DISABLED) { 3202 pause->sge_th_lo = SGE_TH_LO(bp); 3203 pause->sge_th_hi = SGE_TH_HI(bp); 3204 3205 /* validate SGE ring has enough to cross high threshold */ 3206 WARN_ON(bp->dropless_fc && 3207 pause->sge_th_hi + FW_PREFETCH_CNT > 3208 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); 3209 3210 tpa_agg_size = TPA_AGG_SIZE; 3211 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> 3212 SGE_PAGE_SHIFT; 3213 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 3214 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; 3215 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff); 3216 } 3217 3218 /* pause - not for e1 */ 3219 if (!CHIP_IS_E1(bp)) { 3220 pause->bd_th_lo = BD_TH_LO(bp); 3221 pause->bd_th_hi = BD_TH_HI(bp); 3222 3223 pause->rcq_th_lo = RCQ_TH_LO(bp); 3224 pause->rcq_th_hi = RCQ_TH_HI(bp); 3225 /* 3226 * 
validate that rings have enough entries to cross 3227 * high thresholds 3228 */ 3229 WARN_ON(bp->dropless_fc && 3230 pause->bd_th_hi + FW_PREFETCH_CNT > 3231 bp->rx_ring_size); 3232 WARN_ON(bp->dropless_fc && 3233 pause->rcq_th_hi + FW_PREFETCH_CNT > 3234 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); 3235 3236 pause->pri_map = 1; 3237 } 3238 3239 /* rxq setup */ 3240 rxq_init->dscr_map = fp->rx_desc_mapping; 3241 rxq_init->sge_map = fp->rx_sge_mapping; 3242 rxq_init->rcq_map = fp->rx_comp_mapping; 3243 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; 3244 3245 /* This should be a maximum number of data bytes that may be 3246 * placed on the BD (not including paddings). 3247 */ 3248 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - 3249 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; 3250 3251 rxq_init->cl_qzone_id = fp->cl_qzone_id; 3252 rxq_init->tpa_agg_sz = tpa_agg_size; 3253 rxq_init->sge_buf_sz = sge_sz; 3254 rxq_init->max_sges_pkt = max_sge; 3255 rxq_init->rss_engine_id = BP_FUNC(bp); 3256 rxq_init->mcast_engine_id = BP_FUNC(bp); 3257 3258 /* Maximum number or simultaneous TPA aggregation for this Queue. 3259 * 3260 * For PF Clients it should be the maximum available number. 3261 * VF driver(s) may want to define it to a smaller value. 3262 */ 3263 rxq_init->max_tpa_queues = MAX_AGG_QS(bp); 3264 3265 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 3266 rxq_init->fw_sb_id = fp->fw_sb_id; 3267 3268 if (IS_FCOE_FP(fp)) 3269 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; 3270 else 3271 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 3272 /* configure silent vlan removal 3273 * if multi function mode is afex, then mask default vlan 3274 */ 3275 if (IS_MF_AFEX(bp)) { 3276 rxq_init->silent_removal_value = bp->afex_def_vlan_tag; 3277 rxq_init->silent_removal_mask = VLAN_VID_MASK; 3278 } 3279} 3280 3281static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, 3282 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, 3283 u8 cos) 3284{ 3285 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; 3286 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 3287 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 3288 txq_init->fw_sb_id = fp->fw_sb_id; 3289 3290 /* 3291 * set the tss leading client id for TX classification == 3292 * leading RSS client id 3293 */ 3294 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); 3295 3296 if (IS_FCOE_FP(fp)) { 3297 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; 3298 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; 3299 } 3300} 3301 3302static void bnx2x_pf_init(struct bnx2x *bp) 3303{ 3304 struct bnx2x_func_init_params func_init = {0}; 3305 struct event_ring_data eq_data = { {0} }; 3306 3307 if (!CHIP_IS_E1x(bp)) { 3308 /* reset IGU PF statistics: MSIX + ATTN */ 3309 /* PF */ 3310 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 3311 BNX2X_IGU_STAS_MSG_VF_CNT*4 + 3312 (CHIP_MODE_IS_4_PORT(bp) ? 3313 BP_FUNC(bp) : BP_VN(bp))*4, 0); 3314 /* ATTN */ 3315 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 3316 BNX2X_IGU_STAS_MSG_VF_CNT*4 + 3317 BNX2X_IGU_STAS_MSG_PF_CNT*4 + 3318 (CHIP_MODE_IS_4_PORT(bp) ? 
3319 BP_FUNC(bp) : BP_VN(bp))*4, 0); 3320 } 3321 3322 func_init.spq_active = true; 3323 func_init.pf_id = BP_FUNC(bp); 3324 func_init.func_id = BP_FUNC(bp); 3325 func_init.spq_map = bp->spq_mapping; 3326 func_init.spq_prod = bp->spq_prod_idx; 3327 3328 bnx2x_func_init(bp, &func_init); 3329 3330 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); 3331 3332 /* 3333 * Congestion management values depend on the link rate 3334 * There is no active link so initial link rate is set to 10 Gbps. 3335 * When the link comes up The congestion management values are 3336 * re-calculated according to the actual link rate. 3337 */ 3338 bp->link_vars.line_speed = SPEED_10000; 3339 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); 3340 3341 /* Only the PMF sets the HW */ 3342 if (bp->port.pmf) 3343 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3344 3345 /* init Event Queue - PCI bus guarantees correct endianity*/ 3346 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); 3347 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); 3348 eq_data.producer = bp->eq_prod; 3349 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 3350 eq_data.sb_id = DEF_SB_ID; 3351 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); 3352} 3353 3354static void bnx2x_e1h_disable(struct bnx2x *bp) 3355{ 3356 int port = BP_PORT(bp); 3357 3358 bnx2x_tx_disable(bp); 3359 3360 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 3361} 3362 3363static void bnx2x_e1h_enable(struct bnx2x *bp) 3364{ 3365 int port = BP_PORT(bp); 3366 3367 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) 3368 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 3369 3370 /* Tx queue should be only re-enabled */ 3371 netif_tx_wake_all_queues(bp->dev); 3372 3373 /* 3374 * Should not call netif_carrier_on since it will be called if the link 3375 * is up when checking for link state 3376 */ 3377} 3378 3379#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 3380 3381static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) 3382{ 3383 struct eth_stats_info *ether_stat = 3384 &bp->slowpath->drv_info_to_mcp.ether_stat; 3385 struct bnx2x_vlan_mac_obj *mac_obj = 3386 &bp->sp_objs->mac_obj; 3387 int i; 3388 3389 strlcpy(ether_stat->version, DRV_MODULE_VERSION, 3390 ETH_STAT_INFO_VERSION_LEN); 3391 3392 /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the 3393 * mac_local field in ether_stat struct. The base address is offset by 2 3394 * bytes to account for the field being 8 bytes but a mac address is 3395 * only 6 bytes. Likewise, the stride for the get_n_elements function is 3396 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes 3397 * allocated by the ether_stat struct, so the macs will land in their 3398 * proper positions. 3399 */ 3400 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++) 3401 memset(ether_stat->mac_local + i, 0, 3402 sizeof(ether_stat->mac_local[0])); 3403 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, 3404 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3405 ether_stat->mac_local + MAC_PAD, MAC_PAD, 3406 ETH_ALEN); 3407 ether_stat->mtu_size = bp->dev->mtu; 3408 if (bp->dev->features & NETIF_F_RXCSUM) 3409 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3410 if (bp->dev->features & NETIF_F_TSO) 3411 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 3412 ether_stat->feature_flags |= bp->common.boot_mode; 3413 3414 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 
1 : 0; 3415 3416 ether_stat->txq_size = bp->tx_ring_size; 3417 ether_stat->rxq_size = bp->rx_ring_size; 3418 3419#ifdef CONFIG_BNX2X_SRIOV 3420 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; 3421#endif 3422} 3423 3424static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) 3425{ 3426 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3427 struct fcoe_stats_info *fcoe_stat = 3428 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3429 3430 if (!CNIC_LOADED(bp)) 3431 return; 3432 3433 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); 3434 3435 fcoe_stat->qos_priority = 3436 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3437 3438 /* insert FCoE stats from ramrod response */ 3439 if (!NO_FCOE(bp)) { 3440 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 3441 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3442 tstorm_queue_statistics; 3443 3444 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 3445 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3446 xstorm_queue_statistics; 3447 3448 struct fcoe_statistics_params *fw_fcoe_stat = 3449 &bp->fw_stats_data->fcoe; 3450 3451 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, 3452 fcoe_stat->rx_bytes_lo, 3453 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); 3454 3455 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3456 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, 3457 fcoe_stat->rx_bytes_lo, 3458 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); 3459 3460 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3461 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, 3462 fcoe_stat->rx_bytes_lo, 3463 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); 3464 3465 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3466 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, 3467 fcoe_stat->rx_bytes_lo, 3468 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); 3469 3470 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3471 fcoe_stat->rx_frames_lo, 3472 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); 3473 3474 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3475 fcoe_stat->rx_frames_lo, 3476 fcoe_q_tstorm_stats->rcv_ucast_pkts); 3477 3478 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3479 fcoe_stat->rx_frames_lo, 3480 fcoe_q_tstorm_stats->rcv_bcast_pkts); 3481 3482 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3483 fcoe_stat->rx_frames_lo, 3484 fcoe_q_tstorm_stats->rcv_mcast_pkts); 3485 3486 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, 3487 fcoe_stat->tx_bytes_lo, 3488 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); 3489 3490 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3491 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, 3492 fcoe_stat->tx_bytes_lo, 3493 fcoe_q_xstorm_stats->ucast_bytes_sent.lo); 3494 3495 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3496 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, 3497 fcoe_stat->tx_bytes_lo, 3498 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); 3499 3500 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3501 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, 3502 fcoe_stat->tx_bytes_lo, 3503 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); 3504 3505 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3506 fcoe_stat->tx_frames_lo, 3507 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); 3508 3509 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3510 fcoe_stat->tx_frames_lo, 3511 fcoe_q_xstorm_stats->ucast_pkts_sent); 3512 3513 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3514 fcoe_stat->tx_frames_lo, 3515 fcoe_q_xstorm_stats->bcast_pkts_sent); 3516 3517 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3518 fcoe_stat->tx_frames_lo, 3519 fcoe_q_xstorm_stats->mcast_pkts_sent); 3520 } 3521 3522 /* ask L5 driver to add data to the struct */ 3523 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); 3524} 3525 3526static void 
bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) 3527{ 3528 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3529 struct iscsi_stats_info *iscsi_stat = 3530 &bp->slowpath->drv_info_to_mcp.iscsi_stat; 3531 3532 if (!CNIC_LOADED(bp)) 3533 return; 3534 3535 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, 3536 ETH_ALEN); 3537 3538 iscsi_stat->qos_priority = 3539 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; 3540 3541 /* ask L5 driver to add data to the struct */ 3542 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); 3543} 3544 3545/* called due to MCP event (on pmf): 3546 * reread new bandwidth configuration 3547 * configure FW 3548 * notify others function about the change 3549 */ 3550static void bnx2x_config_mf_bw(struct bnx2x *bp) 3551{ 3552 /* Workaround for MFW bug. 3553 * MFW is not supposed to generate BW attention in 3554 * single function mode. 3555 */ 3556 if (!IS_MF(bp)) { 3557 DP(BNX2X_MSG_MCP, 3558 "Ignoring MF BW config in single function mode\n"); 3559 return; 3560 } 3561 3562 if (bp->link_vars.link_up) { 3563 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); 3564 bnx2x_link_sync_notify(bp); 3565 } 3566 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3567} 3568 3569static void bnx2x_set_mf_bw(struct bnx2x *bp) 3570{ 3571 bnx2x_config_mf_bw(bp); 3572 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3573} 3574 3575static void bnx2x_handle_eee_event(struct bnx2x *bp) 3576{ 3577 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); 3578 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3579} 3580 3581#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20) 3582#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25) 3583 3584static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3585{ 3586 enum drv_info_opcode op_code; 3587 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); 3588 bool release = false; 3589 int wait; 3590 3591 /* if drv_info version supported by MFW doesn't match - send NACK */ 3592 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3593 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3594 return; 3595 } 3596 3597 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3598 DRV_INFO_CONTROL_OP_CODE_SHIFT; 3599 3600 /* Must prevent other flows from accessing drv_info_to_mcp */ 3601 mutex_lock(&bp->drv_info_mutex); 3602 3603 memset(&bp->slowpath->drv_info_to_mcp, 0, 3604 sizeof(union drv_info_to_mcp)); 3605 3606 switch (op_code) { 3607 case ETH_STATS_OPCODE: 3608 bnx2x_drv_info_ether_stat(bp); 3609 break; 3610 case FCOE_STATS_OPCODE: 3611 bnx2x_drv_info_fcoe_stat(bp); 3612 break; 3613 case ISCSI_STATS_OPCODE: 3614 bnx2x_drv_info_iscsi_stat(bp); 3615 break; 3616 default: 3617 /* if op code isn't supported - send NACK */ 3618 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3619 goto out; 3620 } 3621 3622 /* if we got drv_info attn from MFW then these fields are defined in 3623 * shmem2 for sure 3624 */ 3625 SHMEM2_WR(bp, drv_info_host_addr_lo, 3626 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3627 SHMEM2_WR(bp, drv_info_host_addr_hi, 3628 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3629 3630 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3631 3632 /* Since possible management wants both this and get_driver_version 3633 * need to wait until management notifies us it finished utilizing 3634 * the buffer. 
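	 * The loop below polls mfw_drv_indication up to
	 * BNX2X_UPDATE_DRV_INFO_IND_COUNT times, sleeping
	 * BNX2X_UPDATE_DRV_INFO_IND_LENGTH ms between reads.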
3635 */ 3636 if (!SHMEM2_HAS(bp, mfw_drv_indication)) { 3637 DP(BNX2X_MSG_MCP, "Management does not support indication\n"); 3638 } else if (!bp->drv_info_mng_owner) { 3639 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); 3640 3641 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) { 3642 u32 indication = SHMEM2_RD(bp, mfw_drv_indication); 3643 3644 /* Management is done; need to clear indication */ 3645 if (indication & bit) { 3646 SHMEM2_WR(bp, mfw_drv_indication, 3647 indication & ~bit); 3648 release = true; 3649 break; 3650 } 3651 3652 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH); 3653 } 3654 } 3655 if (!release) { 3656 DP(BNX2X_MSG_MCP, "Management did not release indication\n"); 3657 bp->drv_info_mng_owner = true; 3658 } 3659 3660out: 3661 mutex_unlock(&bp->drv_info_mutex); 3662} 3663 3664static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format) 3665{ 3666 u8 vals[4]; 3667 int i = 0; 3668 3669 if (bnx2x_format) { 3670 i = sscanf(version, "1.%c%hhd.%hhd.%hhd", 3671 &vals[0], &vals[1], &vals[2], &vals[3]); 3672 if (i > 0) 3673 vals[0] -= '0'; 3674 } else { 3675 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd", 3676 &vals[0], &vals[1], &vals[2], &vals[3]); 3677 } 3678 3679 while (i < 4) 3680 vals[i++] = 0; 3681 3682 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3]; 3683} 3684 3685void bnx2x_update_mng_version(struct bnx2x *bp) 3686{ 3687 u32 iscsiver = DRV_VER_NOT_LOADED; 3688 u32 fcoever = DRV_VER_NOT_LOADED; 3689 u32 ethver = DRV_VER_NOT_LOADED; 3690 int idx = BP_FW_MB_IDX(bp); 3691 u8 *version; 3692 3693 if (!SHMEM2_HAS(bp, func_os_drv_ver)) 3694 return; 3695 3696 mutex_lock(&bp->drv_info_mutex); 3697 /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */ 3698 if (bp->drv_info_mng_owner) 3699 goto out; 3700 3701 if (bp->state != BNX2X_STATE_OPEN) 3702 goto out; 3703 3704 /* Parse ethernet driver version */ 3705 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); 3706 if (!CNIC_LOADED(bp)) 3707 goto out; 3708 3709 /* Try getting storage driver version via cnic */ 3710 memset(&bp->slowpath->drv_info_to_mcp, 0, 3711 sizeof(union drv_info_to_mcp)); 3712 bnx2x_drv_info_iscsi_stat(bp); 3713 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; 3714 iscsiver = bnx2x_update_mng_version_utility(version, false); 3715 3716 memset(&bp->slowpath->drv_info_to_mcp, 0, 3717 sizeof(union drv_info_to_mcp)); 3718 bnx2x_drv_info_fcoe_stat(bp); 3719 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; 3720 fcoever = bnx2x_update_mng_version_utility(version, false); 3721 3722out: 3723 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); 3724 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); 3725 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); 3726 3727 mutex_unlock(&bp->drv_info_mutex); 3728 3729 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", 3730 ethver, iscsiver, fcoever); 3731} 3732 3733void bnx2x_update_mfw_dump(struct bnx2x *bp) 3734{ 3735 u32 drv_ver; 3736 u32 valid_dump; 3737 3738 if (!SHMEM2_HAS(bp, drv_info)) 3739 return; 3740 3741 /* Update Driver load time, possibly broken in y2038 */ 3742 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds()); 3743 3744 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); 3745 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); 3746 3747 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM)); 3748 3749 /* Check & notify On-Chip dump. 
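	 * drv_info.valid_dump flags (FIRST/SECOND_DUMP_VALID) indicate which
	 * MFW dump partitions currently hold valid data.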
*/ 3750 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump); 3751 3752 if (valid_dump & FIRST_DUMP_VALID) 3753 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n"); 3754 3755 if (valid_dump & SECOND_DUMP_VALID) 3756 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n"); 3757} 3758 3759static void bnx2x_oem_event(struct bnx2x *bp, u32 event) 3760{ 3761 u32 cmd_ok, cmd_fail; 3762 3763 /* sanity */ 3764 if (event & DRV_STATUS_DCC_EVENT_MASK && 3765 event & DRV_STATUS_OEM_EVENT_MASK) { 3766 BNX2X_ERR("Received simultaneous events %08x\n", event); 3767 return; 3768 } 3769 3770 if (event & DRV_STATUS_DCC_EVENT_MASK) { 3771 cmd_fail = DRV_MSG_CODE_DCC_FAILURE; 3772 cmd_ok = DRV_MSG_CODE_DCC_OK; 3773 } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ { 3774 cmd_fail = DRV_MSG_CODE_OEM_FAILURE; 3775 cmd_ok = DRV_MSG_CODE_OEM_OK; 3776 } 3777 3778 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event); 3779 3780 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF | 3781 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) { 3782 /* This is the only place besides the function initialization 3783 * where the bp->flags can change so it is done without any 3784 * locks 3785 */ 3786 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 3787 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); 3788 bp->flags |= MF_FUNC_DIS; 3789 3790 bnx2x_e1h_disable(bp); 3791 } else { 3792 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); 3793 bp->flags &= ~MF_FUNC_DIS; 3794 3795 bnx2x_e1h_enable(bp); 3796 } 3797 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF | 3798 DRV_STATUS_OEM_DISABLE_ENABLE_PF); 3799 } 3800 3801 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | 3802 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) { 3803 bnx2x_config_mf_bw(bp); 3804 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | 3805 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION); 3806 } 3807 3808 /* Report results to MCP */ 3809 if (event) 3810 bnx2x_fw_command(bp, cmd_fail, 0); 3811 else 3812 bnx2x_fw_command(bp, cmd_ok, 0); 3813} 3814 3815/* must be called under the spq lock */ 3816static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3817{ 3818 struct eth_spe *next_spe = bp->spq_prod_bd; 3819 3820 if (bp->spq_prod_bd == bp->spq_last_bd) { 3821 bp->spq_prod_bd = bp->spq; 3822 bp->spq_prod_idx = 0; 3823 DP(BNX2X_MSG_SP, "end of spq\n"); 3824 } else { 3825 bp->spq_prod_bd++; 3826 bp->spq_prod_idx++; 3827 } 3828 return next_spe; 3829} 3830 3831/* must be called under the spq lock */ 3832static void bnx2x_sp_prod_update(struct bnx2x *bp) 3833{ 3834 int func = BP_FUNC(bp); 3835 3836 /* 3837 * Make sure that BD data is updated before writing the producer: 3838 * BD data is written to the memory, the producer is read from the 3839 * memory, thus we need a full memory barrier to ensure the ordering. 
3840 */ 3841 mb(); 3842 3843 REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), 3844 bp->spq_prod_idx); 3845} 3846 3847/** 3848 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ 3849 * 3850 * @cmd: command to check 3851 * @cmd_type: command type 3852 */ 3853static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) 3854{ 3855 if ((cmd_type == NONE_CONNECTION_TYPE) || 3856 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 3857 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 3858 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 3859 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 3860 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 3861 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) 3862 return true; 3863 else 3864 return false; 3865} 3866 3867/** 3868 * bnx2x_sp_post - place a single command on an SP ring 3869 * 3870 * @bp: driver handle 3871 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) 3872 * @cid: SW CID the command is related to 3873 * @data_hi: command private data address (high 32 bits) 3874 * @data_lo: command private data address (low 32 bits) 3875 * @cmd_type: command type (e.g. NONE, ETH) 3876 * 3877 * SP data is handled as if it's always an address pair, thus data fields are 3878 * not swapped to little endian in upper functions. Instead this function swaps 3879 * data as if it's two u32 fields. 3880 */ 3881int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 3882 u32 data_hi, u32 data_lo, int cmd_type) 3883{ 3884 struct eth_spe *spe; 3885 u16 type; 3886 bool common = bnx2x_is_contextless_ramrod(command, cmd_type); 3887 3888#ifdef BNX2X_STOP_ON_ERROR 3889 if (unlikely(bp->panic)) { 3890 BNX2X_ERR("Can't post SP when there is panic\n"); 3891 return -EIO; 3892 } 3893#endif 3894 3895 spin_lock_bh(&bp->spq_lock); 3896 3897 if (common) { 3898 if (!atomic_read(&bp->eq_spq_left)) { 3899 BNX2X_ERR("BUG! EQ ring full!\n"); 3900 spin_unlock_bh(&bp->spq_lock); 3901 bnx2x_panic(); 3902 return -EBUSY; 3903 } 3904 } else if (!atomic_read(&bp->cq_spq_left)) { 3905 BNX2X_ERR("BUG! SPQ ring full!\n"); 3906 spin_unlock_bh(&bp->spq_lock); 3907 bnx2x_panic(); 3908 return -EBUSY; 3909 } 3910 3911 spe = bnx2x_sp_get_next(bp); 3912 3913 /* CID needs port number to be encoded int it */ 3914 spe->hdr.conn_and_cmd_data = 3915 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | 3916 HW_CID(bp, cid)); 3917 3918 /* In some cases, type may already contain the func-id 3919 * mainly in SRIOV related use cases, so we add it here only 3920 * if it's not already set. 3921 */ 3922 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) { 3923 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & 3924 SPE_HDR_CONN_TYPE; 3925 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & 3926 SPE_HDR_FUNCTION_ID); 3927 } else { 3928 type = cmd_type; 3929 } 3930 3931 spe->hdr.type = cpu_to_le16(type); 3932 3933 spe->data.update_data_addr.hi = cpu_to_le32(data_hi); 3934 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); 3935 3936 /* 3937 * It's ok if the actual decrement is issued towards the memory 3938 * somewhere between the spin_lock and spin_unlock. Thus no 3939 * more explicit memory barrier is needed. 3940 */ 3941 if (common) 3942 atomic_dec(&bp->eq_spq_left); 3943 else 3944 atomic_dec(&bp->cq_spq_left); 3945 3946 DP(BNX2X_MSG_SP, 3947 "SPQE[%x] (%x:%x) (cmd, common?) 
(%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", 3948 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 3949 (u32)(U64_LO(bp->spq_mapping) + 3950 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, 3951 HW_CID(bp, cid), data_hi, data_lo, type, 3952 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); 3953 3954 bnx2x_sp_prod_update(bp); 3955 spin_unlock_bh(&bp->spq_lock); 3956 return 0; 3957} 3958 3959/* acquire split MCP access lock register */ 3960static int bnx2x_acquire_alr(struct bnx2x *bp) 3961{ 3962 u32 j, val; 3963 int rc = 0; 3964 3965 might_sleep(); 3966 for (j = 0; j < 1000; j++) { 3967 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); 3968 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); 3969 if (val & MCPR_ACCESS_LOCK_LOCK) 3970 break; 3971 3972 usleep_range(5000, 10000); 3973 } 3974 if (!(val & MCPR_ACCESS_LOCK_LOCK)) { 3975 BNX2X_ERR("Cannot acquire MCP access lock register\n"); 3976 rc = -EBUSY; 3977 } 3978 3979 return rc; 3980} 3981 3982/* release split MCP access lock register */ 3983static void bnx2x_release_alr(struct bnx2x *bp) 3984{ 3985 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); 3986} 3987 3988#define BNX2X_DEF_SB_ATT_IDX 0x0001 3989#define BNX2X_DEF_SB_IDX 0x0002 3990 3991static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 3992{ 3993 struct host_sp_status_block *def_sb = bp->def_status_blk; 3994 u16 rc = 0; 3995 3996 barrier(); /* status block is written to by the chip */ 3997 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 3998 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 3999 rc |= BNX2X_DEF_SB_ATT_IDX; 4000 } 4001 4002 if (bp->def_idx != def_sb->sp_sb.running_index) { 4003 bp->def_idx = def_sb->sp_sb.running_index; 4004 rc |= BNX2X_DEF_SB_IDX; 4005 } 4006 4007 /* Do not reorder: indices reading should complete before handling */ 4008 barrier(); 4009 return rc; 4010} 4011 4012/* 4013 * slow path service functions 4014 */ 4015 4016static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 4017{ 4018 int port = BP_PORT(bp); 4019 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4020 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4021 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 4022 NIG_REG_MASK_INTERRUPT_PORT0; 4023 u32 aeu_mask; 4024 u32 nig_mask = 0; 4025 u32 reg_addr; 4026 4027 if (bp->attn_state & asserted) 4028 BNX2X_ERR("IGU ERROR\n"); 4029 4030 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4031 aeu_mask = REG_RD(bp, aeu_addr); 4032 4033 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", 4034 aeu_mask, asserted); 4035 aeu_mask &= ~(asserted & 0x3ff); 4036 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 4037 4038 REG_WR(bp, aeu_addr, aeu_mask); 4039 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4040 4041 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 4042 bp->attn_state |= asserted; 4043 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 4044 4045 if (asserted & ATTN_HARD_WIRED_MASK) { 4046 if (asserted & ATTN_NIG_FOR_FUNC) { 4047 4048 bnx2x_acquire_phy_lock(bp); 4049 4050 /* save nig interrupt mask */ 4051 nig_mask = REG_RD(bp, nig_int_mask_addr); 4052 4053 /* If nig_mask is not set, no need to call the update 4054 * function. 4055 */ 4056 if (nig_mask) { 4057 REG_WR(bp, nig_int_mask_addr, 0); 4058 4059 bnx2x_link_attn(bp); 4060 } 4061 4062 /* handle unicore attn? 
*/ 4063 } 4064 if (asserted & ATTN_SW_TIMER_4_FUNC) 4065 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); 4066 4067 if (asserted & GPIO_2_FUNC) 4068 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); 4069 4070 if (asserted & GPIO_3_FUNC) 4071 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); 4072 4073 if (asserted & GPIO_4_FUNC) 4074 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); 4075 4076 if (port == 0) { 4077 if (asserted & ATTN_GENERAL_ATTN_1) { 4078 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); 4079 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 4080 } 4081 if (asserted & ATTN_GENERAL_ATTN_2) { 4082 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); 4083 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 4084 } 4085 if (asserted & ATTN_GENERAL_ATTN_3) { 4086 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); 4087 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 4088 } 4089 } else { 4090 if (asserted & ATTN_GENERAL_ATTN_4) { 4091 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); 4092 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 4093 } 4094 if (asserted & ATTN_GENERAL_ATTN_5) { 4095 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); 4096 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 4097 } 4098 if (asserted & ATTN_GENERAL_ATTN_6) { 4099 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); 4100 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 4101 } 4102 } 4103 4104 } /* if hardwired */ 4105 4106 if (bp->common.int_block == INT_BLOCK_HC) 4107 reg_addr = (HC_REG_COMMAND_REG + port*32 + 4108 COMMAND_REG_ATTN_BITS_SET); 4109 else 4110 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 4111 4112 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, 4113 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 4114 REG_WR(bp, reg_addr, asserted); 4115 4116 /* now set back the mask */ 4117 if (asserted & ATTN_NIG_FOR_FUNC) { 4118 /* Verify that IGU ack through BAR was written before restoring 4119 * NIG mask. This loop should exit after 2-3 iterations max. 4120 */ 4121 if (bp->common.int_block != INT_BLOCK_HC) { 4122 u32 cnt = 0, igu_acked; 4123 do { 4124 igu_acked = REG_RD(bp, 4125 IGU_REG_ATTENTION_ACK_BITS); 4126 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 4127 (++cnt < MAX_IGU_ATTN_ACK_TO)); 4128 if (!igu_acked) 4129 DP(NETIF_MSG_HW, 4130 "Failed to verify IGU ack on time\n"); 4131 barrier(); 4132 } 4133 REG_WR(bp, nig_int_mask_addr, nig_mask); 4134 bnx2x_release_phy_lock(bp); 4135 } 4136} 4137 4138static void bnx2x_fan_failure(struct bnx2x *bp) 4139{ 4140 int port = BP_PORT(bp); 4141 u32 ext_phy_config; 4142 /* mark the failure */ 4143 ext_phy_config = 4144 SHMEM_RD(bp, 4145 dev_info.port_hw_config[port].external_phy_config); 4146 4147 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 4148 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 4149 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, 4150 ext_phy_config); 4151 4152 /* log the failure */ 4153 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" 4154 "Please contact OEM Support for assistance\n"); 4155 4156 /* Schedule device reset (unload) 4157 * This is due to some boards consuming sufficient power when driver is 4158 * up to overheat if fan fails. 4159 */ 4160 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0); 4161} 4162 4163static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 4164{ 4165 int port = BP_PORT(bp); 4166 int reg_offset; 4167 u32 val; 4168 4169 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4170 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 4171 4172 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 4173 4174 val = REG_RD(bp, reg_offset); 4175 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 4176 REG_WR(bp, reg_offset, val); 4177 4178 BNX2X_ERR("SPIO5 hw attention\n"); 4179 4180 /* Fan failure attention */ 4181 bnx2x_hw_reset_phy(&bp->link_params); 4182 bnx2x_fan_failure(bp); 4183 } 4184 4185 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { 4186 bnx2x_acquire_phy_lock(bp); 4187 bnx2x_handle_module_detect_int(&bp->link_params); 4188 bnx2x_release_phy_lock(bp); 4189 } 4190 4191 if (attn & HW_INTERRUPT_ASSERT_SET_0) { 4192 4193 val = REG_RD(bp, reg_offset); 4194 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0); 4195 REG_WR(bp, reg_offset, val); 4196 4197 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 4198 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0)); 4199 bnx2x_panic(); 4200 } 4201} 4202 4203static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 4204{ 4205 u32 val; 4206 4207 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 4208 4209 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); 4210 BNX2X_ERR("DB hw attention 0x%x\n", val); 4211 /* DORQ discard attention */ 4212 if (val & 0x2) 4213 BNX2X_ERR("FATAL error from DORQ\n"); 4214 } 4215 4216 if (attn & HW_INTERRUPT_ASSERT_SET_1) { 4217 4218 int port = BP_PORT(bp); 4219 int reg_offset; 4220 4221 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 4222 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 4223 4224 val = REG_RD(bp, reg_offset); 4225 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1); 4226 REG_WR(bp, reg_offset, val); 4227 4228 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 4229 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1)); 4230 bnx2x_panic(); 4231 } 4232} 4233 4234static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 4235{ 4236 u32 val; 4237 4238 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 4239 4240 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); 4241 BNX2X_ERR("CFC hw attention 0x%x\n", val); 4242 /* CFC error attention */ 4243 if (val & 0x2) 4244 BNX2X_ERR("FATAL error from CFC\n"); 4245 } 4246 4247 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 4248 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); 4249 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); 4250 /* RQ_USDMDP_FIFO_OVERFLOW */ 4251 if (val & 0x18000) 4252 BNX2X_ERR("FATAL error from PXP\n"); 4253 4254 if (!CHIP_IS_E1x(bp)) { 4255 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); 4256 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); 4257 } 4258 } 4259 4260 if (attn & HW_INTERRUPT_ASSERT_SET_2) { 4261 4262 int port = BP_PORT(bp); 4263 int reg_offset; 4264 4265 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 4266 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 4267 4268 val = REG_RD(bp, reg_offset); 4269 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2); 4270 REG_WR(bp, reg_offset, val); 4271 4272 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 4273 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2)); 4274 bnx2x_panic(); 4275 } 4276} 4277 4278static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) 4279{ 4280 u32 val; 4281 4282 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 4283 4284 if (attn & BNX2X_PMF_LINK_ASSERT) { 4285 int func = BP_FUNC(bp); 4286 4287 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 4288 bnx2x_read_mf_cfg(bp); 4289 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 4290 func_mf_config[BP_ABS_FUNC(bp)].config); 4291 val = SHMEM_RD(bp, 4292 func_mb[BP_FW_MB_IDX(bp)].drv_status); 4293 4294 if (val & (DRV_STATUS_DCC_EVENT_MASK | 4295 DRV_STATUS_OEM_EVENT_MASK)) 4296 bnx2x_oem_event(bp, 4297 (val & (DRV_STATUS_DCC_EVENT_MASK | 4298 DRV_STATUS_OEM_EVENT_MASK))); 4299 4300 if (val & DRV_STATUS_SET_MF_BW) 4301 bnx2x_set_mf_bw(bp); 4302 4303 if (val & DRV_STATUS_DRV_INFO_REQ) 4304 bnx2x_handle_drv_info_req(bp); 4305 4306 if (val & DRV_STATUS_VF_DISABLED) 4307 bnx2x_schedule_iov_task(bp, 4308 BNX2X_IOV_HANDLE_FLR); 4309 4310 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 4311 bnx2x_pmf_update(bp); 4312 4313 if (bp->port.pmf && 4314 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 4315 bp->dcbx_enabled > 0) 4316 /* start dcbx state machine */ 4317 bnx2x_dcbx_set_params(bp, 4318 BNX2X_DCBX_STATE_NEG_RECEIVED); 4319 if (val & DRV_STATUS_AFEX_EVENT_MASK) 4320 bnx2x_handle_afex_cmd(bp, 4321 val & DRV_STATUS_AFEX_EVENT_MASK); 4322 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 4323 bnx2x_handle_eee_event(bp); 4324 4325 if (val & DRV_STATUS_OEM_UPDATE_SVID) 4326 bnx2x_schedule_sp_rtnl(bp, 4327 BNX2X_SP_RTNL_UPDATE_SVID, 0); 4328 4329 if (bp->link_vars.periodic_flags & 4330 PERIODIC_FLAGS_LINK_EVENT) { 4331 /* sync with link */ 4332 bnx2x_acquire_phy_lock(bp); 4333 bp->link_vars.periodic_flags &= 4334 ~PERIODIC_FLAGS_LINK_EVENT; 4335 bnx2x_release_phy_lock(bp); 4336 if (IS_MF(bp)) 4337 bnx2x_link_sync_notify(bp); 4338 bnx2x_link_report(bp); 4339 } 4340 /* Always call it here: bnx2x_link_report() will 4341 * prevent the link indication duplication. 4342 */ 4343 bnx2x__link_status_update(bp); 4344 } else if (attn & BNX2X_MC_ASSERT_BITS) { 4345 4346 BNX2X_ERR("MC assert!\n"); 4347 bnx2x_mc_assert(bp); 4348 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); 4349 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); 4350 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); 4351 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); 4352 bnx2x_panic(); 4353 4354 } else if (attn & BNX2X_MCP_ASSERT) { 4355 4356 BNX2X_ERR("MCP assert!\n"); 4357 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); 4358 bnx2x_fw_dump(bp); 4359 4360 } else 4361 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); 4362 } 4363 4364 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 4365 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 4366 if (attn & BNX2X_GRC_TIMEOUT) { 4367 val = CHIP_IS_E1(bp) ? 0 : 4368 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); 4369 BNX2X_ERR("GRC time-out 0x%08x\n", val); 4370 } 4371 if (attn & BNX2X_GRC_RSV) { 4372 val = CHIP_IS_E1(bp) ? 0 : 4373 REG_RD(bp, MISC_REG_GRC_RSV_ATTN); 4374 BNX2X_ERR("GRC reserved 0x%08x\n", val); 4375 } 4376 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 4377 } 4378} 4379 4380/* 4381 * Bits map: 4382 * 0-7 - Engine0 load counter. 4383 * 8-15 - Engine1 load counter. 
4384 * 16 - Engine0 RESET_IN_PROGRESS bit. 4385 * 17 - Engine1 RESET_IN_PROGRESS bit. 4386 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function 4387 * on the engine 4388 * 19 - Engine1 ONE_IS_LOADED. 4389 * 20 - Chip reset flow bit. When set none-leader must wait for both engines 4390 * leader to complete (check for both RESET_IN_PROGRESS bits and not for 4391 * just the one belonging to its engine). 4392 * 4393 */ 4394#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 4395 4396#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff 4397#define BNX2X_PATH0_LOAD_CNT_SHIFT 0 4398#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 4399#define BNX2X_PATH1_LOAD_CNT_SHIFT 8 4400#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 4401#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 4402#define BNX2X_GLOBAL_RESET_BIT 0x00040000 4403 4404/* 4405 * Set the GLOBAL_RESET bit. 4406 * 4407 * Should be run under rtnl lock 4408 */ 4409void bnx2x_set_reset_global(struct bnx2x *bp) 4410{ 4411 u32 val; 4412 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4413 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4414 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); 4415 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4416} 4417 4418/* 4419 * Clear the GLOBAL_RESET bit. 4420 * 4421 * Should be run under rtnl lock 4422 */ 4423static void bnx2x_clear_reset_global(struct bnx2x *bp) 4424{ 4425 u32 val; 4426 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4427 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4428 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); 4429 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4430} 4431 4432/* 4433 * Checks the GLOBAL_RESET bit. 4434 * 4435 * should be run under rtnl lock 4436 */ 4437static bool bnx2x_reset_is_global(struct bnx2x *bp) 4438{ 4439 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4440 4441 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); 4442 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; 4443} 4444 4445/* 4446 * Clear RESET_IN_PROGRESS bit for the current engine. 4447 * 4448 * Should be run under rtnl lock 4449 */ 4450static void bnx2x_set_reset_done(struct bnx2x *bp) 4451{ 4452 u32 val; 4453 u32 bit = BP_PATH(bp) ? 4454 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4455 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4456 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4457 4458 /* Clear the bit */ 4459 val &= ~bit; 4460 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4461 4462 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4463} 4464 4465/* 4466 * Set RESET_IN_PROGRESS for the current engine. 4467 * 4468 * should be run under rtnl lock 4469 */ 4470void bnx2x_set_reset_in_progress(struct bnx2x *bp) 4471{ 4472 u32 val; 4473 u32 bit = BP_PATH(bp) ? 4474 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4475 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4476 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4477 4478 /* Set the bit */ 4479 val |= bit; 4480 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4481 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4482} 4483 4484/* 4485 * Checks the RESET_IN_PROGRESS bit for the given engine. 4486 * should be run under rtnl lock 4487 */ 4488bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) 4489{ 4490 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4491 u32 bit = engine ? 4492 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4493 4494 /* return false if bit is set */ 4495 return (val & bit) ? 
false : true; 4496} 4497 4498/* 4499 * set pf load for the current pf. 4500 * 4501 * should be run under rtnl lock 4502 */ 4503void bnx2x_set_pf_load(struct bnx2x *bp) 4504{ 4505 u32 val1, val; 4506 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4507 BNX2X_PATH0_LOAD_CNT_MASK; 4508 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4509 BNX2X_PATH0_LOAD_CNT_SHIFT; 4510 4511 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4512 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4513 4514 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); 4515 4516 /* get the current counter value */ 4517 val1 = (val & mask) >> shift; 4518 4519 /* set bit of that PF */ 4520 val1 |= (1 << bp->pf_num); 4521 4522 /* clear the old value */ 4523 val &= ~mask; 4524 4525 /* set the new one */ 4526 val |= ((val1 << shift) & mask); 4527 4528 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4529 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4530} 4531 4532/** 4533 * bnx2x_clear_pf_load - clear pf load mark 4534 * 4535 * @bp: driver handle 4536 * 4537 * Should be run under rtnl lock. 4538 * Decrements the load counter for the current engine. Returns 4539 * whether other functions are still loaded 4540 */ 4541bool bnx2x_clear_pf_load(struct bnx2x *bp) 4542{ 4543 u32 val1, val; 4544 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4545 BNX2X_PATH0_LOAD_CNT_MASK; 4546 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4547 BNX2X_PATH0_LOAD_CNT_SHIFT; 4548 4549 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4550 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4551 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); 4552 4553 /* get the current counter value */ 4554 val1 = (val & mask) >> shift; 4555 4556 /* clear bit of that PF */ 4557 val1 &= ~(1 << bp->pf_num); 4558 4559 /* clear the old value */ 4560 val &= ~mask; 4561 4562 /* set the new one */ 4563 val |= ((val1 << shift) & mask); 4564 4565 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4566 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4567 return val1 != 0; 4568} 4569 4570/* 4571 * Read the load status for the current engine. 4572 * 4573 * should be run under rtnl lock 4574 */ 4575static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 4576{ 4577 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 4578 BNX2X_PATH0_LOAD_CNT_MASK); 4579 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4580 BNX2X_PATH0_LOAD_CNT_SHIFT); 4581 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4582 4583 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); 4584 4585 val = (val & mask) >> shift; 4586 4587 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", 4588 engine, val); 4589 4590 return val != 0; 4591} 4592 4593static void _print_parity(struct bnx2x *bp, u32 reg) 4594{ 4595 pr_cont(" [0x%08x] ", REG_RD(bp, reg)); 4596} 4597 4598static void _print_next_block(int idx, const char *blk) 4599{ 4600 pr_cont("%s%s", idx ? ", " : "", blk); 4601} 4602 4603static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4604 int *par_num, bool print) 4605{ 4606 u32 cur_bit; 4607 bool res; 4608 int i; 4609 4610 res = false; 4611 4612 for (i = 0; sig; i++) { 4613 cur_bit = (0x1UL << i); 4614 if (sig & cur_bit) { 4615 res |= true; /* Each bit is real error! 
*/ 4616 4617 if (print) { 4618 switch (cur_bit) { 4619 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4620 _print_next_block((*par_num)++, "BRB"); 4621 _print_parity(bp, 4622 BRB1_REG_BRB1_PRTY_STS); 4623 break; 4624 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4625 _print_next_block((*par_num)++, 4626 "PARSER"); 4627 _print_parity(bp, PRS_REG_PRS_PRTY_STS); 4628 break; 4629 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4630 _print_next_block((*par_num)++, "TSDM"); 4631 _print_parity(bp, 4632 TSDM_REG_TSDM_PRTY_STS); 4633 break; 4634 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4635 _print_next_block((*par_num)++, 4636 "SEARCHER"); 4637 _print_parity(bp, SRC_REG_SRC_PRTY_STS); 4638 break; 4639 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4640 _print_next_block((*par_num)++, "TCM"); 4641 _print_parity(bp, TCM_REG_TCM_PRTY_STS); 4642 break; 4643 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4644 _print_next_block((*par_num)++, 4645 "TSEMI"); 4646 _print_parity(bp, 4647 TSEM_REG_TSEM_PRTY_STS_0); 4648 _print_parity(bp, 4649 TSEM_REG_TSEM_PRTY_STS_1); 4650 break; 4651 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4652 _print_next_block((*par_num)++, "XPB"); 4653 _print_parity(bp, GRCBASE_XPB + 4654 PB_REG_PB_PRTY_STS); 4655 break; 4656 } 4657 } 4658 4659 /* Clear the bit */ 4660 sig &= ~cur_bit; 4661 } 4662 } 4663 4664 return res; 4665} 4666 4667static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4668 int *par_num, bool *global, 4669 bool print) 4670{ 4671 u32 cur_bit; 4672 bool res; 4673 int i; 4674 4675 res = false; 4676 4677 for (i = 0; sig; i++) { 4678 cur_bit = (0x1UL << i); 4679 if (sig & cur_bit) { 4680 res |= true; /* Each bit is real error! */ 4681 switch (cur_bit) { 4682 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4683 if (print) { 4684 _print_next_block((*par_num)++, "PBF"); 4685 _print_parity(bp, PBF_REG_PBF_PRTY_STS); 4686 } 4687 break; 4688 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4689 if (print) { 4690 _print_next_block((*par_num)++, "QM"); 4691 _print_parity(bp, QM_REG_QM_PRTY_STS); 4692 } 4693 break; 4694 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4695 if (print) { 4696 _print_next_block((*par_num)++, "TM"); 4697 _print_parity(bp, TM_REG_TM_PRTY_STS); 4698 } 4699 break; 4700 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4701 if (print) { 4702 _print_next_block((*par_num)++, "XSDM"); 4703 _print_parity(bp, 4704 XSDM_REG_XSDM_PRTY_STS); 4705 } 4706 break; 4707 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4708 if (print) { 4709 _print_next_block((*par_num)++, "XCM"); 4710 _print_parity(bp, XCM_REG_XCM_PRTY_STS); 4711 } 4712 break; 4713 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4714 if (print) { 4715 _print_next_block((*par_num)++, 4716 "XSEMI"); 4717 _print_parity(bp, 4718 XSEM_REG_XSEM_PRTY_STS_0); 4719 _print_parity(bp, 4720 XSEM_REG_XSEM_PRTY_STS_1); 4721 } 4722 break; 4723 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4724 if (print) { 4725 _print_next_block((*par_num)++, 4726 "DOORBELLQ"); 4727 _print_parity(bp, 4728 DORQ_REG_DORQ_PRTY_STS); 4729 } 4730 break; 4731 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4732 if (print) { 4733 _print_next_block((*par_num)++, "NIG"); 4734 if (CHIP_IS_E1x(bp)) { 4735 _print_parity(bp, 4736 NIG_REG_NIG_PRTY_STS); 4737 } else { 4738 _print_parity(bp, 4739 NIG_REG_NIG_PRTY_STS_0); 4740 _print_parity(bp, 4741 NIG_REG_NIG_PRTY_STS_1); 4742 } 4743 } 4744 break; 4745 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4746 if (print) 4747 _print_next_block((*par_num)++, 4748 "VAUX PCI CORE"); 4749 *global = 
true; 4750 break; 4751 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4752 if (print) { 4753 _print_next_block((*par_num)++, 4754 "DEBUG"); 4755 _print_parity(bp, DBG_REG_DBG_PRTY_STS); 4756 } 4757 break; 4758 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4759 if (print) { 4760 _print_next_block((*par_num)++, "USDM"); 4761 _print_parity(bp, 4762 USDM_REG_USDM_PRTY_STS); 4763 } 4764 break; 4765 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4766 if (print) { 4767 _print_next_block((*par_num)++, "UCM"); 4768 _print_parity(bp, UCM_REG_UCM_PRTY_STS); 4769 } 4770 break; 4771 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4772 if (print) { 4773 _print_next_block((*par_num)++, 4774 "USEMI"); 4775 _print_parity(bp, 4776 USEM_REG_USEM_PRTY_STS_0); 4777 _print_parity(bp, 4778 USEM_REG_USEM_PRTY_STS_1); 4779 } 4780 break; 4781 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4782 if (print) { 4783 _print_next_block((*par_num)++, "UPB"); 4784 _print_parity(bp, GRCBASE_UPB + 4785 PB_REG_PB_PRTY_STS); 4786 } 4787 break; 4788 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4789 if (print) { 4790 _print_next_block((*par_num)++, "CSDM"); 4791 _print_parity(bp, 4792 CSDM_REG_CSDM_PRTY_STS); 4793 } 4794 break; 4795 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4796 if (print) { 4797 _print_next_block((*par_num)++, "CCM"); 4798 _print_parity(bp, CCM_REG_CCM_PRTY_STS); 4799 } 4800 break; 4801 } 4802 4803 /* Clear the bit */ 4804 sig &= ~cur_bit; 4805 } 4806 } 4807 4808 return res; 4809} 4810 4811static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4812 int *par_num, bool print) 4813{ 4814 u32 cur_bit; 4815 bool res; 4816 int i; 4817 4818 res = false; 4819 4820 for (i = 0; sig; i++) { 4821 cur_bit = (0x1UL << i); 4822 if (sig & cur_bit) { 4823 res = true; /* Each bit is real error! 
*/ 4824 if (print) { 4825 switch (cur_bit) { 4826 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4827 _print_next_block((*par_num)++, 4828 "CSEMI"); 4829 _print_parity(bp, 4830 CSEM_REG_CSEM_PRTY_STS_0); 4831 _print_parity(bp, 4832 CSEM_REG_CSEM_PRTY_STS_1); 4833 break; 4834 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4835 _print_next_block((*par_num)++, "PXP"); 4836 _print_parity(bp, PXP_REG_PXP_PRTY_STS); 4837 _print_parity(bp, 4838 PXP2_REG_PXP2_PRTY_STS_0); 4839 _print_parity(bp, 4840 PXP2_REG_PXP2_PRTY_STS_1); 4841 break; 4842 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4843 _print_next_block((*par_num)++, 4844 "PXPPCICLOCKCLIENT"); 4845 break; 4846 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4847 _print_next_block((*par_num)++, "CFC"); 4848 _print_parity(bp, 4849 CFC_REG_CFC_PRTY_STS); 4850 break; 4851 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4852 _print_next_block((*par_num)++, "CDU"); 4853 _print_parity(bp, CDU_REG_CDU_PRTY_STS); 4854 break; 4855 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4856 _print_next_block((*par_num)++, "DMAE"); 4857 _print_parity(bp, 4858 DMAE_REG_DMAE_PRTY_STS); 4859 break; 4860 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4861 _print_next_block((*par_num)++, "IGU"); 4862 if (CHIP_IS_E1x(bp)) 4863 _print_parity(bp, 4864 HC_REG_HC_PRTY_STS); 4865 else 4866 _print_parity(bp, 4867 IGU_REG_IGU_PRTY_STS); 4868 break; 4869 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4870 _print_next_block((*par_num)++, "MISC"); 4871 _print_parity(bp, 4872 MISC_REG_MISC_PRTY_STS); 4873 break; 4874 } 4875 } 4876 4877 /* Clear the bit */ 4878 sig &= ~cur_bit; 4879 } 4880 } 4881 4882 return res; 4883} 4884 4885static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, 4886 int *par_num, bool *global, 4887 bool print) 4888{ 4889 bool res = false; 4890 u32 cur_bit; 4891 int i; 4892 4893 for (i = 0; sig; i++) { 4894 cur_bit = (0x1UL << i); 4895 if (sig & cur_bit) { 4896 switch (cur_bit) { 4897 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4898 if (print) 4899 _print_next_block((*par_num)++, 4900 "MCP ROM"); 4901 *global = true; 4902 res = true; 4903 break; 4904 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4905 if (print) 4906 _print_next_block((*par_num)++, 4907 "MCP UMP RX"); 4908 *global = true; 4909 res = true; 4910 break; 4911 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4912 if (print) 4913 _print_next_block((*par_num)++, 4914 "MCP UMP TX"); 4915 *global = true; 4916 res = true; 4917 break; 4918 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4919 (*par_num)++; 4920 /* clear latched SCPAD PATIRY from MCP */ 4921 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 4922 1UL << 10); 4923 break; 4924 } 4925 4926 /* Clear the bit */ 4927 sig &= ~cur_bit; 4928 } 4929 } 4930 4931 return res; 4932} 4933 4934static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4935 int *par_num, bool print) 4936{ 4937 u32 cur_bit; 4938 bool res; 4939 int i; 4940 4941 res = false; 4942 4943 for (i = 0; sig; i++) { 4944 cur_bit = (0x1UL << i); 4945 if (sig & cur_bit) { 4946 res = true; /* Each bit is real error! 
*/ 4947 if (print) { 4948 switch (cur_bit) { 4949 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4950 _print_next_block((*par_num)++, 4951 "PGLUE_B"); 4952 _print_parity(bp, 4953 PGLUE_B_REG_PGLUE_B_PRTY_STS); 4954 break; 4955 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4956 _print_next_block((*par_num)++, "ATC"); 4957 _print_parity(bp, 4958 ATC_REG_ATC_PRTY_STS); 4959 break; 4960 } 4961 } 4962 /* Clear the bit */ 4963 sig &= ~cur_bit; 4964 } 4965 } 4966 4967 return res; 4968} 4969 4970static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4971 u32 *sig) 4972{ 4973 bool res = false; 4974 4975 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4976 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4977 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4978 (sig[3] & HW_PRTY_ASSERT_SET_3) || 4979 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4980 int par_num = 0; 4981 4982 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4983 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4984 sig[0] & HW_PRTY_ASSERT_SET_0, 4985 sig[1] & HW_PRTY_ASSERT_SET_1, 4986 sig[2] & HW_PRTY_ASSERT_SET_2, 4987 sig[3] & HW_PRTY_ASSERT_SET_3, 4988 sig[4] & HW_PRTY_ASSERT_SET_4); 4989 if (print) { 4990 if (((sig[0] & HW_PRTY_ASSERT_SET_0) || 4991 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4992 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4993 (sig[4] & HW_PRTY_ASSERT_SET_4)) || 4994 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) { 4995 netdev_err(bp->dev, 4996 "Parity errors detected in blocks: "); 4997 } else { 4998 print = false; 4999 } 5000 } 5001 res |= bnx2x_check_blocks_with_parity0(bp, 5002 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); 5003 res |= bnx2x_check_blocks_with_parity1(bp, 5004 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print); 5005 res |= bnx2x_check_blocks_with_parity2(bp, 5006 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); 5007 res |= bnx2x_check_blocks_with_parity3(bp, 5008 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); 5009 res |= bnx2x_check_blocks_with_parity4(bp, 5010 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print); 5011 5012 if (print) 5013 pr_cont("\n"); 5014 } 5015 5016 return res; 5017} 5018 5019/** 5020 * bnx2x_chk_parity_attn - checks for parity attentions. 5021 * 5022 * @bp: driver handle 5023 * @global: true if there was a global attention 5024 * @print: show parity attention in syslog 5025 */ 5026bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) 5027{ 5028 struct attn_route attn = { {0} }; 5029 int port = BP_PORT(bp); 5030 5031 attn.sig[0] = REG_RD(bp, 5032 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 5033 port*4); 5034 attn.sig[1] = REG_RD(bp, 5035 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + 5036 port*4); 5037 attn.sig[2] = REG_RD(bp, 5038 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + 5039 port*4); 5040 attn.sig[3] = REG_RD(bp, 5041 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 5042 port*4); 5043 /* Since MCP attentions can't be disabled inside the block, we need to 5044 * read AEU registers to see whether they're currently disabled 5045 */ 5046 attn.sig[3] &= ((REG_RD(bp, 5047 !port ? 
MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 5048 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) & 5049 MISC_AEU_ENABLE_MCP_PRTY_BITS) | 5050 ~MISC_AEU_ENABLE_MCP_PRTY_BITS); 5051 5052 if (!CHIP_IS_E1x(bp)) 5053 attn.sig[4] = REG_RD(bp, 5054 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + 5055 port*4); 5056 5057 return bnx2x_parity_attn(bp, global, print, attn.sig); 5058} 5059 5060static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 5061{ 5062 u32 val; 5063 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 5064 5065 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 5066 BNX2X_ERR("PGLUE hw attention 0x%x\n", val); 5067 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 5068 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 5069 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 5070 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 5071 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 5072 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 5073 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 5074 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 5075 if (val & 5076 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 5077 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 5078 if (val & 5079 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 5080 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 5081 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 5082 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 5083 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 5084 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 5085 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 5086 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 5087 } 5088 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 5089 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); 5090 BNX2X_ERR("ATC hw attention 0x%x\n", val); 5091 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 5092 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 5093 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 5094 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 5095 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 5096 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 5097 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 5098 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 5099 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 5100 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 5101 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 5102 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 5103 } 5104 5105 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 5106 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 5107 BNX2X_ERR("FATAL parity attention set4 0x%x\n", 5108 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 5109 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 5110 } 5111} 5112 5113static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 5114{ 5115 struct attn_route attn, *group_mask; 5116 int port = BP_PORT(bp); 5117 int index; 5118 u32 reg_addr; 5119 u32 val; 5120 u32 aeu_mask; 5121 bool global = false; 5122 5123 /* need to take HW lock because MCP or other port might also 5124 try to handle this event */ 5125 bnx2x_acquire_alr(bp); 5126 5127 if (bnx2x_chk_parity_attn(bp, &global, true)) { 5128#ifndef BNX2X_STOP_ON_ERROR 5129 bp->recovery_state = BNX2X_RECOVERY_INIT; 5130 schedule_delayed_work(&bp->sp_rtnl_task, 
0); 5131 /* Disable HW interrupts */ 5132 bnx2x_int_disable(bp); 5133 /* In case of parity errors don't handle attentions so that 5134 * other function would "see" parity errors. 5135 */ 5136#else 5137 bnx2x_panic(); 5138#endif 5139 bnx2x_release_alr(bp); 5140 return; 5141 } 5142 5143 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 5144 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 5145 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 5146 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 5147 if (!CHIP_IS_E1x(bp)) 5148 attn.sig[4] = 5149 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 5150 else 5151 attn.sig[4] = 0; 5152 5153 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", 5154 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 5155 5156 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 5157 if (deasserted & (1 << index)) { 5158 group_mask = &bp->attn_group[index]; 5159 5160 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", 5161 index, 5162 group_mask->sig[0], group_mask->sig[1], 5163 group_mask->sig[2], group_mask->sig[3], 5164 group_mask->sig[4]); 5165 5166 bnx2x_attn_int_deasserted4(bp, 5167 attn.sig[4] & group_mask->sig[4]); 5168 bnx2x_attn_int_deasserted3(bp, 5169 attn.sig[3] & group_mask->sig[3]); 5170 bnx2x_attn_int_deasserted1(bp, 5171 attn.sig[1] & group_mask->sig[1]); 5172 bnx2x_attn_int_deasserted2(bp, 5173 attn.sig[2] & group_mask->sig[2]); 5174 bnx2x_attn_int_deasserted0(bp, 5175 attn.sig[0] & group_mask->sig[0]); 5176 } 5177 } 5178 5179 bnx2x_release_alr(bp); 5180 5181 if (bp->common.int_block == INT_BLOCK_HC) 5182 reg_addr = (HC_REG_COMMAND_REG + port*32 + 5183 COMMAND_REG_ATTN_BITS_CLR); 5184 else 5185 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 5186 5187 val = ~deasserted; 5188 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, 5189 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 5190 REG_WR(bp, reg_addr, val); 5191 5192 if (~bp->attn_state & deasserted) 5193 BNX2X_ERR("IGU ERROR\n"); 5194 5195 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 5196 MISC_REG_AEU_MASK_ATTN_FUNC_0; 5197 5198 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 5199 aeu_mask = REG_RD(bp, reg_addr); 5200 5201 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 5202 aeu_mask, deasserted); 5203 aeu_mask |= (deasserted & 0x3ff); 5204 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 5205 5206 REG_WR(bp, reg_addr, aeu_mask); 5207 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 5208 5209 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 5210 bp->attn_state &= ~deasserted; 5211 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 5212} 5213 5214static void bnx2x_attn_int(struct bnx2x *bp) 5215{ 5216 /* read local copy of bits */ 5217 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. 5218 attn_bits); 5219 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 
5220 attn_bits_ack); 5221 u32 attn_state = bp->attn_state; 5222 5223 /* look for changed bits */ 5224 u32 asserted = attn_bits & ~attn_ack & ~attn_state; 5225 u32 deasserted = ~attn_bits & attn_ack & attn_state; 5226 5227 DP(NETIF_MSG_HW, 5228 "attn_bits %x attn_ack %x asserted %x deasserted %x\n", 5229 attn_bits, attn_ack, asserted, deasserted); 5230 5231 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) 5232 BNX2X_ERR("BAD attention state\n"); 5233 5234 /* handle bits that were raised */ 5235 if (asserted) 5236 bnx2x_attn_int_asserted(bp, asserted); 5237 5238 if (deasserted) 5239 bnx2x_attn_int_deasserted(bp, deasserted); 5240} 5241 5242void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 5243 u16 index, u8 op, u8 update) 5244{ 5245 u32 igu_addr = bp->igu_base_addr; 5246 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 5247 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, 5248 igu_addr); 5249} 5250 5251static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 5252{ 5253 /* No memory barriers */ 5254 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 5255} 5256 5257static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 5258 union event_ring_elem *elem) 5259{ 5260 u8 err = elem->message.error; 5261 5262 if (!bp->cnic_eth_dev.starting_cid || 5263 (cid < bp->cnic_eth_dev.starting_cid && 5264 cid != bp->cnic_eth_dev.iscsi_l2_cid)) 5265 return 1; 5266 5267 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); 5268 5269 if (unlikely(err)) { 5270 5271 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", 5272 cid); 5273 bnx2x_panic_dump(bp, false); 5274 } 5275 bnx2x_cnic_cfc_comp(bp, cid, err); 5276 return 0; 5277} 5278 5279static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 5280{ 5281 struct bnx2x_mcast_ramrod_params rparam; 5282 int rc; 5283 5284 memset(&rparam, 0, sizeof(rparam)); 5285 5286 rparam.mcast_obj = &bp->mcast_obj; 5287 5288 netif_addr_lock_bh(bp->dev); 5289 5290 /* Clear pending state for the last command */ 5291 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); 5292 5293 /* If there are pending mcast commands - send them */ 5294 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { 5295 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 5296 if (rc < 0) 5297 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 5298 rc); 5299 } 5300 5301 netif_addr_unlock_bh(bp->dev); 5302} 5303 5304static void bnx2x_handle_classification_eqe(struct bnx2x *bp, 5305 union event_ring_elem *elem) 5306{ 5307 unsigned long ramrod_flags = 0; 5308 int rc = 0; 5309 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); 5310 u32 cid = echo & BNX2X_SWCID_MASK; 5311 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 5312 5313 /* Always push next commands out, don't wait here */ 5314 __set_bit(RAMROD_CONT, &ramrod_flags); 5315 5316 switch (echo >> BNX2X_SWCID_SHIFT) { 5317 case BNX2X_FILTER_MAC_PENDING: 5318 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 5319 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) 5320 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 5321 else 5322 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; 5323 5324 break; 5325 case BNX2X_FILTER_VLAN_PENDING: 5326 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n"); 5327 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj; 5328 break; 5329 case BNX2X_FILTER_MCAST_PENDING: 5330 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); 5331 /* This is only relevant for 57710 where multicast MACs are 5332 * configured as unicast MACs using the same ramrod. 
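		 * (Hence the completion is simply routed to the multicast
		 *  handler below instead of completing a vlan_mac_obj command.)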
5333 */ 5334 bnx2x_handle_mcast_eqe(bp); 5335 return; 5336 default: 5337 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo); 5338 return; 5339 } 5340 5341 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); 5342 5343 if (rc < 0) 5344 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 5345 else if (rc > 0) 5346 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); 5347} 5348 5349static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 5350 5351static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 5352{ 5353 netif_addr_lock_bh(bp->dev); 5354 5355 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 5356 5357 /* Send rx_mode command again if was requested */ 5358 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 5359 bnx2x_set_storm_rx_mode(bp); 5360 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 5361 &bp->sp_state)) 5362 bnx2x_set_iscsi_eth_rx_mode(bp, true); 5363 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 5364 &bp->sp_state)) 5365 bnx2x_set_iscsi_eth_rx_mode(bp, false); 5366 5367 netif_addr_unlock_bh(bp->dev); 5368} 5369 5370static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, 5371 union event_ring_elem *elem) 5372{ 5373 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { 5374 DP(BNX2X_MSG_SP, 5375 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", 5376 elem->message.data.vif_list_event.func_bit_map); 5377 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, 5378 elem->message.data.vif_list_event.func_bit_map); 5379 } else if (elem->message.data.vif_list_event.echo == 5380 VIF_LIST_RULE_SET) { 5381 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); 5382 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); 5383 } 5384} 5385 5386/* called with rtnl_lock */ 5387static void bnx2x_after_function_update(struct bnx2x *bp) 5388{ 5389 int q, rc; 5390 struct bnx2x_fastpath *fp; 5391 struct bnx2x_queue_state_params queue_params = {NULL}; 5392 struct bnx2x_queue_update_params *q_update_params = 5393 &queue_params.params.update; 5394 5395 /* Send Q update command with afex vlan removal values for all Qs */ 5396 queue_params.cmd = BNX2X_Q_CMD_UPDATE; 5397 5398 /* set silent vlan removal values according to vlan mode */ 5399 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 5400 &q_update_params->update_flags); 5401 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 5402 &q_update_params->update_flags); 5403 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 5404 5405 /* in access mode mark mask and value are 0 to strip all vlans */ 5406 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { 5407 q_update_params->silent_removal_value = 0; 5408 q_update_params->silent_removal_mask = 0; 5409 } else { 5410 q_update_params->silent_removal_value = 5411 (bp->afex_def_vlan_tag & VLAN_VID_MASK); 5412 q_update_params->silent_removal_mask = VLAN_VID_MASK; 5413 } 5414 5415 for_each_eth_queue(bp, q) { 5416 /* Set the appropriate Queue object */ 5417 fp = &bp->fp[q]; 5418 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5419 5420 /* send the ramrod */ 5421 rc = bnx2x_queue_state_change(bp, &queue_params); 5422 if (rc < 0) 5423 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", 5424 q); 5425 } 5426 5427 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { 5428 fp = &bp->fp[FCOE_IDX(bp)]; 5429 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5430 5431 /* clear pending completion bit */ 5432 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 5433 5434 /* mark latest Q bit */ 5435 
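		/* Explanatory aside (a generic kernel pattern, not behaviour
		 * specific to this driver): set_bit() is atomic but implies no
		 * memory ordering, so the smp_mb__before_atomic()/
		 * smp_mb__after_atomic() pair below makes the
		 * BNX2X_AFEX_FCOE_Q_UPDATE_PENDING update fully ordered with
		 * respect to the surrounding accesses and to other readers of
		 * bp->sp_state.
		 */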
		smp_mb__before_atomic();
		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_atomic();

		/* send Q update ramrod for FCoE Q */
		rc = bnx2x_queue_state_change(bp, &queue_params);
		if (rc < 0)
			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
				  q);
	} else {
		/* If no FCoE ring - ACK MCP now */
		bnx2x_link_report(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
	}
}

static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
	struct bnx2x *bp, u32 cid)
{
	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);

	if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
		return &bnx2x_fcoe_sp_obj(bp, q_obj);
	else
		return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}

static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u8 echo;
	u32 cid;
	u8 opcode;
	int rc, spqe_cnt = 0;
	struct bnx2x_queue_sp_obj *q_obj;
	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need for a "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
			hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		rc = bnx2x_iov_eq_sp_event(bp, elem);
		if (!rc) {
			DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
			   rc);
			goto next_spqe;
		}

		opcode = elem->message.opcode;

		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_VF_PF_CHANNEL:
			bnx2x_vf_mbx_schedule(bp,
					      &elem->message.data.vf_pf_event);
			continue;

		case EVENT_RING_OPCODE_STAT_QUERY:
			DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
			       "got statistics comp event %d\n",
			       bp->stats_comp++);
			/* nothing to do with stats comp */
			goto next_spqe;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */

			/* elem CID originates from FW; actually LE */
			cid = SW_CID(elem->message.data.cfc_del_event.cid);

			DP(BNX2X_MSG_SP,
			   "got delete ramrod for MULTI[%d]\n", cid);

			if (CNIC_LOADED(bp) &&
			    !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;

			q_obj = bnx2x_cid_to_q_obj(bp, cid);

			if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
				break;

			goto next_spqe;

		case EVENT_RING_OPCODE_STOP_TRAFFIC:
			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
			if (f_obj->complete_cmd(bp, f_obj,
						BNX2X_F_CMD_TX_STOP))
				break;
			goto next_spqe;

5554 case EVENT_RING_OPCODE_START_TRAFFIC: 5555 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); 5556 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 5557 if (f_obj->complete_cmd(bp, f_obj, 5558 BNX2X_F_CMD_TX_START)) 5559 break; 5560 goto next_spqe; 5561 5562 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 5563 echo = elem->message.data.function_update_event.echo; 5564 if (echo == SWITCH_UPDATE) { 5565 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5566 "got FUNC_SWITCH_UPDATE ramrod\n"); 5567 if (f_obj->complete_cmd( 5568 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE)) 5569 break; 5570 5571 } else { 5572 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE; 5573 5574 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 5575 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 5576 f_obj->complete_cmd(bp, f_obj, 5577 BNX2X_F_CMD_AFEX_UPDATE); 5578 5579 /* We will perform the Queues update from 5580 * sp_rtnl task as all Queue SP operations 5581 * should run under rtnl_lock. 5582 */ 5583 bnx2x_schedule_sp_rtnl(bp, cmd, 0); 5584 } 5585 5586 goto next_spqe; 5587 5588 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 5589 f_obj->complete_cmd(bp, f_obj, 5590 BNX2X_F_CMD_AFEX_VIFLISTS); 5591 bnx2x_after_afex_vif_lists(bp, elem); 5592 goto next_spqe; 5593 case EVENT_RING_OPCODE_FUNCTION_START: 5594 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5595 "got FUNC_START ramrod\n"); 5596 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) 5597 break; 5598 5599 goto next_spqe; 5600 5601 case EVENT_RING_OPCODE_FUNCTION_STOP: 5602 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5603 "got FUNC_STOP ramrod\n"); 5604 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) 5605 break; 5606 5607 goto next_spqe; 5608 5609 case EVENT_RING_OPCODE_SET_TIMESYNC: 5610 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP, 5611 "got set_timesync ramrod completion\n"); 5612 if (f_obj->complete_cmd(bp, f_obj, 5613 BNX2X_F_CMD_SET_TIMESYNC)) 5614 break; 5615 goto next_spqe; 5616 } 5617 5618 switch (opcode | bp->state) { 5619 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5620 BNX2X_STATE_OPEN): 5621 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5622 BNX2X_STATE_OPENING_WAIT4_PORT): 5623 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5624 BNX2X_STATE_CLOSING_WAIT4_HALT): 5625 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", 5626 SW_CID(elem->message.data.eth_event.echo)); 5627 rss_raw->clear_pending(rss_raw); 5628 break; 5629 5630 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 5631 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 5632 case (EVENT_RING_OPCODE_SET_MAC | 5633 BNX2X_STATE_CLOSING_WAIT4_HALT): 5634 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5635 BNX2X_STATE_OPEN): 5636 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5637 BNX2X_STATE_DIAG): 5638 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | 5639 BNX2X_STATE_CLOSING_WAIT4_HALT): 5640 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n"); 5641 bnx2x_handle_classification_eqe(bp, elem); 5642 break; 5643 5644 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5645 BNX2X_STATE_OPEN): 5646 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5647 BNX2X_STATE_DIAG): 5648 case (EVENT_RING_OPCODE_MULTICAST_RULES | 5649 BNX2X_STATE_CLOSING_WAIT4_HALT): 5650 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); 5651 bnx2x_handle_mcast_eqe(bp); 5652 break; 5653 5654 case (EVENT_RING_OPCODE_FILTERS_RULES | 5655 BNX2X_STATE_OPEN): 5656 case (EVENT_RING_OPCODE_FILTERS_RULES | 5657 BNX2X_STATE_DIAG): 5658 case (EVENT_RING_OPCODE_FILTERS_RULES | 5659 BNX2X_STATE_CLOSING_WAIT4_HALT): 5660 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); 5661 bnx2x_handle_rx_mode_eqe(bp); 5662 break; 5663 default: 5664 /* unknown event log error and continue */ 5665 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", 5666 elem->message.opcode, bp->state); 5667 } 5668next_spqe: 5669 spqe_cnt++; 5670 } /* for */ 5671 5672 smp_mb__before_atomic(); 5673 atomic_add(spqe_cnt, &bp->eq_spq_left); 5674 5675 bp->eq_cons = sw_cons; 5676 bp->eq_prod = sw_prod; 5677 /* Make sure that above mem writes were issued towards the memory */ 5678 smp_wmb(); 5679 5680 /* update producer */ 5681 bnx2x_update_eq_prod(bp, bp->eq_prod); 5682} 5683 5684static void bnx2x_sp_task(struct work_struct *work) 5685{ 5686 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 5687 5688 DP(BNX2X_MSG_SP, "sp task invoked\n"); 5689 5690 /* make sure the atomic interrupt_occurred has been written */ 5691 smp_rmb(); 5692 if (atomic_read(&bp->interrupt_occurred)) { 5693 5694 /* what work needs to be performed? */ 5695 u16 status = bnx2x_update_dsb_idx(bp); 5696 5697 DP(BNX2X_MSG_SP, "status %x\n", status); 5698 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); 5699 atomic_set(&bp->interrupt_occurred, 0); 5700 5701 /* HW attentions */ 5702 if (status & BNX2X_DEF_SB_ATT_IDX) { 5703 bnx2x_attn_int(bp); 5704 status &= ~BNX2X_DEF_SB_ATT_IDX; 5705 } 5706 5707 /* SP events: STAT_QUERY and others */ 5708 if (status & BNX2X_DEF_SB_IDX) { 5709 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5710 5711 if (FCOE_INIT(bp) && 5712 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5713 /* Prevent local bottom-halves from running as 5714 * we are going to change the local NAPI list. 5715 */ 5716 local_bh_disable(); 5717 napi_schedule(&bnx2x_fcoe(bp, napi)); 5718 local_bh_enable(); 5719 } 5720 5721 /* Handle EQ completions */ 5722 bnx2x_eq_int(bp); 5723 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 5724 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); 5725 5726 status &= ~BNX2X_DEF_SB_IDX; 5727 } 5728 5729 /* if status is non zero then perhaps something went wrong */ 5730 if (unlikely(status)) 5731 DP(BNX2X_MSG_SP, 5732 "got an unknown interrupt! 
(status 0x%x)\n", status); 5733 5734 /* ack status block only if something was actually handled */ 5735 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5736 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5737 } 5738 5739 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ 5740 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, 5741 &bp->sp_state)) { 5742 bnx2x_link_report(bp); 5743 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5744 } 5745} 5746 5747irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 5748{ 5749 struct net_device *dev = dev_instance; 5750 struct bnx2x *bp = netdev_priv(dev); 5751 5752 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, 5753 IGU_INT_DISABLE, 0); 5754 5755#ifdef BNX2X_STOP_ON_ERROR 5756 if (unlikely(bp->panic)) 5757 return IRQ_HANDLED; 5758#endif 5759 5760 if (CNIC_LOADED(bp)) { 5761 struct cnic_ops *c_ops; 5762 5763 rcu_read_lock(); 5764 c_ops = rcu_dereference(bp->cnic_ops); 5765 if (c_ops) 5766 c_ops->cnic_handler(bp->cnic_data, NULL); 5767 rcu_read_unlock(); 5768 } 5769 5770 /* schedule sp task to perform default status block work, ack 5771 * attentions and enable interrupts. 5772 */ 5773 bnx2x_schedule_sp_task(bp); 5774 5775 return IRQ_HANDLED; 5776} 5777 5778/* end of slow path */ 5779 5780void bnx2x_drv_pulse(struct bnx2x *bp) 5781{ 5782 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, 5783 bp->fw_drv_pulse_wr_seq); 5784} 5785 5786static void bnx2x_timer(struct timer_list *t) 5787{ 5788 struct bnx2x *bp = from_timer(bp, t, timer); 5789 5790 if (!netif_running(bp->dev)) 5791 return; 5792 5793 if (IS_PF(bp) && 5794 !BP_NOMCP(bp)) { 5795 int mb_idx = BP_FW_MB_IDX(bp); 5796 u16 drv_pulse; 5797 u16 mcp_pulse; 5798 5799 ++bp->fw_drv_pulse_wr_seq; 5800 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5801 drv_pulse = bp->fw_drv_pulse_wr_seq; 5802 bnx2x_drv_pulse(bp); 5803 5804 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5805 MCP_PULSE_SEQ_MASK); 5806 /* The delta between driver pulse and mcp response 5807 * should not get too big. If the MFW is more than 5 pulses 5808 * behind, we should worry about it enough to generate an error 5809 * log. 
5810 */ 5811 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) 5812 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 5813 drv_pulse, mcp_pulse); 5814 } 5815 5816 if (bp->state == BNX2X_STATE_OPEN) 5817 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); 5818 5819 /* sample pf vf bulletin board for new posts from pf */ 5820 if (IS_VF(bp)) 5821 bnx2x_timer_sriov(bp); 5822 5823 mod_timer(&bp->timer, jiffies + bp->current_interval); 5824} 5825 5826/* end of Statistics */ 5827 5828/* nic init */ 5829 5830/* 5831 * nic init service functions 5832 */ 5833 5834static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) 5835{ 5836 u32 i; 5837 if (!(len%4) && !(addr%4)) 5838 for (i = 0; i < len; i += 4) 5839 REG_WR(bp, addr + i, fill); 5840 else 5841 for (i = 0; i < len; i++) 5842 REG_WR8(bp, addr + i, fill); 5843} 5844 5845/* helper: writes FP SP data to FW - data_size in dwords */ 5846static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, 5847 int fw_sb_id, 5848 u32 *sb_data_p, 5849 u32 data_size) 5850{ 5851 int index; 5852 for (index = 0; index < data_size; index++) 5853 REG_WR(bp, BAR_CSTRORM_INTMEM + 5854 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 5855 sizeof(u32)*index, 5856 *(sb_data_p + index)); 5857} 5858 5859static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) 5860{ 5861 u32 *sb_data_p; 5862 u32 data_size = 0; 5863 struct hc_status_block_data_e2 sb_data_e2; 5864 struct hc_status_block_data_e1x sb_data_e1x; 5865 5866 /* disable the function first */ 5867 if (!CHIP_IS_E1x(bp)) { 5868 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5869 sb_data_e2.common.state = SB_DISABLED; 5870 sb_data_e2.common.p_func.vf_valid = false; 5871 sb_data_p = (u32 *)&sb_data_e2; 5872 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5873 } else { 5874 memset(&sb_data_e1x, 0, 5875 sizeof(struct hc_status_block_data_e1x)); 5876 sb_data_e1x.common.state = SB_DISABLED; 5877 sb_data_e1x.common.p_func.vf_valid = false; 5878 sb_data_p = (u32 *)&sb_data_e1x; 5879 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 5880 } 5881 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 5882 5883 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5884 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, 5885 CSTORM_STATUS_BLOCK_SIZE); 5886 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5887 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, 5888 CSTORM_SYNC_BLOCK_SIZE); 5889} 5890 5891/* helper: writes SP SB data to FW */ 5892static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, 5893 struct hc_sp_status_block_data *sp_sb_data) 5894{ 5895 int func = BP_FUNC(bp); 5896 int i; 5897 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) 5898 REG_WR(bp, BAR_CSTRORM_INTMEM + 5899 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + 5900 i*sizeof(u32), 5901 *((u32 *)sp_sb_data + i)); 5902} 5903 5904static void bnx2x_zero_sp_sb(struct bnx2x *bp) 5905{ 5906 int func = BP_FUNC(bp); 5907 struct hc_sp_status_block_data sp_sb_data; 5908 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 5909 5910 sp_sb_data.state = SB_DISABLED; 5911 sp_sb_data.p_func.vf_valid = false; 5912 5913 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 5914 5915 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5916 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, 5917 CSTORM_SP_STATUS_BLOCK_SIZE); 5918 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + 5919 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, 5920 CSTORM_SP_SYNC_BLOCK_SIZE); 5921} 5922 5923static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 5924 int igu_sb_id, int igu_seg_id) 5925{ 5926 
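	/* Bind this non-default status block state machine to its IGU SB id
	 * and access segment; the timer fields are parked at their maximum
	 * values (0xFF / 0xFFFFFFFF), so no timer expiration is armed here.
	 */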
hc_sm->igu_sb_id = igu_sb_id; 5927 hc_sm->igu_seg_id = igu_seg_id; 5928 hc_sm->timer_value = 0xFF; 5929 hc_sm->time_to_expire = 0xFFFFFFFF; 5930} 5931 5932/* allocates state machine ids. */ 5933static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 5934{ 5935 /* zero out state machine indices */ 5936 /* rx indices */ 5937 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 5938 5939 /* tx indices */ 5940 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 5941 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 5942 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 5943 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 5944 5945 /* map indices */ 5946 /* rx indices */ 5947 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 5948 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5949 5950 /* tx indices */ 5951 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 5952 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5953 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 5954 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5955 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 5956 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5957 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 5958 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 5959} 5960 5961void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 5962 u8 vf_valid, int fw_sb_id, int igu_sb_id) 5963{ 5964 int igu_seg_id; 5965 5966 struct hc_status_block_data_e2 sb_data_e2; 5967 struct hc_status_block_data_e1x sb_data_e1x; 5968 struct hc_status_block_sm *hc_sm_p; 5969 int data_size; 5970 u32 *sb_data_p; 5971 5972 if (CHIP_INT_MODE_IS_BC(bp)) 5973 igu_seg_id = HC_SEG_ACCESS_NORM; 5974 else 5975 igu_seg_id = IGU_SEG_ACCESS_NORM; 5976 5977 bnx2x_zero_fp_sb(bp, fw_sb_id); 5978 5979 if (!CHIP_IS_E1x(bp)) { 5980 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 5981 sb_data_e2.common.state = SB_ENABLED; 5982 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); 5983 sb_data_e2.common.p_func.vf_id = vfid; 5984 sb_data_e2.common.p_func.vf_valid = vf_valid; 5985 sb_data_e2.common.p_func.vnic_id = BP_VN(bp); 5986 sb_data_e2.common.same_igu_sb_1b = true; 5987 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping); 5988 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping); 5989 hc_sm_p = sb_data_e2.common.state_machine; 5990 sb_data_p = (u32 *)&sb_data_e2; 5991 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 5992 bnx2x_map_sb_state_machines(sb_data_e2.index_data); 5993 } else { 5994 memset(&sb_data_e1x, 0, 5995 sizeof(struct hc_status_block_data_e1x)); 5996 sb_data_e1x.common.state = SB_ENABLED; 5997 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); 5998 sb_data_e1x.common.p_func.vf_id = 0xff; 5999 sb_data_e1x.common.p_func.vf_valid = false; 6000 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp); 6001 sb_data_e1x.common.same_igu_sb_1b = true; 6002 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); 6003 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); 6004 hc_sm_p = sb_data_e1x.common.state_machine; 6005 sb_data_p = (u32 *)&sb_data_e1x; 6006 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 6007 bnx2x_map_sb_state_machines(sb_data_e1x.index_data); 6008 } 6009 6010 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], 6011 igu_sb_id, igu_seg_id); 6012 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], 6013 igu_sb_id, igu_seg_id); 6014 6015 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); 6016 6017 /* write indices to HW - PCI guarantees endianity of regpairs */ 
6018 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 6019} 6020 6021static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, 6022 u16 tx_usec, u16 rx_usec) 6023{ 6024 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, 6025 false, rx_usec); 6026 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 6027 HC_INDEX_ETH_TX_CQ_CONS_COS0, false, 6028 tx_usec); 6029 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 6030 HC_INDEX_ETH_TX_CQ_CONS_COS1, false, 6031 tx_usec); 6032 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, 6033 HC_INDEX_ETH_TX_CQ_CONS_COS2, false, 6034 tx_usec); 6035} 6036 6037static void bnx2x_init_def_sb(struct bnx2x *bp) 6038{ 6039 struct host_sp_status_block *def_sb = bp->def_status_blk; 6040 dma_addr_t mapping = bp->def_status_blk_mapping; 6041 int igu_sp_sb_index; 6042 int igu_seg_id; 6043 int port = BP_PORT(bp); 6044 int func = BP_FUNC(bp); 6045 int reg_offset, reg_offset_en5; 6046 u64 section; 6047 int index; 6048 struct hc_sp_status_block_data sp_sb_data; 6049 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 6050 6051 if (CHIP_INT_MODE_IS_BC(bp)) { 6052 igu_sp_sb_index = DEF_SB_IGU_ID; 6053 igu_seg_id = HC_SEG_ACCESS_DEF; 6054 } else { 6055 igu_sp_sb_index = bp->igu_dsb_id; 6056 igu_seg_id = IGU_SEG_ACCESS_DEF; 6057 } 6058 6059 /* ATTN */ 6060 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 6061 atten_status_block); 6062 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 6063 6064 bp->attn_state = 0; 6065 6066 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 6067 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 6068 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 6069 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); 6070 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 6071 int sindex; 6072 /* take care of sig[0]..sig[4] */ 6073 for (sindex = 0; sindex < 4; sindex++) 6074 bp->attn_group[index].sig[sindex] = 6075 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); 6076 6077 if (!CHIP_IS_E1x(bp)) 6078 /* 6079 * enable5 is separate from the rest of the registers, 6080 * and therefore the address skip is 4 6081 * and not 16 between the different groups 6082 */ 6083 bp->attn_group[index].sig[4] = REG_RD(bp, 6084 reg_offset_en5 + 0x4*index); 6085 else 6086 bp->attn_group[index].sig[4] = 0; 6087 } 6088 6089 if (bp->common.int_block == INT_BLOCK_HC) { 6090 reg_offset = (port ? 
HC_REG_ATTN_MSG1_ADDR_L : 6091 HC_REG_ATTN_MSG0_ADDR_L); 6092 6093 REG_WR(bp, reg_offset, U64_LO(section)); 6094 REG_WR(bp, reg_offset + 4, U64_HI(section)); 6095 } else if (!CHIP_IS_E1x(bp)) { 6096 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 6097 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 6098 } 6099 6100 section = ((u64)mapping) + offsetof(struct host_sp_status_block, 6101 sp_sb); 6102 6103 bnx2x_zero_sp_sb(bp); 6104 6105 /* PCI guarantees endianity of regpairs */ 6106 sp_sb_data.state = SB_ENABLED; 6107 sp_sb_data.host_sb_addr.lo = U64_LO(section); 6108 sp_sb_data.host_sb_addr.hi = U64_HI(section); 6109 sp_sb_data.igu_sb_id = igu_sp_sb_index; 6110 sp_sb_data.igu_seg_id = igu_seg_id; 6111 sp_sb_data.p_func.pf_id = func; 6112 sp_sb_data.p_func.vnic_id = BP_VN(bp); 6113 sp_sb_data.p_func.vf_id = 0xff; 6114 6115 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); 6116 6117 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 6118} 6119 6120void bnx2x_update_coalesce(struct bnx2x *bp) 6121{ 6122 int i; 6123 6124 for_each_eth_queue(bp, i) 6125 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, 6126 bp->tx_ticks, bp->rx_ticks); 6127} 6128 6129static void bnx2x_init_sp_ring(struct bnx2x *bp) 6130{ 6131 spin_lock_init(&bp->spq_lock); 6132 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); 6133 6134 bp->spq_prod_idx = 0; 6135 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 6136 bp->spq_prod_bd = bp->spq; 6137 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; 6138} 6139 6140static void bnx2x_init_eq_ring(struct bnx2x *bp) 6141{ 6142 int i; 6143 for (i = 1; i <= NUM_EQ_PAGES; i++) { 6144 union event_ring_elem *elem = 6145 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; 6146 6147 elem->next_page.addr.hi = 6148 cpu_to_le32(U64_HI(bp->eq_mapping + 6149 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); 6150 elem->next_page.addr.lo = 6151 cpu_to_le32(U64_LO(bp->eq_mapping + 6152 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES))); 6153 } 6154 bp->eq_cons = 0; 6155 bp->eq_prod = NUM_EQ_DESC; 6156 bp->eq_cons_sb = BNX2X_EQ_INDEX; 6157 /* we want a warning message before it gets wrought... 
*/ 6158 atomic_set(&bp->eq_spq_left, 6159 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); 6160} 6161 6162/* called with netif_addr_lock_bh() */ 6163static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, 6164 unsigned long rx_mode_flags, 6165 unsigned long rx_accept_flags, 6166 unsigned long tx_accept_flags, 6167 unsigned long ramrod_flags) 6168{ 6169 struct bnx2x_rx_mode_ramrod_params ramrod_param; 6170 int rc; 6171 6172 memset(&ramrod_param, 0, sizeof(ramrod_param)); 6173 6174 /* Prepare ramrod parameters */ 6175 ramrod_param.cid = 0; 6176 ramrod_param.cl_id = cl_id; 6177 ramrod_param.rx_mode_obj = &bp->rx_mode_obj; 6178 ramrod_param.func_id = BP_FUNC(bp); 6179 6180 ramrod_param.pstate = &bp->sp_state; 6181 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; 6182 6183 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); 6184 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); 6185 6186 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 6187 6188 ramrod_param.ramrod_flags = ramrod_flags; 6189 ramrod_param.rx_mode_flags = rx_mode_flags; 6190 6191 ramrod_param.rx_accept_flags = rx_accept_flags; 6192 ramrod_param.tx_accept_flags = tx_accept_flags; 6193 6194 rc = bnx2x_config_rx_mode(bp, &ramrod_param); 6195 if (rc < 0) { 6196 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); 6197 return rc; 6198 } 6199 6200 return 0; 6201} 6202 6203static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, 6204 unsigned long *rx_accept_flags, 6205 unsigned long *tx_accept_flags) 6206{ 6207 /* Clear the flags first */ 6208 *rx_accept_flags = 0; 6209 *tx_accept_flags = 0; 6210 6211 switch (rx_mode) { 6212 case BNX2X_RX_MODE_NONE: 6213 /* 6214 * 'drop all' supersedes any accept flags that may have been 6215 * passed to the function. 6216 */ 6217 break; 6218 case BNX2X_RX_MODE_NORMAL: 6219 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 6220 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags); 6221 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 6222 6223 /* internal switching mode */ 6224 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 6225 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); 6226 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 6227 6228 if (bp->accept_any_vlan) { 6229 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); 6230 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); 6231 } 6232 6233 break; 6234 case BNX2X_RX_MODE_ALLMULTI: 6235 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 6236 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); 6237 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 6238 6239 /* internal switching mode */ 6240 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 6241 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); 6242 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 6243 6244 if (bp->accept_any_vlan) { 6245 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); 6246 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); 6247 } 6248 6249 break; 6250 case BNX2X_RX_MODE_PROMISC: 6251 /* According to definition of SI mode, iface in promisc mode 6252 * should receive matched and unmatched (in resolution of port) 6253 * unicast packets. 
6254 */ 6255 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags); 6256 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); 6257 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); 6258 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); 6259 6260 /* internal switching mode */ 6261 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); 6262 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); 6263 6264 if (IS_MF_SI(bp)) 6265 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags); 6266 else 6267 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); 6268 6269 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); 6270 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); 6271 6272 break; 6273 default: 6274 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); 6275 return -EINVAL; 6276 } 6277 6278 return 0; 6279} 6280 6281/* called with netif_addr_lock_bh() */ 6282static int bnx2x_set_storm_rx_mode(struct bnx2x *bp) 6283{ 6284 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 6285 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 6286 int rc; 6287 6288 if (!NO_FCOE(bp)) 6289 /* Configure rx_mode of FCoE Queue */ 6290 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); 6291 6292 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, 6293 &tx_accept_flags); 6294 if (rc) 6295 return rc; 6296 6297 __set_bit(RAMROD_RX, &ramrod_flags); 6298 __set_bit(RAMROD_TX, &ramrod_flags); 6299 6300 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, 6301 rx_accept_flags, tx_accept_flags, 6302 ramrod_flags); 6303} 6304 6305static void bnx2x_init_internal_common(struct bnx2x *bp) 6306{ 6307 int i; 6308 6309 /* Zero this manually as its initialization is 6310 currently missing in the initTool */ 6311 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 6312 REG_WR(bp, BAR_USTRORM_INTMEM + 6313 USTORM_AGG_DATA_OFFSET + i * 4, 0); 6314 if (!CHIP_IS_E1x(bp)) { 6315 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, 6316 CHIP_INT_MODE_IS_BC(bp) ? 
6317 HC_IGU_BC_MODE : HC_IGU_NBC_MODE); 6318 } 6319} 6320 6321static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) 6322{ 6323 switch (load_code) { 6324 case FW_MSG_CODE_DRV_LOAD_COMMON: 6325 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 6326 bnx2x_init_internal_common(bp); 6327 fallthrough; 6328 6329 case FW_MSG_CODE_DRV_LOAD_PORT: 6330 /* nothing to do */ 6331 fallthrough; 6332 6333 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 6334 /* internal memory per function is 6335 initialized inside bnx2x_pf_init */ 6336 break; 6337 6338 default: 6339 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); 6340 break; 6341 } 6342} 6343 6344static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) 6345{ 6346 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); 6347} 6348 6349static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) 6350{ 6351 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); 6352} 6353 6354static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 6355{ 6356 if (CHIP_IS_E1x(fp->bp)) 6357 return BP_L_ID(fp->bp) + fp->index; 6358 else /* We want Client ID to be the same as IGU SB ID for 57712 */ 6359 return bnx2x_fp_igu_sb_id(fp); 6360} 6361 6362static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) 6363{ 6364 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; 6365 u8 cos; 6366 unsigned long q_type = 0; 6367 u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; 6368 fp->rx_queue = fp_idx; 6369 fp->cid = fp_idx; 6370 fp->cl_id = bnx2x_fp_cl_id(fp); 6371 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); 6372 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); 6373 /* qZone id equals to FW (per path) client id */ 6374 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); 6375 6376 /* init shortcut */ 6377 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); 6378 6379 /* Setup SB indices */ 6380 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 6381 6382 /* Configure Queue State object */ 6383 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 6384 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 6385 6386 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS); 6387 6388 /* init tx data */ 6389 for_each_cos_in_tx_queue(fp, cos) { 6390 bnx2x_init_txdata(bp, fp->txdata_ptr[cos], 6391 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), 6392 FP_COS_TO_TXQ(fp, cos, bp), 6393 BNX2X_TX_SB_INDEX_BASE + cos, fp); 6394 cids[cos] = fp->txdata_ptr[cos]->cid; 6395 } 6396 6397 /* nothing more for vf to do here */ 6398 if (IS_VF(bp)) 6399 return; 6400 6401 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, 6402 fp->fw_sb_id, fp->igu_sb_id); 6403 bnx2x_update_fpsb_idx(fp); 6404 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, 6405 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 6406 bnx2x_sp_mapping(bp, q_rdata), q_type); 6407 6408 /** 6409 * Configure classification DBs: Always enable Tx switching 6410 */ 6411 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); 6412 6413 DP(NETIF_MSG_IFUP, 6414 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", 6415 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 6416 fp->igu_sb_id); 6417} 6418 6419static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) 6420{ 6421 int i; 6422 6423 for (i = 1; i <= NUM_TX_RINGS; i++) { 6424 struct eth_tx_next_bd *tx_next_bd = 6425 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; 6426 6427 tx_next_bd->addr_hi = 6428 cpu_to_le32(U64_HI(txdata->tx_desc_mapping + 6429 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 6430 tx_next_bd->addr_lo = 6431 cpu_to_le32(U64_LO(txdata->tx_desc_mapping + 6432 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 6433 } 6434 
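	/* The loop above chains the TX BD pages into a ring: the last
	 * descriptor of each page is a next_bd entry pointing at the
	 * following page, with page NUM_TX_RINGS - 1 wrapping back to page 0
	 * (the "i % NUM_TX_RINGS" term). Below, the status-block consumer
	 * copy, the doorbell data and the packet/BD producer and consumer
	 * indices are reset so the ring starts empty.
	 */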
6435 *txdata->tx_cons_sb = cpu_to_le16(0); 6436 6437 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 6438 txdata->tx_db.data.zero_fill1 = 0; 6439 txdata->tx_db.data.prod = 0; 6440 6441 txdata->tx_pkt_prod = 0; 6442 txdata->tx_pkt_cons = 0; 6443 txdata->tx_bd_prod = 0; 6444 txdata->tx_bd_cons = 0; 6445 txdata->tx_pkt = 0; 6446} 6447 6448static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp) 6449{ 6450 int i; 6451 6452 for_each_tx_queue_cnic(bp, i) 6453 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); 6454} 6455 6456static void bnx2x_init_tx_rings(struct bnx2x *bp) 6457{ 6458 int i; 6459 u8 cos; 6460 6461 for_each_eth_queue(bp, i) 6462 for_each_cos_in_tx_queue(&bp->fp[i], cos) 6463 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); 6464} 6465 6466static void bnx2x_init_fcoe_fp(struct bnx2x *bp) 6467{ 6468 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 6469 unsigned long q_type = 0; 6470 6471 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); 6472 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, 6473 BNX2X_FCOE_ETH_CL_ID_IDX); 6474 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); 6475 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; 6476 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; 6477 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; 6478 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), 6479 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, 6480 fp); 6481 6482 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); 6483 6484 /* qZone id equals to FW (per path) client id */ 6485 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); 6486 /* init shortcut */ 6487 bnx2x_fcoe(bp, ustorm_rx_prods_offset) = 6488 bnx2x_rx_ustorm_prods_offset(fp); 6489 6490 /* Configure Queue State object */ 6491 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 6492 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 6493 6494 /* No multi-CoS for FCoE L2 client */ 6495 BUG_ON(fp->max_cos != 1); 6496 6497 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, 6498 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 6499 bnx2x_sp_mapping(bp, q_rdata), q_type); 6500 6501 DP(NETIF_MSG_IFUP, 6502 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", 6503 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 6504 fp->igu_sb_id); 6505} 6506 6507void bnx2x_nic_init_cnic(struct bnx2x *bp) 6508{ 6509 if (!NO_FCOE(bp)) 6510 bnx2x_init_fcoe_fp(bp); 6511 6512 bnx2x_init_sb(bp, bp->cnic_sb_mapping, 6513 BNX2X_VF_ID_INVALID, false, 6514 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); 6515 6516 /* ensure status block indices were read */ 6517 rmb(); 6518 bnx2x_init_rx_rings_cnic(bp); 6519 bnx2x_init_tx_rings_cnic(bp); 6520 6521 /* flush all */ 6522 mb(); 6523} 6524 6525void bnx2x_pre_irq_nic_init(struct bnx2x *bp) 6526{ 6527 int i; 6528 6529 /* Setup NIC internals and enable interrupts */ 6530 for_each_eth_queue(bp, i) 6531 bnx2x_init_eth_fp(bp, i); 6532 6533 /* ensure status block indices were read */ 6534 rmb(); 6535 bnx2x_init_rx_rings(bp); 6536 bnx2x_init_tx_rings(bp); 6537 6538 if (IS_PF(bp)) { 6539 /* Initialize MOD_ABS interrupts */ 6540 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 6541 bp->common.shmem_base, 6542 bp->common.shmem2_base, BP_PORT(bp)); 6543 6544 /* initialize the default status block and sp ring */ 6545 bnx2x_init_def_sb(bp); 6546 bnx2x_update_dsb_idx(bp); 6547 bnx2x_init_sp_ring(bp); 6548 } else { 6549 bnx2x_memset_stats(bp); 6550 } 6551} 6552 6553void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code) 6554{ 6555 bnx2x_init_eq_ring(bp); 6556 
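	/* Remaining post-IRQ initialization: program the per-function
	 * internal memories according to the MCP load_code, set up the PF
	 * and statistics state, and only then enable interrupts. The
	 * explicit SPIO5 check that follows picks up a fan-failure
	 * attention that may already be asserted at load time.
	 */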
bnx2x_init_internal(bp, load_code); 6557 bnx2x_pf_init(bp); 6558 bnx2x_stats_init(bp); 6559 6560 /* flush all before enabling interrupts */ 6561 mb(); 6562 6563 bnx2x_int_enable(bp); 6564 6565 /* Check for SPIO5 */ 6566 bnx2x_attn_int_deasserted0(bp, 6567 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & 6568 AEU_INPUTS_ATTN_BITS_SPIO5); 6569} 6570 6571/* gzip service functions */ 6572static int bnx2x_gunzip_init(struct bnx2x *bp) 6573{ 6574 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, 6575 &bp->gunzip_mapping, GFP_KERNEL); 6576 if (bp->gunzip_buf == NULL) 6577 goto gunzip_nomem1; 6578 6579 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); 6580 if (bp->strm == NULL) 6581 goto gunzip_nomem2; 6582 6583 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); 6584 if (bp->strm->workspace == NULL) 6585 goto gunzip_nomem3; 6586 6587 return 0; 6588 6589gunzip_nomem3: 6590 kfree(bp->strm); 6591 bp->strm = NULL; 6592 6593gunzip_nomem2: 6594 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 6595 bp->gunzip_mapping); 6596 bp->gunzip_buf = NULL; 6597 6598gunzip_nomem1: 6599 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); 6600 return -ENOMEM; 6601} 6602 6603static void bnx2x_gunzip_end(struct bnx2x *bp) 6604{ 6605 if (bp->strm) { 6606 vfree(bp->strm->workspace); 6607 kfree(bp->strm); 6608 bp->strm = NULL; 6609 } 6610 6611 if (bp->gunzip_buf) { 6612 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 6613 bp->gunzip_mapping); 6614 bp->gunzip_buf = NULL; 6615 } 6616} 6617 6618static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) 6619{ 6620 int n, rc; 6621 6622 /* check gzip header */ 6623 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { 6624 BNX2X_ERR("Bad gzip header\n"); 6625 return -EINVAL; 6626 } 6627 6628 n = 10; 6629 6630#define FNAME 0x8 6631 6632 if (zbuf[3] & FNAME) 6633 while ((zbuf[n++] != 0) && (n < len)); 6634 6635 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; 6636 bp->strm->avail_in = len - n; 6637 bp->strm->next_out = bp->gunzip_buf; 6638 bp->strm->avail_out = FW_BUF_SIZE; 6639 6640 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); 6641 if (rc != Z_OK) 6642 return rc; 6643 6644 rc = zlib_inflate(bp->strm, Z_FINISH); 6645 if ((rc != Z_OK) && (rc != Z_STREAM_END)) 6646 netdev_err(bp->dev, "Firmware decompression error: %s\n", 6647 bp->strm->msg); 6648 6649 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 6650 if (bp->gunzip_outlen & 0x3) 6651 netdev_err(bp->dev, 6652 "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 6653 bp->gunzip_outlen); 6654 bp->gunzip_outlen >>= 2; 6655 6656 zlib_inflateEnd(bp->strm); 6657 6658 if (rc == Z_STREAM_END) 6659 return 0; 6660 6661 return rc; 6662} 6663 6664/* nic load/unload */ 6665 6666/* 6667 * General service functions 6668 */ 6669 6670/* send a NIG loopback debug packet */ 6671static void bnx2x_lb_pckt(struct bnx2x *bp) 6672{ 6673 u32 wb_write[3]; 6674 6675 /* Ethernet source and destination addresses */ 6676 wb_write[0] = 0x55555555; 6677 wb_write[1] = 0x55555555; 6678 wb_write[2] = 0x20; /* SOP */ 6679 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6680 6681 /* NON-IP protocol */ 6682 wb_write[0] = 0x09000000; 6683 wb_write[1] = 0x55555555; 6684 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 6685 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6686} 6687 6688/* some of the internal memories 6689 * are not directly readable from the driver 6690 * to test them we send debug packets 6691 */ 
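/* Outline of the self test below: the parser's neighbor-block inputs are
 * disabled and the CFC search credit is zeroed, a debug loopback packet is
 * injected via bnx2x_lb_pckt(), and the NIG octet counter (expected 0x10
 * bytes) and the PRS packet counter are polled for the expected values.
 * BRB and PRS are then reset and the exercise is repeated with ten packets
 * before the parser inputs are re-enabled.
 */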
6692static int bnx2x_int_mem_test(struct bnx2x *bp) 6693{ 6694 int factor; 6695 int count, i; 6696 u32 val = 0; 6697 6698 if (CHIP_REV_IS_FPGA(bp)) 6699 factor = 120; 6700 else if (CHIP_REV_IS_EMUL(bp)) 6701 factor = 200; 6702 else 6703 factor = 1; 6704 6705 /* Disable inputs of parser neighbor blocks */ 6706 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 6707 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 6708 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 6709 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 6710 6711 /* Write 0 to parser credits for CFC search request */ 6712 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 6713 6714 /* send Ethernet packet */ 6715 bnx2x_lb_pckt(bp); 6716 6717 /* TODO do i reset NIG statistic? */ 6718 /* Wait until NIG register shows 1 packet of size 0x10 */ 6719 count = 1000 * factor; 6720 while (count) { 6721 6722 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6723 val = *bnx2x_sp(bp, wb_data[0]); 6724 if (val == 0x10) 6725 break; 6726 6727 usleep_range(10000, 20000); 6728 count--; 6729 } 6730 if (val != 0x10) { 6731 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 6732 return -1; 6733 } 6734 6735 /* Wait until PRS register shows 1 packet */ 6736 count = 1000 * factor; 6737 while (count) { 6738 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6739 if (val == 1) 6740 break; 6741 6742 usleep_range(10000, 20000); 6743 count--; 6744 } 6745 if (val != 0x1) { 6746 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6747 return -2; 6748 } 6749 6750 /* Reset and init BRB, PRS */ 6751 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6752 msleep(50); 6753 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6754 msleep(50); 6755 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6756 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6757 6758 DP(NETIF_MSG_HW, "part2\n"); 6759 6760 /* Disable inputs of parser neighbor blocks */ 6761 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 6762 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 6763 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 6764 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 6765 6766 /* Write 0 to parser credits for CFC search request */ 6767 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 6768 6769 /* send 10 Ethernet packets */ 6770 for (i = 0; i < 10; i++) 6771 bnx2x_lb_pckt(bp); 6772 6773 /* Wait until NIG register shows 10 + 1 6774 packets of size 11*0x10 = 0xb0 */ 6775 count = 1000 * factor; 6776 while (count) { 6777 6778 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6779 val = *bnx2x_sp(bp, wb_data[0]); 6780 if (val == 0xb0) 6781 break; 6782 6783 usleep_range(10000, 20000); 6784 count--; 6785 } 6786 if (val != 0xb0) { 6787 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 6788 return -3; 6789 } 6790 6791 /* Wait until PRS register shows 2 packets */ 6792 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6793 if (val != 2) 6794 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6795 6796 /* Write 1 to parser credits for CFC search request */ 6797 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 6798 6799 /* Wait until PRS register shows 3 packets */ 6800 msleep(10 * factor); 6801 /* Wait until NIG register shows 1 packet of size 0x10 */ 6802 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6803 if (val != 3) 6804 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6805 6806 /* clear NIG EOP FIFO */ 6807 for (i = 0; i < 11; i++) 6808 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); 6809 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); 6810 if (val != 1) { 6811 BNX2X_ERR("clear of NIG failed\n"); 6812 return -4; 6813 } 6814 6815 /* Reset and init BRB, PRS, NIG */ 6816 REG_WR(bp, GRCBASE_MISC + 
MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6817 msleep(50); 6818 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6819 msleep(50); 6820 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6821 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6822 if (!CNIC_SUPPORT(bp)) 6823 /* set NIC mode */ 6824 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6825 6826 /* Enable inputs of parser neighbor blocks */ 6827 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 6828 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 6829 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 6830 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); 6831 6832 DP(NETIF_MSG_HW, "done\n"); 6833 6834 return 0; /* OK */ 6835} 6836 6837static void bnx2x_enable_blocks_attention(struct bnx2x *bp) 6838{ 6839 u32 val; 6840 6841 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6842 if (!CHIP_IS_E1x(bp)) 6843 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); 6844 else 6845 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 6846 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6847 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6848 /* 6849 * mask read length error interrupts in brb for parser 6850 * (parsing unit and 'checksum and crc' unit) 6851 * these errors are legal (PU reads fixed length and CAC can cause 6852 * read length error on truncated packets) 6853 */ 6854 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); 6855 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 6856 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 6857 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 6858 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); 6859 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); 6860/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ 6861/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ 6862 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); 6863 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); 6864 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); 6865/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ 6866/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ 6867 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 6868 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); 6869 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); 6870 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 6871/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 6872/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 6873 6874 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 6875 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 6876 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN; 6877 if (!CHIP_IS_E1x(bp)) 6878 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 6879 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED; 6880 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val); 6881 6882 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 6883 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); 6884 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); 6885/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ 6886 6887 if (!CHIP_IS_E1x(bp)) 6888 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 6889 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 6890 6891 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 6892 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 6893/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 6894 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 6895} 6896 6897static void bnx2x_reset_common(struct bnx2x *bp) 6898{ 6899 u32 val = 0x1400; 6900 6901 /* reset_common */ 6902 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6903 0xd3ffff7f); 6904 6905 if (CHIP_IS_E3(bp)) { 6906 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6907 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6908 } 6909 6910 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); 6911} 6912 6913static void bnx2x_setup_dmae(struct bnx2x *bp) 6914{ 6915 bp->dmae_ready = 0; 6916 spin_lock_init(&bp->dmae_lock); 6917} 6918 
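/* The PXP arbiter is programmed from the PCIe Device Control register:
 * the write order comes from the Max Payload Size field (bits 7:5 of
 * PCI_EXP_DEVCTL) and the read order from the Max Read Request Size field
 * (bits 14:12); per the PCIe spec an encoded value n corresponds to
 * 2^(n + 7) bytes, i.e. 0 -> 128B up to 5 -> 4096B. The "mrrs" module
 * parameter, when not -1, overrides the read order for debugging.
 */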
6919static void bnx2x_init_pxp(struct bnx2x *bp) 6920{ 6921 u16 devctl; 6922 int r_order, w_order; 6923 6924 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); 6925 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); 6926 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 6927 if (bp->mrrs == -1) 6928 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); 6929 else { 6930 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); 6931 r_order = bp->mrrs; 6932 } 6933 6934 bnx2x_init_pxp_arb(bp, r_order, w_order); 6935} 6936 6937static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) 6938{ 6939 int is_required; 6940 u32 val; 6941 int port; 6942 6943 if (BP_NOMCP(bp)) 6944 return; 6945 6946 is_required = 0; 6947 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & 6948 SHARED_HW_CFG_FAN_FAILURE_MASK; 6949 6950 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) 6951 is_required = 1; 6952 6953 /* 6954 * The fan failure mechanism is usually related to the PHY type since 6955 * the power consumption of the board is affected by the PHY. Currently, 6956 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 6957 */ 6958 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) 6959 for (port = PORT_0; port < PORT_MAX; port++) { 6960 is_required |= 6961 bnx2x_fan_failure_det_req( 6962 bp, 6963 bp->common.shmem_base, 6964 bp->common.shmem2_base, 6965 port); 6966 } 6967 6968 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); 6969 6970 if (is_required == 0) 6971 return; 6972 6973 /* Fan failure is indicated by SPIO 5 */ 6974 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 6975 6976 /* set to active low mode */ 6977 val = REG_RD(bp, MISC_REG_SPIO_INT); 6978 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 6979 REG_WR(bp, MISC_REG_SPIO_INT, val); 6980 6981 /* enable interrupt to signal the IGU */ 6982 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6983 val |= MISC_SPIO_SPIO5; 6984 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 6985} 6986 6987void bnx2x_pf_disable(struct bnx2x *bp) 6988{ 6989 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 6990 val &= ~IGU_PF_CONF_FUNC_EN; 6991 6992 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 6993 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6994 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); 6995} 6996 6997static void bnx2x__common_init_phy(struct bnx2x *bp) 6998{ 6999 u32 shmem_base[2], shmem2_base[2]; 7000 /* Avoid common init in case MFW supports LFA */ 7001 if (SHMEM2_RD(bp, size) > 7002 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) 7003 return; 7004 shmem_base[0] = bp->common.shmem_base; 7005 shmem2_base[0] = bp->common.shmem2_base; 7006 if (!CHIP_IS_E1x(bp)) { 7007 shmem_base[1] = 7008 SHMEM2_RD(bp, other_shmem_base_addr); 7009 shmem2_base[1] = 7010 SHMEM2_RD(bp, other_shmem2_base_addr); 7011 } 7012 bnx2x_acquire_phy_lock(bp); 7013 bnx2x_common_init_phy(bp, shmem_base, shmem2_base, 7014 bp->common.chip_id); 7015 bnx2x_release_phy_lock(bp); 7016} 7017 7018static void bnx2x_config_endianity(struct bnx2x *bp, u32 val) 7019{ 7020 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val); 7021 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val); 7022 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val); 7023 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val); 7024 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val); 7025 7026 /* make sure this value is 0 */ 7027 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); 7028 7029 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val); 7030 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val); 7031 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val); 7032 REG_WR(bp, 
PXP2_REG_RD_CDURD_SWAP_MODE, val); 7033} 7034 7035static void bnx2x_set_endianity(struct bnx2x *bp) 7036{ 7037#ifdef __BIG_ENDIAN 7038 bnx2x_config_endianity(bp, 1); 7039#else 7040 bnx2x_config_endianity(bp, 0); 7041#endif 7042} 7043 7044static void bnx2x_reset_endianity(struct bnx2x *bp) 7045{ 7046 bnx2x_config_endianity(bp, 0); 7047} 7048 7049/** 7050 * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 7051 * 7052 * @bp: driver handle 7053 */ 7054static int bnx2x_init_hw_common(struct bnx2x *bp) 7055{ 7056 u32 val; 7057 7058 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); 7059 7060 /* 7061 * take the RESET lock to protect undi_unload flow from accessing 7062 * registers while we're resetting the chip 7063 */ 7064 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 7065 7066 bnx2x_reset_common(bp); 7067 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 7068 7069 val = 0xfffc; 7070 if (CHIP_IS_E3(bp)) { 7071 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 7072 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 7073 } 7074 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 7075 7076 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 7077 7078 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 7079 7080 if (!CHIP_IS_E1x(bp)) { 7081 u8 abs_func_id; 7082 7083 /** 7084 * 4-port mode or 2-port mode we need to turn of master-enable 7085 * for everyone, after that, turn it back on for self. 7086 * so, we disregard multi-function or not, and always disable 7087 * for all functions on the given path, this means 0,2,4,6 for 7088 * path 0 and 1,3,5,7 for path 1 7089 */ 7090 for (abs_func_id = BP_PATH(bp); 7091 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { 7092 if (abs_func_id == BP_ABS_FUNC(bp)) { 7093 REG_WR(bp, 7094 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 7095 1); 7096 continue; 7097 } 7098 7099 bnx2x_pretend_func(bp, abs_func_id); 7100 /* clear pf enable */ 7101 bnx2x_pf_disable(bp); 7102 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 7103 } 7104 } 7105 7106 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); 7107 if (CHIP_IS_E1(bp)) { 7108 /* enable HW interrupt from PXP on USDM overflow 7109 bit 16 on INT_MASK_0 */ 7110 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 7111 } 7112 7113 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); 7114 bnx2x_init_pxp(bp); 7115 bnx2x_set_endianity(bp); 7116 bnx2x_ilt_init_page_size(bp, INITOP_SET); 7117 7118 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 7119 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 7120 7121 /* let the HW do it's magic ... */ 7122 msleep(100); 7123 /* finish PXP init */ 7124 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); 7125 if (val != 1) { 7126 BNX2X_ERR("PXP2 CFG failed\n"); 7127 return -EBUSY; 7128 } 7129 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); 7130 if (val != 1) { 7131 BNX2X_ERR("PXP2 RD_INIT failed\n"); 7132 return -EBUSY; 7133 } 7134 7135 /* Timers bug workaround E2 only. We need to set the entire ILT to 7136 * have entries with value "0" and valid bit on. 7137 * This needs to be done by the first PF that is loaded in a path 7138 * (i.e. common phase) 7139 */ 7140 if (!CHIP_IS_E1x(bp)) { 7141/* In E2 there is a bug in the timers block that can cause function 6 / 7 7142 * (i.e. vnic3) to start even if it is marked as "scan-off". 7143 * This occurs when a different function (func2,3) is being marked 7144 * as "scan-off". Real-life scenario for example: if a driver is being 7145 * load-unloaded while func6,7 are down. 
This will cause the timer to access 7146 * the ilt, translate to a logical address and send a request to read/write. 7147 * Since the ilt for the function that is down is not valid, this will cause 7148 * a translation error which is unrecoverable. 7149 * The Workaround is intended to make sure that when this happens nothing fatal 7150 * will occur. The workaround: 7151 * 1. First PF driver which loads on a path will: 7152 * a. After taking the chip out of reset, by using pretend, 7153 * it will write "0" to the following registers of 7154 * the other vnics. 7155 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 7156 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 7157 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 7158 * And for itself it will write '1' to 7159 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 7160 * dmae-operations (writing to pram for example.) 7161 * note: can be done for only function 6,7 but cleaner this 7162 * way. 7163 * b. Write zero+valid to the entire ILT. 7164 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of 7165 * VNIC3 (of that port). The range allocated will be the 7166 * entire ILT. This is needed to prevent ILT range error. 7167 * 2. Any PF driver load flow: 7168 * a. ILT update with the physical addresses of the allocated 7169 * logical pages. 7170 * b. Wait 20msec. - note that this timeout is needed to make 7171 * sure there are no requests in one of the PXP internal 7172 * queues with "old" ILT addresses. 7173 * c. PF enable in the PGLC. 7174 * d. Clear the was_error of the PF in the PGLC. (could have 7175 * occurred while driver was down) 7176 * e. PF enable in the CFC (WEAK + STRONG) 7177 * f. Timers scan enable 7178 * 3. PF driver unload flow: 7179 * a. Clear the Timers scan_en. 7180 * b. Polling for scan_on=0 for that PF. 7181 * c. Clear the PF enable bit in the PXP. 7182 * d. Clear the PF enable in the CFC (WEAK + STRONG) 7183 * e. Write zero+valid to all ILT entries (The valid bit must 7184 * stay set) 7185 * f. If this is VNIC 3 of a port then also init 7186 * first_timers_ilt_entry to zero and last_timers_ilt_entry 7187 * to the last entry in the ILT. 7188 * 7189 * Notes: 7190 * Currently the PF error in the PGLC is non recoverable. 7191 * In the future the there will be a recovery routine for this error. 7192 * Currently attention is masked. 7193 * Having an MCP lock on the load/unload process does not guarantee that 7194 * there is no Timer disable during Func6/7 enable. This is because the 7195 * Timers scan is currently being cleared by the MCP on FLR. 7196 * Step 2.d can be done only for PF6/7 and the driver can also check if 7197 * there is error before clearing it. But the flow above is simpler and 7198 * more general. 7199 * All ILT entries are written by zero+valid and not just PF6/7 7200 * ILT entries since in the future the ILT entries allocation for 7201 * PF-s might be dynamic. 
7202 */ 7203 struct ilt_client_info ilt_cli; 7204 struct bnx2x_ilt ilt; 7205 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 7206 memset(&ilt, 0, sizeof(struct bnx2x_ilt)); 7207 7208 /* initialize dummy TM client */ 7209 ilt_cli.start = 0; 7210 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 7211 ilt_cli.client_num = ILT_CLIENT_TM; 7212 7213 /* Step 1: set zeroes to all ilt page entries with valid bit on 7214 * Step 2: set the timers first/last ilt entry to point 7215 * to the entire range to prevent ILT range error for 3rd/4th 7216 * vnic (this code assumes existence of the vnic) 7217 * 7218 * both steps performed by call to bnx2x_ilt_client_init_op() 7219 * with dummy TM client 7220 * 7221 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 7222 * and his brother are split registers 7223 */ 7224 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6)); 7225 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR); 7226 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 7227 7228 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); 7229 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); 7230 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 7231 } 7232 7233 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); 7234 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); 7235 7236 if (!CHIP_IS_E1x(bp)) { 7237 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : 7238 (CHIP_REV_IS_FPGA(bp) ? 400 : 0); 7239 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); 7240 7241 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); 7242 7243 /* let the HW do it's magic ... */ 7244 do { 7245 msleep(200); 7246 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); 7247 } while (factor-- && (val != 1)); 7248 7249 if (val != 1) { 7250 BNX2X_ERR("ATC_INIT failed\n"); 7251 return -EBUSY; 7252 } 7253 } 7254 7255 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); 7256 7257 bnx2x_iov_init_dmae(bp); 7258 7259 /* clean the DMAE memory */ 7260 bp->dmae_ready = 1; 7261 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); 7262 7263 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); 7264 7265 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); 7266 7267 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); 7268 7269 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); 7270 7271 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); 7272 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); 7273 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); 7274 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); 7275 7276 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); 7277 7278 /* QM queues pointers table */ 7279 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 7280 7281 /* soft reset pulse */ 7282 REG_WR(bp, QM_REG_SOFT_RESET, 1); 7283 REG_WR(bp, QM_REG_SOFT_RESET, 0); 7284 7285 if (CNIC_SUPPORT(bp)) 7286 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 7287 7288 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 7289 7290 if (!CHIP_REV_IS_SLOW(bp)) 7291 /* enable hw interrupt from doorbell Q */ 7292 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 7293 7294 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 7295 7296 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 7297 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 7298 7299 if (!CHIP_IS_E1(bp)) 7300 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); 7301 7302 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { 7303 if (IS_MF_AFEX(bp)) { 7304 /* configure that VNTag and VLAN headers must be 7305 * received in afex mode 7306 */ 7307 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); 7308 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); 7309 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 7310 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 
7311 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); 7312 } else { 7313 /* Bit-map indicating which L2 hdrs may appear 7314 * after the basic Ethernet header 7315 */ 7316 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 7317 bp->path_has_ovlan ? 7 : 6); 7318 } 7319 } 7320 7321 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 7322 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 7323 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); 7324 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); 7325 7326 if (!CHIP_IS_E1x(bp)) { 7327 /* reset VFC memories */ 7328 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 7329 VFC_MEMORIES_RST_REG_CAM_RST | 7330 VFC_MEMORIES_RST_REG_RAM_RST); 7331 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 7332 VFC_MEMORIES_RST_REG_CAM_RST | 7333 VFC_MEMORIES_RST_REG_RAM_RST); 7334 7335 msleep(20); 7336 } 7337 7338 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); 7339 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); 7340 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); 7341 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); 7342 7343 /* sync semi rtc */ 7344 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 7345 0x80000000); 7346 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 7347 0x80000000); 7348 7349 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); 7350 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 7351 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 7352 7353 if (!CHIP_IS_E1x(bp)) { 7354 if (IS_MF_AFEX(bp)) { 7355 /* configure that VNTag and VLAN headers must be 7356 * sent in afex mode 7357 */ 7358 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); 7359 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); 7360 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 7361 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 7362 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); 7363 } else { 7364 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 7365 bp->path_has_ovlan ? 
7 : 6); 7366 } 7367 } 7368 7369 REG_WR(bp, SRC_REG_SOFT_RST, 1); 7370 7371 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); 7372 7373 if (CNIC_SUPPORT(bp)) { 7374 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 7375 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 7376 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); 7377 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); 7378 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); 7379 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 7380 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); 7381 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 7382 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); 7383 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); 7384 } 7385 REG_WR(bp, SRC_REG_SOFT_RST, 0); 7386 7387 if (sizeof(union cdu_context) != 1024) 7388 /* we currently assume that a context is 1024 bytes */ 7389 dev_alert(&bp->pdev->dev, 7390 "please adjust the size of cdu_context(%ld)\n", 7391 (long)sizeof(union cdu_context)); 7392 7393 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); 7394 val = (4 << 24) + (0 << 12) + 1024; 7395 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); 7396 7397 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); 7398 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); 7399 /* enable context validation interrupt from CFC */ 7400 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 7401 7402 /* set the thresholds to prevent CFC/CDU race */ 7403 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 7404 7405 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); 7406 7407 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) 7408 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); 7409 7410 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); 7411 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); 7412 7413 /* Reset PCIE errors for debug */ 7414 REG_WR(bp, 0x2814, 0xffffffff); 7415 REG_WR(bp, 0x3820, 0xffffffff); 7416 7417 if (!CHIP_IS_E1x(bp)) { 7418 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 7419 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 7420 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 7421 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 7422 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 7423 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 7424 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 7425 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 7426 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 7427 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 7428 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 7429 } 7430 7431 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); 7432 if (!CHIP_IS_E1(bp)) { 7433 /* in E3 this done in per-port section */ 7434 if (!CHIP_IS_E3(bp)) 7435 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 7436 } 7437 if (CHIP_IS_E1H(bp)) 7438 /* not applicable for E2 (and above ...) 
*/ 7439 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); 7440 7441 if (CHIP_REV_IS_SLOW(bp)) 7442 msleep(200); 7443 7444 /* finish CFC init */ 7445 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); 7446 if (val != 1) { 7447 BNX2X_ERR("CFC LL_INIT failed\n"); 7448 return -EBUSY; 7449 } 7450 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); 7451 if (val != 1) { 7452 BNX2X_ERR("CFC AC_INIT failed\n"); 7453 return -EBUSY; 7454 } 7455 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 7456 if (val != 1) { 7457 BNX2X_ERR("CFC CAM_INIT failed\n"); 7458 return -EBUSY; 7459 } 7460 REG_WR(bp, CFC_REG_DEBUG0, 0); 7461 7462 if (CHIP_IS_E1(bp)) { 7463 /* read NIG statistic 7464 to see if this is our first up since powerup */ 7465 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 7466 val = *bnx2x_sp(bp, wb_data[0]); 7467 7468 /* do internal memory self test */ 7469 if ((val == 0) && bnx2x_int_mem_test(bp)) { 7470 BNX2X_ERR("internal mem self test failed\n"); 7471 return -EBUSY; 7472 } 7473 } 7474 7475 bnx2x_setup_fan_failure_detection(bp); 7476 7477 /* clear PXP2 attentions */ 7478 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 7479 7480 bnx2x_enable_blocks_attention(bp); 7481 bnx2x_enable_blocks_parity(bp); 7482 7483 if (!BP_NOMCP(bp)) { 7484 if (CHIP_IS_E1x(bp)) 7485 bnx2x__common_init_phy(bp); 7486 } else 7487 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 7488 7489 if (SHMEM2_HAS(bp, netproc_fw_ver)) 7490 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM)); 7491 7492 return 0; 7493} 7494 7495/** 7496 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. 7497 * 7498 * @bp: driver handle 7499 */ 7500static int bnx2x_init_hw_common_chip(struct bnx2x *bp) 7501{ 7502 int rc = bnx2x_init_hw_common(bp); 7503 7504 if (rc) 7505 return rc; 7506 7507 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 7508 if (!BP_NOMCP(bp)) 7509 bnx2x__common_init_phy(bp); 7510 7511 return 0; 7512} 7513 7514static int bnx2x_init_hw_port(struct bnx2x *bp) 7515{ 7516 int port = BP_PORT(bp); 7517 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 7518 u32 low, high; 7519 u32 val, reg; 7520 7521 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 7522 7523 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 7524 7525 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 7526 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7527 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7528 7529 /* Timers bug workaround: disables the pf_master bit in pglue at 7530 * common phase, we need to enable it here before any dmae access are 7531 * attempted. 
Therefore we manually added the enable-master to the 7532 * port phase (it also happens in the function phase) 7533 */ 7534 if (!CHIP_IS_E1x(bp)) 7535 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 7536 7537 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7538 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7539 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7540 bnx2x_init_block(bp, BLOCK_QM, init_phase); 7541 7542 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 7543 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 7544 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 7545 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 7546 7547 /* QM cid (connection) count */ 7548 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); 7549 7550 if (CNIC_SUPPORT(bp)) { 7551 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7552 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 7553 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 7554 } 7555 7556 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7557 7558 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 7559 7560 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { 7561 7562 if (IS_MF(bp)) 7563 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 7564 else if (bp->dev->mtu > 4096) { 7565 if (bp->flags & ONE_PORT_FLAG) 7566 low = 160; 7567 else { 7568 val = bp->dev->mtu; 7569 /* (24*1024 + val*4)/256 */ 7570 low = 96 + (val/64) + 7571 ((val % 64) ? 1 : 0); 7572 } 7573 } else 7574 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 7575 high = low + 56; /* 14*1024/256 */ 7576 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 7577 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 7578 } 7579 7580 if (CHIP_MODE_IS_4_PORT(bp)) 7581 REG_WR(bp, (BP_PORT(bp) ? 7582 BRB1_REG_MAC_GUARANTIED_1 : 7583 BRB1_REG_MAC_GUARANTIED_0), 40); 7584 7585 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7586 if (CHIP_IS_E3B0(bp)) { 7587 if (IS_MF_AFEX(bp)) { 7588 /* configure headers for AFEX mode */ 7589 REG_WR(bp, BP_PORT(bp) ? 7590 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7591 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 7592 REG_WR(bp, BP_PORT(bp) ? 7593 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 7594 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 7595 REG_WR(bp, BP_PORT(bp) ? 7596 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 7597 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 7598 } else { 7599 /* Ovlan exists only if we are in multi-function + 7600 * switch-dependent mode, in switch-independent there 7601 * is no ovlan headers 7602 */ 7603 REG_WR(bp, BP_PORT(bp) ? 7604 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7605 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 7606 (bp->path_has_ovlan ? 
7 : 6)); 7607 } 7608 } 7609 7610 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 7611 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 7612 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 7613 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 7614 7615 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 7616 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 7617 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 7618 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 7619 7620 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 7621 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 7622 7623 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 7624 7625 if (CHIP_IS_E1x(bp)) { 7626 /* configure PBF to work without PAUSE mtu 9000 */ 7627 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 7628 7629 /* update threshold */ 7630 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 7631 /* update init credit */ 7632 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 7633 7634 /* probe changes */ 7635 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 7636 udelay(50); 7637 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 7638 } 7639 7640 if (CNIC_SUPPORT(bp)) 7641 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 7642 7643 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 7644 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 7645 7646 if (CHIP_IS_E1(bp)) { 7647 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7648 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7649 } 7650 bnx2x_init_block(bp, BLOCK_HC, init_phase); 7651 7652 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 7653 7654 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7655 /* init aeu_mask_attn_func_0/1: 7656 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use 7657 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF 7658 * bits 4-7 are used for "per vn group attention" */ 7659 val = IS_MF(bp) ? 0xF7 : 0x7; 7660 /* Enable DCBX attention for all but E1 */ 7661 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 7662 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 7663 7664 /* SCPAD_PARITY should NOT trigger close the gates */ 7665 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; 7666 REG_WR(bp, reg, 7667 REG_RD(bp, reg) & 7668 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7669 7670 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; 7671 REG_WR(bp, reg, 7672 REG_RD(bp, reg) & 7673 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7674 7675 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7676 7677 if (!CHIP_IS_E1x(bp)) { 7678 /* Bit-map indicating which L2 hdrs may appear after the 7679 * basic Ethernet header 7680 */ 7681 if (IS_MF_AFEX(bp)) 7682 REG_WR(bp, BP_PORT(bp) ? 7683 NIG_REG_P1_HDRS_AFTER_BASIC : 7684 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 7685 else 7686 REG_WR(bp, BP_PORT(bp) ? 7687 NIG_REG_P1_HDRS_AFTER_BASIC : 7688 NIG_REG_P0_HDRS_AFTER_BASIC, 7689 IS_MF_SD(bp) ? 7 : 6); 7690 7691 if (CHIP_IS_E3(bp)) 7692 REG_WR(bp, BP_PORT(bp) ? 7693 NIG_REG_LLH1_MF_MODE : 7694 NIG_REG_LLH_MF_MODE, IS_MF(bp)); 7695 } 7696 if (!CHIP_IS_E3(bp)) 7697 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 7698 7699 if (!CHIP_IS_E1(bp)) { 7700 /* 0x2 disable mf_ov, 0x1 enable */ 7701 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 7702 (IS_MF_SD(bp) ? 0x1 : 0x2)); 7703 7704 if (!CHIP_IS_E1x(bp)) { 7705 val = 0; 7706 switch (bp->mf_mode) { 7707 case MULTI_FUNCTION_SD: 7708 val = 1; 7709 break; 7710 case MULTI_FUNCTION_SI: 7711 case MULTI_FUNCTION_AFEX: 7712 val = 2; 7713 break; 7714 } 7715 7716 REG_WR(bp, (BP_PORT(bp) ? 
NIG_REG_LLH1_CLS_TYPE : 7717 NIG_REG_LLH0_CLS_TYPE), val); 7718 } 7719 { 7720 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 7721 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 7722 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 7723 } 7724 } 7725 7726 /* If SPIO5 is set to generate interrupts, enable it for this port */ 7727 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 7728 if (val & MISC_SPIO_SPIO5) { 7729 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 7730 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 7731 val = REG_RD(bp, reg_addr); 7732 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 7733 REG_WR(bp, reg_addr, val); 7734 } 7735 7736 if (CHIP_IS_E3B0(bp)) 7737 bp->flags |= PTP_SUPPORTED; 7738 7739 return 0; 7740} 7741 7742static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 7743{ 7744 int reg; 7745 u32 wb_write[2]; 7746 7747 if (CHIP_IS_E1(bp)) 7748 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 7749 else 7750 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 7751 7752 wb_write[0] = ONCHIP_ADDR1(addr); 7753 wb_write[1] = ONCHIP_ADDR2(addr); 7754 REG_WR_DMAE(bp, reg, wb_write, 2); 7755} 7756 7757void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) 7758{ 7759 u32 data, ctl, cnt = 100; 7760 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 7761 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 7762 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 7763 u32 sb_bit = 1 << (idu_sb_id%32); 7764 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 7765 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 7766 7767 /* Not supported in BC mode */ 7768 if (CHIP_INT_MODE_IS_BC(bp)) 7769 return; 7770 7771 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 7772 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 7773 IGU_REGULAR_CLEANUP_SET | 7774 IGU_REGULAR_BCLEANUP; 7775 7776 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 7777 func_encode << IGU_CTRL_REG_FID_SHIFT | 7778 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 7779 7780 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7781 data, igu_addr_data); 7782 REG_WR(bp, igu_addr_data, data); 7783 barrier(); 7784 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 7785 ctl, igu_addr_ctl); 7786 REG_WR(bp, igu_addr_ctl, ctl); 7787 barrier(); 7788 7789 /* wait for clean up to finish */ 7790 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) 7791 msleep(20); 7792 7793 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { 7794 DP(NETIF_MSG_HW, 7795 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", 7796 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 7797 } 7798} 7799 7800static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) 7801{ 7802 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); 7803} 7804 7805static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) 7806{ 7807 u32 i, base = FUNC_ILT_BASE(func); 7808 for (i = base; i < base + ILT_PER_FUNC; i++) 7809 bnx2x_ilt_wr(bp, i, 0); 7810} 7811 7812static void bnx2x_init_searcher(struct bnx2x *bp) 7813{ 7814 int port = BP_PORT(bp); 7815 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); 7816 /* T1 hash bits value determines the T1 number of entries */ 7817 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 7818} 7819 7820static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) 7821{ 7822 int rc; 7823 struct bnx2x_func_state_params func_params = {NULL}; 7824 struct bnx2x_func_switch_update_params *switch_update_params = 7825 &func_params.params.switch_update; 7826 
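/* Note: this helper issues a single SWITCH_UPDATE ramrod that suspends or
 * resumes Tx switching for the PF. RAMROD_COMP_WAIT (set below) makes the
 * state change synchronous, and RAMROD_RETRY lets the function state
 * machine retry the command if it is busy with a previous transition.
 */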
7827 /* Prepare parameters for function state transitions */ 7828 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 7829 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 7830 7831 func_params.f_obj = &bp->func_obj; 7832 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 7833 7834 /* Function parameters */ 7835 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, 7836 &switch_update_params->changes); 7837 if (suspend) 7838 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, 7839 &switch_update_params->changes); 7840 7841 rc = bnx2x_func_state_change(bp, &func_params); 7842 7843 return rc; 7844} 7845 7846static int bnx2x_reset_nic_mode(struct bnx2x *bp) 7847{ 7848 int rc, i, port = BP_PORT(bp); 7849 int vlan_en = 0, mac_en[NUM_MACS]; 7850 7851 /* Close input from network */ 7852 if (bp->mf_mode == SINGLE_FUNCTION) { 7853 bnx2x_set_rx_filter(&bp->link_params, 0); 7854 } else { 7855 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN : 7856 NIG_REG_LLH0_FUNC_EN); 7857 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : 7858 NIG_REG_LLH0_FUNC_EN, 0); 7859 for (i = 0; i < NUM_MACS; i++) { 7860 mac_en[i] = REG_RD(bp, port ? 7861 (NIG_REG_LLH1_FUNC_MEM_ENABLE + 7862 4 * i) : 7863 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 7864 4 * i)); 7865 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + 7866 4 * i) : 7867 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0); 7868 } 7869 } 7870 7871 /* Close BMC to host */ 7872 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : 7873 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0); 7874 7875 /* Suspend Tx switching to the PF. Completion of this ramrod 7876 * further guarantees that all the packets of that PF / child 7877 * VFs in BRB were processed by the Parser, so it is safe to 7878 * change the NIC_MODE register. 7879 */ 7880 rc = bnx2x_func_switch_update(bp, 1); 7881 if (rc) { 7882 BNX2X_ERR("Can't suspend tx-switching!\n"); 7883 return rc; 7884 } 7885 7886 /* Change NIC_MODE register */ 7887 REG_WR(bp, PRS_REG_NIC_MODE, 0); 7888 7889 /* Open input from network */ 7890 if (bp->mf_mode == SINGLE_FUNCTION) { 7891 bnx2x_set_rx_filter(&bp->link_params, 1); 7892 } else { 7893 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : 7894 NIG_REG_LLH0_FUNC_EN, vlan_en); 7895 for (i = 0; i < NUM_MACS; i++) { 7896 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + 7897 4 * i) : 7898 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 7899 mac_en[i]); 7900 } 7901 } 7902 7903 /* Enable BMC to host */ 7904 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : 7905 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1); 7906 7907 /* Resume Tx switching to the PF */ 7908 rc = bnx2x_func_switch_update(bp, 0); 7909 if (rc) { 7910 BNX2X_ERR("Can't resume tx-switching!\n"); 7911 return rc; 7912 } 7913 7914 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); 7915 return 0; 7916} 7917 7918int bnx2x_init_hw_func_cnic(struct bnx2x *bp) 7919{ 7920 int rc; 7921 7922 bnx2x_ilt_init_op_cnic(bp, INITOP_SET); 7923 7924 if (CONFIGURE_NIC_MODE(bp)) { 7925 /* Configure searcher as part of function hw init */ 7926 bnx2x_init_searcher(bp); 7927 7928 /* Reset NIC mode */ 7929 rc = bnx2x_reset_nic_mode(bp); 7930 if (rc) 7931 BNX2X_ERR("Can't change NIC mode!\n"); 7932 return rc; 7933 } 7934 7935 return 0; 7936} 7937 7938/* previous driver DMAE transaction may have occurred when pre-boot stage ended 7939 * and boot began, or when kdump kernel was loaded. Either case would invalidate 7940 * the addresses of the transaction, resulting in was-error bit set in the pci 7941 * causing all hw-to-host pcie transactions to timeout. 
If this happened we want 7942 * to clear the interrupt which detected this from the pglueb and the was done 7943 * bit 7944 */ 7945static void bnx2x_clean_pglue_errors(struct bnx2x *bp) 7946{ 7947 if (!CHIP_IS_E1x(bp)) 7948 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 7949 1 << BP_ABS_FUNC(bp)); 7950} 7951 7952static int bnx2x_init_hw_func(struct bnx2x *bp) 7953{ 7954 int port = BP_PORT(bp); 7955 int func = BP_FUNC(bp); 7956 int init_phase = PHASE_PF0 + func; 7957 struct bnx2x_ilt *ilt = BP_ILT(bp); 7958 u16 cdu_ilt_start; 7959 u32 addr, val; 7960 u32 main_mem_base, main_mem_size, main_mem_prty_clr; 7961 int i, main_mem_width, rc; 7962 7963 DP(NETIF_MSG_HW, "starting func init func %d\n", func); 7964 7965 /* FLR cleanup - hmmm */ 7966 if (!CHIP_IS_E1x(bp)) { 7967 rc = bnx2x_pf_flr_clnup(bp); 7968 if (rc) { 7969 bnx2x_fw_dump(bp); 7970 return rc; 7971 } 7972 } 7973 7974 /* set MSI reconfigure capability */ 7975 if (bp->common.int_block == INT_BLOCK_HC) { 7976 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 7977 val = REG_RD(bp, addr); 7978 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 7979 REG_WR(bp, addr, val); 7980 } 7981 7982 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7983 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7984 7985 ilt = BP_ILT(bp); 7986 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7987 7988 if (IS_SRIOV(bp)) 7989 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS; 7990 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start); 7991 7992 /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes 7993 * those of the VFs, so start line should be reset 7994 */ 7995 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7996 for (i = 0; i < L2_ILT_LINES(bp); i++) { 7997 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; 7998 ilt->lines[cdu_ilt_start + i].page_mapping = 7999 bp->context[i].cxt_mapping; 8000 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; 8001 } 8002 8003 bnx2x_ilt_init_op(bp, INITOP_SET); 8004 8005 if (!CONFIGURE_NIC_MODE(bp)) { 8006 bnx2x_init_searcher(bp); 8007 REG_WR(bp, PRS_REG_NIC_MODE, 0); 8008 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); 8009 } else { 8010 /* Set NIC mode */ 8011 REG_WR(bp, PRS_REG_NIC_MODE, 1); 8012 DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); 8013 } 8014 8015 if (!CHIP_IS_E1x(bp)) { 8016 u32 pf_conf = IGU_PF_CONF_FUNC_EN; 8017 8018 /* Turn on a single ISR mode in IGU if driver is going to use 8019 * INT#x or MSI 8020 */ 8021 if (!(bp->flags & USING_MSIX_FLAG)) 8022 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 8023 /* 8024 * Timers workaround bug: function init part. 
8025 * Need to wait 20msec after initializing ILT, 8026 * needed to make sure there are no requests in 8027 * one of the PXP internal queues with "old" ILT addresses 8028 */ 8029 msleep(20); 8030 /* 8031 * Master enable - Due to WB DMAE writes performed before this 8032 * register is re-initialized as part of the regular function 8033 * init 8034 */ 8035 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 8036 /* Enable the function in IGU */ 8037 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); 8038 } 8039 8040 bp->dmae_ready = 1; 8041 8042 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 8043 8044 bnx2x_clean_pglue_errors(bp); 8045 8046 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 8047 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 8048 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 8049 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 8050 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 8051 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 8052 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 8053 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 8054 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 8055 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 8056 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 8057 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 8058 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 8059 8060 if (!CHIP_IS_E1x(bp)) 8061 REG_WR(bp, QM_REG_PF_EN, 1); 8062 8063 if (!CHIP_IS_E1x(bp)) { 8064 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 8065 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 8066 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 8067 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); 8068 } 8069 bnx2x_init_block(bp, BLOCK_QM, init_phase); 8070 8071 bnx2x_init_block(bp, BLOCK_TM, init_phase); 8072 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 8073 REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */ 8074 8075 bnx2x_iov_init_dq(bp); 8076 8077 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 8078 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 8079 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 8080 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 8081 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 8082 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 8083 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 8084 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 8085 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 8086 if (!CHIP_IS_E1x(bp)) 8087 REG_WR(bp, PBF_REG_DISABLE_PF, 0); 8088 8089 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 8090 8091 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 8092 8093 if (!CHIP_IS_E1x(bp)) 8094 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); 8095 8096 if (IS_MF(bp)) { 8097 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) { 8098 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); 8099 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, 8100 bp->mf_ov); 8101 } 8102 } 8103 8104 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 8105 8106 /* HC init per function */ 8107 if (bp->common.int_block == INT_BLOCK_HC) { 8108 if (CHIP_IS_E1H(bp)) { 8109 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8110 8111 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 8112 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 8113 } 8114 bnx2x_init_block(bp, BLOCK_HC, init_phase); 8115 8116 } else { 8117 int num_segs, sb_idx, prod_offset; 8118 8119 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8120 8121 if (!CHIP_IS_E1x(bp)) { 8122 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 8123 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 8124 } 8125 8126 
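/* IGU (non-HC) per-function init. On non-E1x chips the block init below is
 * followed by zeroing the producer memory of every status block owned by
 * this function and issuing a cleanup command per SB - see the
 * non-default and default SB loops that follow.
 */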
bnx2x_init_block(bp, BLOCK_IGU, init_phase); 8127 8128 if (!CHIP_IS_E1x(bp)) { 8129 int dsb_idx = 0; 8130 /** 8131 * Producer memory: 8132 * E2 mode: address 0-135 match to the mapping memory; 8133 * 136 - PF0 default prod; 137 - PF1 default prod; 8134 * 138 - PF2 default prod; 139 - PF3 default prod; 8135 * 140 - PF0 attn prod; 141 - PF1 attn prod; 8136 * 142 - PF2 attn prod; 143 - PF3 attn prod; 8137 * 144-147 reserved. 8138 * 8139 * E1.5 mode - In backward compatible mode; 8140 * for non default SB; each even line in the memory 8141 * holds the U producer and each odd line hold 8142 * the C producer. The first 128 producers are for 8143 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 8144 * producers are for the DSB for each PF. 8145 * Each PF has five segments: (the order inside each 8146 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 8147 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 8148 * 144-147 attn prods; 8149 */ 8150 /* non-default-status-blocks */ 8151 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 8152 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 8153 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { 8154 prod_offset = (bp->igu_base_sb + sb_idx) * 8155 num_segs; 8156 8157 for (i = 0; i < num_segs; i++) { 8158 addr = IGU_REG_PROD_CONS_MEMORY + 8159 (prod_offset + i) * 4; 8160 REG_WR(bp, addr, 0); 8161 } 8162 /* send consumer update with value 0 */ 8163 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, 8164 USTORM_ID, 0, IGU_INT_NOP, 1); 8165 bnx2x_igu_clear_sb(bp, 8166 bp->igu_base_sb + sb_idx); 8167 } 8168 8169 /* default-status-blocks */ 8170 num_segs = CHIP_INT_MODE_IS_BC(bp) ? 8171 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 8172 8173 if (CHIP_MODE_IS_4_PORT(bp)) 8174 dsb_idx = BP_FUNC(bp); 8175 else 8176 dsb_idx = BP_VN(bp); 8177 8178 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 8179 IGU_BC_BASE_DSB_PROD + dsb_idx : 8180 IGU_NORM_BASE_DSB_PROD + dsb_idx); 8181 8182 /* 8183 * igu prods come in chunks of E1HVN_MAX (4) - 8184 * does not matters what is the current chip mode 8185 */ 8186 for (i = 0; i < (num_segs * E1HVN_MAX); 8187 i += E1HVN_MAX) { 8188 addr = IGU_REG_PROD_CONS_MEMORY + 8189 (prod_offset + i)*4; 8190 REG_WR(bp, addr, 0); 8191 } 8192 /* send consumer update with 0 */ 8193 if (CHIP_INT_MODE_IS_BC(bp)) { 8194 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8195 USTORM_ID, 0, IGU_INT_NOP, 1); 8196 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8197 CSTORM_ID, 0, IGU_INT_NOP, 1); 8198 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8199 XSTORM_ID, 0, IGU_INT_NOP, 1); 8200 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8201 TSTORM_ID, 0, IGU_INT_NOP, 1); 8202 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8203 ATTENTION_ID, 0, IGU_INT_NOP, 1); 8204 } else { 8205 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8206 USTORM_ID, 0, IGU_INT_NOP, 1); 8207 bnx2x_ack_sb(bp, bp->igu_dsb_id, 8208 ATTENTION_ID, 0, IGU_INT_NOP, 1); 8209 } 8210 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); 8211 8212 /* !!! 
These should become driver const once 8213 rf-tool supports split-68 const */ 8214 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 8215 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 8216 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); 8217 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); 8218 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); 8219 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); 8220 } 8221 } 8222 8223 /* Reset PCIE errors for debug */ 8224 REG_WR(bp, 0x2114, 0xffffffff); 8225 REG_WR(bp, 0x2120, 0xffffffff); 8226 8227 if (CHIP_IS_E1x(bp)) { 8228 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 8229 main_mem_base = HC_REG_MAIN_MEMORY + 8230 BP_PORT(bp) * (main_mem_size * 4); 8231 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 8232 main_mem_width = 8; 8233 8234 val = REG_RD(bp, main_mem_prty_clr); 8235 if (val) 8236 DP(NETIF_MSG_HW, 8237 "Hmmm... Parity errors in HC block during function init (0x%x)!\n", 8238 val); 8239 8240 /* Clear "false" parity errors in MSI-X table */ 8241 for (i = main_mem_base; 8242 i < main_mem_base + main_mem_size * 4; 8243 i += main_mem_width) { 8244 bnx2x_read_dmae(bp, i, main_mem_width / 4); 8245 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), 8246 i, main_mem_width / 4); 8247 } 8248 /* Clear HC parity attention */ 8249 REG_RD(bp, main_mem_prty_clr); 8250 } 8251 8252#ifdef BNX2X_STOP_ON_ERROR 8253 /* Enable STORMs SP logging */ 8254 REG_WR8(bp, BAR_USTRORM_INTMEM + 8255 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8256 REG_WR8(bp, BAR_TSTRORM_INTMEM + 8257 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8258 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8259 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8260 REG_WR8(bp, BAR_XSTRORM_INTMEM + 8261 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); 8262#endif 8263 8264 bnx2x_phy_probe(&bp->link_params); 8265 8266 return 0; 8267} 8268 8269void bnx2x_free_mem_cnic(struct bnx2x *bp) 8270{ 8271 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); 8272 8273 if (!CHIP_IS_E1x(bp)) 8274 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, 8275 sizeof(struct host_hc_status_block_e2)); 8276 else 8277 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, 8278 sizeof(struct host_hc_status_block_e1x)); 8279 8280 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 8281} 8282 8283void bnx2x_free_mem(struct bnx2x *bp) 8284{ 8285 int i; 8286 8287 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 8288 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 8289 8290 if (IS_VF(bp)) 8291 return; 8292 8293 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 8294 sizeof(struct host_sp_status_block)); 8295 8296 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 8297 sizeof(struct bnx2x_slowpath)); 8298 8299 for (i = 0; i < L2_ILT_LINES(bp); i++) 8300 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, 8301 bp->context[i].size); 8302 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); 8303 8304 BNX2X_FREE(bp->ilt->lines); 8305 8306 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 8307 8308 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 8309 BCM_PAGE_SIZE * NUM_EQ_PAGES); 8310 8311 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); 8312 8313 bnx2x_iov_free_mem(bp); 8314} 8315 8316int bnx2x_alloc_mem_cnic(struct bnx2x *bp) 8317{ 8318 if (!CHIP_IS_E1x(bp)) { 8319 /* size = the status block + ramrod buffers */ 8320 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, 8321 sizeof(struct host_hc_status_block_e2)); 8322 if (!bp->cnic_sb.e2_sb) 8323 goto alloc_mem_err; 8324 } else { 8325 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, 8326 sizeof(struct 
host_hc_status_block_e1x)); 8327 if (!bp->cnic_sb.e1x_sb) 8328 goto alloc_mem_err; 8329 } 8330 8331 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { 8332 /* allocate searcher T2 table, as it wasn't allocated before */ 8333 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); 8334 if (!bp->t2) 8335 goto alloc_mem_err; 8336 } 8337 8338 /* write address to which L5 should insert its values */ 8339 bp->cnic_eth_dev.addr_drv_info_to_mcp = 8340 &bp->slowpath->drv_info_to_mcp; 8341 8342 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC)) 8343 goto alloc_mem_err; 8344 8345 return 0; 8346 8347alloc_mem_err: 8348 bnx2x_free_mem_cnic(bp); 8349 BNX2X_ERR("Can't allocate memory\n"); 8350 return -ENOMEM; 8351} 8352 8353int bnx2x_alloc_mem(struct bnx2x *bp) 8354{ 8355 int i, allocated, context_size; 8356 8357 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { 8358 /* allocate searcher T2 table */ 8359 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); 8360 if (!bp->t2) 8361 goto alloc_mem_err; 8362 } 8363 8364 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, 8365 sizeof(struct host_sp_status_block)); 8366 if (!bp->def_status_blk) 8367 goto alloc_mem_err; 8368 8369 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, 8370 sizeof(struct bnx2x_slowpath)); 8371 if (!bp->slowpath) 8372 goto alloc_mem_err; 8373 8374 /* Allocate memory for CDU context: 8375 * This memory is allocated separately and not in the generic ILT 8376 * functions because CDU differs in few aspects: 8377 * 1. There are multiple entities allocating memory for context - 8378 * 'regular' driver, CNIC and SRIOV driver. Each separately controls 8379 * its own ILT lines. 8380 * 2. Since CDU page-size is not a single 4KB page (which is the case 8381 * for the other ILT clients), to be efficient we want to support 8382 * allocation of sub-page-size in the last entry. 8383 * 3. Context pointers are used by the driver to pass to FW / update 8384 * the context (for the other ILT clients the pointers are used just to 8385 * free the memory during unload). 
8386 */ 8387 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); 8388 8389 for (i = 0, allocated = 0; allocated < context_size; i++) { 8390 bp->context[i].size = min(CDU_ILT_PAGE_SZ, 8391 (context_size - allocated)); 8392 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, 8393 bp->context[i].size); 8394 if (!bp->context[i].vcxt) 8395 goto alloc_mem_err; 8396 allocated += bp->context[i].size; 8397 } 8398 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), 8399 GFP_KERNEL); 8400 if (!bp->ilt->lines) 8401 goto alloc_mem_err; 8402 8403 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 8404 goto alloc_mem_err; 8405 8406 if (bnx2x_iov_alloc_mem(bp)) 8407 goto alloc_mem_err; 8408 8409 /* Slow path ring */ 8410 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); 8411 if (!bp->spq) 8412 goto alloc_mem_err; 8413 8414 /* EQ */ 8415 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, 8416 BCM_PAGE_SIZE * NUM_EQ_PAGES); 8417 if (!bp->eq_ring) 8418 goto alloc_mem_err; 8419 8420 return 0; 8421 8422alloc_mem_err: 8423 bnx2x_free_mem(bp); 8424 BNX2X_ERR("Can't allocate memory\n"); 8425 return -ENOMEM; 8426} 8427 8428/* 8429 * Init service functions 8430 */ 8431 8432int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, 8433 struct bnx2x_vlan_mac_obj *obj, bool set, 8434 int mac_type, unsigned long *ramrod_flags) 8435{ 8436 int rc; 8437 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 8438 8439 memset(&ramrod_param, 0, sizeof(ramrod_param)); 8440 8441 /* Fill general parameters */ 8442 ramrod_param.vlan_mac_obj = obj; 8443 ramrod_param.ramrod_flags = *ramrod_flags; 8444 8445 /* Fill a user request section if needed */ 8446 if (!test_bit(RAMROD_CONT, ramrod_flags)) { 8447 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 8448 8449 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 8450 8451 /* Set the command: ADD or DEL */ 8452 if (set) 8453 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 8454 else 8455 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; 8456 } 8457 8458 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 8459 8460 if (rc == -EEXIST) { 8461 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); 8462 /* do not treat adding same MAC as error */ 8463 rc = 0; 8464 } else if (rc < 0) 8465 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del")); 8466 8467 return rc; 8468} 8469 8470int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, 8471 struct bnx2x_vlan_mac_obj *obj, bool set, 8472 unsigned long *ramrod_flags) 8473{ 8474 int rc; 8475 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 8476 8477 memset(&ramrod_param, 0, sizeof(ramrod_param)); 8478 8479 /* Fill general parameters */ 8480 ramrod_param.vlan_mac_obj = obj; 8481 ramrod_param.ramrod_flags = *ramrod_flags; 8482 8483 /* Fill a user request section if needed */ 8484 if (!test_bit(RAMROD_CONT, ramrod_flags)) { 8485 ramrod_param.user_req.u.vlan.vlan = vlan; 8486 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags); 8487 /* Set the command: ADD or DEL */ 8488 if (set) 8489 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 8490 else 8491 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; 8492 } 8493 8494 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 8495 8496 if (rc == -EEXIST) { 8497 /* Do not treat adding same vlan as error. */ 8498 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); 8499 rc = 0; 8500 } else if (rc < 0) { 8501 BNX2X_ERR("%s VLAN failed\n", (set ? 
"Set" : "Del")); 8502 } 8503 8504 return rc; 8505} 8506 8507void bnx2x_clear_vlan_info(struct bnx2x *bp) 8508{ 8509 struct bnx2x_vlan_entry *vlan; 8510 8511 /* Mark that hw forgot all entries */ 8512 list_for_each_entry(vlan, &bp->vlan_reg, link) 8513 vlan->hw = false; 8514 8515 bp->vlan_cnt = 0; 8516} 8517 8518static int bnx2x_del_all_vlans(struct bnx2x *bp) 8519{ 8520 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; 8521 unsigned long ramrod_flags = 0, vlan_flags = 0; 8522 int rc; 8523 8524 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8525 __set_bit(BNX2X_VLAN, &vlan_flags); 8526 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags); 8527 if (rc) 8528 return rc; 8529 8530 bnx2x_clear_vlan_info(bp); 8531 8532 return 0; 8533} 8534 8535int bnx2x_del_all_macs(struct bnx2x *bp, 8536 struct bnx2x_vlan_mac_obj *mac_obj, 8537 int mac_type, bool wait_for_comp) 8538{ 8539 int rc; 8540 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 8541 8542 /* Wait for completion of requested */ 8543 if (wait_for_comp) 8544 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8545 8546 /* Set the mac type of addresses we want to clear */ 8547 __set_bit(mac_type, &vlan_mac_flags); 8548 8549 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); 8550 if (rc < 0) 8551 BNX2X_ERR("Failed to delete MACs: %d\n", rc); 8552 8553 return rc; 8554} 8555 8556int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) 8557{ 8558 if (IS_PF(bp)) { 8559 unsigned long ramrod_flags = 0; 8560 8561 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); 8562 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8563 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, 8564 &bp->sp_objs->mac_obj, set, 8565 BNX2X_ETH_MAC, &ramrod_flags); 8566 } else { /* vf */ 8567 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, 8568 bp->fp->index, set); 8569 } 8570} 8571 8572int bnx2x_setup_leading(struct bnx2x *bp) 8573{ 8574 if (IS_PF(bp)) 8575 return bnx2x_setup_queue(bp, &bp->fp[0], true); 8576 else /* VF */ 8577 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); 8578} 8579 8580/** 8581 * bnx2x_set_int_mode - configure interrupt mode 8582 * 8583 * @bp: driver handle 8584 * 8585 * In case of MSI-X it will also try to enable MSI-X. 
8586 */ 8587int bnx2x_set_int_mode(struct bnx2x *bp) 8588{ 8589 int rc = 0; 8590 8591 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { 8592 BNX2X_ERR("VF not loaded since interrupt mode not msix\n"); 8593 return -EINVAL; 8594 } 8595 8596 switch (int_mode) { 8597 case BNX2X_INT_MODE_MSIX: 8598 /* attempt to enable msix */ 8599 rc = bnx2x_enable_msix(bp); 8600 8601 /* msix attained */ 8602 if (!rc) 8603 return 0; 8604 8605 /* vfs use only msix */ 8606 if (rc && IS_VF(bp)) 8607 return rc; 8608 8609 /* failed to enable multiple MSI-X */ 8610 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", 8611 bp->num_queues, 8612 1 + bp->num_cnic_queues); 8613 8614 fallthrough; 8615 case BNX2X_INT_MODE_MSI: 8616 bnx2x_enable_msi(bp); 8617 8618 fallthrough; 8619 case BNX2X_INT_MODE_INTX: 8620 bp->num_ethernet_queues = 1; 8621 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; 8622 BNX2X_DEV_INFO("set number of queues to 1\n"); 8623 break; 8624 default: 8625 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n"); 8626 return -EINVAL; 8627 } 8628 return 0; 8629} 8630 8631/* must be called prior to any HW initializations */ 8632static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) 8633{ 8634 if (IS_SRIOV(bp)) 8635 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS; 8636 return L2_ILT_LINES(bp); 8637} 8638 8639void bnx2x_ilt_set_info(struct bnx2x *bp) 8640{ 8641 struct ilt_client_info *ilt_client; 8642 struct bnx2x_ilt *ilt = BP_ILT(bp); 8643 u16 line = 0; 8644 8645 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); 8646 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); 8647 8648 /* CDU */ 8649 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 8650 ilt_client->client_num = ILT_CLIENT_CDU; 8651 ilt_client->page_size = CDU_ILT_PAGE_SZ; 8652 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 8653 ilt_client->start = line; 8654 line += bnx2x_cid_ilt_lines(bp); 8655 8656 if (CNIC_SUPPORT(bp)) 8657 line += CNIC_ILT_LINES; 8658 ilt_client->end = line - 1; 8659 8660 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8661 ilt_client->start, 8662 ilt_client->end, 8663 ilt_client->page_size, 8664 ilt_client->flags, 8665 ilog2(ilt_client->page_size >> 12)); 8666 8667 /* QM */ 8668 if (QM_INIT(bp->qm_cid_count)) { 8669 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 8670 ilt_client->client_num = ILT_CLIENT_QM; 8671 ilt_client->page_size = QM_ILT_PAGE_SZ; 8672 ilt_client->flags = 0; 8673 ilt_client->start = line; 8674 8675 /* 4 bytes for each cid */ 8676 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 8677 QM_ILT_PAGE_SZ); 8678 8679 ilt_client->end = line - 1; 8680 8681 DP(NETIF_MSG_IFUP, 8682 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8683 ilt_client->start, 8684 ilt_client->end, 8685 ilt_client->page_size, 8686 ilt_client->flags, 8687 ilog2(ilt_client->page_size >> 12)); 8688 } 8689 8690 if (CNIC_SUPPORT(bp)) { 8691 /* SRC */ 8692 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 8693 ilt_client->client_num = ILT_CLIENT_SRC; 8694 ilt_client->page_size = SRC_ILT_PAGE_SZ; 8695 ilt_client->flags = 0; 8696 ilt_client->start = line; 8697 line += SRC_ILT_LINES; 8698 ilt_client->end = line - 1; 8699 8700 DP(NETIF_MSG_IFUP, 8701 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8702 ilt_client->start, 8703 ilt_client->end, 8704 ilt_client->page_size, 8705 ilt_client->flags, 8706 ilog2(ilt_client->page_size >> 12)); 8707 8708 /* TM */ 8709 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 
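/* The TM client continues the running 'line' counter, so its ILT range
 * starts right after the SRC client's last line.
 */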
8710 ilt_client->client_num = ILT_CLIENT_TM; 8711 ilt_client->page_size = TM_ILT_PAGE_SZ; 8712 ilt_client->flags = 0; 8713 ilt_client->start = line; 8714 line += TM_ILT_LINES; 8715 ilt_client->end = line - 1; 8716 8717 DP(NETIF_MSG_IFUP, 8718 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 8719 ilt_client->start, 8720 ilt_client->end, 8721 ilt_client->page_size, 8722 ilt_client->flags, 8723 ilog2(ilt_client->page_size >> 12)); 8724 } 8725 8726 BUG_ON(line > ILT_MAX_LINES); 8727} 8728 8729/** 8730 * bnx2x_pf_q_prep_init - prepare INIT transition parameters 8731 * 8732 * @bp: driver handle 8733 * @fp: pointer to fastpath 8734 * @init_params: pointer to parameters structure 8735 * 8736 * parameters configured: 8737 * - HC configuration 8738 * - Queue's CDU context 8739 */ 8740static void bnx2x_pf_q_prep_init(struct bnx2x *bp, 8741 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) 8742{ 8743 u8 cos; 8744 int cxt_index, cxt_offset; 8745 8746 /* FCoE Queue uses Default SB, thus has no HC capabilities */ 8747 if (!IS_FCOE_FP(fp)) { 8748 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); 8749 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); 8750 8751 /* If HC is supported, enable host coalescing in the transition 8752 * to INIT state. 8753 */ 8754 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); 8755 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); 8756 8757 /* HC rate */ 8758 init_params->rx.hc_rate = bp->rx_ticks ? 8759 (1000000 / bp->rx_ticks) : 0; 8760 init_params->tx.hc_rate = bp->tx_ticks ? 8761 (1000000 / bp->tx_ticks) : 0; 8762 8763 /* FW SB ID */ 8764 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = 8765 fp->fw_sb_id; 8766 8767 /* 8768 * CQ index among the SB indices: FCoE clients uses the default 8769 * SB, therefore it's different. 
8770 */ 8771 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 8772 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 8773 } 8774 8775 /* set maximum number of COSs supported by this queue */ 8776 init_params->max_cos = fp->max_cos; 8777 8778 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", 8779 fp->index, init_params->max_cos); 8780 8781 /* set the context pointers queue object */ 8782 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 8783 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; 8784 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * 8785 ILT_PAGE_CIDS); 8786 init_params->cxts[cos] = 8787 &bp->context[cxt_index].vcxt[cxt_offset].eth; 8788 } 8789} 8790 8791static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, 8792 struct bnx2x_queue_state_params *q_params, 8793 struct bnx2x_queue_setup_tx_only_params *tx_only_params, 8794 int tx_index, bool leading) 8795{ 8796 memset(tx_only_params, 0, sizeof(*tx_only_params)); 8797 8798 /* Set the command */ 8799 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; 8800 8801 /* Set tx-only QUEUE flags: don't zero statistics */ 8802 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); 8803 8804 /* choose the index of the cid to send the slow path on */ 8805 tx_only_params->cid_index = tx_index; 8806 8807 /* Set general TX_ONLY_SETUP parameters */ 8808 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); 8809 8810 /* Set Tx TX_ONLY_SETUP parameters */ 8811 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); 8812 8813 DP(NETIF_MSG_IFUP, 8814 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n", 8815 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], 8816 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, 8817 tx_only_params->gen_params.spcl_id, tx_only_params->flags); 8818 8819 /* send the ramrod */ 8820 return bnx2x_queue_state_change(bp, q_params); 8821} 8822 8823/** 8824 * bnx2x_setup_queue - setup queue 8825 * 8826 * @bp: driver handle 8827 * @fp: pointer to fastpath 8828 * @leading: is leading 8829 * 8830 * This function performs 2 steps in a Queue state machine 8831 * actually: 1) RESET->INIT 2) INIT->SETUP 8832 */ 8833 8834int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, 8835 bool leading) 8836{ 8837 struct bnx2x_queue_state_params q_params = {NULL}; 8838 struct bnx2x_queue_setup_params *setup_params = 8839 &q_params.params.setup; 8840 struct bnx2x_queue_setup_tx_only_params *tx_only_params = 8841 &q_params.params.tx_only; 8842 int rc; 8843 u8 tx_index; 8844 8845 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); 8846 8847 /* reset IGU state skip FCoE L2 queue */ 8848 if (!IS_FCOE_FP(fp)) 8849 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, 8850 IGU_INT_ENABLE, 0); 8851 8852 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 8853 /* We want to wait for completion in this context */ 8854 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 8855 8856 /* Prepare the INIT parameters */ 8857 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); 8858 8859 /* Set the command */ 8860 q_params.cmd = BNX2X_Q_CMD_INIT; 8861 8862 /* Change the state to INIT */ 8863 rc = bnx2x_queue_state_change(bp, &q_params); 8864 if (rc) { 8865 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); 8866 return rc; 8867 } 8868 8869 DP(NETIF_MSG_IFUP, "init complete\n"); 8870 8871 /* Now move the Queue to the SETUP state... 
*/ 8872 memset(setup_params, 0, sizeof(*setup_params)); 8873 8874 /* Set QUEUE flags */ 8875 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); 8876 8877 /* Set general SETUP parameters */ 8878 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, 8879 FIRST_TX_COS_INDEX); 8880 8881 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, 8882 &setup_params->rxq_params); 8883 8884 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, 8885 FIRST_TX_COS_INDEX); 8886 8887 /* Set the command */ 8888 q_params.cmd = BNX2X_Q_CMD_SETUP; 8889 8890 if (IS_FCOE_FP(fp)) 8891 bp->fcoe_init = true; 8892 8893 /* Change the state to SETUP */ 8894 rc = bnx2x_queue_state_change(bp, &q_params); 8895 if (rc) { 8896 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); 8897 return rc; 8898 } 8899 8900 /* loop through the relevant tx-only indices */ 8901 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 8902 tx_index < fp->max_cos; 8903 tx_index++) { 8904 8905 /* prepare and send tx-only ramrod*/ 8906 rc = bnx2x_setup_tx_only(bp, fp, &q_params, 8907 tx_only_params, tx_index, leading); 8908 if (rc) { 8909 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n", 8910 fp->index, tx_index); 8911 return rc; 8912 } 8913 } 8914 8915 return rc; 8916} 8917 8918static int bnx2x_stop_queue(struct bnx2x *bp, int index) 8919{ 8920 struct bnx2x_fastpath *fp = &bp->fp[index]; 8921 struct bnx2x_fp_txdata *txdata; 8922 struct bnx2x_queue_state_params q_params = {NULL}; 8923 int rc, tx_index; 8924 8925 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); 8926 8927 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 8928 /* We want to wait for completion in this context */ 8929 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 8930 8931 /* close tx-only connections */ 8932 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 8933 tx_index < fp->max_cos; 8934 tx_index++){ 8935 8936 /* ascertain this is a normal queue*/ 8937 txdata = fp->txdata_ptr[tx_index]; 8938 8939 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", 8940 txdata->txq_index); 8941 8942 /* send halt terminate on tx-only connection */ 8943 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 8944 memset(&q_params.params.terminate, 0, 8945 sizeof(q_params.params.terminate)); 8946 q_params.params.terminate.cid_index = tx_index; 8947 8948 rc = bnx2x_queue_state_change(bp, &q_params); 8949 if (rc) 8950 return rc; 8951 8952 /* send halt terminate on tx-only connection */ 8953 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8954 memset(&q_params.params.cfc_del, 0, 8955 sizeof(q_params.params.cfc_del)); 8956 q_params.params.cfc_del.cid_index = tx_index; 8957 rc = bnx2x_queue_state_change(bp, &q_params); 8958 if (rc) 8959 return rc; 8960 } 8961 /* Stop the primary connection: */ 8962 /* ...halt the connection */ 8963 q_params.cmd = BNX2X_Q_CMD_HALT; 8964 rc = bnx2x_queue_state_change(bp, &q_params); 8965 if (rc) 8966 return rc; 8967 8968 /* ...terminate the connection */ 8969 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 8970 memset(&q_params.params.terminate, 0, 8971 sizeof(q_params.params.terminate)); 8972 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 8973 rc = bnx2x_queue_state_change(bp, &q_params); 8974 if (rc) 8975 return rc; 8976 /* ...delete cfc entry */ 8977 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8978 memset(&q_params.params.cfc_del, 0, 8979 sizeof(q_params.params.cfc_del)); 8980 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 8981 return bnx2x_queue_state_change(bp, &q_params); 8982} 8983 8984static void bnx2x_reset_func(struct bnx2x *bp) 8985{ 8986 int port = BP_PORT(bp); 8987 int func = 
BP_FUNC(bp); 8988 int i; 8989 8990 /* Disable the function in the FW */ 8991 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 8992 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 8993 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 8994 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 8995 8996 /* FP SBs */ 8997 for_each_eth_queue(bp, i) { 8998 struct bnx2x_fastpath *fp = &bp->fp[i]; 8999 REG_WR8(bp, BAR_CSTRORM_INTMEM + 9000 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 9001 SB_DISABLED); 9002 } 9003 9004 if (CNIC_LOADED(bp)) 9005 /* CNIC SB */ 9006 REG_WR8(bp, BAR_CSTRORM_INTMEM + 9007 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET 9008 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED); 9009 9010 /* SP SB */ 9011 REG_WR8(bp, BAR_CSTRORM_INTMEM + 9012 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 9013 SB_DISABLED); 9014 9015 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) 9016 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 9017 0); 9018 9019 /* Configure IGU */ 9020 if (bp->common.int_block == INT_BLOCK_HC) { 9021 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 9022 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 9023 } else { 9024 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 9025 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 9026 } 9027 9028 if (CNIC_LOADED(bp)) { 9029 /* Disable Timer scan */ 9030 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 9031 /* 9032 * Wait for at least 10ms and up to 2 second for the timers 9033 * scan to complete 9034 */ 9035 for (i = 0; i < 200; i++) { 9036 usleep_range(10000, 20000); 9037 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 9038 break; 9039 } 9040 } 9041 /* Clear ILT */ 9042 bnx2x_clear_func_ilt(bp, func); 9043 9044 /* Timers workaround bug for E2: if this is vnic-3, 9045 * we need to set the entire ilt range for this timers. 9046 */ 9047 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { 9048 struct ilt_client_info ilt_cli; 9049 /* use dummy TM client */ 9050 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 9051 ilt_cli.start = 0; 9052 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 9053 ilt_cli.client_num = ILT_CLIENT_TM; 9054 9055 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); 9056 } 9057 9058 /* this assumes that reset_port() called before reset_func()*/ 9059 if (!CHIP_IS_E1x(bp)) 9060 bnx2x_pf_disable(bp); 9061 9062 bp->dmae_ready = 0; 9063} 9064 9065static void bnx2x_reset_port(struct bnx2x *bp) 9066{ 9067 int port = BP_PORT(bp); 9068 u32 val; 9069 9070 /* Reset physical Link */ 9071 bnx2x__link_reset(bp); 9072 9073 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 9074 9075 /* Do not rcv packets to BRB */ 9076 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 9077 /* Do not direct rcv packets that are not for MCP to the BRB */ 9078 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 9079 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 9080 9081 /* Configure AEU */ 9082 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 9083 9084 msleep(100); 9085 /* Check for BRB port occupancy */ 9086 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 9087 if (val) 9088 DP(NETIF_MSG_IFDOWN, 9089 "BRB1 is not empty %d blocks are occupied\n", val); 9090 9091 /* TODO: Close Doorbell port? 
*/ 9092} 9093 9094static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) 9095{ 9096 struct bnx2x_func_state_params func_params = {NULL}; 9097 9098 /* Prepare parameters for function state transitions */ 9099 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 9100 9101 func_params.f_obj = &bp->func_obj; 9102 func_params.cmd = BNX2X_F_CMD_HW_RESET; 9103 9104 func_params.params.hw_init.load_phase = load_code; 9105 9106 return bnx2x_func_state_change(bp, &func_params); 9107} 9108 9109static int bnx2x_func_stop(struct bnx2x *bp) 9110{ 9111 struct bnx2x_func_state_params func_params = {NULL}; 9112 int rc; 9113 9114 /* Prepare parameters for function state transitions */ 9115 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 9116 func_params.f_obj = &bp->func_obj; 9117 func_params.cmd = BNX2X_F_CMD_STOP; 9118 9119 /* 9120 * Try to stop the function the 'good way'. If fails (in case 9121 * of a parity error during bnx2x_chip_cleanup()) and we are 9122 * not in a debug mode, perform a state transaction in order to 9123 * enable further HW_RESET transaction. 9124 */ 9125 rc = bnx2x_func_state_change(bp, &func_params); 9126 if (rc) { 9127#ifdef BNX2X_STOP_ON_ERROR 9128 return rc; 9129#else 9130 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n"); 9131 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); 9132 return bnx2x_func_state_change(bp, &func_params); 9133#endif 9134 } 9135 9136 return 0; 9137} 9138 9139/** 9140 * bnx2x_send_unload_req - request unload mode from the MCP. 9141 * 9142 * @bp: driver handle 9143 * @unload_mode: requested function's unload mode 9144 * 9145 * Return unload mode returned by the MCP: COMMON, PORT or FUNC. 9146 */ 9147u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) 9148{ 9149 u32 reset_code = 0; 9150 int port = BP_PORT(bp); 9151 9152 /* Select the UNLOAD request mode */ 9153 if (unload_mode == UNLOAD_NORMAL) 9154 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 9155 9156 else if (bp->flags & NO_WOL_FLAG) 9157 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 9158 9159 else if (bp->wol) { 9160 u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; 9161 u8 *mac_addr = bp->dev->dev_addr; 9162 struct pci_dev *pdev = bp->pdev; 9163 u32 val; 9164 u16 pmc; 9165 9166 /* The mac address is written to entries 1-4 to 9167 * preserve entry 0 which is used by the PMF 9168 */ 9169 u8 entry = (BP_VN(bp) + 1)*8; 9170 9171 val = (mac_addr[0] << 8) | mac_addr[1]; 9172 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); 9173 9174 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 9175 (mac_addr[4] << 8) | mac_addr[5]; 9176 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 9177 9178 /* Enable the PME and clear the status */ 9179 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); 9180 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; 9181 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); 9182 9183 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 9184 9185 } else 9186 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 9187 9188 /* Send the request to the MCP */ 9189 if (!BP_NOMCP(bp)) 9190 reset_code = bnx2x_fw_command(bp, reset_code, 0); 9191 else { 9192 int path = BP_PATH(bp); 9193 9194 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", 9195 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 9196 bnx2x_load_count[path][2]); 9197 bnx2x_load_count[path][0]--; 9198 bnx2x_load_count[path][1 + port]--; 9199 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", 9200 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 9201 bnx2x_load_count[path][2]); 9202 if (bnx2x_load_count[path][0] == 0) 9203 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 9204 else if (bnx2x_load_count[path][1 + port] == 0) 9205 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 9206 else 9207 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 9208 } 9209 9210 return reset_code; 9211} 9212 9213/** 9214 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. 9215 * 9216 * @bp: driver handle 9217 * @keep_link: true iff link should be kept up 9218 */ 9219void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) 9220{ 9221 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; 9222 9223 /* Report UNLOAD_DONE to MCP */ 9224 if (!BP_NOMCP(bp)) 9225 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); 9226} 9227 9228static int bnx2x_func_wait_started(struct bnx2x *bp) 9229{ 9230 int tout = 50; 9231 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 9232 9233 if (!bp->port.pmf) 9234 return 0; 9235 9236 /* 9237 * (assumption: No Attention from MCP at this stage) 9238 * PMF probably in the middle of TX disable/enable transaction 9239 * 1. Sync IRS for default SB 9240 * 2. Sync SP queue - this guarantees us that attention handling started 9241 * 3. Wait, that TX disable/enable transaction completes 9242 * 9243 * 1+2 guarantee that if DCBx attention was scheduled it already changed 9244 * pending bit of transaction from STARTED-->TX_STOPPED, if we already 9245 * received completion for the transaction the state is TX_STOPPED. 9246 * State will return to STARTED after completion of TX_STOPPED-->STARTED 9247 * transaction. 
9248 */ 9249 9250 /* make sure default SB ISR is done */ 9251 if (msix) 9252 synchronize_irq(bp->msix_table[0].vector); 9253 else 9254 synchronize_irq(bp->pdev->irq); 9255 9256 flush_workqueue(bnx2x_wq); 9257 flush_workqueue(bnx2x_iov_wq); 9258 9259 while (bnx2x_func_get_state(bp, &bp->func_obj) != 9260 BNX2X_F_STATE_STARTED && tout--) 9261 msleep(20); 9262 9263 if (bnx2x_func_get_state(bp, &bp->func_obj) != 9264 BNX2X_F_STATE_STARTED) { 9265#ifdef BNX2X_STOP_ON_ERROR 9266 BNX2X_ERR("Wrong function state\n"); 9267 return -EBUSY; 9268#else 9269 /* 9270 * Failed to complete the transaction in a "good way" 9271 * Force both transactions with CLR bit 9272 */ 9273 struct bnx2x_func_state_params func_params = {NULL}; 9274 9275 DP(NETIF_MSG_IFDOWN, 9276 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n"); 9277 9278 func_params.f_obj = &bp->func_obj; 9279 __set_bit(RAMROD_DRV_CLR_ONLY, 9280 &func_params.ramrod_flags); 9281 9282 /* STARTED-->TX_ST0PPED */ 9283 func_params.cmd = BNX2X_F_CMD_TX_STOP; 9284 bnx2x_func_state_change(bp, &func_params); 9285 9286 /* TX_ST0PPED-->STARTED */ 9287 func_params.cmd = BNX2X_F_CMD_TX_START; 9288 return bnx2x_func_state_change(bp, &func_params); 9289#endif 9290 } 9291 9292 return 0; 9293} 9294 9295static void bnx2x_disable_ptp(struct bnx2x *bp) 9296{ 9297 int port = BP_PORT(bp); 9298 9299 /* Disable sending PTP packets to host */ 9300 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : 9301 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); 9302 9303 /* Reset PTP event detection rules */ 9304 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : 9305 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); 9306 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : 9307 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); 9308 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : 9309 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); 9310 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : 9311 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); 9312 9313 /* Disable the PTP feature */ 9314 REG_WR(bp, port ? NIG_REG_P1_PTP_EN : 9315 NIG_REG_P0_PTP_EN, 0x0); 9316} 9317 9318/* Called during unload, to stop PTP-related stuff */ 9319static void bnx2x_stop_ptp(struct bnx2x *bp) 9320{ 9321 /* Cancel PTP work queue. Should be done after the Tx queues are 9322 * drained to prevent additional scheduling. 
9323 */ 9324 cancel_work_sync(&bp->ptp_task); 9325 9326 if (bp->ptp_tx_skb) { 9327 dev_kfree_skb_any(bp->ptp_tx_skb); 9328 bp->ptp_tx_skb = NULL; 9329 } 9330 9331 /* Disable PTP in HW */ 9332 bnx2x_disable_ptp(bp); 9333 9334 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n"); 9335} 9336 9337void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) 9338{ 9339 int port = BP_PORT(bp); 9340 int i, rc = 0; 9341 u8 cos; 9342 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 9343 u32 reset_code; 9344 9345 /* Wait until tx fastpath tasks complete */ 9346 for_each_tx_queue(bp, i) { 9347 struct bnx2x_fastpath *fp = &bp->fp[i]; 9348 9349 for_each_cos_in_tx_queue(fp, cos) 9350 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); 9351#ifdef BNX2X_STOP_ON_ERROR 9352 if (rc) 9353 return; 9354#endif 9355 } 9356 9357 /* Give HW time to discard old tx messages */ 9358 usleep_range(1000, 2000); 9359 9360 /* Clean all ETH MACs */ 9361 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, 9362 false); 9363 if (rc < 0) 9364 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); 9365 9366 /* Clean up UC list */ 9367 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, 9368 true); 9369 if (rc < 0) 9370 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", 9371 rc); 9372 9373 /* The whole *vlan_obj structure may be not initialized if VLAN 9374 * filtering offload is not supported by hardware. Currently this is 9375 * true for all hardware covered by CHIP_IS_E1x(). 9376 */ 9377 if (!CHIP_IS_E1x(bp)) { 9378 /* Remove all currently configured VLANs */ 9379 rc = bnx2x_del_all_vlans(bp); 9380 if (rc < 0) 9381 BNX2X_ERR("Failed to delete all VLANs\n"); 9382 } 9383 9384 /* Disable LLH */ 9385 if (!CHIP_IS_E1(bp)) 9386 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 9387 9388 /* Set "drop all" (stop Rx). 9389 * We need to take a netif_addr_lock() here in order to prevent 9390 * a race between the completion code and this code. 9391 */ 9392 netif_addr_lock_bh(bp->dev); 9393 /* Schedule the rx_mode command */ 9394 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) 9395 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 9396 else if (bp->slowpath) 9397 bnx2x_set_storm_rx_mode(bp); 9398 9399 /* Cleanup multicast configuration */ 9400 rparam.mcast_obj = &bp->mcast_obj; 9401 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 9402 if (rc < 0) 9403 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); 9404 9405 netif_addr_unlock_bh(bp->dev); 9406 9407 bnx2x_iov_chip_cleanup(bp); 9408 9409 /* 9410 * Send the UNLOAD_REQUEST to the MCP. This will return if 9411 * this function should perform FUNC, PORT or COMMON HW 9412 * reset. 
9413 */ 9414 reset_code = bnx2x_send_unload_req(bp, unload_mode); 9415 9416 /* 9417 * (assumption: no attention from the MCP at this stage) 9418 * The PMF is probably in the middle of a TX disable/enable transaction. 9419 */ 9420 rc = bnx2x_func_wait_started(bp); 9421 if (rc) { 9422 BNX2X_ERR("bnx2x_func_wait_started failed\n"); 9423#ifdef BNX2X_STOP_ON_ERROR 9424 return; 9425#endif 9426 } 9427 9428 /* Close multi and leading connections. 9429 * Completions for ramrods are collected in a synchronous way. 9430 */ 9431 for_each_eth_queue(bp, i) 9432 if (bnx2x_stop_queue(bp, i)) 9433#ifdef BNX2X_STOP_ON_ERROR 9434 return; 9435#else 9436 goto unload_error; 9437#endif 9438 9439 if (CNIC_LOADED(bp)) { 9440 for_each_cnic_queue(bp, i) 9441 if (bnx2x_stop_queue(bp, i)) 9442#ifdef BNX2X_STOP_ON_ERROR 9443 return; 9444#else 9445 goto unload_error; 9446#endif 9447 } 9448 9449 /* If SP settings didn't get completed so far - something 9450 * very wrong has happened. 9451 */ 9452 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) 9453 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); 9454 9455#ifndef BNX2X_STOP_ON_ERROR 9456unload_error: 9457#endif 9458 rc = bnx2x_func_stop(bp); 9459 if (rc) { 9460 BNX2X_ERR("Function stop failed!\n"); 9461#ifdef BNX2X_STOP_ON_ERROR 9462 return; 9463#endif 9464 } 9465 9466 /* stop_ptp should be after the Tx queues are drained to prevent 9467 * scheduling to the cancelled PTP work queue. It should also be after 9468 * the function stop ramrod is sent, since as part of this ramrod the 9469 * FW accesses PTP registers. 9470 */ 9471 if (bp->flags & PTP_SUPPORTED) { 9472 bnx2x_stop_ptp(bp); 9473 if (bp->ptp_clock) { 9474 ptp_clock_unregister(bp->ptp_clock); 9475 bp->ptp_clock = NULL; 9476 } 9477 } 9478 9479 /* Disable HW interrupts, NAPI */ 9480 bnx2x_netif_stop(bp, 1); 9481 /* Delete all NAPI objects */ 9482 bnx2x_del_all_napi(bp); 9483 if (CNIC_LOADED(bp)) 9484 bnx2x_del_all_napi_cnic(bp); 9485 9486 /* Release IRQs */ 9487 bnx2x_free_irq(bp); 9488 9489 /* Reset the chip, unless the PCI function is offline. If we reach this 9490 * point following PCI error handling, it means the device is really 9491 * in a bad state and we're about to remove it, so resetting the chip 9492 * is not a good idea. 9493 */ 9494 if (!pci_channel_offline(bp->pdev)) { 9495 rc = bnx2x_reset_hw(bp, reset_code); 9496 if (rc) 9497 BNX2X_ERR("HW_RESET failed\n"); 9498 } 9499 9500 /* Report UNLOAD_DONE to MCP */ 9501 bnx2x_send_unload_done(bp, keep_link); 9502} 9503 9504void bnx2x_disable_close_the_gate(struct bnx2x *bp) 9505{ 9506 u32 val; 9507 9508 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); 9509 9510 if (CHIP_IS_E1(bp)) { 9511 int port = BP_PORT(bp); 9512 u32 addr = port ?
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 9513 MISC_REG_AEU_MASK_ATTN_FUNC_0; 9514 9515 val = REG_RD(bp, addr); 9516 val &= ~(0x300); 9517 REG_WR(bp, addr, val); 9518 } else { 9519 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); 9520 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 9521 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 9522 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); 9523 } 9524} 9525 9526/* Close gates #2, #3 and #4: */ 9527static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 9528{ 9529 u32 val; 9530 9531 /* Gates #2 and #4a are closed/opened for "not E1" only */ 9532 if (!CHIP_IS_E1(bp)) { 9533 /* #4 */ 9534 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 9535 /* #2 */ 9536 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 9537 } 9538 9539 /* #3 */ 9540 if (CHIP_IS_E1x(bp)) { 9541 /* Prevent interrupts from HC on both ports */ 9542 val = REG_RD(bp, HC_REG_CONFIG_1); 9543 REG_WR(bp, HC_REG_CONFIG_1, 9544 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 9545 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 9546 9547 val = REG_RD(bp, HC_REG_CONFIG_0); 9548 REG_WR(bp, HC_REG_CONFIG_0, 9549 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 9550 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 9551 } else { 9552 /* Prevent incoming interrupts in IGU */ 9553 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9554 9555 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, 9556 (!close) ? 9557 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 9558 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 9559 } 9560 9561 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", 9562 close ? "closing" : "opening"); 9563} 9564 9565#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ 9566 9567static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) 9568{ 9569 /* Do some magic... */ 9570 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 9571 *magic_val = val & SHARED_MF_CLP_MAGIC; 9572 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 9573} 9574 9575/** 9576 * bnx2x_clp_reset_done - restore the value of the `magic' bit. 9577 * 9578 * @bp: driver handle 9579 * @magic_val: old value of the `magic' bit. 9580 */ 9581static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 9582{ 9583 /* Restore the `magic' bit value... */ 9584 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 9585 MF_CFG_WR(bp, shared_mf_config.clp_mb, 9586 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 9587} 9588 9589/** 9590 * bnx2x_reset_mcp_prep - prepare for MCP reset. 9591 * 9592 * @bp: driver handle 9593 * @magic_val: old value of 'magic' bit. 9594 * 9595 * Takes care of CLP configurations. 
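 * As a usage sketch (an assumption drawn from bnx2x_process_kill() further
 * down, not an extra requirement), the prep/complete pair is expected to
 * bracket the chip reset:
 *
 *	u32 magic;
 *
 *	bnx2x_reset_mcp_prep(bp, &magic);
 *	bnx2x_process_kill_chip_reset(bp, global);
 *	bnx2x_reset_mcp_comp(bp, magic);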
9596 */ 9597static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) 9598{ 9599 u32 shmem; 9600 u32 validity_offset; 9601 9602 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); 9603 9604 /* Set `magic' bit in order to save MF config */ 9605 if (!CHIP_IS_E1(bp)) 9606 bnx2x_clp_reset_prep(bp, magic_val); 9607 9608 /* Get shmem offset */ 9609 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9610 validity_offset = 9611 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]); 9612 9613 /* Clear validity map flags */ 9614 if (shmem > 0) 9615 REG_WR(bp, shmem + validity_offset, 0); 9616} 9617 9618#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 9619#define MCP_ONE_TIMEOUT 100 /* 100 ms */ 9620 9621/** 9622 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT 9623 * 9624 * @bp: driver handle 9625 */ 9626static void bnx2x_mcp_wait_one(struct bnx2x *bp) 9627{ 9628 /* special handling for emulation and FPGA, 9629 wait 10 times longer */ 9630 if (CHIP_REV_IS_SLOW(bp)) 9631 msleep(MCP_ONE_TIMEOUT*10); 9632 else 9633 msleep(MCP_ONE_TIMEOUT); 9634} 9635 9636/* 9637 * initializes bp->common.shmem_base and waits for validity signature to appear 9638 */ 9639static int bnx2x_init_shmem(struct bnx2x *bp) 9640{ 9641 int cnt = 0; 9642 u32 val = 0; 9643 9644 do { 9645 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9646 9647 /* If we read all 0xFFs, means we are in PCI error state and 9648 * should bail out to avoid crashes on adapter's FW reads. 9649 */ 9650 if (bp->common.shmem_base == 0xFFFFFFFF) { 9651 bp->flags |= NO_MCP_FLAG; 9652 return -ENODEV; 9653 } 9654 9655 if (bp->common.shmem_base) { 9656 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9657 if (val & SHR_MEM_VALIDITY_MB) 9658 return 0; 9659 } 9660 9661 bnx2x_mcp_wait_one(bp); 9662 9663 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 9664 9665 BNX2X_ERR("BAD MCP validity signature\n"); 9666 9667 return -ENODEV; 9668} 9669 9670static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) 9671{ 9672 int rc = bnx2x_init_shmem(bp); 9673 9674 /* Restore the `magic' bit value */ 9675 if (!CHIP_IS_E1(bp)) 9676 bnx2x_clp_reset_done(bp, magic_val); 9677 9678 return rc; 9679} 9680 9681static void bnx2x_pxp_prep(struct bnx2x *bp) 9682{ 9683 if (!CHIP_IS_E1(bp)) { 9684 REG_WR(bp, PXP2_REG_RD_START_INIT, 0); 9685 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); 9686 } 9687} 9688 9689/* 9690 * Reset the whole chip except for: 9691 * - PCIE core 9692 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by 9693 * one reset bit) 9694 * - IGU 9695 * - MISC (including AEU) 9696 * - GRC 9697 * - RBCN, RBCP 9698 */ 9699static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) 9700{ 9701 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 9702 u32 global_bits2, stay_reset2; 9703 9704 /* 9705 * Bits that have to be set in reset_mask2 if we want to reset 'global' 9706 * (per chip) blocks. 9707 */ 9708 global_bits2 = 9709 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 9710 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 9711 9712 /* Don't reset the following blocks. 9713 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 9714 * reset, as in 4 port device they might still be owned 9715 * by the MCP (there is only one leader per path). 
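 * Sketch of how these masks are applied further down (illustrative only):
 * the "keep out of reset" bits are simply masked off the full-chip value
 * before it is written to the MISC reset registers, e.g.
 *
 *	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 *	       reset_mask1 & ~not_reset_mask1);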
9716 */ 9717 not_reset_mask1 = 9718 MISC_REGISTERS_RESET_REG_1_RST_HC | 9719 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 9720 MISC_REGISTERS_RESET_REG_1_RST_PXP; 9721 9722 not_reset_mask2 = 9723 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 9724 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 9725 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 9726 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 9727 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 9728 MISC_REGISTERS_RESET_REG_2_RST_GRC | 9729 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 9730 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 9731 MISC_REGISTERS_RESET_REG_2_RST_ATC | 9732 MISC_REGISTERS_RESET_REG_2_PGLC | 9733 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 9734 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 9735 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 9736 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 9737 MISC_REGISTERS_RESET_REG_2_UMAC0 | 9738 MISC_REGISTERS_RESET_REG_2_UMAC1; 9739 9740 /* 9741 * Keep the following blocks in reset: 9742 * - all xxMACs are handled by the bnx2x_link code. 9743 */ 9744 stay_reset2 = 9745 MISC_REGISTERS_RESET_REG_2_XMAC | 9746 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 9747 9748 /* Full reset masks according to the chip */ 9749 reset_mask1 = 0xffffffff; 9750 9751 if (CHIP_IS_E1(bp)) 9752 reset_mask2 = 0xffff; 9753 else if (CHIP_IS_E1H(bp)) 9754 reset_mask2 = 0x1ffff; 9755 else if (CHIP_IS_E2(bp)) 9756 reset_mask2 = 0xfffff; 9757 else /* CHIP_IS_E3 */ 9758 reset_mask2 = 0x3ffffff; 9759 9760 /* Don't reset global blocks unless we need to */ 9761 if (!global) 9762 reset_mask2 &= ~global_bits2; 9763 9764 /* 9765 * In case of attention in the QM, we need to reset PXP 9766 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 9767 * because otherwise QM reset would release 'close the gates' shortly 9768 * before resetting the PXP, then the PSWRQ would send a write 9769 * request to PGLUE. Then when PXP is reset, PGLUE would try to 9770 * read the payload data from PSWWR, but PSWWR would not 9771 * respond. The write queue in PGLUE would stuck, dmae commands 9772 * would not return. Therefore it's important to reset the second 9773 * reset register (containing the 9774 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 9775 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 9776 * bit). 9777 */ 9778 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 9779 reset_mask2 & (~not_reset_mask2)); 9780 9781 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 9782 reset_mask1 & (~not_reset_mask1)); 9783 9784 barrier(); 9785 9786 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 9787 reset_mask2 & (~stay_reset2)); 9788 9789 barrier(); 9790 9791 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 9792} 9793 9794/** 9795 * bnx2x_er_poll_igu_vq - poll for pending writes bit. 9796 * It should get cleared in no more than 1s. 9797 * 9798 * @bp: driver handle 9799 * 9800 * It should get cleared in no more than 1s. Returns 0 if 9801 * pending writes bit gets cleared. 
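 * Caller-side sketch (mirroring bnx2x_process_kill() below, not an added
 * contract): a non-zero return is treated as fatal for the recovery flow,
 * e.g.
 *
 *	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
 *		return -EAGAIN;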
9802 */ 9803static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) 9804{ 9805 u32 cnt = 1000; 9806 u32 pend_bits = 0; 9807 9808 do { 9809 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); 9810 9811 if (pend_bits == 0) 9812 break; 9813 9814 usleep_range(1000, 2000); 9815 } while (cnt-- > 0); 9816 9817 if (cnt <= 0) { 9818 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", 9819 pend_bits); 9820 return -EBUSY; 9821 } 9822 9823 return 0; 9824} 9825 9826static int bnx2x_process_kill(struct bnx2x *bp, bool global) 9827{ 9828 int cnt = 1000; 9829 u32 val = 0; 9830 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 9831 u32 tags_63_32 = 0; 9832 9833 /* Empty the Tetris buffer, wait for 1s */ 9834 do { 9835 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); 9836 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); 9837 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); 9838 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); 9839 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); 9840 if (CHIP_IS_E3(bp)) 9841 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32); 9842 9843 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 9844 ((port_is_idle_0 & 0x1) == 0x1) && 9845 ((port_is_idle_1 & 0x1) == 0x1) && 9846 (pgl_exp_rom2 == 0xffffffff) && 9847 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff))) 9848 break; 9849 usleep_range(1000, 2000); 9850 } while (cnt-- > 0); 9851 9852 if (cnt <= 0) { 9853 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n"); 9854 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 9855 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, 9856 pgl_exp_rom2); 9857 return -EAGAIN; 9858 } 9859 9860 barrier(); 9861 9862 /* Close gates #2, #3 and #4 */ 9863 bnx2x_set_234_gates(bp, true); 9864 9865 /* Poll for IGU VQs for 57712 and newer chips */ 9866 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) 9867 return -EAGAIN; 9868 9869 /* TBD: Indicate that "process kill" is in progress to MCP */ 9870 9871 /* Clear "unprepared" bit */ 9872 REG_WR(bp, MISC_REG_UNPREPARED, 0); 9873 barrier(); 9874 9875 /* Wait for 1ms to empty GLUE and PCI-E core queues, 9876 * PSWHST, GRC and PSWRD Tetris buffer. 9877 */ 9878 usleep_range(1000, 2000); 9879 9880 /* Prepare to chip reset: */ 9881 /* MCP */ 9882 if (global) 9883 bnx2x_reset_mcp_prep(bp, &val); 9884 9885 /* PXP */ 9886 bnx2x_pxp_prep(bp); 9887 barrier(); 9888 9889 /* reset the chip */ 9890 bnx2x_process_kill_chip_reset(bp, global); 9891 barrier(); 9892 9893 /* clear errors in PGB */ 9894 if (!CHIP_IS_E1x(bp)) 9895 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 9896 9897 /* Recover after reset: */ 9898 /* MCP */ 9899 if (global && bnx2x_reset_mcp_comp(bp, val)) 9900 return -EAGAIN; 9901 9902 /* TBD: Add resetting the NO_MCP mode DB here */ 9903 9904 /* Open the gates #2, #3 and #4 */ 9905 bnx2x_set_234_gates(bp, false); 9906 9907 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a 9908 * reset state, re-enable attentions. 
*/ 9909 9910 return 0; 9911} 9912 9913static int bnx2x_leader_reset(struct bnx2x *bp) 9914{ 9915 int rc = 0; 9916 bool global = bnx2x_reset_is_global(bp); 9917 u32 load_code; 9918 9919 /* if not going to reset the MCP - load a "fake" driver to reset the HW 9920 * while the driver is the owner of the HW 9921 */ 9922 if (!global && !BP_NOMCP(bp)) { 9923 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 9924 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 9925 if (!load_code) { 9926 BNX2X_ERR("MCP response failure, aborting\n"); 9927 rc = -EAGAIN; 9928 goto exit_leader_reset; 9929 } 9930 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 9931 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 9932 BNX2X_ERR("MCP unexpected resp, aborting\n"); 9933 rc = -EAGAIN; 9934 goto exit_leader_reset2; 9935 } 9936 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 9937 if (!load_code) { 9938 BNX2X_ERR("MCP response failure, aborting\n"); 9939 rc = -EAGAIN; 9940 goto exit_leader_reset2; 9941 } 9942 } 9943 9944 /* Try to recover after the failure */ 9945 if (bnx2x_process_kill(bp, global)) { 9946 BNX2X_ERR("Something bad has happened on engine %d! Aii!\n", 9947 BP_PATH(bp)); 9948 rc = -EAGAIN; 9949 goto exit_leader_reset2; 9950 } 9951 9952 /* 9953 * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 9954 * state. 9955 */ 9956 bnx2x_set_reset_done(bp); 9957 if (global) 9958 bnx2x_clear_reset_global(bp); 9959 9960exit_leader_reset2: 9961 /* unload "fake driver" if it was loaded */ 9962 if (!global && !BP_NOMCP(bp)) { 9963 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 9964 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 9965 } 9966exit_leader_reset: 9967 bp->is_leader = 0; 9968 bnx2x_release_leader_lock(bp); 9969 smp_mb(); 9970 return rc; 9971} 9972 9973static void bnx2x_recovery_failed(struct bnx2x *bp) 9974{ 9975 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); 9976 9977 /* Disconnect this device */ 9978 netif_device_detach(bp->dev); 9979 9980 /* 9981 * Block ifup for all functions on this engine until "process kill" 9982 * or power cycle. 9983 */ 9984 bnx2x_set_reset_in_progress(bp); 9985 9986 /* Shut down the power */ 9987 bnx2x_set_power_state(bp, PCI_D3hot); 9988 9989 bp->recovery_state = BNX2X_RECOVERY_FAILED; 9990 9991 smp_mb(); 9992} 9993 9994/* 9995 * Assumption: runs under rtnl lock. This, together with the fact 9996 * that it's called only from bnx2x_sp_rtnl_task(), ensures that it 9997 * will never be called when netif_running(bp->dev) is false. 9998 */ 9999static void bnx2x_parity_recover(struct bnx2x *bp) 10000{ 10001 u32 error_recovered, error_unrecovered; 10002 bool is_parity, global = false; 10003#ifdef CONFIG_BNX2X_SRIOV 10004 int vf_idx; 10005 10006 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { 10007 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 10008 10009 if (vf) 10010 vf->state = VF_LOST; 10011 } 10012#endif 10013 DP(NETIF_MSG_HW, "Handling parity\n"); 10014 while (1) { 10015 switch (bp->recovery_state) { 10016 case BNX2X_RECOVERY_INIT: 10017 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); 10018 is_parity = bnx2x_chk_parity_attn(bp, &global, false); 10019 WARN_ON(!is_parity); 10020 10021 /* Try to get a LEADER_LOCK HW lock */ 10022 if (bnx2x_trylock_leader_lock(bp)) { 10023 bnx2x_set_reset_in_progress(bp); 10024 /* 10025 * Check if there is a global attention and, 10026 * if so, set the global 10027 * reset bit.
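 * A rough, illustrative summary of the recovery state machine driven by
 * this loop (pieced together from the cases below, not a normative spec):
 *
 *	BNX2X_RECOVERY_INIT -> BNX2X_RECOVERY_WAIT ->
 *	BNX2X_RECOVERY_NIC_LOADING -> BNX2X_RECOVERY_DONE
 *	(or BNX2X_RECOVERY_FAILED if the leader reset / reload fails)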
10028 */ 10029 10030 if (global) 10031 bnx2x_set_reset_global(bp); 10032 10033 bp->is_leader = 1; 10034 } 10035 10036 /* Stop the driver */ 10037 /* If interface has been removed - break */ 10038 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) 10039 return; 10040 10041 bp->recovery_state = BNX2X_RECOVERY_WAIT; 10042 10043 /* Ensure "is_leader", MCP command sequence and 10044 * "recovery_state" update values are seen on other 10045 * CPUs. 10046 */ 10047 smp_mb(); 10048 break; 10049 10050 case BNX2X_RECOVERY_WAIT: 10051 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); 10052 if (bp->is_leader) { 10053 int other_engine = BP_PATH(bp) ? 0 : 1; 10054 bool other_load_status = 10055 bnx2x_get_load_status(bp, other_engine); 10056 bool load_status = 10057 bnx2x_get_load_status(bp, BP_PATH(bp)); 10058 global = bnx2x_reset_is_global(bp); 10059 10060 /* 10061 * In case of a parity in a global block, let 10062 * the first leader that performs a 10063 * leader_reset() reset the global blocks in 10064 * order to clear global attentions. Otherwise 10065 * the gates will remain closed for that 10066 * engine. 10067 */ 10068 if (load_status || 10069 (global && other_load_status)) { 10070 /* Wait until all other functions get 10071 * down. 10072 */ 10073 schedule_delayed_work(&bp->sp_rtnl_task, 10074 HZ/10); 10075 return; 10076 } else { 10077 /* If all other functions got down - 10078 * try to bring the chip back to 10079 * normal. In any case it's an exit 10080 * point for a leader. 10081 */ 10082 if (bnx2x_leader_reset(bp)) { 10083 bnx2x_recovery_failed(bp); 10084 return; 10085 } 10086 10087 /* If we are here, means that the 10088 * leader has succeeded and doesn't 10089 * want to be a leader any more. Try 10090 * to continue as a none-leader. 10091 */ 10092 break; 10093 } 10094 } else { /* non-leader */ 10095 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { 10096 /* Try to get a LEADER_LOCK HW lock as 10097 * long as a former leader may have 10098 * been unloaded by the user or 10099 * released a leadership by another 10100 * reason. 10101 */ 10102 if (bnx2x_trylock_leader_lock(bp)) { 10103 /* I'm a leader now! Restart a 10104 * switch case. 10105 */ 10106 bp->is_leader = 1; 10107 break; 10108 } 10109 10110 schedule_delayed_work(&bp->sp_rtnl_task, 10111 HZ/10); 10112 return; 10113 10114 } else { 10115 /* 10116 * If there was a global attention, wait 10117 * for it to be cleared. 10118 */ 10119 if (bnx2x_reset_is_global(bp)) { 10120 schedule_delayed_work( 10121 &bp->sp_rtnl_task, 10122 HZ/10); 10123 return; 10124 } 10125 10126 error_recovered = 10127 bp->eth_stats.recoverable_error; 10128 error_unrecovered = 10129 bp->eth_stats.unrecoverable_error; 10130 bp->recovery_state = 10131 BNX2X_RECOVERY_NIC_LOADING; 10132 if (bnx2x_nic_load(bp, LOAD_NORMAL)) { 10133 error_unrecovered++; 10134 netdev_err(bp->dev, 10135 "Recovery failed. 
Power cycle needed\n"); 10136 /* Disconnect this device */ 10137 netif_device_detach(bp->dev); 10138 /* Shut down the power */ 10139 bnx2x_set_power_state( 10140 bp, PCI_D3hot); 10141 smp_mb(); 10142 } else { 10143 bp->recovery_state = 10144 BNX2X_RECOVERY_DONE; 10145 error_recovered++; 10146 smp_mb(); 10147 } 10148 bp->eth_stats.recoverable_error = 10149 error_recovered; 10150 bp->eth_stats.unrecoverable_error = 10151 error_unrecovered; 10152 10153 return; 10154 } 10155 } 10156 default: 10157 return; 10158 } 10159 } 10160} 10161 10162static int bnx2x_udp_port_update(struct bnx2x *bp) 10163{ 10164 struct bnx2x_func_switch_update_params *switch_update_params; 10165 struct bnx2x_func_state_params func_params = {NULL}; 10166 u16 vxlan_port = 0, geneve_port = 0; 10167 int rc; 10168 10169 switch_update_params = &func_params.params.switch_update; 10170 10171 /* Prepare parameters for function state transitions */ 10172 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 10173 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 10174 10175 func_params.f_obj = &bp->func_obj; 10176 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 10177 10178 /* Function parameters */ 10179 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, 10180 &switch_update_params->changes); 10181 10182 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) { 10183 geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; 10184 switch_update_params->geneve_dst_port = geneve_port; 10185 } 10186 10187 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) { 10188 vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; 10189 switch_update_params->vxlan_dst_port = vxlan_port; 10190 } 10191 10192 /* Re-enable inner-rss for the offloaded UDP tunnels */ 10193 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS, 10194 &switch_update_params->changes); 10195 10196 rc = bnx2x_func_state_change(bp, &func_params); 10197 if (rc) 10198 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n", 10199 vxlan_port, geneve_port, rc); 10200 else 10201 DP(BNX2X_MSG_SP, 10202 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n", 10203 vxlan_port, geneve_port); 10204 10205 return rc; 10206} 10207 10208static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table) 10209{ 10210 struct bnx2x *bp = netdev_priv(netdev); 10211 struct udp_tunnel_info ti; 10212 10213 udp_tunnel_nic_get_port(netdev, table, 0, &ti); 10214 bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port); 10215 10216 return bnx2x_udp_port_update(bp); 10217} 10218 10219static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = { 10220 .sync_table = bnx2x_udp_tunnel_sync, 10221 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 10222 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 10223 .tables = { 10224 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 10225 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 10226 }, 10227}; 10228 10229static int bnx2x_close(struct net_device *dev); 10230 10231/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is 10232 * scheduled on a general queue in order to prevent a dead lock. 
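 * A hedged illustration, based on the schedule calls visible in
 * bnx2x_parity_recover() above: the task is (re)armed on the system
 * workqueue rather than on bnx2x_wq, e.g.
 *
 *	schedule_delayed_work(&bp->sp_rtnl_task, HZ/10);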
10233 */ 10234static void bnx2x_sp_rtnl_task(struct work_struct *work) 10235{ 10236 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); 10237 10238 rtnl_lock(); 10239 10240 if (!netif_running(bp->dev)) { 10241 rtnl_unlock(); 10242 return; 10243 } 10244 10245 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { 10246#ifdef BNX2X_STOP_ON_ERROR 10247 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" 10248 "you will need to reboot when done\n"); 10249 goto sp_rtnl_not_reset; 10250#endif 10251 /* 10252 * Clear all pending SP commands as we are going to reset the 10253 * function anyway. 10254 */ 10255 bp->sp_rtnl_state = 0; 10256 smp_mb(); 10257 10258 bnx2x_parity_recover(bp); 10259 10260 rtnl_unlock(); 10261 return; 10262 } 10263 10264 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { 10265#ifdef BNX2X_STOP_ON_ERROR 10266 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" 10267 "you will need to reboot when done\n"); 10268 goto sp_rtnl_not_reset; 10269#endif 10270 10271 /* 10272 * Clear all pending SP commands as we are going to reset the 10273 * function anyway. 10274 */ 10275 bp->sp_rtnl_state = 0; 10276 smp_mb(); 10277 10278 /* Immediately indicate link as down */ 10279 bp->link_vars.link_up = 0; 10280 bp->force_link_down = true; 10281 netif_carrier_off(bp->dev); 10282 BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); 10283 10284 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 10285 /* If the reload fails with an allocation error (-ENOMEM), 10286 * the nic is unloaded and loaded once more. If open still 10287 * fails, an error message notifies the user. 10288 */ 10289 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) { 10290 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 10291 if (bnx2x_nic_load(bp, LOAD_NORMAL)) 10292 BNX2X_ERR("Opening the NIC failed again!\n"); 10293 } 10294 rtnl_unlock(); 10295 return; 10296 } 10297#ifdef BNX2X_STOP_ON_ERROR 10298sp_rtnl_not_reset: 10299#endif 10300 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) 10301 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); 10302 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) 10303 bnx2x_after_function_update(bp); 10304 /* 10305 * in case of fan failure we need to reset it if the "stop on error" 10306 * debug flag is set, since we are trying to prevent permanent 10307 * overheating damage 10308 */ 10309 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { 10310 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); 10311 netif_device_detach(bp->dev); 10312 bnx2x_close(bp->dev); 10313 rtnl_unlock(); 10314 return; 10315 } 10316 10317 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { 10318 DP(BNX2X_MSG_SP, 10319 "sending set mcast vf pf channel message from rtnl sp-task\n"); 10320 bnx2x_vfpf_set_mcast(bp->dev); 10321 } 10322 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 10323 &bp->sp_rtnl_state)) { 10324 if (netif_carrier_ok(bp->dev)) { 10325 bnx2x_tx_disable(bp); 10326 BNX2X_ERR("PF indicated channel is not serviceable anymore.
This means this VF device is no longer operational\n"); 10327 } 10328 } 10329 10330 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { 10331 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); 10332 bnx2x_set_rx_mode_inner(bp); 10333 } 10334 10335 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, 10336 &bp->sp_rtnl_state)) 10337 bnx2x_pf_set_vfs_vlan(bp); 10338 10339 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { 10340 bnx2x_dcbx_stop_hw_tx(bp); 10341 bnx2x_dcbx_resume_hw_tx(bp); 10342 } 10343 10344 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION, 10345 &bp->sp_rtnl_state)) 10346 bnx2x_update_mng_version(bp); 10347 10348 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state)) 10349 bnx2x_handle_update_svid_cmd(bp); 10350 10351 /* work which needs rtnl lock not-taken (as it takes the lock itself and 10352 * can be called from other contexts as well) 10353 */ 10354 rtnl_unlock(); 10355 10356 /* enable SR-IOV if applicable */ 10357 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, 10358 &bp->sp_rtnl_state)) { 10359 bnx2x_disable_sriov(bp); 10360 bnx2x_enable_sriov(bp); 10361 } 10362} 10363 10364static void bnx2x_period_task(struct work_struct *work) 10365{ 10366 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); 10367 10368 if (!netif_running(bp->dev)) 10369 goto period_task_exit; 10370 10371 if (CHIP_REV_IS_SLOW(bp)) { 10372 BNX2X_ERR("period task called on emulation, ignoring\n"); 10373 goto period_task_exit; 10374 } 10375 10376 bnx2x_acquire_phy_lock(bp); 10377 /* 10378 * The barrier is needed to ensure the ordering between the writing to 10379 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and 10380 * the reading here. 10381 */ 10382 smp_mb(); 10383 if (bp->port.pmf) { 10384 bnx2x_period_func(&bp->link_params, &bp->link_vars); 10385 10386 /* Re-queue task in 1 sec */ 10387 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); 10388 } 10389 10390 bnx2x_release_phy_lock(bp); 10391period_task_exit: 10392 return; 10393} 10394 10395/* 10396 * Init service functions 10397 */ 10398 10399static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) 10400{ 10401 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; 10402 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; 10403 return base + (BP_ABS_FUNC(bp)) * stride; 10404} 10405 10406static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp, 10407 u8 port, u32 reset_reg, 10408 struct bnx2x_mac_vals *vals) 10409{ 10410 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 10411 u32 base_addr; 10412 10413 if (!(mask & reset_reg)) 10414 return false; 10415 10416 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port); 10417 base_addr = port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; 10418 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG; 10419 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); 10420 REG_WR(bp, vals->umac_addr[port], 0); 10421 10422 return true; 10423} 10424 10425static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, 10426 struct bnx2x_mac_vals *vals) 10427{ 10428 u32 val, base_addr, offset, mask, reset_reg; 10429 bool mac_stopped = false; 10430 u8 port = BP_PORT(bp); 10431 10432 /* reset addresses as they also mark which values were changed */ 10433 memset(vals, 0, sizeof(*vals)); 10434 10435 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); 10436 10437 if (!CHIP_IS_E3(bp)) { 10438 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 10439 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 10440 if ((mask & reset_reg) && val) { 10441 u32 wb_data[2]; 10442 BNX2X_DEV_INFO("Disable bmac Rx\n"); 10443 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM 10444 : NIG_REG_INGRESS_BMAC0_MEM; 10445 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL 10446 : BIGMAC_REGISTER_BMAC_CONTROL; 10447 10448 /* 10449 * use rd/wr since we cannot use dmae. This is safe 10450 * since MCP won't access the bus due to the request 10451 * to unload, and no function on the path can be 10452 * loaded at this time. 10453 */ 10454 wb_data[0] = REG_RD(bp, base_addr + offset); 10455 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); 10456 vals->bmac_addr = base_addr + offset; 10457 vals->bmac_val[0] = wb_data[0]; 10458 vals->bmac_val[1] = wb_data[1]; 10459 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 10460 REG_WR(bp, vals->bmac_addr, wb_data[0]); 10461 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); 10462 } 10463 BNX2X_DEV_INFO("Disable emac Rx\n"); 10464 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; 10465 vals->emac_val = REG_RD(bp, vals->emac_addr); 10466 REG_WR(bp, vals->emac_addr, 0); 10467 mac_stopped = true; 10468 } else { 10469 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 10470 BNX2X_DEV_INFO("Disable xmac Rx\n"); 10471 base_addr = BP_PORT(bp) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; 10472 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); 10473 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 10474 val & ~(1 << 1)); 10475 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, 10476 val | (1 << 1)); 10477 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 10478 vals->xmac_val = REG_RD(bp, vals->xmac_addr); 10479 REG_WR(bp, vals->xmac_addr, 0); 10480 mac_stopped = true; 10481 } 10482 10483 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0, 10484 reset_reg, vals); 10485 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1, 10486 reset_reg, vals); 10487 } 10488 10489 if (mac_stopped) 10490 msleep(20); 10491} 10492 10493#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 10494#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \ 10495 0x1848 + ((f) << 4)) 10496#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) 10497#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 10498#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 10499 10500#define BCM_5710_UNDI_FW_MF_MAJOR (0x07) 10501#define BCM_5710_UNDI_FW_MF_MINOR (0x08) 10502#define BCM_5710_UNDI_FW_MF_VERS (0x05) 10503 10504static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) 10505{ 10506 /* UNDI marks its presence in DORQ - 10507 * it initializes CID offset for normal bell to 0x7 10508 */ 10509 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & 10510 MISC_REGISTERS_RESET_REG_1_RST_DORQ)) 10511 return false; 10512 10513 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) { 10514 BNX2X_DEV_INFO("UNDI previously loaded\n"); 10515 return true; 10516 } 10517 10518 return false; 10519} 10520 10521static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc) 10522{ 10523 u16 rcq, bd; 10524 u32 addr, tmp_reg; 10525 10526 if (BP_FUNC(bp) < 2) 10527 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp)); 10528 else 10529 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); 10530 10531 tmp_reg = REG_RD(bp, addr); 10532 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; 10533 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; 10534 10535 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); 10536 REG_WR(bp, addr, tmp_reg); 10537 10538 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", 10539 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq); 10540} 10541 10542static int bnx2x_prev_mcp_done(struct bnx2x *bp) 10543{ 10544 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 10545 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 10546 if (!rc) { 10547 BNX2X_ERR("MCP response failure, aborting\n"); 10548 return -EBUSY; 10549 } 10550 10551 return 0; 10552} 10553 10554static struct bnx2x_prev_path_list * 10555 bnx2x_prev_path_get_entry(struct bnx2x *bp) 10556{ 10557 struct bnx2x_prev_path_list *tmp_list; 10558 10559 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) 10560 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && 10561 bp->pdev->bus->number == tmp_list->bus && 10562 BP_PATH(bp) == tmp_list->path) 10563 return tmp_list; 10564 10565 return NULL; 10566} 10567 10568static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) 10569{ 10570 struct bnx2x_prev_path_list *tmp_list; 10571 int rc; 10572 10573 rc = down_interruptible(&bnx2x_prev_sem); 10574 if (rc) { 10575 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10576 return rc; 10577 } 10578 10579 tmp_list = bnx2x_prev_path_get_entry(bp); 10580 if (tmp_list) { 10581 tmp_list->aer = 1; 10582 rc = 0; 10583 } else { 10584 BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n", 10585 BP_PATH(bp)); 10586 } 10587 10588 
up(&bnx2x_prev_sem); 10589 10590 return rc; 10591} 10592 10593static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) 10594{ 10595 struct bnx2x_prev_path_list *tmp_list; 10596 bool rc = false; 10597 10598 if (down_trylock(&bnx2x_prev_sem)) 10599 return false; 10600 10601 tmp_list = bnx2x_prev_path_get_entry(bp); 10602 if (tmp_list) { 10603 if (tmp_list->aer) { 10604 DP(NETIF_MSG_HW, "Path %d was marked by AER\n", 10605 BP_PATH(bp)); 10606 } else { 10607 rc = true; 10608 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", 10609 BP_PATH(bp)); 10610 } 10611 } 10612 10613 up(&bnx2x_prev_sem); 10614 10615 return rc; 10616} 10617 10618bool bnx2x_port_after_undi(struct bnx2x *bp) 10619{ 10620 struct bnx2x_prev_path_list *entry; 10621 bool val; 10622 10623 down(&bnx2x_prev_sem); 10624 10625 entry = bnx2x_prev_path_get_entry(bp); 10626 val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); 10627 10628 up(&bnx2x_prev_sem); 10629 10630 return val; 10631} 10632 10633static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) 10634{ 10635 struct bnx2x_prev_path_list *tmp_list; 10636 int rc; 10637 10638 rc = down_interruptible(&bnx2x_prev_sem); 10639 if (rc) { 10640 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10641 return rc; 10642 } 10643 10644 /* Check whether the entry for this path already exists */ 10645 tmp_list = bnx2x_prev_path_get_entry(bp); 10646 if (tmp_list) { 10647 if (!tmp_list->aer) { 10648 BNX2X_ERR("Re-Marking the path.\n"); 10649 } else { 10650 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", 10651 BP_PATH(bp)); 10652 tmp_list->aer = 0; 10653 } 10654 up(&bnx2x_prev_sem); 10655 return 0; 10656 } 10657 up(&bnx2x_prev_sem); 10658 10659 /* Create an entry for this path and add it */ 10660 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); 10661 if (!tmp_list) { 10662 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); 10663 return -ENOMEM; 10664 } 10665 10666 tmp_list->bus = bp->pdev->bus->number; 10667 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); 10668 tmp_list->path = BP_PATH(bp); 10669 tmp_list->aer = 0; 10670 tmp_list->undi = after_undi ? 
(1 << BP_PORT(bp)) : 0; 10671 10672 rc = down_interruptible(&bnx2x_prev_sem); 10673 if (rc) { 10674 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10675 kfree(tmp_list); 10676 } else { 10677 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", 10678 BP_PATH(bp)); 10679 list_add(&tmp_list->list, &bnx2x_prev_list); 10680 up(&bnx2x_prev_sem); 10681 } 10682 10683 return rc; 10684} 10685 10686static int bnx2x_do_flr(struct bnx2x *bp) 10687{ 10688 struct pci_dev *dev = bp->pdev; 10689 10690 if (CHIP_IS_E1x(bp)) { 10691 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); 10692 return -EINVAL; 10693 } 10694 10695 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 10696 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 10697 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", 10698 bp->common.bc_ver); 10699 return -EINVAL; 10700 } 10701 10702 if (!pci_wait_for_pending_transaction(dev)) 10703 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); 10704 10705 BNX2X_DEV_INFO("Initiating FLR\n"); 10706 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); 10707 10708 return 0; 10709} 10710 10711static int bnx2x_prev_unload_uncommon(struct bnx2x *bp) 10712{ 10713 int rc; 10714 10715 BNX2X_DEV_INFO("Uncommon unload Flow\n"); 10716 10717 /* Test if previous unload process was already finished for this path */ 10718 if (bnx2x_prev_is_path_marked(bp)) 10719 return bnx2x_prev_mcp_done(bp); 10720 10721 BNX2X_DEV_INFO("Path is unmarked\n"); 10722 10723 /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */ 10724 if (bnx2x_prev_is_after_undi(bp)) 10725 goto out; 10726 10727 /* If function has FLR capabilities, and existing FW version matches 10728 * the one required, then FLR will be sufficient to clean any residue 10729 * left by previous driver 10730 */ 10731 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false); 10732 10733 if (!rc) { 10734 /* fw version is good */ 10735 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n"); 10736 rc = bnx2x_do_flr(bp); 10737 } 10738 10739 if (!rc) { 10740 /* FLR was performed */ 10741 BNX2X_DEV_INFO("FLR successful\n"); 10742 return 0; 10743 } 10744 10745 BNX2X_DEV_INFO("Could not FLR\n"); 10746 10747out: 10748 /* Close the MCP request, return failure*/ 10749 rc = bnx2x_prev_mcp_done(bp); 10750 if (!rc) 10751 rc = BNX2X_PREV_WAIT_NEEDED; 10752 10753 return rc; 10754} 10755 10756static int bnx2x_prev_unload_common(struct bnx2x *bp) 10757{ 10758 u32 reset_reg, tmp_reg = 0, rc; 10759 bool prev_undi = false; 10760 struct bnx2x_mac_vals mac_vals; 10761 10762 /* It is possible a previous function received 'common' answer, 10763 * but hasn't loaded yet, therefore creating a scenario of 10764 * multiple functions receiving 'common' on the same path. 
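 * A hedged outline of how the function below handles that (names and order
 * taken from the code that follows; this is not a complete listing):
 *
 *	if (bnx2x_prev_is_path_marked(bp))
 *		return bnx2x_prev_mcp_done(bp);		path already cleaned
 *	bnx2x_prev_unload_close_mac(bp, &mac_vals);	stop Rx into the BRB
 *	... wait for BRB1_REG_NUM_OF_FULL_BLOCKS to drain ...
 *	bnx2x_reset_common(bp);
 *	bnx2x_prev_mark_path(bp, prev_undi);		so siblings skip it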
10765 */ 10766 BNX2X_DEV_INFO("Common unload Flow\n"); 10767 10768 memset(&mac_vals, 0, sizeof(mac_vals)); 10769 10770 if (bnx2x_prev_is_path_marked(bp)) 10771 return bnx2x_prev_mcp_done(bp); 10772 10773 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); 10774 10775 /* Reset should be performed after BRB is emptied */ 10776 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 10777 u32 timer_count = 1000; 10778 10779 /* Close the MAC Rx to prevent BRB from filling up */ 10780 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10781 10782 /* close LLH filters for both ports towards the BRB */ 10783 bnx2x_set_rx_filter(&bp->link_params, 0); 10784 bp->link_params.port ^= 1; 10785 bnx2x_set_rx_filter(&bp->link_params, 0); 10786 bp->link_params.port ^= 1; 10787 10788 /* Check if the UNDI driver was previously loaded */ 10789 if (bnx2x_prev_is_after_undi(bp)) { 10790 prev_undi = true; 10791 /* clear the UNDI indication */ 10792 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); 10793 /* clear possible idle check errors */ 10794 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); 10795 } 10796 if (!CHIP_IS_E1x(bp)) 10797 /* block FW from writing to host */ 10798 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 10799 10800 /* wait until BRB is empty */ 10801 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 10802 while (timer_count) { 10803 u32 prev_brb = tmp_reg; 10804 10805 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 10806 if (!tmp_reg) 10807 break; 10808 10809 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg); 10810 10811 /* reset timer as long as BRB actually gets emptied */ 10812 if (prev_brb > tmp_reg) 10813 timer_count = 1000; 10814 else 10815 timer_count--; 10816 10817 /* If UNDI resides in memory, manually increment it */ 10818 if (prev_undi) 10819 bnx2x_prev_unload_undi_inc(bp, 1); 10820 10821 udelay(10); 10822 } 10823 10824 if (!timer_count) 10825 BNX2X_ERR("Failed to empty BRB, hope for the best\n"); 10826 } 10827 10828 /* No packets are in the pipeline, path is ready for reset */ 10829 bnx2x_reset_common(bp); 10830 10831 if (mac_vals.xmac_addr) 10832 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); 10833 if (mac_vals.umac_addr[0]) 10834 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]); 10835 if (mac_vals.umac_addr[1]) 10836 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]); 10837 if (mac_vals.emac_addr) 10838 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); 10839 if (mac_vals.bmac_addr) { 10840 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 10841 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 10842 } 10843 10844 rc = bnx2x_prev_mark_path(bp, prev_undi); 10845 if (rc) { 10846 bnx2x_prev_mcp_done(bp); 10847 return rc; 10848 } 10849 10850 return bnx2x_prev_mcp_done(bp); 10851} 10852 10853static int bnx2x_prev_unload(struct bnx2x *bp) 10854{ 10855 int time_counter = 10; 10856 u32 rc, fw, hw_lock_reg, hw_lock_val; 10857 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 10858 10859 /* clear hw from errors which may have resulted from an interrupted 10860 * dmae transaction. 10861 */ 10862 bnx2x_clean_pglue_errors(bp); 10863 10864 /* Release previously held locks */ 10865 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 
10866 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 10867 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); 10868 10869 hw_lock_val = REG_RD(bp, hw_lock_reg); 10870 if (hw_lock_val) { 10871 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 10872 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); 10873 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, 10874 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); 10875 } 10876 10877 BNX2X_DEV_INFO("Release Previously held hw lock\n"); 10878 REG_WR(bp, hw_lock_reg, 0xffffffff); 10879 } else 10880 BNX2X_DEV_INFO("No need to release hw/nvram locks\n"); 10881 10882 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { 10883 BNX2X_DEV_INFO("Release previously held alr\n"); 10884 bnx2x_release_alr(bp); 10885 } 10886 10887 do { 10888 int aer = 0; 10889 /* Lock MCP using an unload request */ 10890 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 10891 if (!fw) { 10892 BNX2X_ERR("MCP response failure, aborting\n"); 10893 rc = -EBUSY; 10894 break; 10895 } 10896 10897 rc = down_interruptible(&bnx2x_prev_sem); 10898 if (rc) { 10899 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n", 10900 rc); 10901 } else { 10902 /* If Path is marked by EEH, ignore unload status */ 10903 aer = !!(bnx2x_prev_path_get_entry(bp) && 10904 bnx2x_prev_path_get_entry(bp)->aer); 10905 up(&bnx2x_prev_sem); 10906 } 10907 10908 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) { 10909 rc = bnx2x_prev_unload_common(bp); 10910 break; 10911 } 10912 10913 /* non-common reply from MCP might require looping */ 10914 rc = bnx2x_prev_unload_uncommon(bp); 10915 if (rc != BNX2X_PREV_WAIT_NEEDED) 10916 break; 10917 10918 msleep(20); 10919 } while (--time_counter); 10920 10921 if (!time_counter || rc) { 10922 BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n"); 10923 rc = -EPROBE_DEFER; 10924 } 10925 10926 /* Mark function if its port was used to boot from SAN */ 10927 if (bnx2x_port_after_undi(bp)) 10928 bp->link_params.feature_config_flags |= 10929 FEATURE_CONFIG_BOOT_FROM_SAN; 10930 10931 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); 10932 10933 return rc; 10934} 10935 10936static void bnx2x_get_common_hwinfo(struct bnx2x *bp) 10937{ 10938 u32 val, val2, val3, val4, id, boot_mode; 10939 u16 pmc; 10940 10941 /* Get the chip revision id and number. 
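 * A sketch of the value assembled below, using the field names from the
 * layout comment that follows (illustrative only):
 *
 *	id = (chip_num << 16) | (rev << 12) | (metal << 4) | bond_id;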
*/ 10942 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 10943 val = REG_RD(bp, MISC_REG_CHIP_NUM); 10944 id = ((val & 0xffff) << 16); 10945 val = REG_RD(bp, MISC_REG_CHIP_REV); 10946 id |= ((val & 0xf) << 12); 10947 10948 /* Metal is read from PCI regs, but we can't access >=0x400 from 10949 * the configuration space (so we need to reg_rd) 10950 */ 10951 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); 10952 id |= (((val >> 24) & 0xf) << 4); 10953 val = REG_RD(bp, MISC_REG_BOND_ID); 10954 id |= (val & 0xf); 10955 bp->common.chip_id = id; 10956 10957 /* force 57811 according to MISC register */ 10958 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 10959 if (CHIP_IS_57810(bp)) 10960 bp->common.chip_id = (CHIP_NUM_57811 << 16) | 10961 (bp->common.chip_id & 0x0000FFFF); 10962 else if (CHIP_IS_57810_MF(bp)) 10963 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | 10964 (bp->common.chip_id & 0x0000FFFF); 10965 bp->common.chip_id |= 0x1; 10966 } 10967 10968 /* Set doorbell size */ 10969 bp->db_size = (1 << BNX2X_DB_SHIFT); 10970 10971 if (!CHIP_IS_E1x(bp)) { 10972 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); 10973 if ((val & 1) == 0) 10974 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); 10975 else 10976 val = (val >> 1) & 1; 10977 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : 10978 "2_PORT_MODE"); 10979 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : 10980 CHIP_2_PORT_MODE; 10981 10982 if (CHIP_MODE_IS_4_PORT(bp)) 10983 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ 10984 else 10985 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ 10986 } else { 10987 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ 10988 bp->pfid = bp->pf_num; /* 0..7 */ 10989 } 10990 10991 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); 10992 10993 bp->link_params.chip_id = bp->common.chip_id; 10994 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 10995 10996 val = (REG_RD(bp, 0x2874) & 0x55); 10997 if ((bp->common.chip_id & 0x1) || 10998 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { 10999 bp->flags |= ONE_PORT_FLAG; 11000 BNX2X_DEV_INFO("single port device\n"); 11001 } 11002 11003 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); 11004 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << 11005 (val & MCPR_NVM_CFG4_FLASH_SIZE)); 11006 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", 11007 bp->common.flash_size, bp->common.flash_size); 11008 11009 bnx2x_init_shmem(bp); 11010 11011 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
11012 MISC_REG_GENERIC_CR_1 : 11013 MISC_REG_GENERIC_CR_0)); 11014 11015 bp->link_params.shmem_base = bp->common.shmem_base; 11016 bp->link_params.shmem2_base = bp->common.shmem2_base; 11017 if (SHMEM2_RD(bp, size) > 11018 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) 11019 bp->link_params.lfa_base = 11020 REG_RD(bp, bp->common.shmem2_base + 11021 (u32)offsetof(struct shmem2_region, 11022 lfa_host_addr[BP_PORT(bp)])); 11023 else 11024 bp->link_params.lfa_base = 0; 11025 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 11026 bp->common.shmem_base, bp->common.shmem2_base); 11027 11028 if (!bp->common.shmem_base) { 11029 BNX2X_DEV_INFO("MCP not active\n"); 11030 bp->flags |= NO_MCP_FLAG; 11031 return; 11032 } 11033 11034 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 11035 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 11036 11037 bp->link_params.hw_led_mode = ((bp->common.hw_config & 11038 SHARED_HW_CFG_LED_MODE_MASK) >> 11039 SHARED_HW_CFG_LED_MODE_SHIFT); 11040 11041 bp->link_params.feature_config_flags = 0; 11042 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); 11043 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) 11044 bp->link_params.feature_config_flags |= 11045 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 11046 else 11047 bp->link_params.feature_config_flags &= 11048 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 11049 11050 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; 11051 bp->common.bc_ver = val; 11052 BNX2X_DEV_INFO("bc_ver %X\n", val); 11053 if (val < BNX2X_BC_VER) { 11054 /* for now only warn 11055 * later we might need to enforce this */ 11056 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", 11057 BNX2X_BC_VER, val); 11058 } 11059 bp->link_params.feature_config_flags |= 11060 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 11061 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 11062 11063 bp->link_params.feature_config_flags |= 11064 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 11065 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 11066 bp->link_params.feature_config_flags |= 11067 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 11068 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 11069 bp->link_params.feature_config_flags |= 11070 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 11071 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 11072 11073 bp->link_params.feature_config_flags |= 11074 (val >= REQ_BC_VER_4_MT_SUPPORTED) ? 11075 FEATURE_CONFIG_MT_SUPPORT : 0; 11076 11077 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 11078 BC_SUPPORTS_PFC_STATS : 0; 11079 11080 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? 11081 BC_SUPPORTS_FCOE_FEATURES : 0; 11082 11083 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 11084 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; 11085 11086 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? 
11087 BC_SUPPORTS_RMMOD_CMD : 0; 11088 11089 boot_mode = SHMEM_RD(bp, 11090 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 11091 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 11092 switch (boot_mode) { 11093 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: 11094 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; 11095 break; 11096 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: 11097 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; 11098 break; 11099 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: 11100 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; 11101 break; 11102 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: 11103 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; 11104 break; 11105 } 11106 11107 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); 11108 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 11109 11110 BNX2X_DEV_INFO("%sWoL capable\n", 11111 (bp->flags & NO_WOL_FLAG) ? "not " : ""); 11112 11113 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 11114 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 11115 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 11116 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 11117 11118 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", 11119 val, val2, val3, val4); 11120} 11121 11122#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 11123#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 11124 11125static int bnx2x_get_igu_cam_info(struct bnx2x *bp) 11126{ 11127 int pfid = BP_FUNC(bp); 11128 int igu_sb_id; 11129 u32 val; 11130 u8 fid, igu_sb_cnt = 0; 11131 11132 bp->igu_base_sb = 0xff; 11133 if (CHIP_INT_MODE_IS_BC(bp)) { 11134 int vn = BP_VN(bp); 11135 igu_sb_cnt = bp->igu_sb_cnt; 11136 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 11137 FP_SB_MAX_E1x; 11138 11139 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + 11140 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); 11141 11142 return 0; 11143 } 11144 11145 /* IGU in normal mode - read CAM */ 11146 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 11147 igu_sb_id++) { 11148 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 11149 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 11150 continue; 11151 fid = IGU_FID(val); 11152 if ((fid & IGU_FID_ENCODE_IS_PF)) { 11153 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) 11154 continue; 11155 if (IGU_VEC(val) == 0) 11156 /* default status block */ 11157 bp->igu_dsb_id = igu_sb_id; 11158 else { 11159 if (bp->igu_base_sb == 0xff) 11160 bp->igu_base_sb = igu_sb_id; 11161 igu_sb_cnt++; 11162 } 11163 } 11164 } 11165 11166#ifdef CONFIG_PCI_MSI 11167 /* Due to new PF resource allocation by MFW T7.4 and above, it's 11168 * optional that number of CAM entries will not be equal to the value 11169 * advertised in PCI. 
11170 * Driver should use the minimal value of both as the actual status 11171 * block count 11172 */ 11173 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); 11174#endif 11175 11176 if (igu_sb_cnt == 0) { 11177 BNX2X_ERR("CAM configuration error\n"); 11178 return -EINVAL; 11179 } 11180 11181 return 0; 11182} 11183 11184static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) 11185{ 11186 int cfg_size = 0, idx, port = BP_PORT(bp); 11187 11188 /* Aggregation of supported attributes of all external phys */ 11189 bp->port.supported[0] = 0; 11190 bp->port.supported[1] = 0; 11191 switch (bp->link_params.num_phys) { 11192 case 1: 11193 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; 11194 cfg_size = 1; 11195 break; 11196 case 2: 11197 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; 11198 cfg_size = 1; 11199 break; 11200 case 3: 11201 if (bp->link_params.multi_phy_config & 11202 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 11203 bp->port.supported[1] = 11204 bp->link_params.phy[EXT_PHY1].supported; 11205 bp->port.supported[0] = 11206 bp->link_params.phy[EXT_PHY2].supported; 11207 } else { 11208 bp->port.supported[0] = 11209 bp->link_params.phy[EXT_PHY1].supported; 11210 bp->port.supported[1] = 11211 bp->link_params.phy[EXT_PHY2].supported; 11212 } 11213 cfg_size = 2; 11214 break; 11215 } 11216 11217 if (!(bp->port.supported[0] || bp->port.supported[1])) { 11218 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", 11219 SHMEM_RD(bp, 11220 dev_info.port_hw_config[port].external_phy_config), 11221 SHMEM_RD(bp, 11222 dev_info.port_hw_config[port].external_phy_config2)); 11223 return; 11224 } 11225 11226 if (CHIP_IS_E3(bp)) 11227 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); 11228 else { 11229 switch (switch_cfg) { 11230 case SWITCH_CFG_1G: 11231 bp->port.phy_addr = REG_RD( 11232 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 11233 break; 11234 case SWITCH_CFG_10G: 11235 bp->port.phy_addr = REG_RD( 11236 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 11237 break; 11238 default: 11239 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 11240 bp->port.link_config[0]); 11241 return; 11242 } 11243 } 11244 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 11245 /* mask what we support according to speed_cap_mask per configuration */ 11246 for (idx = 0; idx < cfg_size; idx++) { 11247 if (!(bp->link_params.speed_cap_mask[idx] & 11248 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 11249 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; 11250 11251 if (!(bp->link_params.speed_cap_mask[idx] & 11252 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 11253 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; 11254 11255 if (!(bp->link_params.speed_cap_mask[idx] & 11256 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 11257 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; 11258 11259 if (!(bp->link_params.speed_cap_mask[idx] & 11260 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 11261 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; 11262 11263 if (!(bp->link_params.speed_cap_mask[idx] & 11264 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 11265 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 11266 SUPPORTED_1000baseT_Full); 11267 11268 if (!(bp->link_params.speed_cap_mask[idx] & 11269 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 11270 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; 11271 11272 if (!(bp->link_params.speed_cap_mask[idx] & 11273 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 11274 bp->port.supported[idx] &= 
~SUPPORTED_10000baseT_Full; 11275 11276 if (!(bp->link_params.speed_cap_mask[idx] & 11277 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) 11278 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; 11279 } 11280 11281 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], 11282 bp->port.supported[1]); 11283} 11284 11285static void bnx2x_link_settings_requested(struct bnx2x *bp) 11286{ 11287 u32 link_config, idx, cfg_size = 0; 11288 bp->port.advertising[0] = 0; 11289 bp->port.advertising[1] = 0; 11290 switch (bp->link_params.num_phys) { 11291 case 1: 11292 case 2: 11293 cfg_size = 1; 11294 break; 11295 case 3: 11296 cfg_size = 2; 11297 break; 11298 } 11299 for (idx = 0; idx < cfg_size; idx++) { 11300 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 11301 link_config = bp->port.link_config[idx]; 11302 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 11303 case PORT_FEATURE_LINK_SPEED_AUTO: 11304 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 11305 bp->link_params.req_line_speed[idx] = 11306 SPEED_AUTO_NEG; 11307 bp->port.advertising[idx] |= 11308 bp->port.supported[idx]; 11309 if (bp->link_params.phy[EXT_PHY1].type == 11310 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 11311 bp->port.advertising[idx] |= 11312 (SUPPORTED_100baseT_Half | 11313 SUPPORTED_100baseT_Full); 11314 } else { 11315 /* force 10G, no AN */ 11316 bp->link_params.req_line_speed[idx] = 11317 SPEED_10000; 11318 bp->port.advertising[idx] |= 11319 (ADVERTISED_10000baseT_Full | 11320 ADVERTISED_FIBRE); 11321 continue; 11322 } 11323 break; 11324 11325 case PORT_FEATURE_LINK_SPEED_10M_FULL: 11326 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 11327 bp->link_params.req_line_speed[idx] = 11328 SPEED_10; 11329 bp->port.advertising[idx] |= 11330 (ADVERTISED_10baseT_Full | 11331 ADVERTISED_TP); 11332 } else { 11333 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11334 link_config, 11335 bp->link_params.speed_cap_mask[idx]); 11336 return; 11337 } 11338 break; 11339 11340 case PORT_FEATURE_LINK_SPEED_10M_HALF: 11341 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 11342 bp->link_params.req_line_speed[idx] = 11343 SPEED_10; 11344 bp->link_params.req_duplex[idx] = 11345 DUPLEX_HALF; 11346 bp->port.advertising[idx] |= 11347 (ADVERTISED_10baseT_Half | 11348 ADVERTISED_TP); 11349 } else { 11350 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11351 link_config, 11352 bp->link_params.speed_cap_mask[idx]); 11353 return; 11354 } 11355 break; 11356 11357 case PORT_FEATURE_LINK_SPEED_100M_FULL: 11358 if (bp->port.supported[idx] & 11359 SUPPORTED_100baseT_Full) { 11360 bp->link_params.req_line_speed[idx] = 11361 SPEED_100; 11362 bp->port.advertising[idx] |= 11363 (ADVERTISED_100baseT_Full | 11364 ADVERTISED_TP); 11365 } else { 11366 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11367 link_config, 11368 bp->link_params.speed_cap_mask[idx]); 11369 return; 11370 } 11371 break; 11372 11373 case PORT_FEATURE_LINK_SPEED_100M_HALF: 11374 if (bp->port.supported[idx] & 11375 SUPPORTED_100baseT_Half) { 11376 bp->link_params.req_line_speed[idx] = 11377 SPEED_100; 11378 bp->link_params.req_duplex[idx] = 11379 DUPLEX_HALF; 11380 bp->port.advertising[idx] |= 11381 (ADVERTISED_100baseT_Half | 11382 ADVERTISED_TP); 11383 } else { 11384 BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11385 link_config, 11386 bp->link_params.speed_cap_mask[idx]); 11387 return; 11388 } 11389 break; 11390 11391 case PORT_FEATURE_LINK_SPEED_1G: 11392 if (bp->port.supported[idx] & 11393 SUPPORTED_1000baseT_Full) { 11394 bp->link_params.req_line_speed[idx] = 11395 SPEED_1000; 11396 bp->port.advertising[idx] |= 11397 (ADVERTISED_1000baseT_Full | 11398 ADVERTISED_TP); 11399 } else if (bp->port.supported[idx] & 11400 SUPPORTED_1000baseKX_Full) { 11401 bp->link_params.req_line_speed[idx] = 11402 SPEED_1000; 11403 bp->port.advertising[idx] |= 11404 ADVERTISED_1000baseKX_Full; 11405 } else { 11406 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11407 link_config, 11408 bp->link_params.speed_cap_mask[idx]); 11409 return; 11410 } 11411 break; 11412 11413 case PORT_FEATURE_LINK_SPEED_2_5G: 11414 if (bp->port.supported[idx] & 11415 SUPPORTED_2500baseX_Full) { 11416 bp->link_params.req_line_speed[idx] = 11417 SPEED_2500; 11418 bp->port.advertising[idx] |= 11419 (ADVERTISED_2500baseX_Full | 11420 ADVERTISED_TP); 11421 } else { 11422 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11423 link_config, 11424 bp->link_params.speed_cap_mask[idx]); 11425 return; 11426 } 11427 break; 11428 11429 case PORT_FEATURE_LINK_SPEED_10G_CX4: 11430 if (bp->port.supported[idx] & 11431 SUPPORTED_10000baseT_Full) { 11432 bp->link_params.req_line_speed[idx] = 11433 SPEED_10000; 11434 bp->port.advertising[idx] |= 11435 (ADVERTISED_10000baseT_Full | 11436 ADVERTISED_FIBRE); 11437 } else if (bp->port.supported[idx] & 11438 SUPPORTED_10000baseKR_Full) { 11439 bp->link_params.req_line_speed[idx] = 11440 SPEED_10000; 11441 bp->port.advertising[idx] |= 11442 (ADVERTISED_10000baseKR_Full | 11443 ADVERTISED_FIBRE); 11444 } else { 11445 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11446 link_config, 11447 bp->link_params.speed_cap_mask[idx]); 11448 return; 11449 } 11450 break; 11451 case PORT_FEATURE_LINK_SPEED_20G: 11452 bp->link_params.req_line_speed[idx] = SPEED_20000; 11453 11454 break; 11455 default: 11456 BNX2X_ERR("NVRAM config error. 
BAD link speed link_config 0x%x\n", 11457 link_config); 11458 bp->link_params.req_line_speed[idx] = 11459 SPEED_AUTO_NEG; 11460 bp->port.advertising[idx] = 11461 bp->port.supported[idx]; 11462 break; 11463 } 11464 11465 bp->link_params.req_flow_ctrl[idx] = (link_config & 11466 PORT_FEATURE_FLOW_CONTROL_MASK); 11467 if (bp->link_params.req_flow_ctrl[idx] == 11468 BNX2X_FLOW_CTRL_AUTO) { 11469 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) 11470 bp->link_params.req_flow_ctrl[idx] = 11471 BNX2X_FLOW_CTRL_NONE; 11472 else 11473 bnx2x_set_requested_fc(bp); 11474 } 11475 11476 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", 11477 bp->link_params.req_line_speed[idx], 11478 bp->link_params.req_duplex[idx], 11479 bp->link_params.req_flow_ctrl[idx], 11480 bp->port.advertising[idx]); 11481 } 11482} 11483 11484static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 11485{ 11486 __be16 mac_hi_be = cpu_to_be16(mac_hi); 11487 __be32 mac_lo_be = cpu_to_be32(mac_lo); 11488 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be)); 11489 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be)); 11490} 11491 11492static void bnx2x_get_port_hwinfo(struct bnx2x *bp) 11493{ 11494 int port = BP_PORT(bp); 11495 u32 config; 11496 u32 ext_phy_type, ext_phy_config, eee_mode; 11497 11498 bp->link_params.bp = bp; 11499 bp->link_params.port = port; 11500 11501 bp->link_params.lane_config = 11502 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 11503 11504 bp->link_params.speed_cap_mask[0] = 11505 SHMEM_RD(bp, 11506 dev_info.port_hw_config[port].speed_capability_mask) & 11507 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 11508 bp->link_params.speed_cap_mask[1] = 11509 SHMEM_RD(bp, 11510 dev_info.port_hw_config[port].speed_capability_mask2) & 11511 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 11512 bp->port.link_config[0] = 11513 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 11514 11515 bp->port.link_config[1] = 11516 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); 11517 11518 bp->link_params.multi_phy_config = 11519 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); 11520 /* If the device is capable of WoL, set the default state according 11521 * to the HW 11522 */ 11523 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); 11524 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 11525 (config & PORT_FEATURE_WOL_ENABLED)); 11526 11527 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 11528 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) 11529 bp->flags |= NO_ISCSI_FLAG; 11530 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 11531 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) 11532 bp->flags |= NO_FCOE_FLAG; 11533 11534 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", 11535 bp->link_params.lane_config, 11536 bp->link_params.speed_cap_mask[0], 11537 bp->port.link_config[0]); 11538 11539 bp->link_params.switch_cfg = (bp->port.link_config[0] & 11540 PORT_FEATURE_CONNECTED_SWITCH_MASK); 11541 bnx2x_phy_probe(&bp->link_params); 11542 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 11543 11544 bnx2x_link_settings_requested(bp); 11545 11546 /* 11547 * If connected directly, work with the internal PHY, otherwise, work 11548 * with the external PHY 11549 */ 11550 ext_phy_config = 11551 SHMEM_RD(bp, 11552 dev_info.port_hw_config[port].external_phy_config); 11553 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 11554 if (ext_phy_type == 
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 11555 bp->mdio.prtad = bp->port.phy_addr; 11556 11557 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 11558 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 11559 bp->mdio.prtad = 11560 XGXS_EXT_PHY_ADDR(ext_phy_config); 11561 11562 /* Configure link feature according to nvram value */ 11563 eee_mode = (((SHMEM_RD(bp, dev_info. 11564 port_feature_config[port].eee_power_mode)) & 11565 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 11566 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 11567 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 11568 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | 11569 EEE_MODE_ENABLE_LPI | 11570 EEE_MODE_OUTPUT_TIME; 11571 } else { 11572 bp->link_params.eee_mode = 0; 11573 } 11574} 11575 11576void bnx2x_get_iscsi_info(struct bnx2x *bp) 11577{ 11578 u32 no_flags = NO_ISCSI_FLAG; 11579 int port = BP_PORT(bp); 11580 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 11581 drv_lic_key[port].max_iscsi_conn); 11582 11583 if (!CNIC_SUPPORT(bp)) { 11584 bp->flags |= no_flags; 11585 return; 11586 } 11587 11588 /* Get the number of maximum allowed iSCSI connections */ 11589 bp->cnic_eth_dev.max_iscsi_conn = 11590 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> 11591 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; 11592 11593 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", 11594 bp->cnic_eth_dev.max_iscsi_conn); 11595 11596 /* 11597 * If maximum allowed number of connections is zero - 11598 * disable the feature. 11599 */ 11600 if (!bp->cnic_eth_dev.max_iscsi_conn) 11601 bp->flags |= no_flags; 11602} 11603 11604static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) 11605{ 11606 /* Port info */ 11607 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 11608 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); 11609 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 11610 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); 11611 11612 /* Node info */ 11613 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 11614 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); 11615 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 11616 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 11617} 11618 11619static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) 11620{ 11621 u8 count = 0; 11622 11623 if (IS_MF(bp)) { 11624 u8 fid; 11625 11626 /* iterate over absolute function ids for this path: */ 11627 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { 11628 if (IS_MF_SD(bp)) { 11629 u32 cfg = MF_CFG_RD(bp, 11630 func_mf_config[fid].config); 11631 11632 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) && 11633 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) == 11634 FUNC_MF_CFG_PROTOCOL_FCOE)) 11635 count++; 11636 } else { 11637 u32 cfg = MF_CFG_RD(bp, 11638 func_ext_config[fid]. 11639 func_cfg); 11640 11641 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) && 11642 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) 11643 count++; 11644 } 11645 } 11646 } else { /* SF */ 11647 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1; 11648 11649 for (port = 0; port < port_cnt; port++) { 11650 u32 lic = SHMEM_RD(bp, 11651 drv_lic_key[port].max_fcoe_conn) ^ 11652 FW_ENCODE_32BIT_PATTERN; 11653 if (lic) 11654 count++; 11655 } 11656 } 11657 11658 return count; 11659} 11660 11661static void bnx2x_get_fcoe_info(struct bnx2x *bp) 11662{ 11663 int port = BP_PORT(bp); 11664 int func = BP_ABS_FUNC(bp); 11665 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 11666 drv_lic_key[port].max_fcoe_conn); 11667 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); 11668 11669 if (!CNIC_SUPPORT(bp)) { 11670 bp->flags |= NO_FCOE_FLAG; 11671 return; 11672 } 11673 11674 /* Get the number of maximum allowed FCoE connections */ 11675 bp->cnic_eth_dev.max_fcoe_conn = 11676 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 11677 BNX2X_MAX_FCOE_INIT_CONN_SHIFT; 11678 11679 /* Calculate the number of maximum allowed FCoE tasks */ 11680 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; 11681 11682 /* check if FCoE resources must be shared between different functions */ 11683 if (num_fcoe_func) 11684 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; 11685 11686 /* Read the WWN: */ 11687 if (!IS_MF(bp)) { 11688 /* Port info */ 11689 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 11690 SHMEM_RD(bp, 11691 dev_info.port_hw_config[port]. 11692 fcoe_wwn_port_name_upper); 11693 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = 11694 SHMEM_RD(bp, 11695 dev_info.port_hw_config[port]. 11696 fcoe_wwn_port_name_lower); 11697 11698 /* Node info */ 11699 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = 11700 SHMEM_RD(bp, 11701 dev_info.port_hw_config[port]. 11702 fcoe_wwn_node_name_upper); 11703 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 11704 SHMEM_RD(bp, 11705 dev_info.port_hw_config[port]. 11706 fcoe_wwn_node_name_lower); 11707 } else if (!IS_MF_SD(bp)) { 11708 /* Read the WWN info only if the FCoE feature is enabled for 11709 * this function. 11710 */ 11711 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp)) 11712 bnx2x_get_ext_wwn_info(bp, func); 11713 } else { 11714 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) 11715 bnx2x_get_ext_wwn_info(bp, func); 11716 } 11717 11718 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); 11719 11720 /* 11721 * If maximum allowed number of connections is zero - 11722 * disable the feature. 11723 */ 11724 if (!bp->cnic_eth_dev.max_fcoe_conn) { 11725 bp->flags |= NO_FCOE_FLAG; 11726 eth_zero_addr(bp->fip_mac); 11727 } 11728} 11729 11730static void bnx2x_get_cnic_info(struct bnx2x *bp) 11731{ 11732 /* 11733 * iSCSI may be dynamically disabled but reading 11734 * info here we will decrease memory usage by driver 11735 * if the feature is disabled for good 11736 */ 11737 bnx2x_get_iscsi_info(bp); 11738 bnx2x_get_fcoe_info(bp); 11739} 11740 11741static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) 11742{ 11743 u32 val, val2; 11744 int func = BP_ABS_FUNC(bp); 11745 int port = BP_PORT(bp); 11746 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; 11747 u8 *fip_mac = bp->fip_mac; 11748 11749 if (IS_MF(bp)) { 11750 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or 11751 * FCoE MAC then the appropriate feature should be disabled. 11752 * In non SD mode features configuration comes from struct 11753 * func_ext_config. 11754 */ 11755 if (!IS_MF_SD(bp)) { 11756 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 11757 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 11758 val2 = MF_CFG_RD(bp, func_ext_config[func]. 11759 iscsi_mac_addr_upper); 11760 val = MF_CFG_RD(bp, func_ext_config[func]. 
11761 iscsi_mac_addr_lower); 11762 bnx2x_set_mac_buf(iscsi_mac, val, val2); 11763 BNX2X_DEV_INFO 11764 ("Read iSCSI MAC: %pM\n", iscsi_mac); 11765 } else { 11766 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 11767 } 11768 11769 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 11770 val2 = MF_CFG_RD(bp, func_ext_config[func]. 11771 fcoe_mac_addr_upper); 11772 val = MF_CFG_RD(bp, func_ext_config[func]. 11773 fcoe_mac_addr_lower); 11774 bnx2x_set_mac_buf(fip_mac, val, val2); 11775 BNX2X_DEV_INFO 11776 ("Read FCoE L2 MAC: %pM\n", fip_mac); 11777 } else { 11778 bp->flags |= NO_FCOE_FLAG; 11779 } 11780 11781 bp->mf_ext_config = cfg; 11782 11783 } else { /* SD MODE */ 11784 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 11785 /* use primary mac as iscsi mac */ 11786 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); 11787 11788 BNX2X_DEV_INFO("SD ISCSI MODE\n"); 11789 BNX2X_DEV_INFO 11790 ("Read iSCSI MAC: %pM\n", iscsi_mac); 11791 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { 11792 /* use primary mac as fip mac */ 11793 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); 11794 BNX2X_DEV_INFO("SD FCoE MODE\n"); 11795 BNX2X_DEV_INFO 11796 ("Read FIP MAC: %pM\n", fip_mac); 11797 } 11798 } 11799 11800 /* If this is a storage-only interface, use SAN mac as 11801 * primary MAC. Notice that for SD this is already the case, 11802 * as the SAN mac was copied from the primary MAC. 11803 */ 11804 if (IS_MF_FCOE_AFEX(bp)) 11805 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 11806 } else { 11807 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11808 iscsi_mac_upper); 11809 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11810 iscsi_mac_lower); 11811 bnx2x_set_mac_buf(iscsi_mac, val, val2); 11812 11813 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11814 fcoe_fip_mac_upper); 11815 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11816 fcoe_fip_mac_lower); 11817 bnx2x_set_mac_buf(fip_mac, val, val2); 11818 } 11819 11820 /* Disable iSCSI OOO if MAC configuration is invalid. */ 11821 if (!is_valid_ether_addr(iscsi_mac)) { 11822 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 11823 eth_zero_addr(iscsi_mac); 11824 } 11825 11826 /* Disable FCoE if MAC configuration is invalid. 
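 * (is_valid_ether_addr() rejects e.g. an all-zero or a multicast address.)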
*/ 11827 if (!is_valid_ether_addr(fip_mac)) { 11828 bp->flags |= NO_FCOE_FLAG; 11829 eth_zero_addr(bp->fip_mac); 11830 } 11831} 11832 11833static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) 11834{ 11835 u32 val, val2; 11836 int func = BP_ABS_FUNC(bp); 11837 int port = BP_PORT(bp); 11838 11839 /* Zero primary MAC configuration */ 11840 eth_zero_addr(bp->dev->dev_addr); 11841 11842 if (BP_NOMCP(bp)) { 11843 BNX2X_ERROR("warning: random MAC workaround active\n"); 11844 eth_hw_addr_random(bp->dev); 11845 } else if (IS_MF(bp)) { 11846 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 11847 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); 11848 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && 11849 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) 11850 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11851 11852 if (CNIC_SUPPORT(bp)) 11853 bnx2x_get_cnic_mac_hwinfo(bp); 11854 } else { 11855 /* in SF read MACs from port configuration */ 11856 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 11857 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 11858 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11859 11860 if (CNIC_SUPPORT(bp)) 11861 bnx2x_get_cnic_mac_hwinfo(bp); 11862 } 11863 11864 if (!BP_NOMCP(bp)) { 11865 /* Read physical port identifier from shmem */ 11866 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 11867 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 11868 bnx2x_set_mac_buf(bp->phys_port_id, val, val2); 11869 bp->flags |= HAS_PHYS_PORT_ID; 11870 } 11871 11872 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 11873 11874 if (!is_valid_ether_addr(bp->dev->dev_addr)) 11875 dev_err(&bp->pdev->dev, 11876 "bad Ethernet MAC address configuration: %pM\n" 11877 "change it manually before bringing up the appropriate network interface\n", 11878 bp->dev->dev_addr); 11879} 11880 11881static bool bnx2x_get_dropless_info(struct bnx2x *bp) 11882{ 11883 int tmp; 11884 u32 cfg; 11885 11886 if (IS_VF(bp)) 11887 return false; 11888 11889 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { 11890 /* Take function: tmp = func */ 11891 tmp = BP_ABS_FUNC(bp); 11892 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg); 11893 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING); 11894 } else { 11895 /* Take port: tmp = port */ 11896 tmp = BP_PORT(bp); 11897 cfg = SHMEM_RD(bp, 11898 dev_info.port_hw_config[tmp].generic_features); 11899 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED); 11900 } 11901 return cfg; 11902} 11903 11904static void validate_set_si_mode(struct bnx2x *bp) 11905{ 11906 u8 func = BP_ABS_FUNC(bp); 11907 u32 val; 11908 11909 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 11910 11911 /* check for legal mac (upper bytes) */ 11912 if (val != 0xffff) { 11913 bp->mf_mode = MULTI_FUNCTION_SI; 11914 bp->mf_config[BP_VN(bp)] = 11915 MF_CFG_RD(bp, func_mf_config[func].config); 11916 } else 11917 BNX2X_DEV_INFO("illegal MAC address for SI\n"); 11918} 11919 11920static int bnx2x_get_hwinfo(struct bnx2x *bp) 11921{ 11922 int /*abs*/func = BP_ABS_FUNC(bp); 11923 int vn; 11924 u32 val = 0, val2 = 0; 11925 int rc = 0; 11926 11927 /* Validate that chip access is feasible */ 11928 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) { 11929 dev_err(&bp->pdev->dev, 11930 "Chip read returns all Fs. 
Preventing probe from continuing\n"); 11931 return -EINVAL; 11932 } 11933 11934 bnx2x_get_common_hwinfo(bp); 11935 11936 /* 11937 * initialize IGU parameters 11938 */ 11939 if (CHIP_IS_E1x(bp)) { 11940 bp->common.int_block = INT_BLOCK_HC; 11941 11942 bp->igu_dsb_id = DEF_SB_IGU_ID; 11943 bp->igu_base_sb = 0; 11944 } else { 11945 bp->common.int_block = INT_BLOCK_IGU; 11946 11947 /* do not allow device reset during IGU info processing */ 11948 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 11949 11950 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 11951 11952 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 11953 int tout = 5000; 11954 11955 BNX2X_DEV_INFO("FORCING Normal Mode\n"); 11956 11957 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 11958 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); 11959 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); 11960 11961 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 11962 tout--; 11963 usleep_range(1000, 2000); 11964 } 11965 11966 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 11967 dev_err(&bp->pdev->dev, 11968 "FORCING Normal Mode failed!!!\n"); 11969 bnx2x_release_hw_lock(bp, 11970 HW_LOCK_RESOURCE_RESET); 11971 return -EPERM; 11972 } 11973 } 11974 11975 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 11976 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); 11977 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; 11978 } else 11979 BNX2X_DEV_INFO("IGU Normal Mode\n"); 11980 11981 rc = bnx2x_get_igu_cam_info(bp); 11982 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 11983 if (rc) 11984 return rc; 11985 } 11986 11987 /* 11988 * set base FW non-default (fast path) status block id, this value is 11989 * used to initialize the fw_sb_id saved on the fp/queue structure to 11990 * determine the id used by the FW. 11991 */ 11992 if (CHIP_IS_E1x(bp)) 11993 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); 11994 else /* 11995 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of 11996 * the same queue are indicated on the same IGU SB). So we prefer 11997 * FW and IGU SBs to be the same value. 11998 */ 11999 bp->base_fw_ndsb = bp->igu_base_sb; 12000 12001 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" 12002 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, 12003 bp->igu_sb_cnt, bp->base_fw_ndsb); 12004 12005 /* 12006 * Initialize MF configuration 12007 */ 12008 bp->mf_ov = 0; 12009 bp->mf_mode = 0; 12010 bp->mf_sub_mode = 0; 12011 vn = BP_VN(bp); 12012 12013 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 12014 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", 12015 bp->common.shmem2_base, SHMEM2_RD(bp, size), 12016 (u32)offsetof(struct shmem2_region, mf_cfg_addr)); 12017 12018 if (SHMEM2_HAS(bp, mf_cfg_addr)) 12019 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); 12020 else 12021 bp->common.mf_cfg_base = bp->common.shmem_base + 12022 offsetof(struct shmem_region, func_mb) + 12023 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 12024 /* 12025 * get mf configuration: 12026 * 1. Existence of MF configuration 12027 * 2. MAC address must be legal (check only upper bytes) 12028 * for Switch-Independent mode; 12029 * OVLAN must be legal for Switch-Dependent mode 12030 * 3. 
SF_MODE configures specific MF mode 12031 */ 12032 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 12033 /* get mf configuration */ 12034 val = SHMEM_RD(bp, 12035 dev_info.shared_feature_config.config); 12036 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; 12037 12038 switch (val) { 12039 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 12040 validate_set_si_mode(bp); 12041 break; 12042 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 12043 if ((!CHIP_IS_E1x(bp)) && 12044 (MF_CFG_RD(bp, func_mf_config[func]. 12045 mac_upper) != 0xffff) && 12046 (SHMEM2_HAS(bp, 12047 afex_driver_support))) { 12048 bp->mf_mode = MULTI_FUNCTION_AFEX; 12049 bp->mf_config[vn] = MF_CFG_RD(bp, 12050 func_mf_config[func].config); 12051 } else { 12052 BNX2X_DEV_INFO("can not configure afex mode\n"); 12053 } 12054 break; 12055 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 12056 /* get OV configuration */ 12057 val = MF_CFG_RD(bp, 12058 func_mf_config[FUNC_0].e1hov_tag); 12059 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 12060 12061 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 12062 bp->mf_mode = MULTI_FUNCTION_SD; 12063 bp->mf_config[vn] = MF_CFG_RD(bp, 12064 func_mf_config[func].config); 12065 } else 12066 BNX2X_DEV_INFO("illegal OV for SD\n"); 12067 break; 12068 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE: 12069 bp->mf_mode = MULTI_FUNCTION_SD; 12070 bp->mf_sub_mode = SUB_MF_MODE_BD; 12071 bp->mf_config[vn] = 12072 MF_CFG_RD(bp, 12073 func_mf_config[func].config); 12074 12075 if (SHMEM2_HAS(bp, mtu_size)) { 12076 int mtu_idx = BP_FW_MB_IDX(bp); 12077 u16 mtu_size; 12078 u32 mtu; 12079 12080 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]); 12081 mtu_size = (u16)mtu; 12082 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n", 12083 mtu_size, mtu); 12084 12085 /* if valid: update device mtu */ 12086 if ((mtu_size >= ETH_MIN_PACKET_SIZE) && 12087 (mtu_size <= 12088 ETH_MAX_JUMBO_PACKET_SIZE)) 12089 bp->dev->mtu = mtu_size; 12090 } 12091 break; 12092 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE: 12093 bp->mf_mode = MULTI_FUNCTION_SD; 12094 bp->mf_sub_mode = SUB_MF_MODE_UFP; 12095 bp->mf_config[vn] = 12096 MF_CFG_RD(bp, 12097 func_mf_config[func].config); 12098 break; 12099 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 12100 bp->mf_config[vn] = 0; 12101 break; 12102 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE: 12103 val2 = SHMEM_RD(bp, 12104 dev_info.shared_hw_config.config_3); 12105 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK; 12106 switch (val2) { 12107 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5: 12108 validate_set_si_mode(bp); 12109 bp->mf_sub_mode = 12110 SUB_MF_MODE_NPAR1_DOT_5; 12111 break; 12112 default: 12113 /* Unknown configuration */ 12114 bp->mf_config[vn] = 0; 12115 BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n", 12116 val); 12117 } 12118 break; 12119 default: 12120 /* Unknown configuration: reset mf_config */ 12121 bp->mf_config[vn] = 0; 12122 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); 12123 } 12124 } 12125 12126 BNX2X_DEV_INFO("%s function mode\n", 12127 IS_MF(bp) ? 
"multi" : "single"); 12128 12129 switch (bp->mf_mode) { 12130 case MULTI_FUNCTION_SD: 12131 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & 12132 FUNC_MF_CFG_E1HOV_TAG_MASK; 12133 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 12134 bp->mf_ov = val; 12135 bp->path_has_ovlan = true; 12136 12137 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", 12138 func, bp->mf_ov, bp->mf_ov); 12139 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) || 12140 (bp->mf_sub_mode == SUB_MF_MODE_BD)) { 12141 dev_err(&bp->pdev->dev, 12142 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n", 12143 func); 12144 bp->path_has_ovlan = true; 12145 } else { 12146 dev_err(&bp->pdev->dev, 12147 "No valid MF OV for func %d, aborting\n", 12148 func); 12149 return -EPERM; 12150 } 12151 break; 12152 case MULTI_FUNCTION_AFEX: 12153 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func); 12154 break; 12155 case MULTI_FUNCTION_SI: 12156 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", 12157 func); 12158 break; 12159 default: 12160 if (vn) { 12161 dev_err(&bp->pdev->dev, 12162 "VN %d is in a single function mode, aborting\n", 12163 vn); 12164 return -EPERM; 12165 } 12166 break; 12167 } 12168 12169 /* check if other port on the path needs ovlan: 12170 * Since MF configuration is shared between ports 12171 * Possible mixed modes are only 12172 * {SF, SI} {SF, SD} {SD, SF} {SI, SF} 12173 */ 12174 if (CHIP_MODE_IS_4_PORT(bp) && 12175 !bp->path_has_ovlan && 12176 !IS_MF(bp) && 12177 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { 12178 u8 other_port = !BP_PORT(bp); 12179 u8 other_func = BP_PATH(bp) + 2*other_port; 12180 val = MF_CFG_RD(bp, 12181 func_mf_config[other_func].e1hov_tag); 12182 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) 12183 bp->path_has_ovlan = true; 12184 } 12185 } 12186 12187 /* adjust igu_sb_cnt to MF for E1H */ 12188 if (CHIP_IS_E1H(bp) && IS_MF(bp)) 12189 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); 12190 12191 /* port info */ 12192 bnx2x_get_port_hwinfo(bp); 12193 12194 /* Get MAC addresses */ 12195 bnx2x_get_mac_hwinfo(bp); 12196 12197 bnx2x_get_cnic_info(bp); 12198 12199 return rc; 12200} 12201 12202static void bnx2x_read_fwinfo(struct bnx2x *bp) 12203{ 12204 int cnt, i, block_end, rodi; 12205 char vpd_start[BNX2X_VPD_LEN+1]; 12206 char str_id_reg[VENDOR_ID_LEN+1]; 12207 char str_id_cap[VENDOR_ID_LEN+1]; 12208 char *vpd_data; 12209 char *vpd_extended_data = NULL; 12210 u8 len; 12211 12212 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); 12213 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); 12214 12215 if (cnt < BNX2X_VPD_LEN) 12216 goto out_not_found; 12217 12218 /* VPD RO tag should be first tag after identifier string, hence 12219 * we should be able to find it in first BNX2X_VPD_LEN chars 12220 */ 12221 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, 12222 PCI_VPD_LRDT_RO_DATA); 12223 if (i < 0) 12224 goto out_not_found; 12225 12226 block_end = i + PCI_VPD_LRDT_TAG_SIZE + 12227 pci_vpd_lrdt_size(&vpd_start[i]); 12228 12229 i += PCI_VPD_LRDT_TAG_SIZE; 12230 12231 if (block_end > BNX2X_VPD_LEN) { 12232 vpd_extended_data = kmalloc(block_end, GFP_KERNEL); 12233 if (vpd_extended_data == NULL) 12234 goto out_not_found; 12235 12236 /* read rest of vpd image into vpd_extended_data */ 12237 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); 12238 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, 12239 block_end - BNX2X_VPD_LEN, 12240 vpd_extended_data + BNX2X_VPD_LEN); 12241 if (cnt < (block_end - BNX2X_VPD_LEN)) 12242 goto out_not_found; 12243 vpd_data = vpd_extended_data; 
12244 } else 12245 vpd_data = vpd_start; 12246 12247 /* now vpd_data holds full vpd content in both cases */ 12248 12249 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 12250 PCI_VPD_RO_KEYWORD_MFR_ID); 12251 if (rodi < 0) 12252 goto out_not_found; 12253 12254 len = pci_vpd_info_field_size(&vpd_data[rodi]); 12255 12256 if (len != VENDOR_ID_LEN) 12257 goto out_not_found; 12258 12259 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 12260 12261 /* vendor specific info */ 12262 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); 12263 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); 12264 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || 12265 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { 12266 12267 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 12268 PCI_VPD_RO_KEYWORD_VENDOR0); 12269 if (rodi >= 0) { 12270 len = pci_vpd_info_field_size(&vpd_data[rodi]); 12271 12272 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 12273 12274 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { 12275 memcpy(bp->fw_ver, &vpd_data[rodi], len); 12276 bp->fw_ver[len] = ' '; 12277 } 12278 } 12279 kfree(vpd_extended_data); 12280 return; 12281 } 12282out_not_found: 12283 kfree(vpd_extended_data); 12284 return; 12285} 12286 12287static void bnx2x_set_modes_bitmap(struct bnx2x *bp) 12288{ 12289 u32 flags = 0; 12290 12291 if (CHIP_REV_IS_FPGA(bp)) 12292 SET_FLAGS(flags, MODE_FPGA); 12293 else if (CHIP_REV_IS_EMUL(bp)) 12294 SET_FLAGS(flags, MODE_EMUL); 12295 else 12296 SET_FLAGS(flags, MODE_ASIC); 12297 12298 if (CHIP_MODE_IS_4_PORT(bp)) 12299 SET_FLAGS(flags, MODE_PORT4); 12300 else 12301 SET_FLAGS(flags, MODE_PORT2); 12302 12303 if (CHIP_IS_E2(bp)) 12304 SET_FLAGS(flags, MODE_E2); 12305 else if (CHIP_IS_E3(bp)) { 12306 SET_FLAGS(flags, MODE_E3); 12307 if (CHIP_REV(bp) == CHIP_REV_Ax) 12308 SET_FLAGS(flags, MODE_E3_A0); 12309 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ 12310 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 12311 } 12312 12313 if (IS_MF(bp)) { 12314 SET_FLAGS(flags, MODE_MF); 12315 switch (bp->mf_mode) { 12316 case MULTI_FUNCTION_SD: 12317 SET_FLAGS(flags, MODE_MF_SD); 12318 break; 12319 case MULTI_FUNCTION_SI: 12320 SET_FLAGS(flags, MODE_MF_SI); 12321 break; 12322 case MULTI_FUNCTION_AFEX: 12323 SET_FLAGS(flags, MODE_MF_AFEX); 12324 break; 12325 } 12326 } else 12327 SET_FLAGS(flags, MODE_SF); 12328 12329#if defined(__LITTLE_ENDIAN) 12330 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 12331#else /*(__BIG_ENDIAN)*/ 12332 SET_FLAGS(flags, MODE_BIG_ENDIAN); 12333#endif 12334 INIT_MODE_FLAGS(bp) = flags; 12335} 12336 12337static int bnx2x_init_bp(struct bnx2x *bp) 12338{ 12339 int func; 12340 int rc; 12341 12342 mutex_init(&bp->port.phy_mutex); 12343 mutex_init(&bp->fw_mb_mutex); 12344 mutex_init(&bp->drv_info_mutex); 12345 sema_init(&bp->stats_lock, 1); 12346 bp->drv_info_mng_owner = false; 12347 INIT_LIST_HEAD(&bp->vlan_reg); 12348 12349 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 12350 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 12351 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 12352 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); 12353 if (IS_PF(bp)) { 12354 rc = bnx2x_get_hwinfo(bp); 12355 if (rc) 12356 return rc; 12357 } else { 12358 eth_zero_addr(bp->dev->dev_addr); 12359 } 12360 12361 bnx2x_set_modes_bitmap(bp); 12362 12363 rc = bnx2x_alloc_mem_bp(bp); 12364 if (rc) 12365 return rc; 12366 12367 bnx2x_read_fwinfo(bp); 12368 12369 func = BP_FUNC(bp); 12370 12371 /* need to reset chip if undi was active */ 12372 if (IS_PF(bp) && !BP_NOMCP(bp)) 
{ 12373 /* init fw_seq */ 12374 bp->fw_seq = 12375 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 12376 DRV_MSG_SEQ_NUMBER_MASK; 12377 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 12378 12379 rc = bnx2x_prev_unload(bp); 12380 if (rc) { 12381 bnx2x_free_mem_bp(bp); 12382 return rc; 12383 } 12384 } 12385 12386 if (CHIP_REV_IS_FPGA(bp)) 12387 dev_err(&bp->pdev->dev, "FPGA detected\n"); 12388 12389 if (BP_NOMCP(bp) && (func == 0)) 12390 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); 12391 12392 bp->disable_tpa = disable_tpa; 12393 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); 12394 /* Reduce memory usage in kdump environment by disabling TPA */ 12395 bp->disable_tpa |= is_kdump_kernel(); 12396 12397 /* Set TPA flags */ 12398 if (bp->disable_tpa) { 12399 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12400 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12401 } 12402 12403 if (CHIP_IS_E1(bp)) 12404 bp->dropless_fc = false; 12405 else 12406 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); 12407 12408 bp->mrrs = mrrs; 12409 12410 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL; 12411 if (IS_VF(bp)) 12412 bp->rx_ring_size = MAX_RX_AVAIL; 12413 12414 /* make sure that the numbers are in the right granularity */ 12415 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 12416 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; 12417 12418 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; 12419 12420 timer_setup(&bp->timer, bnx2x_timer, 0); 12421 bp->timer.expires = jiffies + bp->current_interval; 12422 12423 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && 12424 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && 12425 SHMEM2_HAS(bp, dcbx_en) && 12426 SHMEM2_RD(bp, dcbx_lldp_params_offset) && 12427 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) && 12428 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) { 12429 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); 12430 bnx2x_dcbx_init_params(bp); 12431 } else { 12432 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF); 12433 } 12434 12435 if (CHIP_IS_E1x(bp)) 12436 bp->cnic_base_cl_id = FP_SB_MAX_E1x; 12437 else 12438 bp->cnic_base_cl_id = FP_SB_MAX_E2; 12439 12440 /* multiple tx priority */ 12441 if (IS_VF(bp)) 12442 bp->max_cos = 1; 12443 else if (CHIP_IS_E1x(bp)) 12444 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; 12445 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) 12446 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; 12447 else if (CHIP_IS_E3B0(bp)) 12448 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 12449 else 12450 BNX2X_ERR("unknown chip %x revision %x\n", 12451 CHIP_NUM(bp), CHIP_REV(bp)); 12452 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); 12453 12454 /* We need at least one default status block for slow-path events, 12455 * second status block for the L2 queue, and a third status block for 12456 * CNIC if supported. 
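 * Hence the minimum MSI-X vector count chosen just below: 1 for a VF,
 * 3 when CNIC is supported, otherwise 2.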
12457 */ 12458 if (IS_VF(bp)) 12459 bp->min_msix_vec_cnt = 1; 12460 else if (CNIC_SUPPORT(bp)) 12461 bp->min_msix_vec_cnt = 3; 12462 else /* PF w/o cnic */ 12463 bp->min_msix_vec_cnt = 2; 12464 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); 12465 12466 bp->dump_preset_idx = 1; 12467 12468 return rc; 12469} 12470 12471/**************************************************************************** 12472* General service functions 12473****************************************************************************/ 12474 12475/* 12476 * net_device service functions 12477 */ 12478 12479/* called with rtnl_lock */ 12480static int bnx2x_open(struct net_device *dev) 12481{ 12482 struct bnx2x *bp = netdev_priv(dev); 12483 int rc; 12484 12485 bp->stats_init = true; 12486 12487 netif_carrier_off(dev); 12488 12489 bnx2x_set_power_state(bp, PCI_D0); 12490 12491 /* If parity had happen during the unload, then attentions 12492 * and/or RECOVERY_IN_PROGRES may still be set. In this case we 12493 * want the first function loaded on the current engine to 12494 * complete the recovery. 12495 * Parity recovery is only relevant for PF driver. 12496 */ 12497 if (IS_PF(bp)) { 12498 int other_engine = BP_PATH(bp) ? 0 : 1; 12499 bool other_load_status, load_status; 12500 bool global = false; 12501 12502 other_load_status = bnx2x_get_load_status(bp, other_engine); 12503 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); 12504 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || 12505 bnx2x_chk_parity_attn(bp, &global, true)) { 12506 do { 12507 /* If there are attentions and they are in a 12508 * global blocks, set the GLOBAL_RESET bit 12509 * regardless whether it will be this function 12510 * that will complete the recovery or not. 12511 */ 12512 if (global) 12513 bnx2x_set_reset_global(bp); 12514 12515 /* Only the first function on the current 12516 * engine should try to recover in open. In case 12517 * of attentions in global blocks only the first 12518 * in the chip should try to recover. 12519 */ 12520 if ((!load_status && 12521 (!global || !other_load_status)) && 12522 bnx2x_trylock_leader_lock(bp) && 12523 !bnx2x_leader_reset(bp)) { 12524 netdev_info(bp->dev, 12525 "Recovered in open\n"); 12526 break; 12527 } 12528 12529 /* recovery has failed... */ 12530 bnx2x_set_power_state(bp, PCI_D3hot); 12531 bp->recovery_state = BNX2X_RECOVERY_FAILED; 12532 12533 BNX2X_ERR("Recovery flow hasn't been properly completed yet. 
Try again later.\n" 12534 "If you still see this message after a few retries then power cycle is required.\n"); 12535 12536 return -EAGAIN; 12537 } while (0); 12538 } 12539 } 12540 12541 bp->recovery_state = BNX2X_RECOVERY_DONE; 12542 rc = bnx2x_nic_load(bp, LOAD_OPEN); 12543 if (rc) 12544 return rc; 12545 12546 return 0; 12547} 12548 12549/* called with rtnl_lock */ 12550static int bnx2x_close(struct net_device *dev) 12551{ 12552 struct bnx2x *bp = netdev_priv(dev); 12553 12554 /* Unload the driver, release IRQs */ 12555 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 12556 12557 return 0; 12558} 12559 12560struct bnx2x_mcast_list_elem_group 12561{ 12562 struct list_head mcast_group_link; 12563 struct bnx2x_mcast_list_elem mcast_elems[]; 12564}; 12565 12566#define MCAST_ELEMS_PER_PG \ 12567 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \ 12568 sizeof(struct bnx2x_mcast_list_elem)) 12569 12570static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list) 12571{ 12572 struct bnx2x_mcast_list_elem_group *current_mcast_group; 12573 12574 while (!list_empty(mcast_group_list)) { 12575 current_mcast_group = list_first_entry(mcast_group_list, 12576 struct bnx2x_mcast_list_elem_group, 12577 mcast_group_link); 12578 list_del(&current_mcast_group->mcast_group_link); 12579 free_page((unsigned long)current_mcast_group); 12580 } 12581} 12582 12583static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, 12584 struct bnx2x_mcast_ramrod_params *p, 12585 struct list_head *mcast_group_list) 12586{ 12587 struct bnx2x_mcast_list_elem *mc_mac; 12588 struct netdev_hw_addr *ha; 12589 struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL; 12590 int mc_count = netdev_mc_count(bp->dev); 12591 int offset = 0; 12592 12593 INIT_LIST_HEAD(&p->mcast_list); 12594 netdev_for_each_mc_addr(ha, bp->dev) { 12595 if (!offset) { 12596 current_mcast_group = 12597 (struct bnx2x_mcast_list_elem_group *) 12598 __get_free_page(GFP_ATOMIC); 12599 if (!current_mcast_group) { 12600 bnx2x_free_mcast_macs_list(mcast_group_list); 12601 BNX2X_ERR("Failed to allocate mc MAC list\n"); 12602 return -ENOMEM; 12603 } 12604 list_add(&current_mcast_group->mcast_group_link, 12605 mcast_group_list); 12606 } 12607 mc_mac = &current_mcast_group->mcast_elems[offset]; 12608 mc_mac->mac = bnx2x_mc_addr(ha); 12609 list_add_tail(&mc_mac->link, &p->mcast_list); 12610 offset++; 12611 if (offset == MCAST_ELEMS_PER_PG) 12612 offset = 0; 12613 } 12614 p->mcast_list_len = mc_count; 12615 return 0; 12616} 12617 12618/** 12619 * bnx2x_set_uc_list - configure a new unicast MACs list. 12620 * 12621 * @bp: driver handle 12622 * 12623 * We will use zero (0) as a MAC type for these MACs.
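 * Called from bnx2x_set_rx_mode_inner(), which drops the bh address lock
 * first because this function might sleep.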
12624 */ 12625static int bnx2x_set_uc_list(struct bnx2x *bp) 12626{ 12627 int rc; 12628 struct net_device *dev = bp->dev; 12629 struct netdev_hw_addr *ha; 12630 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; 12631 unsigned long ramrod_flags = 0; 12632 12633 /* First schedule a cleanup up of old configuration */ 12634 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); 12635 if (rc < 0) { 12636 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); 12637 return rc; 12638 } 12639 12640 netdev_for_each_uc_addr(ha, dev) { 12641 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, 12642 BNX2X_UC_LIST_MAC, &ramrod_flags); 12643 if (rc == -EEXIST) { 12644 DP(BNX2X_MSG_SP, 12645 "Failed to schedule ADD operations: %d\n", rc); 12646 /* do not treat adding same MAC as error */ 12647 rc = 0; 12648 12649 } else if (rc < 0) { 12650 12651 BNX2X_ERR("Failed to schedule ADD operations: %d\n", 12652 rc); 12653 return rc; 12654 } 12655 } 12656 12657 /* Execute the pending commands */ 12658 __set_bit(RAMROD_CONT, &ramrod_flags); 12659 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, 12660 BNX2X_UC_LIST_MAC, &ramrod_flags); 12661} 12662 12663static int bnx2x_set_mc_list_e1x(struct bnx2x *bp) 12664{ 12665 LIST_HEAD(mcast_group_list); 12666 struct net_device *dev = bp->dev; 12667 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 12668 int rc = 0; 12669 12670 rparam.mcast_obj = &bp->mcast_obj; 12671 12672 /* first, clear all configured multicast MACs */ 12673 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 12674 if (rc < 0) { 12675 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc); 12676 return rc; 12677 } 12678 12679 /* then, configure a new MACs list */ 12680 if (netdev_mc_count(dev)) { 12681 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list); 12682 if (rc) 12683 return rc; 12684 12685 /* Now add the new MACs */ 12686 rc = bnx2x_config_mcast(bp, &rparam, 12687 BNX2X_MCAST_CMD_ADD); 12688 if (rc < 0) 12689 BNX2X_ERR("Failed to set a new multicast configuration: %d\n", 12690 rc); 12691 12692 bnx2x_free_mcast_macs_list(&mcast_group_list); 12693 } 12694 12695 return rc; 12696} 12697 12698static int bnx2x_set_mc_list(struct bnx2x *bp) 12699{ 12700 LIST_HEAD(mcast_group_list); 12701 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 12702 struct net_device *dev = bp->dev; 12703 int rc = 0; 12704 12705 /* On older adapters, we need to flush and re-add filters */ 12706 if (CHIP_IS_E1x(bp)) 12707 return bnx2x_set_mc_list_e1x(bp); 12708 12709 rparam.mcast_obj = &bp->mcast_obj; 12710 12711 if (netdev_mc_count(dev)) { 12712 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list); 12713 if (rc) 12714 return rc; 12715 12716 /* Override the curently configured set of mc filters */ 12717 rc = bnx2x_config_mcast(bp, &rparam, 12718 BNX2X_MCAST_CMD_SET); 12719 if (rc < 0) 12720 BNX2X_ERR("Failed to set a new multicast configuration: %d\n", 12721 rc); 12722 12723 bnx2x_free_mcast_macs_list(&mcast_group_list); 12724 } else { 12725 /* If no mc addresses are required, flush the configuration */ 12726 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 12727 if (rc < 0) 12728 BNX2X_ERR("Failed to clear multicast configuration %d\n", 12729 rc); 12730 } 12731 12732 return rc; 12733} 12734 12735/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ 12736static void bnx2x_set_rx_mode(struct net_device *dev) 12737{ 12738 struct bnx2x *bp = netdev_priv(dev); 12739 12740 if (bp->state != BNX2X_STATE_OPEN) { 12741 
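		/* Interface is not fully up yet; nothing to program now. */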
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 12742 return; 12743 } else { 12744 /* Schedule an SP task to handle rest of change */ 12745 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, 12746 NETIF_MSG_IFUP); 12747 } 12748} 12749 12750void bnx2x_set_rx_mode_inner(struct bnx2x *bp) 12751{ 12752 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 12753 12754 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); 12755 12756 netif_addr_lock_bh(bp->dev); 12757 12758 if (bp->dev->flags & IFF_PROMISC) { 12759 rx_mode = BNX2X_RX_MODE_PROMISC; 12760 } else if ((bp->dev->flags & IFF_ALLMULTI) || 12761 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && 12762 CHIP_IS_E1(bp))) { 12763 rx_mode = BNX2X_RX_MODE_ALLMULTI; 12764 } else { 12765 if (IS_PF(bp)) { 12766 /* some multicasts */ 12767 if (bnx2x_set_mc_list(bp) < 0) 12768 rx_mode = BNX2X_RX_MODE_ALLMULTI; 12769 12770 /* release bh lock, as bnx2x_set_uc_list might sleep */ 12771 netif_addr_unlock_bh(bp->dev); 12772 if (bnx2x_set_uc_list(bp) < 0) 12773 rx_mode = BNX2X_RX_MODE_PROMISC; 12774 netif_addr_lock_bh(bp->dev); 12775 } else { 12776 /* configuring mcast to a vf involves sleeping (when we 12777 * wait for the pf's response). 12778 */ 12779 bnx2x_schedule_sp_rtnl(bp, 12780 BNX2X_SP_RTNL_VFPF_MCAST, 0); 12781 } 12782 } 12783 12784 bp->rx_mode = rx_mode; 12785 /* handle ISCSI SD mode */ 12786 if (IS_MF_ISCSI_ONLY(bp)) 12787 bp->rx_mode = BNX2X_RX_MODE_NONE; 12788 12789 /* Schedule the rx_mode command */ 12790 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { 12791 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 12792 netif_addr_unlock_bh(bp->dev); 12793 return; 12794 } 12795 12796 if (IS_PF(bp)) { 12797 bnx2x_set_storm_rx_mode(bp); 12798 netif_addr_unlock_bh(bp->dev); 12799 } else { 12800 /* VF will need to request the PF to make this change, and so 12801 * the VF needs to release the bottom-half lock prior to the 12802 * request (as it will likely require sleep on the VF side) 12803 */ 12804 netif_addr_unlock_bh(bp->dev); 12805 bnx2x_vfpf_storm_rx_mode(bp); 12806 } 12807} 12808 12809/* called with rtnl_lock */ 12810static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 12811 int devad, u16 addr) 12812{ 12813 struct bnx2x *bp = netdev_priv(netdev); 12814 u16 value; 12815 int rc; 12816 12817 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", 12818 prtad, devad, addr); 12819 12820 /* The HW expects different devad if CL22 is used */ 12821 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 12822 12823 bnx2x_acquire_phy_lock(bp); 12824 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); 12825 bnx2x_release_phy_lock(bp); 12826 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); 12827 12828 if (!rc) 12829 rc = value; 12830 return rc; 12831} 12832 12833/* called with rtnl_lock */ 12834static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, 12835 u16 addr, u16 value) 12836{ 12837 struct bnx2x *bp = netdev_priv(netdev); 12838 int rc; 12839 12840 DP(NETIF_MSG_LINK, 12841 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", 12842 prtad, devad, addr, value); 12843 12844 /* The HW expects different devad if CL22 is used */ 12845 devad = (devad == MDIO_DEVAD_NONE) ? 
DEFAULT_PHY_DEV_ADDR : devad; 12846 12847 bnx2x_acquire_phy_lock(bp); 12848 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); 12849 bnx2x_release_phy_lock(bp); 12850 return rc; 12851} 12852 12853/* called with rtnl_lock */ 12854static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 12855{ 12856 struct bnx2x *bp = netdev_priv(dev); 12857 struct mii_ioctl_data *mdio = if_mii(ifr); 12858 12859 if (!netif_running(dev)) 12860 return -EAGAIN; 12861 12862 switch (cmd) { 12863 case SIOCSHWTSTAMP: 12864 return bnx2x_hwtstamp_ioctl(bp, ifr); 12865 default: 12866 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", 12867 mdio->phy_id, mdio->reg_num, mdio->val_in); 12868 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 12869 } 12870} 12871 12872static int bnx2x_validate_addr(struct net_device *dev) 12873{ 12874 struct bnx2x *bp = netdev_priv(dev); 12875 12876 /* query the bulletin board for mac address configured by the PF */ 12877 if (IS_VF(bp)) 12878 bnx2x_sample_bulletin(bp); 12879 12880 if (!is_valid_ether_addr(dev->dev_addr)) { 12881 BNX2X_ERR("Non-valid Ethernet address\n"); 12882 return -EADDRNOTAVAIL; 12883 } 12884 return 0; 12885} 12886 12887static int bnx2x_get_phys_port_id(struct net_device *netdev, 12888 struct netdev_phys_item_id *ppid) 12889{ 12890 struct bnx2x *bp = netdev_priv(netdev); 12891 12892 if (!(bp->flags & HAS_PHYS_PORT_ID)) 12893 return -EOPNOTSUPP; 12894 12895 ppid->id_len = sizeof(bp->phys_port_id); 12896 memcpy(ppid->id, bp->phys_port_id, ppid->id_len); 12897 12898 return 0; 12899} 12900 12901static netdev_features_t bnx2x_features_check(struct sk_buff *skb, 12902 struct net_device *dev, 12903 netdev_features_t features) 12904{ 12905 /* 12906 * A skb with gso_size + header length > 9700 will cause a 12907 * firmware panic. Drop GSO support. 12908 * 12909 * Eventually the upper layer should not pass these packets down. 12910 * 12911 * For speed, if the gso_size is <= 9000, assume there will 12912 * not be 700 bytes of headers and pass it through. Only do a 12913 * full (slow) validation if the gso_size is > 9000. 12914 * 12915 * (Due to the way SKB_BY_FRAGS works this will also do a full 12916 * validation in that case.) 
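 * (e.g. a gso_size of 9100 with 400 bytes of headers yields 9500-byte
 * frames, which skb_gso_validate_mac_len(skb, 9700) accepts, so GSO is kept.)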
12917 */ 12918 if (unlikely(skb_is_gso(skb) && 12919 (skb_shinfo(skb)->gso_size > 9000) && 12920 !skb_gso_validate_mac_len(skb, 9700))) 12921 features &= ~NETIF_F_GSO_MASK; 12922 12923 features = vlan_features_check(skb, features); 12924 return vxlan_features_check(skb, features); 12925} 12926 12927static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add) 12928{ 12929 int rc; 12930 12931 if (IS_PF(bp)) { 12932 unsigned long ramrod_flags = 0; 12933 12934 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 12935 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj, 12936 add, &ramrod_flags); 12937 } else { 12938 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add); 12939 } 12940 12941 return rc; 12942} 12943 12944static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp) 12945{ 12946 struct bnx2x_vlan_entry *vlan; 12947 int rc = 0; 12948 12949 /* Configure all non-configured entries */ 12950 list_for_each_entry(vlan, &bp->vlan_reg, link) { 12951 if (vlan->hw) 12952 continue; 12953 12954 if (bp->vlan_cnt >= bp->vlan_credit) 12955 return -ENOBUFS; 12956 12957 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); 12958 if (rc) { 12959 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); 12960 return rc; 12961 } 12962 12963 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); 12964 vlan->hw = true; 12965 bp->vlan_cnt++; 12966 } 12967 12968 return 0; 12969} 12970 12971static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode) 12972{ 12973 bool need_accept_any_vlan; 12974 12975 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp); 12976 12977 if (bp->accept_any_vlan != need_accept_any_vlan) { 12978 bp->accept_any_vlan = need_accept_any_vlan; 12979 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", 12980 bp->accept_any_vlan ? "raised" : "cleared"); 12981 if (set_rx_mode) { 12982 if (IS_PF(bp)) 12983 bnx2x_set_rx_mode_inner(bp); 12984 else 12985 bnx2x_vfpf_storm_rx_mode(bp); 12986 } 12987 } 12988} 12989 12990int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) 12991{ 12992 /* Don't set rx mode here. Our caller will do it. 
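 * (Unlike bnx2x_vlan_rx_add_vid()/bnx2x_vlan_rx_kill_vid() below, which
 * pass set_rx_mode = true.)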
*/ 12993 bnx2x_vlan_configure(bp, false); 12994 12995 return 0; 12996} 12997 12998static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 12999{ 13000 struct bnx2x *bp = netdev_priv(dev); 13001 struct bnx2x_vlan_entry *vlan; 13002 13003 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); 13004 13005 vlan = kmalloc(sizeof(*vlan), GFP_KERNEL); 13006 if (!vlan) 13007 return -ENOMEM; 13008 13009 vlan->vid = vid; 13010 vlan->hw = false; 13011 list_add_tail(&vlan->link, &bp->vlan_reg); 13012 13013 if (netif_running(dev)) 13014 bnx2x_vlan_configure(bp, true); 13015 13016 return 0; 13017} 13018 13019static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 13020{ 13021 struct bnx2x *bp = netdev_priv(dev); 13022 struct bnx2x_vlan_entry *vlan; 13023 bool found = false; 13024 int rc = 0; 13025 13026 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); 13027 13028 list_for_each_entry(vlan, &bp->vlan_reg, link) 13029 if (vlan->vid == vid) { 13030 found = true; 13031 break; 13032 } 13033 13034 if (!found) { 13035 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); 13036 return -EINVAL; 13037 } 13038 13039 if (netif_running(dev) && vlan->hw) { 13040 rc = __bnx2x_vlan_configure_vid(bp, vid, false); 13041 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); 13042 bp->vlan_cnt--; 13043 } 13044 13045 list_del(&vlan->link); 13046 kfree(vlan); 13047 13048 if (netif_running(dev)) 13049 bnx2x_vlan_configure(bp, true); 13050 13051 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); 13052 13053 return rc; 13054} 13055 13056static const struct net_device_ops bnx2x_netdev_ops = { 13057 .ndo_open = bnx2x_open, 13058 .ndo_stop = bnx2x_close, 13059 .ndo_start_xmit = bnx2x_start_xmit, 13060 .ndo_select_queue = bnx2x_select_queue, 13061 .ndo_set_rx_mode = bnx2x_set_rx_mode, 13062 .ndo_set_mac_address = bnx2x_change_mac_addr, 13063 .ndo_validate_addr = bnx2x_validate_addr, 13064 .ndo_do_ioctl = bnx2x_ioctl, 13065 .ndo_change_mtu = bnx2x_change_mtu, 13066 .ndo_fix_features = bnx2x_fix_features, 13067 .ndo_set_features = bnx2x_set_features, 13068 .ndo_tx_timeout = bnx2x_tx_timeout, 13069 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, 13070 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, 13071 .ndo_setup_tc = __bnx2x_setup_tc, 13072#ifdef CONFIG_BNX2X_SRIOV 13073 .ndo_set_vf_mac = bnx2x_set_vf_mac, 13074 .ndo_set_vf_vlan = bnx2x_set_vf_vlan, 13075 .ndo_get_vf_config = bnx2x_get_vf_config, 13076 .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk, 13077#endif 13078#ifdef NETDEV_FCOE_WWNN 13079 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 13080#endif 13081 13082 .ndo_get_phys_port_id = bnx2x_get_phys_port_id, 13083 .ndo_set_vf_link_state = bnx2x_set_vf_link_state, 13084 .ndo_features_check = bnx2x_features_check, 13085 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, 13086 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, 13087}; 13088 13089static int bnx2x_set_coherency_mask(struct bnx2x *bp) 13090{ 13091 struct device *dev = &bp->pdev->dev; 13092 13093 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 && 13094 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) { 13095 dev_err(dev, "System does not support DMA, aborting\n"); 13096 return -EIO; 13097 } 13098 13099 return 0; 13100} 13101 13102static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp) 13103{ 13104 if (bp->flags & AER_ENABLED) { 13105 pci_disable_pcie_error_reporting(bp->pdev); 13106 bp->flags &= ~AER_ENABLED; 13107 } 13108} 13109 13110static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, 13111 struct 
net_device *dev, unsigned long board_type) 13112{ 13113 int rc; 13114 u32 pci_cfg_dword; 13115 bool chip_is_e1x = (board_type == BCM57710 || 13116 board_type == BCM57711 || 13117 board_type == BCM57711E); 13118 13119 SET_NETDEV_DEV(dev, &pdev->dev); 13120 13121 bp->dev = dev; 13122 bp->pdev = pdev; 13123 13124 rc = pci_enable_device(pdev); 13125 if (rc) { 13126 dev_err(&bp->pdev->dev, 13127 "Cannot enable PCI device, aborting\n"); 13128 goto err_out; 13129 } 13130 13131 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 13132 dev_err(&bp->pdev->dev, 13133 "Cannot find PCI device base address, aborting\n"); 13134 rc = -ENODEV; 13135 goto err_out_disable; 13136 } 13137 13138 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 13139 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); 13140 rc = -ENODEV; 13141 goto err_out_disable; 13142 } 13143 13144 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword); 13145 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) == 13146 PCICFG_REVESION_ID_ERROR_VAL) { 13147 pr_err("PCI device error, probably due to fan failure, aborting\n"); 13148 rc = -ENODEV; 13149 goto err_out_disable; 13150 } 13151 13152 if (atomic_read(&pdev->enable_cnt) == 1) { 13153 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 13154 if (rc) { 13155 dev_err(&bp->pdev->dev, 13156 "Cannot obtain PCI resources, aborting\n"); 13157 goto err_out_disable; 13158 } 13159 13160 pci_set_master(pdev); 13161 pci_save_state(pdev); 13162 } 13163 13164 if (IS_PF(bp)) { 13165 if (!pdev->pm_cap) { 13166 dev_err(&bp->pdev->dev, 13167 "Cannot find power management capability, aborting\n"); 13168 rc = -EIO; 13169 goto err_out_release; 13170 } 13171 } 13172 13173 if (!pci_is_pcie(pdev)) { 13174 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); 13175 rc = -EIO; 13176 goto err_out_release; 13177 } 13178 13179 rc = bnx2x_set_coherency_mask(bp); 13180 if (rc) 13181 goto err_out_release; 13182 13183 dev->mem_start = pci_resource_start(pdev, 0); 13184 dev->base_addr = dev->mem_start; 13185 dev->mem_end = pci_resource_end(pdev, 0); 13186 13187 dev->irq = pdev->irq; 13188 13189 bp->regview = pci_ioremap_bar(pdev, 0); 13190 if (!bp->regview) { 13191 dev_err(&bp->pdev->dev, 13192 "Cannot map register space, aborting\n"); 13193 rc = -ENOMEM; 13194 goto err_out_release; 13195 } 13196 13197 /* In E1/E1H use pci device function given by kernel. 13198 * In E2/E3 read physical function from ME register since these chips 13199 * support Physical Device Assignment where kernel BDF may be arbitrary 13200 (depending on hypervisor). 
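 * The ME register holds the absolute PF number, so it stays correct even
 * when the BDF seen by the kernel is virtualized.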
13201 */ 13202 if (chip_is_e1x) { 13203 bp->pf_num = PCI_FUNC(pdev->devfn); 13204 } else { 13205 /* chip is E2/3*/ 13206 pci_read_config_dword(bp->pdev, 13207 PCICFG_ME_REGISTER, &pci_cfg_dword); 13208 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> 13209 ME_REG_ABS_PF_NUM_SHIFT); 13210 } 13211 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); 13212 13213 /* clean indirect addresses */ 13214 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 13215 PCICFG_VENDOR_ID_OFFSET); 13216 13217 /* Set PCIe reset type to fundamental for EEH recovery */ 13218 pdev->needs_freset = 1; 13219 13220 /* AER (Advanced Error reporting) configuration */ 13221 rc = pci_enable_pcie_error_reporting(pdev); 13222 if (!rc) 13223 bp->flags |= AER_ENABLED; 13224 else 13225 BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc); 13226 13227 /* 13228 * Clean the following indirect addresses for all functions since it 13229 * is not used by the driver. 13230 */ 13231 if (IS_PF(bp)) { 13232 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); 13233 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); 13234 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 13235 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 13236 13237 if (chip_is_e1x) { 13238 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 13239 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 13240 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 13241 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 13242 } 13243 13244 /* Enable internal target-read (in case we are probed after PF 13245 * FLR). Must be done prior to any BAR read access. Only for 13246 * 57712 and up 13247 */ 13248 if (!chip_is_e1x) 13249 REG_WR(bp, 13250 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 13251 } 13252 13253 dev->watchdog_timeo = TX_TIMEOUT; 13254 13255 dev->netdev_ops = &bnx2x_netdev_ops; 13256 bnx2x_set_ethtool_ops(bp, dev); 13257 13258 dev->priv_flags |= IFF_UNICAST_FLT; 13259 13260 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 13261 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 13262 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW | 13263 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; 13264 if (!chip_is_e1x) { 13265 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | 13266 NETIF_F_GSO_IPXIP4 | 13267 NETIF_F_GSO_UDP_TUNNEL | 13268 NETIF_F_GSO_UDP_TUNNEL_CSUM | 13269 NETIF_F_GSO_PARTIAL; 13270 13271 dev->hw_enc_features = 13272 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 13273 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 13274 NETIF_F_GSO_IPXIP4 | 13275 NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | 13276 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | 13277 NETIF_F_GSO_PARTIAL; 13278 13279 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM | 13280 NETIF_F_GSO_UDP_TUNNEL_CSUM; 13281 13282 if (IS_PF(bp)) 13283 dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels; 13284 } 13285 13286 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 13287 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 13288 13289 if (IS_PF(bp)) { 13290 if (chip_is_e1x) 13291 bp->accept_any_vlan = true; 13292 else 13293 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 13294 } 13295 /* For VF we'll know whether to enable VLAN filtering after 13296 * getting a response to CHANNEL_TLV_ACQUIRE from PF. 
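 * (bnx2x_init_one() checks the PFVF_CAP_VLAN_FILTER bit in the acquire
 * response before advertising NETIF_F_HW_VLAN_CTAG_FILTER.)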
13297 */ 13298 13299 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; 13300 dev->features |= NETIF_F_HIGHDMA; 13301 if (dev->features & NETIF_F_LRO) 13302 dev->features &= ~NETIF_F_GRO_HW; 13303 13304 /* Add Loopback capability to the device */ 13305 dev->hw_features |= NETIF_F_LOOPBACK; 13306 13307#ifdef BCM_DCBNL 13308 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 13309#endif 13310 13311 /* MTU range, 46 - 9600 */ 13312 dev->min_mtu = ETH_MIN_PACKET_SIZE; 13313 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE; 13314 13315 /* get_port_hwinfo() will set prtad and mmds properly */ 13316 bp->mdio.prtad = MDIO_PRTAD_NONE; 13317 bp->mdio.mmds = 0; 13318 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 13319 bp->mdio.dev = dev; 13320 bp->mdio.mdio_read = bnx2x_mdio_read; 13321 bp->mdio.mdio_write = bnx2x_mdio_write; 13322 13323 return 0; 13324 13325err_out_release: 13326 if (atomic_read(&pdev->enable_cnt) == 1) 13327 pci_release_regions(pdev); 13328 13329err_out_disable: 13330 pci_disable_device(pdev); 13331 13332err_out: 13333 return rc; 13334} 13335 13336static int bnx2x_check_firmware(struct bnx2x *bp) 13337{ 13338 const struct firmware *firmware = bp->firmware; 13339 struct bnx2x_fw_file_hdr *fw_hdr; 13340 struct bnx2x_fw_file_section *sections; 13341 u32 offset, len, num_ops; 13342 __be16 *ops_offsets; 13343 int i; 13344 const u8 *fw_ver; 13345 13346 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { 13347 BNX2X_ERR("Wrong FW size\n"); 13348 return -EINVAL; 13349 } 13350 13351 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; 13352 sections = (struct bnx2x_fw_file_section *)fw_hdr; 13353 13354 /* Make sure none of the offsets and sizes make us read beyond 13355 * the end of the firmware data */ 13356 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) { 13357 offset = be32_to_cpu(sections[i].offset); 13358 len = be32_to_cpu(sections[i].len); 13359 if (offset + len > firmware->size) { 13360 BNX2X_ERR("Section %d length is out of bounds\n", i); 13361 return -EINVAL; 13362 } 13363 } 13364 13365 /* Likewise for the init_ops offsets */ 13366 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); 13367 ops_offsets = (__force __be16 *)(firmware->data + offset); 13368 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); 13369 13370 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { 13371 if (be16_to_cpu(ops_offsets[i]) > num_ops) { 13372 BNX2X_ERR("Section offset %d is out of bounds\n", i); 13373 return -EINVAL; 13374 } 13375 } 13376 13377 /* Check FW version */ 13378 offset = be32_to_cpu(fw_hdr->fw_version.offset); 13379 fw_ver = firmware->data + offset; 13380 if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor || 13381 fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) { 13382 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. 
Should be %d.%d.%d.%d\n", 13383 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], 13384 bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng); 13385 return -EINVAL; 13386 } 13387 13388 return 0; 13389} 13390 13391static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 13392{ 13393 const __be32 *source = (const __be32 *)_source; 13394 u32 *target = (u32 *)_target; 13395 u32 i; 13396 13397 for (i = 0; i < n/4; i++) 13398 target[i] = be32_to_cpu(source[i]); 13399} 13400 13401/* 13402 Ops array is stored in the following format: 13403 {op(8bit), offset(24bit, big endian), data(32bit, big endian)} 13404 */ 13405static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) 13406{ 13407 const __be32 *source = (const __be32 *)_source; 13408 struct raw_op *target = (struct raw_op *)_target; 13409 u32 i, j, tmp; 13410 13411 for (i = 0, j = 0; i < n/8; i++, j += 2) { 13412 tmp = be32_to_cpu(source[j]); 13413 target[i].op = (tmp >> 24) & 0xff; 13414 target[i].offset = tmp & 0xffffff; 13415 target[i].raw_data = be32_to_cpu(source[j + 1]); 13416 } 13417} 13418 13419/* IRO array is stored in the following format: 13420 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } 13421 */ 13422static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) 13423{ 13424 const __be32 *source = (const __be32 *)_source; 13425 struct iro *target = (struct iro *)_target; 13426 u32 i, j, tmp; 13427 13428 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { 13429 target[i].base = be32_to_cpu(source[j]); 13430 j++; 13431 tmp = be32_to_cpu(source[j]); 13432 target[i].m1 = (tmp >> 16) & 0xffff; 13433 target[i].m2 = tmp & 0xffff; 13434 j++; 13435 tmp = be32_to_cpu(source[j]); 13436 target[i].m3 = (tmp >> 16) & 0xffff; 13437 target[i].size = tmp & 0xffff; 13438 j++; 13439 } 13440} 13441 13442static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 13443{ 13444 const __be16 *source = (const __be16 *)_source; 13445 u16 *target = (u16 *)_target; 13446 u32 i; 13447 13448 for (i = 0; i < n/2; i++) 13449 target[i] = be16_to_cpu(source[i]); 13450} 13451 13452#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \ 13453do { \ 13454 u32 len = be32_to_cpu(fw_hdr->arr.len); \ 13455 bp->arr = kmalloc(len, GFP_KERNEL); \ 13456 if (!bp->arr) \ 13457 goto lbl; \ 13458 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ 13459 (u8 *)bp->arr, len); \ 13460} while (0) 13461 13462static int bnx2x_init_firmware(struct bnx2x *bp) 13463{ 13464 const char *fw_file_name, *fw_file_name_v15; 13465 struct bnx2x_fw_file_hdr *fw_hdr; 13466 int rc; 13467 13468 if (bp->firmware) 13469 return 0; 13470 13471 if (CHIP_IS_E1(bp)) { 13472 fw_file_name = FW_FILE_NAME_E1; 13473 fw_file_name_v15 = FW_FILE_NAME_E1_V15; 13474 } else if (CHIP_IS_E1H(bp)) { 13475 fw_file_name = FW_FILE_NAME_E1H; 13476 fw_file_name_v15 = FW_FILE_NAME_E1H_V15; 13477 } else if (!CHIP_IS_E1x(bp)) { 13478 fw_file_name = FW_FILE_NAME_E2; 13479 fw_file_name_v15 = FW_FILE_NAME_E2_V15; 13480 } else { 13481 BNX2X_ERR("Unsupported chip revision\n"); 13482 return -EINVAL; 13483 } 13484 13485 BNX2X_DEV_INFO("Loading %s\n", fw_file_name); 13486 13487 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); 13488 if (rc) { 13489 BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15); 13490 13491 /* try to load prev version */ 13492 rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev); 13493 13494 if (rc) 13495 goto request_firmware_exit; 13496 13497 bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15; 13498 } else { 13499 bp->fw_cap |= 
FW_CAP_INVALIDATE_VF_FP_HSI; 13500 bp->fw_rev = BCM_5710_FW_REVISION_VERSION; 13501 } 13502 13503 bp->fw_major = BCM_5710_FW_MAJOR_VERSION; 13504 bp->fw_minor = BCM_5710_FW_MINOR_VERSION; 13505 bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION; 13506 13507 rc = bnx2x_check_firmware(bp); 13508 if (rc) { 13509 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); 13510 goto request_firmware_exit; 13511 } 13512 13513 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; 13514 13515 /* Initialize the pointers to the init arrays */ 13516 /* Blob */ 13517 rc = -ENOMEM; 13518 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n); 13519 13520 /* Opcodes */ 13521 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops); 13522 13523 /* Offsets */ 13524 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, 13525 be16_to_cpu_n); 13526 13527 /* STORMs firmware */ 13528 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 13529 be32_to_cpu(fw_hdr->tsem_int_table_data.offset); 13530 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + 13531 be32_to_cpu(fw_hdr->tsem_pram_data.offset); 13532 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + 13533 be32_to_cpu(fw_hdr->usem_int_table_data.offset); 13534 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + 13535 be32_to_cpu(fw_hdr->usem_pram_data.offset); 13536 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 13537 be32_to_cpu(fw_hdr->xsem_int_table_data.offset); 13538 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + 13539 be32_to_cpu(fw_hdr->xsem_pram_data.offset); 13540 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 13541 be32_to_cpu(fw_hdr->csem_int_table_data.offset); 13542 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + 13543 be32_to_cpu(fw_hdr->csem_pram_data.offset); 13544 /* IRO */ 13545 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); 13546 13547 return 0; 13548 13549iro_alloc_err: 13550 kfree(bp->init_ops_offsets); 13551init_offsets_alloc_err: 13552 kfree(bp->init_ops); 13553init_ops_alloc_err: 13554 kfree(bp->init_data); 13555request_firmware_exit: 13556 release_firmware(bp->firmware); 13557 bp->firmware = NULL; 13558 13559 return rc; 13560} 13561 13562static void bnx2x_release_firmware(struct bnx2x *bp) 13563{ 13564 kfree(bp->init_ops_offsets); 13565 kfree(bp->init_ops); 13566 kfree(bp->init_data); 13567 release_firmware(bp->firmware); 13568 bp->firmware = NULL; 13569} 13570 13571static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { 13572 .init_hw_cmn_chip = bnx2x_init_hw_common_chip, 13573 .init_hw_cmn = bnx2x_init_hw_common, 13574 .init_hw_port = bnx2x_init_hw_port, 13575 .init_hw_func = bnx2x_init_hw_func, 13576 13577 .reset_hw_cmn = bnx2x_reset_common, 13578 .reset_hw_port = bnx2x_reset_port, 13579 .reset_hw_func = bnx2x_reset_func, 13580 13581 .gunzip_init = bnx2x_gunzip_init, 13582 .gunzip_end = bnx2x_gunzip_end, 13583 13584 .init_fw = bnx2x_init_firmware, 13585 .release_fw = bnx2x_release_firmware, 13586}; 13587 13588void bnx2x__init_func_obj(struct bnx2x *bp) 13589{ 13590 /* Prepare DMAE related driver resources */ 13591 bnx2x_setup_dmae(bp); 13592 13593 bnx2x_init_func_obj(bp, &bp->func_obj, 13594 bnx2x_sp(bp, func_rdata), 13595 bnx2x_sp_mapping(bp, func_rdata), 13596 bnx2x_sp(bp, func_afex_rdata), 13597 bnx2x_sp_mapping(bp, func_afex_rdata), 13598 &bnx2x_func_sp_drv); 13599} 13600 13601/* must be called after sriov-enable */ 13602static int bnx2x_set_qm_cid_count(struct bnx2x *bp) 13603{ 13604 int cid_count = BNX2X_L2_MAX_CID(bp); 13605 13606 if (IS_SRIOV(bp)) 13607 cid_count += BNX2X_VF_CIDS; 
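	/* CNIC (iSCSI/FCoE offload) connections get their own CIDs on top of
	 * the L2 range; the total is rounded up to the QM granularity below.
	 */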
13608 13609 if (CNIC_SUPPORT(bp)) 13610 cid_count += CNIC_CID_MAX; 13611 13612 return roundup(cid_count, QM_CID_ROUND); 13613} 13614 13615/** 13616 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs 13617 * @pdev: pci device 13618 * @cnic_cnt: number of SBs reserved for CNIC 13619 * 13620 */ 13621static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt) 13622{ 13623 int index; 13624 u16 control = 0; 13625 13626 /* 13627 * If MSI-X is not supported - return number of SBs needed to support 13628 * one fast path queue: one FP queue + SB for CNIC 13629 */ 13630 if (!pdev->msix_cap) { 13631 dev_info(&pdev->dev, "no msix capability found\n"); 13632 return 1 + cnic_cnt; 13633 } 13634 dev_info(&pdev->dev, "msix capability found\n"); 13635 13636 /* 13637 * The value in the PCI configuration space is the index of the last 13638 * entry, namely one less than the actual size of the table, which is 13639 * exactly what we want to return from this function: number of all SBs 13640 * without the default SB. 13641 * For VFs there is no default SB, so we return (index+1). 13642 */ 13643 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control); 13644 13645 index = control & PCI_MSIX_FLAGS_QSIZE; 13646 13647 return index; 13648} 13649 13650static int set_max_cos_est(int chip_id) 13651{ 13652 switch (chip_id) { 13653 case BCM57710: 13654 case BCM57711: 13655 case BCM57711E: 13656 return BNX2X_MULTI_TX_COS_E1X; 13657 case BCM57712: 13658 case BCM57712_MF: 13659 return BNX2X_MULTI_TX_COS_E2_E3A0; 13660 case BCM57800: 13661 case BCM57800_MF: 13662 case BCM57810: 13663 case BCM57810_MF: 13664 case BCM57840_4_10: 13665 case BCM57840_2_20: 13666 case BCM57840_O: 13667 case BCM57840_MFO: 13668 case BCM57840_MF: 13669 case BCM57811: 13670 case BCM57811_MF: 13671 return BNX2X_MULTI_TX_COS_E3B0; 13672 case BCM57712_VF: 13673 case BCM57800_VF: 13674 case BCM57810_VF: 13675 case BCM57840_VF: 13676 case BCM57811_VF: 13677 return 1; 13678 default: 13679 pr_err("Unknown board_type (%d), aborting\n", chip_id); 13680 return -ENODEV; 13681 } 13682} 13683 13684static int set_is_vf(int chip_id) 13685{ 13686 switch (chip_id) { 13687 case BCM57712_VF: 13688 case BCM57800_VF: 13689 case BCM57810_VF: 13690 case BCM57840_VF: 13691 case BCM57811_VF: 13692 return true; 13693 default: 13694 return false; 13695 } 13696} 13697 13698/* nig_tsgen registers relative address */ 13699#define tsgen_ctrl 0x0 13700#define tsgen_freecount 0x10 13701#define tsgen_synctime_t0 0x20 13702#define tsgen_offset_t0 0x28 13703#define tsgen_drift_t0 0x30 13704#define tsgen_synctime_t1 0x58 13705#define tsgen_offset_t1 0x60 13706#define tsgen_drift_t1 0x68 13707 13708/* FW workaround for setting drift */ 13709static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir, 13710 int best_val, int best_period) 13711{ 13712 struct bnx2x_func_state_params func_params = {NULL}; 13713 struct bnx2x_func_set_timesync_params *set_timesync_params = 13714 &func_params.params.set_timesync; 13715 13716 /* Prepare parameters for function state transitions */ 13717 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 13718 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 13719 13720 func_params.f_obj = &bp->func_obj; 13721 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; 13722 13723 /* Function parameters */ 13724 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET; 13725 set_timesync_params->offset_cmd = TS_OFFSET_KEEP; 13726 set_timesync_params->add_sub_drift_adjust_value = 13727 drift_dir ? 
TS_ADD_VALUE : TS_SUB_VALUE; 13728 set_timesync_params->drift_adjust_value = best_val; 13729 set_timesync_params->drift_adjust_period = best_period; 13730 13731 return bnx2x_func_state_change(bp, &func_params); 13732} 13733 13734static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 13735{ 13736 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13737 int rc; 13738 int drift_dir = 1; 13739 int val, period, period1, period2, dif, dif1, dif2; 13740 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0; 13741 13742 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); 13743 13744 if (!netif_running(bp->dev)) { 13745 DP(BNX2X_MSG_PTP, 13746 "PTP adjfreq called while the interface is down\n"); 13747 return -ENETDOWN; 13748 } 13749 13750 if (ppb < 0) { 13751 ppb = -ppb; 13752 drift_dir = 0; 13753 } 13754 13755 if (ppb == 0) { 13756 best_val = 1; 13757 best_period = 0x1FFFFFF; 13758 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) { 13759 best_val = 31; 13760 best_period = 1; 13761 } else { 13762 /* Changed not to allow val = 8, 16, 24 as these values 13763 * are not supported in workaround. 13764 */ 13765 for (val = 0; val <= 31; val++) { 13766 if ((val & 0x7) == 0) 13767 continue; 13768 period1 = val * 1000000 / ppb; 13769 period2 = period1 + 1; 13770 if (period1 != 0) 13771 dif1 = ppb - (val * 1000000 / period1); 13772 else 13773 dif1 = BNX2X_MAX_PHC_DRIFT; 13774 if (dif1 < 0) 13775 dif1 = -dif1; 13776 dif2 = ppb - (val * 1000000 / period2); 13777 if (dif2 < 0) 13778 dif2 = -dif2; 13779 dif = (dif1 < dif2) ? dif1 : dif2; 13780 period = (dif1 < dif2) ? period1 : period2; 13781 if (dif < best_dif) { 13782 best_dif = dif; 13783 best_val = val; 13784 best_period = period; 13785 } 13786 } 13787 } 13788 13789 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val, 13790 best_period); 13791 if (rc) { 13792 BNX2X_ERR("Failed to set drift\n"); 13793 return -EFAULT; 13794 } 13795 13796 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val, 13797 best_period); 13798 13799 return 0; 13800} 13801 13802static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 13803{ 13804 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13805 13806 if (!netif_running(bp->dev)) { 13807 DP(BNX2X_MSG_PTP, 13808 "PTP adjtime called while the interface is down\n"); 13809 return -ENETDOWN; 13810 } 13811 13812 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); 13813 13814 timecounter_adjtime(&bp->timecounter, delta); 13815 13816 return 0; 13817} 13818 13819static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) 13820{ 13821 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13822 u64 ns; 13823 13824 if (!netif_running(bp->dev)) { 13825 DP(BNX2X_MSG_PTP, 13826 "PTP gettime called while the interface is down\n"); 13827 return -ENETDOWN; 13828 } 13829 13830 ns = timecounter_read(&bp->timecounter); 13831 13832 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); 13833 13834 *ts = ns_to_timespec64(ns); 13835 13836 return 0; 13837} 13838 13839static int bnx2x_ptp_settime(struct ptp_clock_info *ptp, 13840 const struct timespec64 *ts) 13841{ 13842 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13843 u64 ns; 13844 13845 if (!netif_running(bp->dev)) { 13846 DP(BNX2X_MSG_PTP, 13847 "PTP settime called while the interface is down\n"); 13848 return -ENETDOWN; 13849 } 13850 13851 ns = timespec64_to_ns(ts); 13852 13853 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); 13854 13855 
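	/* The PHC counter free-runs; absolute time is tracked only in the
	 * timecounter, so setting the clock just re-arms it at the requested
	 * value.
	 */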
/* Re-init the timecounter */ 13856 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); 13857 13858 return 0; 13859} 13860 13861/* Enable (or disable) ancillary features of the phc subsystem */ 13862static int bnx2x_ptp_enable(struct ptp_clock_info *ptp, 13863 struct ptp_clock_request *rq, int on) 13864{ 13865 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13866 13867 BNX2X_ERR("PHC ancillary features are not supported\n"); 13868 return -ENOTSUPP; 13869} 13870 13871void bnx2x_register_phc(struct bnx2x *bp) 13872{ 13873 /* Fill the ptp_clock_info struct and register PTP clock*/ 13874 bp->ptp_clock_info.owner = THIS_MODULE; 13875 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); 13876 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */ 13877 bp->ptp_clock_info.n_alarm = 0; 13878 bp->ptp_clock_info.n_ext_ts = 0; 13879 bp->ptp_clock_info.n_per_out = 0; 13880 bp->ptp_clock_info.pps = 0; 13881 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; 13882 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; 13883 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; 13884 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; 13885 bp->ptp_clock_info.enable = bnx2x_ptp_enable; 13886 13887 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); 13888 if (IS_ERR(bp->ptp_clock)) { 13889 bp->ptp_clock = NULL; 13890 BNX2X_ERR("PTP clock registration failed\n"); 13891 } 13892} 13893 13894static int bnx2x_init_one(struct pci_dev *pdev, 13895 const struct pci_device_id *ent) 13896{ 13897 struct net_device *dev = NULL; 13898 struct bnx2x *bp; 13899 int rc, max_non_def_sbs; 13900 int rx_count, tx_count, rss_count, doorbell_size; 13901 int max_cos_est; 13902 bool is_vf; 13903 int cnic_cnt; 13904 13905 /* Management FW 'remembers' living interfaces. Allow it some time 13906 * to forget previously living interfaces, allowing a proper re-load. 13907 */ 13908 if (is_kdump_kernel()) { 13909 ktime_t now = ktime_get_boottime(); 13910 ktime_t fw_ready_time = ktime_set(5, 0); 13911 13912 if (ktime_before(now, fw_ready_time)) 13913 msleep(ktime_ms_delta(fw_ready_time, now)); 13914 } 13915 13916 /* An estimated maximum supported CoS number according to the chip 13917 * version. 13918 * We will try to roughly estimate the maximum number of CoSes this chip 13919 * may support in order to minimize the memory allocated for Tx 13920 * netdev_queue's. This number will be accurately calculated during the 13921 * initialization of bp->max_cos based on the chip versions AND chip 13922 * revision in the bnx2x_init_bp(). 13923 */ 13924 max_cos_est = set_max_cos_est(ent->driver_data); 13925 if (max_cos_est < 0) 13926 return max_cos_est; 13927 is_vf = set_is_vf(ent->driver_data); 13928 cnic_cnt = is_vf ? 0 : 1; 13929 13930 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); 13931 13932 /* add another SB for VF as it has no default SB */ 13933 max_non_def_sbs += is_vf ? 
1 : 0; 13934 13935 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 13936 rss_count = max_non_def_sbs - cnic_cnt; 13937 13938 if (rss_count < 1) 13939 return -EINVAL; 13940 13941 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ 13942 rx_count = rss_count + cnic_cnt; 13943 13944 /* Maximum number of netdev Tx queues: 13945 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 13946 */ 13947 tx_count = rss_count * max_cos_est + cnic_cnt; 13948 13949 /* dev zeroed in init_etherdev */ 13950 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 13951 if (!dev) 13952 return -ENOMEM; 13953 13954 bp = netdev_priv(dev); 13955 13956 bp->flags = 0; 13957 if (is_vf) 13958 bp->flags |= IS_VF_FLAG; 13959 13960 bp->igu_sb_cnt = max_non_def_sbs; 13961 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; 13962 bp->msg_enable = debug; 13963 bp->cnic_support = cnic_cnt; 13964 bp->cnic_probe = bnx2x_cnic_probe; 13965 13966 pci_set_drvdata(pdev, dev); 13967 13968 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); 13969 if (rc < 0) { 13970 free_netdev(dev); 13971 return rc; 13972 } 13973 13974 BNX2X_DEV_INFO("This is a %s function\n", 13975 IS_PF(bp) ? "physical" : "virtual"); 13976 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off"); 13977 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs); 13978 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", 13979 tx_count, rx_count); 13980 13981 rc = bnx2x_init_bp(bp); 13982 if (rc) 13983 goto init_one_exit; 13984 13985 /* Map doorbells here as we need the real value of bp->max_cos which 13986 * is initialized in bnx2x_init_bp() to determine the number of 13987 * l2 connections. 13988 */ 13989 if (IS_VF(bp)) { 13990 bp->doorbells = bnx2x_vf_doorbells(bp); 13991 rc = bnx2x_vf_pci_alloc(bp); 13992 if (rc) 13993 goto init_one_freemem; 13994 } else { 13995 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); 13996 if (doorbell_size > pci_resource_len(pdev, 2)) { 13997 dev_err(&bp->pdev->dev, 13998 "Cannot map doorbells, bar size too small, aborting\n"); 13999 rc = -ENOMEM; 14000 goto init_one_freemem; 14001 } 14002 bp->doorbells = ioremap(pci_resource_start(pdev, 2), 14003 doorbell_size); 14004 } 14005 if (!bp->doorbells) { 14006 dev_err(&bp->pdev->dev, 14007 "Cannot map doorbell space, aborting\n"); 14008 rc = -ENOMEM; 14009 goto init_one_freemem; 14010 } 14011 14012 if (IS_VF(bp)) { 14013 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); 14014 if (rc) 14015 goto init_one_freemem; 14016 14017#ifdef CONFIG_BNX2X_SRIOV 14018 /* VF with OLD Hypervisor or old PF do not support filtering */ 14019 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { 14020 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 14021 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 14022 } 14023#endif 14024 } 14025 14026 /* Enable SRIOV if capability found in configuration space */ 14027 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); 14028 if (rc) 14029 goto init_one_freemem; 14030 14031 /* calc qm_cid_count */ 14032 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); 14033 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); 14034 14035 /* disable FCOE L2 queue for E1x*/ 14036 if (CHIP_IS_E1x(bp)) 14037 bp->flags |= NO_FCOE_FLAG; 14038 14039 /* Set bp->num_queues for MSI-X mode*/ 14040 bnx2x_set_num_queues(bp); 14041 14042 /* Configure interrupt mode: try to enable MSI-X/MSI if 14043 * needed. 
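 * (falling back to MSI or legacy INT#x when MSI-X cannot be enabled)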
14044 */ 14045 rc = bnx2x_set_int_mode(bp); 14046 if (rc) { 14047 dev_err(&pdev->dev, "Cannot set interrupts\n"); 14048 goto init_one_freemem; 14049 } 14050 BNX2X_DEV_INFO("set interrupts successfully\n"); 14051 14052 /* register the net device */ 14053 rc = register_netdev(dev); 14054 if (rc) { 14055 dev_err(&pdev->dev, "Cannot register net device\n"); 14056 goto init_one_freemem; 14057 } 14058 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); 14059 14060 if (!NO_FCOE(bp)) { 14061 /* Add storage MAC address */ 14062 rtnl_lock(); 14063 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 14064 rtnl_unlock(); 14065 } 14066 BNX2X_DEV_INFO( 14067 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n", 14068 board_info[ent->driver_data].name, 14069 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 14070 dev->base_addr, bp->pdev->irq, dev->dev_addr); 14071 pcie_print_link_status(bp->pdev); 14072 14073 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) 14074 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); 14075 14076 return 0; 14077 14078init_one_freemem: 14079 bnx2x_free_mem_bp(bp); 14080 14081init_one_exit: 14082 bnx2x_disable_pcie_error_reporting(bp); 14083 14084 if (bp->regview) 14085 iounmap(bp->regview); 14086 14087 if (IS_PF(bp) && bp->doorbells) 14088 iounmap(bp->doorbells); 14089 14090 free_netdev(dev); 14091 14092 if (atomic_read(&pdev->enable_cnt) == 1) 14093 pci_release_regions(pdev); 14094 14095 pci_disable_device(pdev); 14096 14097 return rc; 14098} 14099 14100static void __bnx2x_remove(struct pci_dev *pdev, 14101 struct net_device *dev, 14102 struct bnx2x *bp, 14103 bool remove_netdev) 14104{ 14105 /* Delete storage MAC address */ 14106 if (!NO_FCOE(bp)) { 14107 rtnl_lock(); 14108 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 14109 rtnl_unlock(); 14110 } 14111 14112#ifdef BCM_DCBNL 14113 /* Delete app tlvs from dcbnl */ 14114 bnx2x_dcbnl_update_applist(bp, true); 14115#endif 14116 14117 if (IS_PF(bp) && 14118 !BP_NOMCP(bp) && 14119 (bp->flags & BC_SUPPORTS_RMMOD_CMD)) 14120 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); 14121 14122 /* Close the interface - either directly or implicitly */ 14123 if (remove_netdev) { 14124 unregister_netdev(dev); 14125 } else { 14126 rtnl_lock(); 14127 dev_close(dev); 14128 rtnl_unlock(); 14129 } 14130 14131 bnx2x_iov_remove_one(bp); 14132 14133 /* Power on: we can't let PCI layer write to us while we are in D3 */ 14134 if (IS_PF(bp)) { 14135 bnx2x_set_power_state(bp, PCI_D0); 14136 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED); 14137 14138 /* Set endianness registers to reset values in case the next driver 14139 * boots in a different endianness environment. 
14140 */ 14141 bnx2x_reset_endianity(bp); 14142 } 14143 14144 /* Disable MSI/MSI-X */ 14145 bnx2x_disable_msi(bp); 14146 14147 /* Power off */ 14148 if (IS_PF(bp)) 14149 bnx2x_set_power_state(bp, PCI_D3hot); 14150 14151 /* Make sure RESET task is not scheduled before continuing */ 14152 cancel_delayed_work_sync(&bp->sp_rtnl_task); 14153 14154 /* send message via vfpf channel to release the resources of this vf */ 14155 if (IS_VF(bp)) 14156 bnx2x_vfpf_release(bp); 14157 14158 /* Assumes no further PCIe PM changes will occur */ 14159 if (system_state == SYSTEM_POWER_OFF) { 14160 pci_wake_from_d3(pdev, bp->wol); 14161 pci_set_power_state(pdev, PCI_D3hot); 14162 } 14163 14164 bnx2x_disable_pcie_error_reporting(bp); 14165 if (remove_netdev) { 14166 if (bp->regview) 14167 iounmap(bp->regview); 14168 14169 /* For vfs, doorbells are part of the regview and were unmapped 14170 * along with it. FW is only loaded by PF. 14171 */ 14172 if (IS_PF(bp)) { 14173 if (bp->doorbells) 14174 iounmap(bp->doorbells); 14175 14176 bnx2x_release_firmware(bp); 14177 } else { 14178 bnx2x_vf_pci_dealloc(bp); 14179 } 14180 bnx2x_free_mem_bp(bp); 14181 14182 free_netdev(dev); 14183 14184 if (atomic_read(&pdev->enable_cnt) == 1) 14185 pci_release_regions(pdev); 14186 14187 pci_disable_device(pdev); 14188 } 14189} 14190 14191static void bnx2x_remove_one(struct pci_dev *pdev) 14192{ 14193 struct net_device *dev = pci_get_drvdata(pdev); 14194 struct bnx2x *bp; 14195 14196 if (!dev) { 14197 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); 14198 return; 14199 } 14200 bp = netdev_priv(dev); 14201 14202 __bnx2x_remove(pdev, dev, bp, true); 14203} 14204 14205static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 14206{ 14207 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 14208 14209 bp->rx_mode = BNX2X_RX_MODE_NONE; 14210 14211 if (CNIC_LOADED(bp)) 14212 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 14213 14214 /* Stop Tx */ 14215 bnx2x_tx_disable(bp); 14216 netdev_reset_tc(bp->dev); 14217 14218 del_timer_sync(&bp->timer); 14219 cancel_delayed_work_sync(&bp->sp_task); 14220 cancel_delayed_work_sync(&bp->period_task); 14221 14222 if (!down_timeout(&bp->stats_lock, HZ / 10)) { 14223 bp->stats_state = STATS_STATE_DISABLED; 14224 up(&bp->stats_lock); 14225 } 14226 14227 bnx2x_save_statistics(bp); 14228 14229 netif_carrier_off(bp->dev); 14230 14231 return 0; 14232} 14233 14234/** 14235 * bnx2x_io_error_detected - called when PCI error is detected 14236 * @pdev: Pointer to PCI device 14237 * @state: The current pci connection state 14238 * 14239 * This function is called after a PCI bus error affecting 14240 * this device has been detected. 14241 */ 14242static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, 14243 pci_channel_state_t state) 14244{ 14245 struct net_device *dev = pci_get_drvdata(pdev); 14246 struct bnx2x *bp = netdev_priv(dev); 14247 14248 rtnl_lock(); 14249 14250 BNX2X_ERR("IO error detected\n"); 14251 14252 netif_device_detach(dev); 14253 14254 if (state == pci_channel_io_perm_failure) { 14255 rtnl_unlock(); 14256 return PCI_ERS_RESULT_DISCONNECT; 14257 } 14258 14259 if (netif_running(dev)) 14260 bnx2x_eeh_nic_unload(bp); 14261 14262 bnx2x_prev_path_mark_eeh(bp); 14263 14264 pci_disable_device(pdev); 14265 14266 rtnl_unlock(); 14267 14268 /* Request a slot reset */ 14269 return PCI_ERS_RESULT_NEED_RESET; 14270} 14271 14272/** 14273 * bnx2x_io_slot_reset - called after the PCI bus has been reset 14274 * @pdev: Pointer to PCI device 14275 * 14276 * Restart the card from scratch, as if from a cold-boot. 
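 * If the interface was running, all driver resources are released,
 * UNLOAD_DONE is reported to the MCP and the device is left closed;
 * the subsequent ->resume() callback loads it again.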
14277 */ 14278static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) 14279{ 14280 struct net_device *dev = pci_get_drvdata(pdev); 14281 struct bnx2x *bp = netdev_priv(dev); 14282 int i; 14283 14284 rtnl_lock(); 14285 BNX2X_ERR("IO slot reset initializing...\n"); 14286 if (pci_enable_device(pdev)) { 14287 dev_err(&pdev->dev, 14288 "Cannot re-enable PCI device after reset\n"); 14289 rtnl_unlock(); 14290 return PCI_ERS_RESULT_DISCONNECT; 14291 } 14292 14293 pci_set_master(pdev); 14294 pci_restore_state(pdev); 14295 pci_save_state(pdev); 14296 14297 if (netif_running(dev)) 14298 bnx2x_set_power_state(bp, PCI_D0); 14299 14300 if (netif_running(dev)) { 14301 BNX2X_ERR("IO slot reset --> driver unload\n"); 14302 14303 /* MCP should have been reset; need to wait for validity */ 14304 if (bnx2x_init_shmem(bp)) { 14305 rtnl_unlock(); 14306 return PCI_ERS_RESULT_DISCONNECT; 14307 } 14308 14309 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 14310 u32 v; 14311 14312 v = SHMEM2_RD(bp, 14313 drv_capabilities_flag[BP_FW_MB_IDX(bp)]); 14314 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], 14315 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 14316 } 14317 bnx2x_drain_tx_queues(bp); 14318 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); 14319 bnx2x_netif_stop(bp, 1); 14320 bnx2x_del_all_napi(bp); 14321 14322 if (CNIC_LOADED(bp)) 14323 bnx2x_del_all_napi_cnic(bp); 14324 14325 bnx2x_free_irq(bp); 14326 14327 /* Report UNLOAD_DONE to MCP */ 14328 bnx2x_send_unload_done(bp, true); 14329 14330 bp->sp_state = 0; 14331 bp->port.pmf = 0; 14332 14333 bnx2x_prev_unload(bp); 14334 14335 /* We should have reset the engine, so it's fair to 14336 * assume the FW will no longer write to the bnx2x driver. 14337 */ 14338 bnx2x_squeeze_objects(bp); 14339 bnx2x_free_skbs(bp); 14340 for_each_rx_queue(bp, i) 14341 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 14342 bnx2x_free_fp_mem(bp); 14343 bnx2x_free_mem(bp); 14344 14345 bp->state = BNX2X_STATE_CLOSED; 14346 } 14347 14348 rtnl_unlock(); 14349 14350 return PCI_ERS_RESULT_RECOVERED; 14351} 14352 14353/** 14354 * bnx2x_io_resume - called when traffic can start flowing again 14355 * @pdev: Pointer to PCI device 14356 * 14357 * This callback is called when the error recovery driver tells us that 14358 * it's OK to resume normal operation. 14359 */ 14360static void bnx2x_io_resume(struct pci_dev *pdev) 14361{ 14362 struct net_device *dev = pci_get_drvdata(pdev); 14363 struct bnx2x *bp = netdev_priv(dev); 14364 14365 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 14366 netdev_err(bp->dev, "Handling parity error recovery. 
Try again later\n"); 14367 return; 14368 } 14369 14370 rtnl_lock(); 14371 14372 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 14373 DRV_MSG_SEQ_NUMBER_MASK; 14374 14375 if (netif_running(dev)) { 14376 if (bnx2x_nic_load(bp, LOAD_NORMAL)) { 14377 netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n"); 14378 goto done; 14379 } 14380 } 14381 14382 netif_device_attach(dev); 14383 14384done: 14385 rtnl_unlock(); 14386} 14387 14388static const struct pci_error_handlers bnx2x_err_handler = { 14389 .error_detected = bnx2x_io_error_detected, 14390 .slot_reset = bnx2x_io_slot_reset, 14391 .resume = bnx2x_io_resume, 14392}; 14393 14394static void bnx2x_shutdown(struct pci_dev *pdev) 14395{ 14396 struct net_device *dev = pci_get_drvdata(pdev); 14397 struct bnx2x *bp; 14398 14399 if (!dev) 14400 return; 14401 14402 bp = netdev_priv(dev); 14403 if (!bp) 14404 return; 14405 14406 rtnl_lock(); 14407 netif_device_detach(dev); 14408 rtnl_unlock(); 14409 14410 /* Don't remove the netdevice, as there are scenarios which will cause 14411 * the kernel to hang, e.g., when trying to remove bnx2i while the 14412 * rootfs is mounted from SAN. 14413 */ 14414 __bnx2x_remove(pdev, dev, bp, false); 14415} 14416 14417static struct pci_driver bnx2x_pci_driver = { 14418 .name = DRV_MODULE_NAME, 14419 .id_table = bnx2x_pci_tbl, 14420 .probe = bnx2x_init_one, 14421 .remove = bnx2x_remove_one, 14422 .driver.pm = &bnx2x_pm_ops, 14423 .err_handler = &bnx2x_err_handler, 14424#ifdef CONFIG_BNX2X_SRIOV 14425 .sriov_configure = bnx2x_sriov_configure, 14426#endif 14427 .shutdown = bnx2x_shutdown, 14428}; 14429 14430static int __init bnx2x_init(void) 14431{ 14432 int ret; 14433 14434 bnx2x_wq = create_singlethread_workqueue("bnx2x"); 14435 if (bnx2x_wq == NULL) { 14436 pr_err("Cannot create workqueue\n"); 14437 return -ENOMEM; 14438 } 14439 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov"); 14440 if (!bnx2x_iov_wq) { 14441 pr_err("Cannot create iov workqueue\n"); 14442 destroy_workqueue(bnx2x_wq); 14443 return -ENOMEM; 14444 } 14445 14446 ret = pci_register_driver(&bnx2x_pci_driver); 14447 if (ret) { 14448 pr_err("Cannot register driver\n"); 14449 destroy_workqueue(bnx2x_wq); 14450 destroy_workqueue(bnx2x_iov_wq); 14451 } 14452 return ret; 14453} 14454 14455static void __exit bnx2x_cleanup(void) 14456{ 14457 struct list_head *pos, *q; 14458 14459 pci_unregister_driver(&bnx2x_pci_driver); 14460 14461 destroy_workqueue(bnx2x_wq); 14462 destroy_workqueue(bnx2x_iov_wq); 14463 14464 /* Free globally allocated resources */ 14465 list_for_each_safe(pos, q, &bnx2x_prev_list) { 14466 struct bnx2x_prev_path_list *tmp = 14467 list_entry(pos, struct bnx2x_prev_path_list, list); 14468 list_del(pos); 14469 kfree(tmp); 14470 } 14471} 14472 14473void bnx2x_notify_link_changed(struct bnx2x *bp) 14474{ 14475 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); 14476} 14477 14478module_init(bnx2x_init); 14479module_exit(bnx2x_cleanup); 14480 14481/** 14482 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). 14483 * @bp: driver handle 14484 * 14485 * This function will wait until the ramrod completion returns. 14486 * Return 0 if success, -ENODEV if ramrod doesn't return. 
14487 */ 14488static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) 14489{ 14490 unsigned long ramrod_flags = 0; 14491 14492 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 14493 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, 14494 &bp->iscsi_l2_mac_obj, true, 14495 BNX2X_ISCSI_ETH_MAC, &ramrod_flags); 14496} 14497 14498/* count denotes the number of new completions we have seen */ 14499static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) 14500{ 14501 struct eth_spe *spe; 14502 int cxt_index, cxt_offset; 14503 14504#ifdef BNX2X_STOP_ON_ERROR 14505 if (unlikely(bp->panic)) 14506 return; 14507#endif 14508 14509 spin_lock_bh(&bp->spq_lock); 14510 BUG_ON(bp->cnic_spq_pending < count); 14511 bp->cnic_spq_pending -= count; 14512 14513 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { 14514 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) 14515 & SPE_HDR_CONN_TYPE) >> 14516 SPE_HDR_CONN_TYPE_SHIFT; 14517 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) 14518 >> SPE_HDR_CMD_ID_SHIFT) & 0xff; 14519 14520 /* Set validation for iSCSI L2 client before sending SETUP 14521 * ramrod 14522 */ 14523 if (type == ETH_CONNECTION_TYPE) { 14524 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) { 14525 cxt_index = BNX2X_ISCSI_ETH_CID(bp) / 14526 ILT_PAGE_CIDS; 14527 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - 14528 (cxt_index * ILT_PAGE_CIDS); 14529 bnx2x_set_ctx_validation(bp, 14530 &bp->context[cxt_index]. 14531 vcxt[cxt_offset].eth, 14532 BNX2X_ISCSI_ETH_CID(bp)); 14533 } 14534 } 14535 14536 /* 14537 * There may be not more than 8 L2, not more than 8 L5 SPEs 14538 * and in the air. We also check that number of outstanding 14539 * COMMON ramrods is not more than the EQ and SPQ can 14540 * accommodate. 14541 */ 14542 if (type == ETH_CONNECTION_TYPE) { 14543 if (!atomic_read(&bp->cq_spq_left)) 14544 break; 14545 else 14546 atomic_dec(&bp->cq_spq_left); 14547 } else if (type == NONE_CONNECTION_TYPE) { 14548 if (!atomic_read(&bp->eq_spq_left)) 14549 break; 14550 else 14551 atomic_dec(&bp->eq_spq_left); 14552 } else if ((type == ISCSI_CONNECTION_TYPE) || 14553 (type == FCOE_CONNECTION_TYPE)) { 14554 if (bp->cnic_spq_pending >= 14555 bp->cnic_eth_dev.max_kwqe_pending) 14556 break; 14557 else 14558 bp->cnic_spq_pending++; 14559 } else { 14560 BNX2X_ERR("Unknown SPE type: %d\n", type); 14561 bnx2x_panic(); 14562 break; 14563 } 14564 14565 spe = bnx2x_sp_get_next(bp); 14566 *spe = *bp->cnic_kwq_cons; 14567 14568 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", 14569 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); 14570 14571 if (bp->cnic_kwq_cons == bp->cnic_kwq_last) 14572 bp->cnic_kwq_cons = bp->cnic_kwq; 14573 else 14574 bp->cnic_kwq_cons++; 14575 } 14576 bnx2x_sp_prod_update(bp); 14577 spin_unlock_bh(&bp->spq_lock); 14578} 14579 14580static int bnx2x_cnic_sp_queue(struct net_device *dev, 14581 struct kwqe_16 *kwqes[], u32 count) 14582{ 14583 struct bnx2x *bp = netdev_priv(dev); 14584 int i; 14585 14586#ifdef BNX2X_STOP_ON_ERROR 14587 if (unlikely(bp->panic)) { 14588 BNX2X_ERR("Can't post to SP queue while panic\n"); 14589 return -EIO; 14590 } 14591#endif 14592 14593 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && 14594 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { 14595 BNX2X_ERR("Handling parity error recovery. 
Try again later\n"); 14596 return -EAGAIN; 14597 } 14598 14599 spin_lock_bh(&bp->spq_lock); 14600 14601 for (i = 0; i < count; i++) { 14602 struct eth_spe *spe = (struct eth_spe *)kwqes[i]; 14603 14604 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) 14605 break; 14606 14607 *bp->cnic_kwq_prod = *spe; 14608 14609 bp->cnic_kwq_pending++; 14610 14611 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", 14612 spe->hdr.conn_and_cmd_data, spe->hdr.type, 14613 spe->data.update_data_addr.hi, 14614 spe->data.update_data_addr.lo, 14615 bp->cnic_kwq_pending); 14616 14617 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) 14618 bp->cnic_kwq_prod = bp->cnic_kwq; 14619 else 14620 bp->cnic_kwq_prod++; 14621 } 14622 14623 spin_unlock_bh(&bp->spq_lock); 14624 14625 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) 14626 bnx2x_cnic_sp_post(bp, 0); 14627 14628 return i; 14629} 14630 14631static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) 14632{ 14633 struct cnic_ops *c_ops; 14634 int rc = 0; 14635 14636 mutex_lock(&bp->cnic_mutex); 14637 c_ops = rcu_dereference_protected(bp->cnic_ops, 14638 lockdep_is_held(&bp->cnic_mutex)); 14639 if (c_ops) 14640 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 14641 mutex_unlock(&bp->cnic_mutex); 14642 14643 return rc; 14644} 14645 14646static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) 14647{ 14648 struct cnic_ops *c_ops; 14649 int rc = 0; 14650 14651 rcu_read_lock(); 14652 c_ops = rcu_dereference(bp->cnic_ops); 14653 if (c_ops) 14654 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 14655 rcu_read_unlock(); 14656 14657 return rc; 14658} 14659 14660/* 14661 * for commands that have no data 14662 */ 14663int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) 14664{ 14665 struct cnic_ctl_info ctl = {0}; 14666 14667 ctl.cmd = cmd; 14668 14669 return bnx2x_cnic_ctl_send(bp, &ctl); 14670} 14671 14672static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) 14673{ 14674 struct cnic_ctl_info ctl = {0}; 14675 14676 /* first we tell CNIC and only then we count this as a completion */ 14677 ctl.cmd = CNIC_CTL_COMPLETION_CMD; 14678 ctl.data.comp.cid = cid; 14679 ctl.data.comp.error = err; 14680 14681 bnx2x_cnic_ctl_send_bh(bp, &ctl); 14682 bnx2x_cnic_sp_post(bp, 0); 14683} 14684 14685/* Called with netif_addr_lock_bh() taken. 14686 * Sets an rx_mode config for an iSCSI ETH client. 14687 * Doesn't block. 14688 * Completion should be checked outside. 14689 */ 14690static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) 14691{ 14692 unsigned long accept_flags = 0, ramrod_flags = 0; 14693 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 14694 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; 14695 14696 if (start) { 14697 /* Start accepting on iSCSI L2 ring. Accept all multicasts 14698 * because it's the only way for UIO Queue to accept 14699 * multicasts (in non-promiscuous mode only one Queue per 14700 * function will receive multicast packets (leading in our 14701 * case). 
14702 */ 14703 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); 14704 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); 14705 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); 14706 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 14707 14708 /* Clear STOP_PENDING bit if START is requested */ 14709 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); 14710 14711 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; 14712 } else 14713 /* Clear START_PENDING bit if STOP is requested */ 14714 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); 14715 14716 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) 14717 set_bit(sched_state, &bp->sp_state); 14718 else { 14719 __set_bit(RAMROD_RX, &ramrod_flags); 14720 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, 14721 ramrod_flags); 14722 } 14723} 14724 14725static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) 14726{ 14727 struct bnx2x *bp = netdev_priv(dev); 14728 int rc = 0; 14729 14730 switch (ctl->cmd) { 14731 case DRV_CTL_CTXTBL_WR_CMD: { 14732 u32 index = ctl->data.io.offset; 14733 dma_addr_t addr = ctl->data.io.dma_addr; 14734 14735 bnx2x_ilt_wr(bp, index, addr); 14736 break; 14737 } 14738 14739 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { 14740 int count = ctl->data.credit.credit_count; 14741 14742 bnx2x_cnic_sp_post(bp, count); 14743 break; 14744 } 14745 14746 /* rtnl_lock is held. */ 14747 case DRV_CTL_START_L2_CMD: { 14748 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 14749 unsigned long sp_bits = 0; 14750 14751 /* Configure the iSCSI classification object */ 14752 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, 14753 cp->iscsi_l2_client_id, 14754 cp->iscsi_l2_cid, BP_FUNC(bp), 14755 bnx2x_sp(bp, mac_rdata), 14756 bnx2x_sp_mapping(bp, mac_rdata), 14757 BNX2X_FILTER_MAC_PENDING, 14758 &bp->sp_state, BNX2X_OBJ_TYPE_RX, 14759 &bp->macs_pool); 14760 14761 /* Set iSCSI MAC address */ 14762 rc = bnx2x_set_iscsi_eth_mac_addr(bp); 14763 if (rc) 14764 break; 14765 14766 barrier(); 14767 14768 /* Start accepting on iSCSI L2 ring */ 14769 14770 netif_addr_lock_bh(dev); 14771 bnx2x_set_iscsi_eth_rx_mode(bp, true); 14772 netif_addr_unlock_bh(dev); 14773 14774 /* bits to wait on */ 14775 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 14776 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); 14777 14778 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 14779 BNX2X_ERR("rx_mode completion timed out!\n"); 14780 14781 break; 14782 } 14783 14784 /* rtnl_lock is held. 
*/ 14785 case DRV_CTL_STOP_L2_CMD: { 14786 unsigned long sp_bits = 0; 14787 14788 /* Stop accepting on iSCSI L2 ring */ 14789 netif_addr_lock_bh(dev); 14790 bnx2x_set_iscsi_eth_rx_mode(bp, false); 14791 netif_addr_unlock_bh(dev); 14792 14793 /* bits to wait on */ 14794 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 14795 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); 14796 14797 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 14798 BNX2X_ERR("rx_mode completion timed out!\n"); 14799 14800 barrier(); 14801 14802 /* Unset iSCSI L2 MAC */ 14803 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, 14804 BNX2X_ISCSI_ETH_MAC, true); 14805 break; 14806 } 14807 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { 14808 int count = ctl->data.credit.credit_count; 14809 14810 smp_mb__before_atomic(); 14811 atomic_add(count, &bp->cq_spq_left); 14812 smp_mb__after_atomic(); 14813 break; 14814 } 14815 case DRV_CTL_ULP_REGISTER_CMD: { 14816 int ulp_type = ctl->data.register_data.ulp_type; 14817 14818 if (CHIP_IS_E3(bp)) { 14819 int idx = BP_FW_MB_IDX(bp); 14820 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 14821 int path = BP_PATH(bp); 14822 int port = BP_PORT(bp); 14823 int i; 14824 u32 scratch_offset; 14825 u32 *host_addr; 14826 14827 /* first write capability to shmem2 */ 14828 if (ulp_type == CNIC_ULP_ISCSI) 14829 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 14830 else if (ulp_type == CNIC_ULP_FCOE) 14831 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 14832 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 14833 14834 if ((ulp_type != CNIC_ULP_FCOE) || 14835 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) || 14836 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) 14837 break; 14838 14839 /* if reached here - should write fcoe capabilities */ 14840 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr); 14841 if (!scratch_offset) 14842 break; 14843 scratch_offset += offsetof(struct glob_ncsi_oem_data, 14844 fcoe_features[path][port]); 14845 host_addr = (u32 *) &(ctl->data.register_data. 
14846 fcoe_features); 14847 for (i = 0; i < sizeof(struct fcoe_capabilities); 14848 i += 4) 14849 REG_WR(bp, scratch_offset + i, 14850 *(host_addr + i/4)); 14851 } 14852 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 14853 break; 14854 } 14855 14856 case DRV_CTL_ULP_UNREGISTER_CMD: { 14857 int ulp_type = ctl->data.ulp_type; 14858 14859 if (CHIP_IS_E3(bp)) { 14860 int idx = BP_FW_MB_IDX(bp); 14861 u32 cap; 14862 14863 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 14864 if (ulp_type == CNIC_ULP_ISCSI) 14865 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 14866 else if (ulp_type == CNIC_ULP_FCOE) 14867 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 14868 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 14869 } 14870 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 14871 break; 14872 } 14873 14874 default: 14875 BNX2X_ERR("unknown command %x\n", ctl->cmd); 14876 rc = -EINVAL; 14877 } 14878 14879 /* For storage-only interfaces, change driver state */ 14880 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) { 14881 switch (ctl->drv_state) { 14882 case DRV_NOP: 14883 break; 14884 case DRV_ACTIVE: 14885 bnx2x_set_os_driver_state(bp, 14886 OS_DRIVER_STATE_ACTIVE); 14887 break; 14888 case DRV_INACTIVE: 14889 bnx2x_set_os_driver_state(bp, 14890 OS_DRIVER_STATE_DISABLED); 14891 break; 14892 case DRV_UNLOADED: 14893 bnx2x_set_os_driver_state(bp, 14894 OS_DRIVER_STATE_NOT_LOADED); 14895 break; 14896 default: 14897 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state); 14898 } 14899 } 14900 14901 return rc; 14902} 14903 14904static int bnx2x_get_fc_npiv(struct net_device *dev, 14905 struct cnic_fc_npiv_tbl *cnic_tbl) 14906{ 14907 struct bnx2x *bp = netdev_priv(dev); 14908 struct bdn_fc_npiv_tbl *tbl = NULL; 14909 u32 offset, entries; 14910 int rc = -EINVAL; 14911 int i; 14912 14913 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0])) 14914 goto out; 14915 14916 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n"); 14917 14918 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); 14919 if (!tbl) { 14920 BNX2X_ERR("Failed to allocate fc_npiv table\n"); 14921 goto out; 14922 } 14923 14924 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]); 14925 if (!offset) { 14926 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n"); 14927 goto out; 14928 } 14929 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset); 14930 14931 /* Read the table contents from nvram */ 14932 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) { 14933 BNX2X_ERR("Failed to read FC-NPIV table\n"); 14934 goto out; 14935 } 14936 14937 /* Since bnx2x_nvram_read() returns data in be32, we need to convert 14938 * the number of entries back to cpu endianness. 
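 * Only the entry count needs the swap; the WWPN/WWNN values are copied
 * below as raw byte arrays.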
14939 */ 14940 entries = tbl->fc_npiv_cfg.num_of_npiv; 14941 entries = (__force u32)be32_to_cpu((__force __be32)entries); 14942 tbl->fc_npiv_cfg.num_of_npiv = entries; 14943 14944 if (!tbl->fc_npiv_cfg.num_of_npiv) { 14945 DP(BNX2X_MSG_MCP, 14946 "No FC-NPIV table [valid, simply not present]\n"); 14947 goto out; 14948 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) { 14949 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n", 14950 tbl->fc_npiv_cfg.num_of_npiv); 14951 goto out; 14952 } else { 14953 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n", 14954 tbl->fc_npiv_cfg.num_of_npiv); 14955 } 14956 14957 /* Copy the data into cnic-provided struct */ 14958 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv; 14959 for (i = 0; i < cnic_tbl->count; i++) { 14960 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8); 14961 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8); 14962 } 14963 14964 rc = 0; 14965out: 14966 kfree(tbl); 14967 return rc; 14968} 14969 14970void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) 14971{ 14972 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 14973 14974 if (bp->flags & USING_MSIX_FLAG) { 14975 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; 14976 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; 14977 cp->irq_arr[0].vector = bp->msix_table[1].vector; 14978 } else { 14979 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; 14980 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; 14981 } 14982 if (!CHIP_IS_E1x(bp)) 14983 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; 14984 else 14985 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; 14986 14987 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); 14988 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); 14989 cp->irq_arr[1].status_blk = bp->def_status_blk; 14990 cp->irq_arr[1].status_blk_num = DEF_SB_ID; 14991 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; 14992 14993 cp->num_irq = 2; 14994} 14995 14996void bnx2x_setup_cnic_info(struct bnx2x *bp) 14997{ 14998 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 14999 15000 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 15001 bnx2x_cid_ilt_lines(bp); 15002 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; 15003 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 15004 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 15005 15006 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n", 15007 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, 15008 cp->iscsi_l2_cid); 15009 15010 if (NO_ISCSI_OOO(bp)) 15011 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 15012} 15013 15014static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, 15015 void *data) 15016{ 15017 struct bnx2x *bp = netdev_priv(dev); 15018 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 15019 int rc; 15020 15021 DP(NETIF_MSG_IFUP, "Register_cnic called\n"); 15022 15023 if (ops == NULL) { 15024 BNX2X_ERR("NULL ops received\n"); 15025 return -EINVAL; 15026 } 15027 15028 if (!CNIC_SUPPORT(bp)) { 15029 BNX2X_ERR("Can't register CNIC when not supported\n"); 15030 return -EOPNOTSUPP; 15031 } 15032 15033 if (!CNIC_LOADED(bp)) { 15034 rc = bnx2x_load_cnic(bp); 15035 if (rc) { 15036 BNX2X_ERR("CNIC-related load failed\n"); 15037 return rc; 15038 } 15039 } 15040 15041 bp->cnic_enabled = true; 15042 15043 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); 15044 if (!bp->cnic_kwq) 15045 return -ENOMEM; 15046 15047 bp->cnic_kwq_cons = bp->cnic_kwq; 15048 bp->cnic_kwq_prod = bp->cnic_kwq; 15049 bp->cnic_kwq_last = 
bp->cnic_kwq + MAX_SP_DESC_CNT; 15050 15051 bp->cnic_spq_pending = 0; 15052 bp->cnic_kwq_pending = 0; 15053 15054 bp->cnic_data = data; 15055 15056 cp->num_irq = 0; 15057 cp->drv_state |= CNIC_DRV_STATE_REGD; 15058 cp->iro_arr = bp->iro_arr; 15059 15060 bnx2x_setup_cnic_irq_info(bp); 15061 15062 rcu_assign_pointer(bp->cnic_ops, ops); 15063 15064 /* Schedule driver to read CNIC driver versions */ 15065 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 15066 15067 return 0; 15068} 15069 15070static int bnx2x_unregister_cnic(struct net_device *dev) 15071{ 15072 struct bnx2x *bp = netdev_priv(dev); 15073 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 15074 15075 mutex_lock(&bp->cnic_mutex); 15076 cp->drv_state = 0; 15077 RCU_INIT_POINTER(bp->cnic_ops, NULL); 15078 mutex_unlock(&bp->cnic_mutex); 15079 synchronize_rcu(); 15080 bp->cnic_enabled = false; 15081 kfree(bp->cnic_kwq); 15082 bp->cnic_kwq = NULL; 15083 15084 return 0; 15085} 15086 15087static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) 15088{ 15089 struct bnx2x *bp = netdev_priv(dev); 15090 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 15091 15092 /* If both iSCSI and FCoE are disabled - return NULL in 15093 * order to indicate CNIC that it should not try to work 15094 * with this device. 15095 */ 15096 if (NO_ISCSI(bp) && NO_FCOE(bp)) 15097 return NULL; 15098 15099 cp->drv_owner = THIS_MODULE; 15100 cp->chip_id = CHIP_ID(bp); 15101 cp->pdev = bp->pdev; 15102 cp->io_base = bp->regview; 15103 cp->io_base2 = bp->doorbells; 15104 cp->max_kwqe_pending = 8; 15105 cp->ctx_blk_size = CDU_ILT_PAGE_SZ; 15106 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 15107 bnx2x_cid_ilt_lines(bp); 15108 cp->ctx_tbl_len = CNIC_ILT_LINES; 15109 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; 15110 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; 15111 cp->drv_ctl = bnx2x_drv_ctl; 15112 cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv; 15113 cp->drv_register_cnic = bnx2x_register_cnic; 15114 cp->drv_unregister_cnic = bnx2x_unregister_cnic; 15115 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 15116 cp->iscsi_l2_client_id = 15117 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 15118 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 15119 15120 if (NO_ISCSI_OOO(bp)) 15121 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 15122 15123 if (NO_ISCSI(bp)) 15124 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; 15125 15126 if (NO_FCOE(bp)) 15127 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; 15128 15129 BNX2X_DEV_INFO( 15130 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n", 15131 cp->ctx_blk_size, 15132 cp->ctx_tbl_offset, 15133 cp->ctx_tbl_len, 15134 cp->starting_cid); 15135 return cp; 15136} 15137 15138static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) 15139{ 15140 struct bnx2x *bp = fp->bp; 15141 u32 offset = BAR_USTRORM_INTMEM; 15142 15143 if (IS_VF(bp)) 15144 return bnx2x_vf_ustorm_prods_offset(bp, fp); 15145 else if (!CHIP_IS_E1x(bp)) 15146 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 15147 else 15148 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); 15149 15150 return offset; 15151} 15152 15153/* called only on E1H or E2. 
15154 * When pretending to be PF, the pretend value is the function number 0...7 15155 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 15156 * combination 15157 */ 15158int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) 15159{ 15160 u32 pretend_reg; 15161 15162 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX) 15163 return -1; 15164 15165 /* get my own pretend register */ 15166 pretend_reg = bnx2x_get_pretend_reg(bp); 15167 REG_WR(bp, pretend_reg, pretend_func_val); 15168 REG_RD(bp, pretend_reg); 15169 return 0; 15170} 15171 15172static void bnx2x_ptp_task(struct work_struct *work) 15173{ 15174 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task); 15175 int port = BP_PORT(bp); 15176 u32 val_seq; 15177 u64 timestamp, ns; 15178 struct skb_shared_hwtstamps shhwtstamps; 15179 bool bail = true; 15180 int i; 15181 15182 /* FW may take a while to complete timestamping; try a bit and if it's 15183 * still not complete, may indicate an error state - bail out then. 15184 */ 15185 for (i = 0; i < 10; i++) { 15186 /* Read Tx timestamp registers */ 15187 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : 15188 NIG_REG_P0_TLLH_PTP_BUF_SEQID); 15189 if (val_seq & 0x10000) { 15190 bail = false; 15191 break; 15192 } 15193 msleep(1 << i); 15194 } 15195 15196 if (!bail) { 15197 /* There is a valid timestamp value */ 15198 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB : 15199 NIG_REG_P0_TLLH_PTP_BUF_TS_MSB); 15200 timestamp <<= 32; 15201 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB : 15202 NIG_REG_P0_TLLH_PTP_BUF_TS_LSB); 15203 /* Reset timestamp register to allow new timestamp */ 15204 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : 15205 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); 15206 ns = timecounter_cyc2time(&bp->timecounter, timestamp); 15207 15208 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 15209 shhwtstamps.hwtstamp = ns_to_ktime(ns); 15210 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); 15211 15212 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", 15213 timestamp, ns); 15214 } else { 15215 DP(BNX2X_MSG_PTP, 15216 "Tx timestamp is not recorded (register read=%u)\n", 15217 val_seq); 15218 bp->eth_stats.ptp_skip_tx_ts++; 15219 } 15220 15221 dev_kfree_skb_any(bp->ptp_tx_skb); 15222 bp->ptp_tx_skb = NULL; 15223} 15224 15225void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb) 15226{ 15227 int port = BP_PORT(bp); 15228 u64 timestamp, ns; 15229 15230 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB : 15231 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB); 15232 timestamp <<= 32; 15233 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB : 15234 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB); 15235 15236 /* Reset timestamp register to allow new timestamp */ 15237 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : 15238 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000); 15239 15240 ns = timecounter_cyc2time(&bp->timecounter, timestamp); 15241 15242 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 15243 15244 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", 15245 timestamp, ns); 15246} 15247 15248/* Read the PHC */ 15249static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc) 15250{ 15251 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter); 15252 int port = BP_PORT(bp); 15253 u32 wb_data[2]; 15254 u64 phc_cycles; 15255 15256 REG_RD_DMAE(bp, port ? 
NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 : 15257 NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2); 15258 phc_cycles = wb_data[1]; 15259 phc_cycles = (phc_cycles << 32) + wb_data[0]; 15260 15261 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles); 15262 15263 return phc_cycles; 15264} 15265 15266static void bnx2x_init_cyclecounter(struct bnx2x *bp) 15267{ 15268 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); 15269 bp->cyclecounter.read = bnx2x_cyclecounter_read; 15270 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); 15271 bp->cyclecounter.shift = 0; 15272 bp->cyclecounter.mult = 1; 15273} 15274 15275static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp) 15276{ 15277 struct bnx2x_func_state_params func_params = {NULL}; 15278 struct bnx2x_func_set_timesync_params *set_timesync_params = 15279 &func_params.params.set_timesync; 15280 15281 /* Prepare parameters for function state transitions */ 15282 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 15283 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); 15284 15285 func_params.f_obj = &bp->func_obj; 15286 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; 15287 15288 /* Function parameters */ 15289 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET; 15290 set_timesync_params->offset_cmd = TS_OFFSET_KEEP; 15291 15292 return bnx2x_func_state_change(bp, &func_params); 15293} 15294 15295static int bnx2x_enable_ptp_packets(struct bnx2x *bp) 15296{ 15297 struct bnx2x_queue_state_params q_params; 15298 int rc, i; 15299 15300 /* send queue update ramrod to enable PTP packets */ 15301 memset(&q_params, 0, sizeof(q_params)); 15302 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 15303 q_params.cmd = BNX2X_Q_CMD_UPDATE; 15304 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, 15305 &q_params.params.update.update_flags); 15306 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS, 15307 &q_params.params.update.update_flags); 15308 15309 /* send the ramrod on all the queues of the PF */ 15310 for_each_eth_queue(bp, i) { 15311 struct bnx2x_fastpath *fp = &bp->fp[i]; 15312 15313 /* Set the appropriate Queue object */ 15314 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 15315 15316 /* Update the Queue state */ 15317 rc = bnx2x_queue_state_change(bp, &q_params); 15318 if (rc) { 15319 BNX2X_ERR("Failed to enable PTP packets\n"); 15320 return rc; 15321 } 15322 } 15323 15324 return 0; 15325} 15326 15327#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5 15328#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB 15329#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA) 15330#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE) 15331#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE) 15332#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE) 15333#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA) 15334#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE) 15335#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF) 15336#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF) 15337#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA) 15338#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE) 15339 15340int bnx2x_configure_ptp_filters(struct bnx2x *bp) 15341{ 15342 int port = BP_PORT(bp); 15343 u32 param, rule; 15344 int rc; 15345 15346 if (!bp->hwtstamp_ioctl_called) 15347 return 0; 15348 15349 param = port ? 
NIG_REG_P1_TLLH_PTP_PARAM_MASK : 15350 NIG_REG_P0_TLLH_PTP_PARAM_MASK; 15351 rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : 15352 NIG_REG_P0_TLLH_PTP_RULE_MASK; 15353 switch (bp->tx_type) { 15354 case HWTSTAMP_TX_ON: 15355 bp->flags |= TX_TIMESTAMPING_EN; 15356 REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK); 15357 REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK); 15358 break; 15359 case HWTSTAMP_TX_ONESTEP_SYNC: 15360 case HWTSTAMP_TX_ONESTEP_P2P: 15361 BNX2X_ERR("One-step timestamping is not supported\n"); 15362 return -ERANGE; 15363 } 15364 15365 param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : 15366 NIG_REG_P0_LLH_PTP_PARAM_MASK; 15367 rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK : 15368 NIG_REG_P0_LLH_PTP_RULE_MASK; 15369 switch (bp->rx_filter) { 15370 case HWTSTAMP_FILTER_NONE: 15371 break; 15372 case HWTSTAMP_FILTER_ALL: 15373 case HWTSTAMP_FILTER_SOME: 15374 case HWTSTAMP_FILTER_NTP_ALL: 15375 bp->rx_filter = HWTSTAMP_FILTER_NONE; 15376 break; 15377 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 15378 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 15379 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 15380 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 15381 /* Initialize PTP detection for UDP/IPv4 events */ 15382 REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK); 15383 REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK); 15384 break; 15385 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 15386 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 15387 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 15388 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 15389 /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ 15390 REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK); 15391 REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK); 15392 break; 15393 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 15394 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 15395 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 15396 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 15397 /* Initialize PTP detection L2 events */ 15398 REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK); 15399 REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK); 15400 15401 break; 15402 case HWTSTAMP_FILTER_PTP_V2_EVENT: 15403 case HWTSTAMP_FILTER_PTP_V2_SYNC: 15404 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 15405 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 15406 /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */ 15407 REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK); 15408 REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK); 15409 break; 15410 } 15411 15412 /* Indicate to FW that this PF expects recorded PTP packets */ 15413 rc = bnx2x_enable_ptp_packets(bp); 15414 if (rc) 15415 return rc; 15416 15417 /* Enable sending PTP packets to host */ 15418 REG_WR(bp, port ? 
NIG_REG_P1_LLH_PTP_TO_HOST : 15419 NIG_REG_P0_LLH_PTP_TO_HOST, 0x1); 15420 15421 return 0; 15422} 15423 15424static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) 15425{ 15426 struct hwtstamp_config config; 15427 int rc; 15428 15429 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n"); 15430 15431 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 15432 return -EFAULT; 15433 15434 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", 15435 config.tx_type, config.rx_filter); 15436 15437 if (config.flags) { 15438 BNX2X_ERR("config.flags is reserved for future use\n"); 15439 return -EINVAL; 15440 } 15441 15442 bp->hwtstamp_ioctl_called = true; 15443 bp->tx_type = config.tx_type; 15444 bp->rx_filter = config.rx_filter; 15445 15446 rc = bnx2x_configure_ptp_filters(bp); 15447 if (rc) 15448 return rc; 15449 15450 config.rx_filter = bp->rx_filter; 15451 15452 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 15453 -EFAULT : 0; 15454} 15455 15456/* Configures HW for PTP */ 15457static int bnx2x_configure_ptp(struct bnx2x *bp) 15458{ 15459 int rc, port = BP_PORT(bp); 15460 u32 wb_data[2]; 15461 15462 /* Reset PTP event detection rules - will be configured in the IOCTL */ 15463 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : 15464 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); 15465 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : 15466 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); 15467 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : 15468 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); 15469 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : 15470 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); 15471 15472 /* Disable PTP packets to host - will be configured in the IOCTL*/ 15473 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : 15474 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); 15475 15476 /* Enable the PTP feature */ 15477 REG_WR(bp, port ? NIG_REG_P1_PTP_EN : 15478 NIG_REG_P0_PTP_EN, 0x3F); 15479 15480 /* Enable the free-running counter */ 15481 wb_data[0] = 0; 15482 wb_data[1] = 0; 15483 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2); 15484 15485 /* Reset drift register (offset register is not reset) */ 15486 rc = bnx2x_send_reset_timesync_ramrod(bp); 15487 if (rc) { 15488 BNX2X_ERR("Failed to reset PHC drift register\n"); 15489 return -EFAULT; 15490 } 15491 15492 /* Reset possibly old timestamps */ 15493 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : 15494 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000); 15495 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : 15496 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); 15497 15498 return 0; 15499} 15500 15501/* Called during load, to initialize PTP-related stuff */ 15502void bnx2x_init_ptp(struct bnx2x *bp) 15503{ 15504 int rc; 15505 15506 /* Configure PTP in HW */ 15507 rc = bnx2x_configure_ptp(bp); 15508 if (rc) { 15509 BNX2X_ERR("Stopping PTP initialization\n"); 15510 return; 15511 } 15512 15513 /* Init work queue for Tx timestamping */ 15514 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); 15515 15516 /* Init cyclecounter and timecounter. This is done only in the first 15517 * load. If done in every load, PTP application will fail when doing 15518 * unload / load (e.g. MTU change) while it is running. 15519 */ 15520 if (!bp->timecounter_init_done) { 15521 bnx2x_init_cyclecounter(bp); 15522 timecounter_init(&bp->timecounter, &bp->cyclecounter, 15523 ktime_to_ns(ktime_get_real())); 15524 bp->timecounter_init_done = true; 15525 } 15526 15527 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n"); 15528} 15529
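/* Note on the PHC arithmetic used above (illustrative, not driver code):
 * bnx2x_init_cyclecounter() programs mult = 1 and shift = 0, so the generic
 * timecounter layer treats the 64-bit free-running NIG counter as already
 * counting nanoseconds. timecounter_cyc2time(), as called from
 * bnx2x_ptp_task() and bnx2x_set_rx_ts(), then effectively computes
 *
 *	delta = (cycles - tc->cycle_last) & cc->mask;
 *	ns    = tc->nsec + ((delta * cc->mult) >> cc->shift);
 *
 * which with mult == 1 and shift == 0 reduces to a plain offset from the
 * wall-clock time captured at timecounter_init(). This is a simplified
 * sketch of the timecounter core; the real implementation also carries a
 * fractional-nanosecond remainder between reads.
 */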
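/* Usage sketch (not part of the driver): bnx2x_hwtstamp_ioctl() above is
 * reached through the standard SIOCSHWTSTAMP request, so a user-space
 * application could enable hardware timestamping on a bnx2x interface
 * roughly as follows. The function name and the chosen tx_type / rx_filter
 * values are illustrative assumptions, not taken from this file; as the
 * handler above shows, the driver may downgrade rx_filter and reports the
 * value it actually programmed in the config copied back to user space.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	static int enable_hw_tstamp(int sock, const char *ifname)
 *	{
 *		struct hwtstamp_config cfg;
 *		struct ifreq ifr;
 *
 *		memset(&cfg, 0, sizeof(cfg));
 *		cfg.tx_type = HWTSTAMP_TX_ON;
 *		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&cfg;
 *
 *		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *	}
 *
 * On success cfg.rx_filter holds the filter mode the driver selected, which
 * may differ from the one requested (e.g. HWTSTAMP_FILTER_ALL is reduced to
 * HWTSTAMP_FILTER_NONE by bnx2x_configure_ptp_filters()).
 */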