/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept intact.
 * This is done by first copying the napi struct from the target
 * to the source, and then memcpying the entire source onto the
 * target. Txdata pointers and related content are updated as well.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
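	/* nbd counts the BDs of this packet that still need to be walked:
	 * the parse BD(s), a possible TSO split BD and the frag BDs. More
	 * than MAX_SKB_FRAGS + 2 of them indicates a corrupted ring.
	 */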
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	/* Ensure subsequent loads occur after hw_cons */
	smp_rmb();

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * got suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

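/* Track the highest SGE index seen so far. SUB_S16() does a signed 16-bit
 * subtraction, so the "idx is newer" test stays correct across index wrap.
 */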
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/* ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);

		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
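/* The 12 bytes break down as: two NOPs (2) + kind (1) + length (1) +
 * TS value (4) + TS echo reply (4) - the timestamp option padded to a
 * 4-byte boundary.
 */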
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs: count of segments
 *
 * Approximates the MSS for this aggregation using its first packet,
 * computes the number of aggregated segments and sets gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is one, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page) {
		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page))
			return -ENOMEM;

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	pool->offset += SGE_PAGE_SIZE;
	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
		get_page(pool->page);
	else
		pool->page = NULL;
	return 0;
}

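/* Attach the pages of a completed aggregation to @skb as fragments,
 * replenishing each consumed SGE along the way. In GRO mode the data is
 * added in gro_size chunks so the stack can split the segments cleanly.
 */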
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		 * (meaning that "next" element will consume 2 indices)
		 */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		 * where we are and drop the whole packet
		 */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page,
					   old_rx_pg.offset, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;

			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;

				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page,
						   old_rx_pg.offset + offset,
						   len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		skb_free_frag(data);
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
			return (void *)__get_free_page(gfp_mask);

		return napi_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_reset_network_header(skb);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

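/* A non-zero gso_size marks an skb aggregated by the FW; its TCP pseudo
 * checksum must be rebuilt before tcp_gro_complete() hands it to the stack.
 */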
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			netdev_WARN_ONCE(bp->dev,
					 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
					 be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			bnx2x_frag_free(fp, new_data);
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

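/* Propagate the HW checksum verdict from the CQE into the skb. */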
static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */
	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier the TPA state machine
		 * might enter an inconsistent state and the kernel stack
		 * might be provided with an incorrect packet description -
		 * these lead to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;

				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

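	/* Fastpath interrupts stay disabled from here on; bnx2x_poll() is
	 * expected to re-enable them through the IGU once the NAPI run
	 * completes.
	 */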
#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;

	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_PERCENT_BW(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls __bnx2x_link_report() under the same locking scheme as the
 * link/PHY state managing code to ensure consistent link reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	if (bp->force_link_down) {
		bp->link_vars.link_up = 0;
		return;
	}

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE * (i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE * (i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (fp->mode != TPA_MODE_DISABLED) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->mode = TPA_MODE_DISABLED;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->mode = TPA_MODE_DISABLED;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (fp->mode != TPA_MODE_DISABLED)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

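	/* Vector layout mirrors bnx2x_enable_msix(): slowpath first (PF
	 * only), then the CNIC vector if supported, then one vector per
	 * ETH queue.
	 */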
	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* CNIC requires an MSI-X vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	/* reconfigure the number of Tx/Rx queues according to the available
	 * MSI-X vectors
	 */
	if (rc == -ENOSPC) {
		/* Get by with single vector */
		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
		if (rc < 0) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	} else if (rc < msix_vec) {
		/* how many fewer vectors did we end up with? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		/* decrease number of queues by number of unallocated entries */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}
	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}

static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;

	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}

static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
	}
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		if (CNIC_LOADED(bp))
			bnx2x_napi_enable_cnic(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (CNIC_LOADED(bp))
		bnx2x_napi_disable_cnic(bp);
}

u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}

	/* select a non-FCoE queue */
	return netdev_pick_tx(dev, skb, NULL) %
	       (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}

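/* Decide how many queues to run with: RSS queues per
 * bnx2x_calc_num_queues() (forced to one in storage-only MF modes) plus a
 * CNIC/FCoE queue when CNIC is supported.
 */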
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);

	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_ONLY(bp))
		bp->num_ethernet_queues = 1;

	/* Add special queues */
	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}

/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:			Driver handle
 * @include_cnic:	handle cnic case
 *
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then
 * there will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31, ...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that
 * __skb_tx_hash() will return a proper Tx index if TC is enabled
 * (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
	int rc, tx, rx;

	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

	/* account for fcoe queue */
	if (include_cnic && !NO_FCOE(bp)) {
		rx++;
		tx++;
	}

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
	   tx, rx);

	return rc;
}

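/* Size each queue's Rx buffer for the device MTU plus FW alignment and
 * header padding; when the result fits in a page, rx_frag_size selects the
 * page-frag allocation path in bnx2x_frag_alloc().
 */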
2026 */ 2027 mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 2028 else 2029 mtu = bp->dev->mtu; 2030 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + 2031 IP_HEADER_ALIGNMENT_PADDING + 2032 ETH_OVERHEAD + 2033 mtu + 2034 BNX2X_FW_RX_ALIGN_END; 2035 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size); 2036 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */ 2037 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) 2038 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; 2039 else 2040 fp->rx_frag_size = 0; 2041 } 2042} 2043 2044static int bnx2x_init_rss(struct bnx2x *bp) 2045{ 2046 int i; 2047 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 2048 2049 /* Prepare the initial contents for the indirection table if RSS is 2050 * enabled 2051 */ 2052 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) 2053 bp->rss_conf_obj.ind_table[i] = 2054 bp->fp->cl_id + 2055 ethtool_rxfh_indir_default(i, num_eth_queues); 2056 2057 /* 2058 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 2059 * per-port, so if explicit configuration is needed, do it only 2060 * for a PMF. 2061 * 2062 * For 57712 and newer, on the other hand, it's a per-function 2063 * configuration. 2064 */ 2065 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); 2066} 2067 2068int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, 2069 bool config_hash, bool enable) 2070{ 2071 struct bnx2x_config_rss_params params = {NULL}; 2072 2073 /* Although RSS is meaningless when there is a single HW queue we 2074 * still need it enabled in order to have HW Rx hash generated. 2075 * 2076 * if (!is_eth_multi(bp)) 2077 * bp->multi_mode = ETH_RSS_MODE_DISABLED; 2078 */ 2079 2080 params.rss_obj = rss_obj; 2081 2082 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 2083 2084 if (enable) { 2085 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags); 2086 2087 /* RSS configuration */ 2088 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags); 2089 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); 2090 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags); 2091 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags); 2092 if (rss_obj->udp_rss_v4) 2093 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags); 2094 if (rss_obj->udp_rss_v6) 2095 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags); 2096 2097 if (!CHIP_IS_E1x(bp)) { 2098 /* valid only for TUNN_MODE_VXLAN tunnel mode */ 2099 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags); 2100 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags); 2101 2102 /* valid only for TUNN_MODE_GRE tunnel mode */ 2103 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags); 2104 } 2105 } else { 2106 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags); 2107 } 2108 2109 /* Hash bits */ 2110 params.rss_result_mask = MULTI_MASK; 2111 2112 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 2113 2114 if (config_hash) { 2115 /* RSS keys */ 2116 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4); 2117 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags); 2118 } 2119 2120 if (IS_PF(bp)) 2121 return bnx2x_config_rss(bp, &params); 2122 else 2123 return bnx2x_vfpf_config_rss(bp, &params); 2124} 2125 2126static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 2127{ 2128 struct bnx2x_func_state_params func_params = {NULL}; 2129 2130 /* Prepare parameters for function state transitions */ 2131 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 2132 2133 func_params.f_obj = &bp->func_obj; 2134 func_params.cmd = BNX2X_F_CMD_HW_INIT; 2135 2136 func_params.params.hw_init.load_phase = load_code; 2137 2138 return bnx2x_func_state_change(bp, &func_params); 2139} 2140 2141/* 2142 * Cleans
the objects that have internal lists without sending 2143 * ramrods. Should be run when interrupts are disabled. 2144 */ 2145void bnx2x_squeeze_objects(struct bnx2x *bp) 2146{ 2147 int rc; 2148 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 2149 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 2150 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; 2151 2152 /***************** Cleanup MACs' object first *************************/ 2153 2154 /* Wait for completion of requested commands */ 2155 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2156 /* Perform a dry cleanup */ 2157 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); 2158 2159 /* Clean ETH primary MAC */ 2160 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); 2161 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, 2162 &ramrod_flags); 2163 if (rc != 0) 2164 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); 2165 2166 /* Cleanup UC list */ 2167 vlan_mac_flags = 0; 2168 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); 2169 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, 2170 &ramrod_flags); 2171 if (rc != 0) 2172 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); 2173 2174 /***************** Now clean mcast object *****************************/ 2175 rparam.mcast_obj = &bp->mcast_obj; 2176 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 2177 2178 /* Add a DEL command... - Since we're doing a driver cleanup only, 2179 * we take a lock surrounding both the initial send and the CONTs, 2180 * as we don't want a true completion to disrupt us in the middle. 2181 */ 2182 netif_addr_lock_bh(bp->dev); 2183 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 2184 if (rc < 0) 2185 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", 2186 rc); 2187 2188 /* ...and wait until all pending commands are cleared */ 2189 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2190 while (rc != 0) { 2191 if (rc < 0) { 2192 BNX2X_ERR("Failed to clean multi-cast object: %d\n", 2193 rc); 2194 netif_addr_unlock_bh(bp->dev); 2195 return; 2196 } 2197 2198 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2199 } 2200 netif_addr_unlock_bh(bp->dev); 2201} 2202 2203#ifndef BNX2X_STOP_ON_ERROR 2204#define LOAD_ERROR_EXIT(bp, label) \ 2205 do { \ 2206 (bp)->state = BNX2X_STATE_ERROR; \ 2207 goto label; \ 2208 } while (0) 2209 2210#define LOAD_ERROR_EXIT_CNIC(bp, label) \ 2211 do { \ 2212 bp->cnic_loaded = false; \ 2213 goto label; \ 2214 } while (0) 2215#else /*BNX2X_STOP_ON_ERROR*/ 2216#define LOAD_ERROR_EXIT(bp, label) \ 2217 do { \ 2218 (bp)->state = BNX2X_STATE_ERROR; \ 2219 (bp)->panic = 1; \ 2220 return -EBUSY; \ 2221 } while (0) 2222#define LOAD_ERROR_EXIT_CNIC(bp, label) \ 2223 do { \ 2224 bp->cnic_loaded = false; \ 2225 (bp)->panic = 1; \ 2226 return -EBUSY; \ 2227 } while (0) 2228#endif /*BNX2X_STOP_ON_ERROR*/ 2229 2230static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) 2231{ 2232 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 2233 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 2234 return; 2235} 2236 2237static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) 2238{ 2239 int num_groups, vf_headroom = 0; 2240 int is_fcoe_stats = NO_FCOE(bp) ?
0 : 1; 2241 2242 /* number of queues for statistics is number of eth queues + FCoE */ 2243 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; 2244 2245 /* Total number of FW statistics requests = 2246 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper 2247 * and fcoe l2 queue) stats + num of queues (which includes another 1 2248 * for fcoe l2 queue if applicable) 2249 */ 2250 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; 2251 2252 /* vf stats appear in the request list, but their data is allocated by 2253 * the VFs themselves. We don't include them in bp->fw_stats_num as 2254 * it is used to determine where to place the vf stats queries in the 2255 * request struct 2256 */ 2257 if (IS_SRIOV(bp)) 2258 vf_headroom = bnx2x_vf_headroom(bp); 2259 2260 /* Request is built from stats_query_header and an array of 2261 * stats_query_cmd_group each of which contains 2262 * STATS_QUERY_CMD_COUNT rules. The real number of requests is 2263 * configured in the stats_query_header. 2264 */ 2265 num_groups = 2266 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + 2267 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? 2268 1 : 0)); 2269 2270 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n", 2271 bp->fw_stats_num, vf_headroom, num_groups); 2272 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + 2273 num_groups * sizeof(struct stats_query_cmd_group); 2274 2275 /* Data for statistics requests + stats_counter. 2276 * stats_counter holds per-STORM counters that are incremented 2277 * when STORM has finished with the current request. 2278 * Memory for FCoE offloaded statistics is counted anyway, 2279 * even if they will not be sent. 2280 * VF stats are not accounted for here as the data of VF stats is stored 2281 * in memory allocated by the VF, not here.
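 * (Consequently, vf_headroom above only widens the request sizing,
 * never fw_stats_data_sz.)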
2282 */ 2283 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + 2284 sizeof(struct per_pf_stats) + 2285 sizeof(struct fcoe_statistics_params) + 2286 sizeof(struct per_queue_stats) * num_queue_stats + 2287 sizeof(struct stats_counter); 2288 2289 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, 2290 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 2291 if (!bp->fw_stats) 2292 goto alloc_mem_err; 2293 2294 /* Set shortcuts */ 2295 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; 2296 bp->fw_stats_req_mapping = bp->fw_stats_mapping; 2297 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) 2298 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); 2299 bp->fw_stats_data_mapping = bp->fw_stats_mapping + 2300 bp->fw_stats_req_sz; 2301 2302 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n", 2303 U64_HI(bp->fw_stats_req_mapping), 2304 U64_LO(bp->fw_stats_req_mapping)); 2305 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n", 2306 U64_HI(bp->fw_stats_data_mapping), 2307 U64_LO(bp->fw_stats_data_mapping)); 2308 return 0; 2309 2310alloc_mem_err: 2311 bnx2x_free_fw_stats_mem(bp); 2312 BNX2X_ERR("Can't allocate FW stats memory\n"); 2313 return -ENOMEM; 2314} 2315 2316/* send load request to mcp and analyze response */ 2317static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) 2318{ 2319 u32 param; 2320 2321 /* init fw_seq */ 2322 bp->fw_seq = 2323 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 2324 DRV_MSG_SEQ_NUMBER_MASK); 2325 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 2326 2327 /* Get current FW pulse sequence */ 2328 bp->fw_drv_pulse_wr_seq = 2329 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & 2330 DRV_PULSE_SEQ_MASK); 2331 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 2332 2333 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA; 2334 2335 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) 2336 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA; 2337 2338 /* load request */ 2339 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); 2340 2341 /* if mcp fails to respond we must abort */ 2342 if (!(*load_code)) { 2343 BNX2X_ERR("MCP response failure, aborting\n"); 2344 return -EBUSY; 2345 } 2346 2347 /* If mcp refused (e.g. other port is in diagnostic mode) we 2348 * must abort 2349 */ 2350 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 2351 BNX2X_ERR("MCP refused load request, aborting\n"); 2352 return -EBUSY; 2353 } 2354 return 0; 2355} 2356 2357/* check whether another PF has already loaded FW to chip. In 2358 * virtualized environments a pf from another VM may have already 2359 * initialized the device including loading FW 2360 */ 2361int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) 2362{ 2363 /* is another pf loaded on this engine? 
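 * If so, the FW image is already in the chip and we only verify below
 * that its version is compatible with ours.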
*/ 2364 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && 2365 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { 2366 u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng; 2367 u32 loaded_fw; 2368 2369 /* read loaded FW from chip */ 2370 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); 2371 2372 loaded_fw_major = loaded_fw & 0xff; 2373 loaded_fw_minor = (loaded_fw >> 8) & 0xff; 2374 loaded_fw_rev = (loaded_fw >> 16) & 0xff; 2375 loaded_fw_eng = (loaded_fw >> 24) & 0xff; 2376 2377 DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n", 2378 loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng); 2379 2380 /* abort nic load if version mismatch */ 2381 if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION || 2382 loaded_fw_minor != BCM_5710_FW_MINOR_VERSION || 2383 loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION || 2384 loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) { 2385 if (print_err) 2386 BNX2X_ERR("loaded FW incompatible. Aborting\n"); 2387 else 2388 BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n"); 2389 2390 return -EBUSY; 2391 } 2392 } 2393 return 0; 2394} 2395 2396/* returns the "mcp load_code" according to global load_count array */ 2397static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) 2398{ 2399 int path = BP_PATH(bp); 2400 2401 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", 2402 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 2403 bnx2x_load_count[path][2]); 2404 bnx2x_load_count[path][0]++; 2405 bnx2x_load_count[path][1 + port]++; 2406 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", 2407 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 2408 bnx2x_load_count[path][2]); 2409 if (bnx2x_load_count[path][0] == 1) 2410 return FW_MSG_CODE_DRV_LOAD_COMMON; 2411 else if (bnx2x_load_count[path][1 + port] == 1) 2412 return FW_MSG_CODE_DRV_LOAD_PORT; 2413 else 2414 return FW_MSG_CODE_DRV_LOAD_FUNCTION; 2415} 2416 2417/* mark PMF if applicable */ 2418static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) 2419{ 2420 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 2421 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 2422 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 2423 bp->port.pmf = 1; 2424 /* We need the barrier to ensure the ordering between the 2425 * writing to bp->port.pmf here and reading it from the 2426 * bnx2x_periodic_task(). 2427 */ 2428 smp_mb(); 2429 } else { 2430 bp->port.pmf = 0; 2431 } 2432 2433 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); 2434} 2435 2436static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) 2437{ 2438 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 2439 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && 2440 (bp->common.shmem2_base)) { 2441 if (SHMEM2_HAS(bp, dcc_support)) 2442 SHMEM2_WR(bp, dcc_support, 2443 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | 2444 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); 2445 if (SHMEM2_HAS(bp, afex_driver_support)) 2446 SHMEM2_WR(bp, afex_driver_support, 2447 SHMEM_AFEX_SUPPORTED_VERSION_ONE); 2448 } 2449 2450 /* Set AFEX default VLAN tag to an invalid value */ 2451 bp->afex_def_vlan_tag = -1; 2452} 2453 2454/** 2455 * bnx2x_bz_fp - zero content of the fastpath structure. 2456 * 2457 * @bp: driver handle 2458 * @index: fastpath index to be zeroed 2459 * 2460 * Makes sure the contents of the bp->fp[index].napi is kept 2461 * intact. 
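 * The tpa_info pointer, the bp back-pointer, the index, max_cos and the
 * txdata pointers are re-established after the zeroing as well.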
2462 */ 2463static void bnx2x_bz_fp(struct bnx2x *bp, int index) 2464{ 2465 struct bnx2x_fastpath *fp = &bp->fp[index]; 2466 int cos; 2467 struct napi_struct orig_napi = fp->napi; 2468 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; 2469 2470 /* bzero bnx2x_fastpath contents */ 2471 if (fp->tpa_info) 2472 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * 2473 sizeof(struct bnx2x_agg_info)); 2474 memset(fp, 0, sizeof(*fp)); 2475 2476 /* Restore the NAPI object as it has been already initialized */ 2477 fp->napi = orig_napi; 2478 fp->tpa_info = orig_tpa_info; 2479 fp->bp = bp; 2480 fp->index = index; 2481 if (IS_ETH_FP(fp)) 2482 fp->max_cos = bp->max_cos; 2483 else 2484 /* Special queues support only one CoS */ 2485 fp->max_cos = 1; 2486 2487 /* Init txdata pointers */ 2488 if (IS_FCOE_FP(fp)) 2489 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; 2490 if (IS_ETH_FP(fp)) 2491 for_each_cos_in_tx_queue(fp, cos) 2492 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * 2493 BNX2X_NUM_ETH_QUEUES(bp) + index]; 2494 2495 /* set the tpa flag for each queue. The tpa flag determines the queue 2496 * minimal size so it must be set prior to queue memory allocation 2497 */ 2498 if (bp->dev->features & NETIF_F_LRO) 2499 fp->mode = TPA_MODE_LRO; 2500 else if (bp->dev->features & NETIF_F_GRO_HW) 2501 fp->mode = TPA_MODE_GRO; 2502 else 2503 fp->mode = TPA_MODE_DISABLED; 2504 2505 /* We don't want TPA if it's disabled in bp 2506 * or if this is an FCoE L2 ring. 2507 */ 2508 if (bp->disable_tpa || IS_FCOE_FP(fp)) 2509 fp->mode = TPA_MODE_DISABLED; 2510} 2511 2512void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) 2513{ 2514 u32 cur; 2515 2516 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) 2517 return; 2518 2519 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); 2520 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n", 2521 cur, state); 2522 2523 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); 2524} 2525 2526int bnx2x_load_cnic(struct bnx2x *bp) 2527{ 2528 int i, rc, port = BP_PORT(bp); 2529 2530 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n"); 2531 2532 mutex_init(&bp->cnic_mutex); 2533 2534 if (IS_PF(bp)) { 2535 rc = bnx2x_alloc_mem_cnic(bp); 2536 if (rc) { 2537 BNX2X_ERR("Unable to allocate bp memory for cnic\n"); 2538 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); 2539 } 2540 } 2541 2542 rc = bnx2x_alloc_fp_mem_cnic(bp); 2543 if (rc) { 2544 BNX2X_ERR("Unable to allocate memory for cnic fps\n"); 2545 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); 2546 } 2547 2548 /* Update the number of queues with the cnic queues */ 2549 rc = bnx2x_set_real_num_queues(bp, 1); 2550 if (rc) { 2551 BNX2X_ERR("Unable to set real_num_queues including cnic\n"); 2552 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); 2553 } 2554 2555 /* Add all CNIC NAPI objects */ 2556 bnx2x_add_all_napi_cnic(bp); 2557 DP(NETIF_MSG_IFUP, "cnic napi added\n"); 2558 bnx2x_napi_enable_cnic(bp); 2559 2560 rc = bnx2x_init_hw_func_cnic(bp); 2561 if (rc) 2562 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); 2563 2564 bnx2x_nic_init_cnic(bp); 2565 2566 if (IS_PF(bp)) { 2567 /* Enable Timer scan */ 2568 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); 2569 2570 /* setup cnic queues */ 2571 for_each_cnic_queue(bp, i) { 2572 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); 2573 if (rc) { 2574 BNX2X_ERR("Queue setup failed\n"); 2575 LOAD_ERROR_EXIT(bp, load_error_cnic2); 2576 } 2577 } 2578 } 2579 2580 /* Initialize Rx filter. 
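 * (applies the current bp->rx_mode accept-flags configuration to the
 * device)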
*/ 2581 bnx2x_set_rx_mode_inner(bp); 2582 2583 /* re-read iscsi info */ 2584 bnx2x_get_iscsi_info(bp); 2585 bnx2x_setup_cnic_irq_info(bp); 2586 bnx2x_setup_cnic_info(bp); 2587 bp->cnic_loaded = true; 2588 if (bp->state == BNX2X_STATE_OPEN) 2589 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 2590 2591 DP(NETIF_MSG_IFUP, "Ending CNIC-related load successfully\n"); 2592 2593 return 0; 2594 2595#ifndef BNX2X_STOP_ON_ERROR 2596load_error_cnic2: 2597 /* Disable Timer scan */ 2598 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 2599 2600load_error_cnic1: 2601 bnx2x_napi_disable_cnic(bp); 2602 /* Update the number of queues without the cnic queues */ 2603 if (bnx2x_set_real_num_queues(bp, 0)) 2604 BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); 2605load_error_cnic0: 2606 BNX2X_ERR("CNIC-related load failed\n"); 2607 bnx2x_free_fp_mem_cnic(bp); 2608 bnx2x_free_mem_cnic(bp); 2609 return rc; 2610#endif /* ! BNX2X_STOP_ON_ERROR */ 2611} 2612 2613/* must be called with rtnl_lock */ 2614int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 2615{ 2616 int port = BP_PORT(bp); 2617 int i, rc = 0, load_code = 0; 2618 2619 DP(NETIF_MSG_IFUP, "Starting NIC load\n"); 2620 DP(NETIF_MSG_IFUP, 2621 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled"); 2622 2623#ifdef BNX2X_STOP_ON_ERROR 2624 if (unlikely(bp->panic)) { 2625 BNX2X_ERR("Can't load NIC when there is panic\n"); 2626 return -EPERM; 2627 } 2628#endif 2629 2630 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 2631 2632 /* zero the structure w/o any lock, before SP handler is initialized */ 2633 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); 2634 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, 2635 &bp->last_reported_link.link_report_flags); 2636 2637 if (IS_PF(bp)) 2638 /* must be called before memory allocation and HW init */ 2639 bnx2x_ilt_set_info(bp); 2640 2641 /* 2642 * Zero fastpath structures while preserving the invariants that are 2643 * set up only once: napi, fp index, max_cos and the bp pointer. 2644 * Also set fp->mode and txdata_ptr. 2645 */ 2646 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); 2647 for_each_queue(bp, i) 2648 bnx2x_bz_fp(bp, i); 2649 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + 2650 bp->num_cnic_queues) * 2651 sizeof(struct bnx2x_fp_txdata)); 2652 2653 bp->fcoe_init = false; 2654 2655 /* Set the receive queues buffer size */ 2656 bnx2x_set_rx_buf_size(bp); 2657 2658 if (IS_PF(bp)) { 2659 rc = bnx2x_alloc_mem(bp); 2660 if (rc) { 2661 BNX2X_ERR("Unable to allocate bp memory\n"); 2662 return rc; 2663 } 2664 } 2665 2666 /* needs to be done after alloc mem, since it's self adjusting to the 2667 * amount of memory available for RSS queues 2668 */ 2669 rc = bnx2x_alloc_fp_mem(bp); 2670 if (rc) { 2671 BNX2X_ERR("Unable to allocate memory for fps\n"); 2672 LOAD_ERROR_EXIT(bp, load_error0); 2673 } 2674 2675 /* Allocate memory for FW statistics */ 2676 rc = bnx2x_alloc_fw_stats_mem(bp); 2677 if (rc) 2678 LOAD_ERROR_EXIT(bp, load_error0); 2679 2680 /* request pf to initialize status blocks */ 2681 if (IS_VF(bp)) { 2682 rc = bnx2x_vfpf_init(bp); 2683 if (rc) 2684 LOAD_ERROR_EXIT(bp, load_error0); 2685 } 2686 2687 /* As long as bnx2x_alloc_mem() may update 2688 * bp->num_queues, bnx2x_set_real_num_queues() should always 2689 * come after it. At this stage cnic queues are not counted.
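 * They are added later, when bnx2x_load_cnic() calls
 * bnx2x_set_real_num_queues(bp, 1).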
2690 */ 2691 rc = bnx2x_set_real_num_queues(bp, 0); 2692 if (rc) { 2693 BNX2X_ERR("Unable to set real_num_queues\n"); 2694 LOAD_ERROR_EXIT(bp, load_error0); 2695 } 2696 2697 /* configure multi cos mappings in kernel. 2698 * this configuration may be overridden by a multi class queue 2699 * discipline or by a dcbx negotiation result. 2700 */ 2701 bnx2x_setup_tc(bp->dev, bp->max_cos); 2702 2703 /* Add all NAPI objects */ 2704 bnx2x_add_all_napi(bp); 2705 DP(NETIF_MSG_IFUP, "napi added\n"); 2706 bnx2x_napi_enable(bp); 2707 2708 if (IS_PF(bp)) { 2709 /* set pf load just before approaching the MCP */ 2710 bnx2x_set_pf_load(bp); 2711 2712 /* if mcp exists send load request and analyze response */ 2713 if (!BP_NOMCP(bp)) { 2714 /* attempt to load pf */ 2715 rc = bnx2x_nic_load_request(bp, &load_code); 2716 if (rc) 2717 LOAD_ERROR_EXIT(bp, load_error1); 2718 2719 /* what did mcp say? */ 2720 rc = bnx2x_compare_fw_ver(bp, load_code, true); 2721 if (rc) { 2722 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 2723 LOAD_ERROR_EXIT(bp, load_error2); 2724 } 2725 } else { 2726 load_code = bnx2x_nic_load_no_mcp(bp, port); 2727 } 2728 2729 /* mark pmf if applicable */ 2730 bnx2x_nic_load_pmf(bp, load_code); 2731 2732 /* Init Function state controlling object */ 2733 bnx2x__init_func_obj(bp); 2734 2735 /* Initialize HW */ 2736 rc = bnx2x_init_hw(bp, load_code); 2737 if (rc) { 2738 BNX2X_ERR("HW init failed, aborting\n"); 2739 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 2740 LOAD_ERROR_EXIT(bp, load_error2); 2741 } 2742 } 2743 2744 bnx2x_pre_irq_nic_init(bp); 2745 2746 /* Connect to IRQs */ 2747 rc = bnx2x_setup_irqs(bp); 2748 if (rc) { 2749 BNX2X_ERR("setup irqs failed\n"); 2750 if (IS_PF(bp)) 2751 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 2752 LOAD_ERROR_EXIT(bp, load_error2); 2753 } 2754 2755 /* Init per-function objects */ 2756 if (IS_PF(bp)) { 2757 /* Setup NIC internals and enable interrupts */ 2758 bnx2x_post_irq_nic_init(bp, load_code); 2759 2760 bnx2x_init_bp_objs(bp); 2761 bnx2x_iov_nic_init(bp); 2762 2763 /* Set AFEX default VLAN tag to an invalid value */ 2764 bp->afex_def_vlan_tag = -1; 2765 bnx2x_nic_load_afex_dcc(bp, load_code); 2766 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 2767 rc = bnx2x_func_start(bp); 2768 if (rc) { 2769 BNX2X_ERR("Function start failed!\n"); 2770 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 2771 2772 LOAD_ERROR_EXIT(bp, load_error3); 2773 } 2774 2775 /* Send LOAD_DONE command to MCP */ 2776 if (!BP_NOMCP(bp)) { 2777 load_code = bnx2x_fw_command(bp, 2778 DRV_MSG_CODE_LOAD_DONE, 0); 2779 if (!load_code) { 2780 BNX2X_ERR("MCP response failure, aborting\n"); 2781 rc = -EBUSY; 2782 LOAD_ERROR_EXIT(bp, load_error3); 2783 } 2784 } 2785 2786 /* initialize FW coalescing state machines in RAM */ 2787 bnx2x_update_coalesce(bp); 2788 } 2789 2790 /* setup the leading queue */ 2791 rc = bnx2x_setup_leading(bp); 2792 if (rc) { 2793 BNX2X_ERR("Setup leading failed!\n"); 2794 LOAD_ERROR_EXIT(bp, load_error3); 2795 } 2796 2797 /* set up the rest of the queues */ 2798 for_each_nondefault_eth_queue(bp, i) { 2799 if (IS_PF(bp)) 2800 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); 2801 else /* VF */ 2802 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); 2803 if (rc) { 2804 BNX2X_ERR("Queue %d setup failed\n", i); 2805 LOAD_ERROR_EXIT(bp, load_error3); 2806 } 2807 } 2808 2809 /* setup rss */ 2810 rc = bnx2x_init_rss(bp); 2811 if (rc) { 2812 BNX2X_ERR("PF RSS init failed\n"); 2813 LOAD_ERROR_EXIT(bp, load_error3); 2814 } 2815 2816 /* Now when Clients are configured we are ready 
to work */ 2817 bp->state = BNX2X_STATE_OPEN; 2818 2819 /* Configure a ucast MAC */ 2820 if (IS_PF(bp)) 2821 rc = bnx2x_set_eth_mac(bp, true); 2822 else /* vf */ 2823 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, 2824 true); 2825 if (rc) { 2826 BNX2X_ERR("Setting Ethernet MAC failed\n"); 2827 LOAD_ERROR_EXIT(bp, load_error3); 2828 } 2829 2830 if (IS_PF(bp) && bp->pending_max) { 2831 bnx2x_update_max_mf_config(bp, bp->pending_max); 2832 bp->pending_max = 0; 2833 } 2834 2835 bp->force_link_down = false; 2836 if (bp->port.pmf) { 2837 rc = bnx2x_initial_phy_init(bp, load_mode); 2838 if (rc) 2839 LOAD_ERROR_EXIT(bp, load_error3); 2840 } 2841 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; 2842 2843 /* Start fast path */ 2844 2845 /* Re-configure vlan filters */ 2846 rc = bnx2x_vlan_reconfigure_vid(bp); 2847 if (rc) 2848 LOAD_ERROR_EXIT(bp, load_error3); 2849 2850 /* Initialize Rx filter. */ 2851 bnx2x_set_rx_mode_inner(bp); 2852 2853 if (bp->flags & PTP_SUPPORTED) { 2854 bnx2x_register_phc(bp); 2855 bnx2x_init_ptp(bp); 2856 bnx2x_configure_ptp_filters(bp); 2857 } 2858 /* Start Tx */ 2859 switch (load_mode) { 2860 case LOAD_NORMAL: 2861 /* Tx queue should be only re-enabled */ 2862 netif_tx_wake_all_queues(bp->dev); 2863 break; 2864 2865 case LOAD_OPEN: 2866 netif_tx_start_all_queues(bp->dev); 2867 smp_mb__after_atomic(); 2868 break; 2869 2870 case LOAD_DIAG: 2871 case LOAD_LOOPBACK_EXT: 2872 bp->state = BNX2X_STATE_DIAG; 2873 break; 2874 2875 default: 2876 break; 2877 } 2878 2879 if (bp->port.pmf) 2880 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0); 2881 else 2882 bnx2x__link_status_update(bp); 2883 2884 /* start the timer */ 2885 mod_timer(&bp->timer, jiffies + bp->current_interval); 2886 2887 if (CNIC_ENABLED(bp)) 2888 bnx2x_load_cnic(bp); 2889 2890 if (IS_PF(bp)) 2891 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); 2892 2893 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 2894 /* mark driver is loaded in shmem2 */ 2895 u32 val; 2896 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); 2897 val &= ~DRV_FLAGS_MTU_MASK; 2898 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); 2899 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], 2900 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 2901 DRV_FLAGS_CAPABILITIES_LOADED_L2); 2902 } 2903 2904 /* Wait for all pending SP commands to complete */ 2905 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) { 2906 BNX2X_ERR("Timeout waiting for SP elements to complete\n"); 2907 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 2908 return -EBUSY; 2909 } 2910 2911 /* Update driver data for On-Chip MFW dump. 
*/ 2912 if (IS_PF(bp)) 2913 bnx2x_update_mfw_dump(bp); 2914 2915 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ 2916 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) 2917 bnx2x_dcbx_init(bp, false); 2918 2919 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) 2920 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE); 2921 2922 DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n"); 2923 2924 return 0; 2925 2926#ifndef BNX2X_STOP_ON_ERROR 2927load_error3: 2928 if (IS_PF(bp)) { 2929 bnx2x_int_disable_sync(bp, 1); 2930 2931 /* Clean queueable objects */ 2932 bnx2x_squeeze_objects(bp); 2933 } 2934 2935 /* Free SKBs, SGEs, TPA pool and driver internals */ 2936 bnx2x_free_skbs(bp); 2937 for_each_rx_queue(bp, i) 2938 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 2939 2940 /* Release IRQs */ 2941 bnx2x_free_irq(bp); 2942load_error2: 2943 if (IS_PF(bp) && !BP_NOMCP(bp)) { 2944 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 2945 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 2946 } 2947 2948 bp->port.pmf = 0; 2949load_error1: 2950 bnx2x_napi_disable(bp); 2951 bnx2x_del_all_napi(bp); 2952 2953 /* clear pf_load status, as it was already set */ 2954 if (IS_PF(bp)) 2955 bnx2x_clear_pf_load(bp); 2956load_error0: 2957 bnx2x_free_fw_stats_mem(bp); 2958 bnx2x_free_fp_mem(bp); 2959 bnx2x_free_mem(bp); 2960 2961 return rc; 2962#endif /* ! BNX2X_STOP_ON_ERROR */ 2963} 2964 2965int bnx2x_drain_tx_queues(struct bnx2x *bp) 2966{ 2967 u8 rc = 0, cos, i; 2968 2969 /* Wait until tx fastpath tasks complete */ 2970 for_each_tx_queue(bp, i) { 2971 struct bnx2x_fastpath *fp = &bp->fp[i]; 2972 2973 for_each_cos_in_tx_queue(fp, cos) 2974 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); 2975 if (rc) 2976 return rc; 2977 } 2978 return 0; 2979} 2980 2981/* must be called with rtnl_lock */ 2982int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) 2983{ 2984 int i; 2985 bool global = false; 2986 2987 DP(NETIF_MSG_IFUP, "Starting NIC unload\n"); 2988 2989 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) 2990 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); 2991 2992 /* mark the driver as unloaded in shmem2 */ 2993 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 2994 u32 val; 2995 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); 2996 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], 2997 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 2998 } 2999 3000 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && 3001 (bp->state == BNX2X_STATE_CLOSED || 3002 bp->state == BNX2X_STATE_ERROR)) { 3003 /* We can get here if the driver has been unloaded 3004 * during parity error recovery and is either waiting for a 3005 * leader to complete or for other functions to unload and 3006 * then ifdown has been issued. In this case we want to 3007 * unload and let other functions complete the recovery 3008 * process. 3009 */ 3010 bp->recovery_state = BNX2X_RECOVERY_DONE; 3011 bp->is_leader = 0; 3012 bnx2x_release_leader_lock(bp); 3013 smp_mb(); 3014 3015 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n"); 3016 BNX2X_ERR("Can't unload in closed or error state\n"); 3017 return -EINVAL; 3018 } 3019 3020 /* Nothing to do during unload if the previous bnx2x_nic_load() 3021 * has not completed successfully - all resources are released. 3022 * 3023 * We can get here only after an unsuccessful ndo_* callback, during which 3024 * the dev->IFF_UP flag is still on.
3025 */ 3026 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) 3027 return 0; 3028 3029 /* It's important to set bp->state to a value different from 3030 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() 3031 * may restart the Tx from the NAPI context (see bnx2x_tx_int()). 3032 */ 3033 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 3034 smp_mb(); 3035 3036 /* indicate to VFs that the PF is going down */ 3037 bnx2x_iov_channel_down(bp); 3038 3039 if (CNIC_LOADED(bp)) 3040 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 3041 3042 /* Stop Tx */ 3043 bnx2x_tx_disable(bp); 3044 netdev_reset_tc(bp->dev); 3045 3046 bp->rx_mode = BNX2X_RX_MODE_NONE; 3047 3048 del_timer_sync(&bp->timer); 3049 3050 if (IS_PF(bp) && !BP_NOMCP(bp)) { 3051 /* Set ALWAYS_ALIVE bit in shmem */ 3052 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 3053 bnx2x_drv_pulse(bp); 3054 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 3055 bnx2x_save_statistics(bp); 3056 } 3057 3058 /* Wait till consumers catch up with producers in all queues. 3059 * If we're recovering, FW can't write to host so there is no reason 3060 * to wait for the queues to complete all Tx. 3061 */ 3062 if (unload_mode != UNLOAD_RECOVERY) 3063 bnx2x_drain_tx_queues(bp); 3064 3065 /* if VF, indicate to the PF that this function is going down (the PF 3066 * will delete sp elements and clear initializations) 3067 */ 3068 if (IS_VF(bp)) { 3069 bnx2x_clear_vlan_info(bp); 3070 bnx2x_vfpf_close_vf(bp); 3071 } else if (unload_mode != UNLOAD_RECOVERY) { 3072 /* if this is a normal/close unload, we need to clean up the chip */ 3073 bnx2x_chip_cleanup(bp, unload_mode, keep_link); 3074 } else { 3075 /* Send the UNLOAD_REQUEST to the MCP */ 3076 bnx2x_send_unload_req(bp, unload_mode); 3077 3078 /* Prevent transactions to host from the functions on the 3079 * engine that doesn't reset global blocks in case of global 3080 * attention once global blocks are reset and gates are opened 3081 * (the engine whose leader will perform the recovery 3082 * last). 3083 */ 3084 if (!CHIP_IS_E1x(bp)) 3085 bnx2x_pf_disable(bp); 3086 3087 /* Disable HW interrupts, NAPI */ 3088 bnx2x_netif_stop(bp, 1); 3089 /* Delete all NAPI objects */ 3090 bnx2x_del_all_napi(bp); 3091 if (CNIC_LOADED(bp)) 3092 bnx2x_del_all_napi_cnic(bp); 3093 /* Release IRQs */ 3094 bnx2x_free_irq(bp); 3095 3096 /* Report UNLOAD_DONE to MCP */ 3097 bnx2x_send_unload_done(bp, false); 3098 } 3099 3100 /* 3101 * At this stage no more interrupts will arrive so we may safely clean 3102 * the queueable objects here in case they failed to get cleaned so far. 3103 */ 3104 if (IS_PF(bp)) 3105 bnx2x_squeeze_objects(bp); 3106 3107 /* There should be no more pending SP commands at this stage */ 3108 bp->sp_state = 0; 3109 3110 bp->port.pmf = 0; 3111 3112 /* clear pending work in rtnl task */ 3113 bp->sp_rtnl_state = 0; 3114 smp_mb(); 3115 3116 /* Free SKBs, SGEs, TPA pool and driver internals */ 3117 bnx2x_free_skbs(bp); 3118 if (CNIC_LOADED(bp)) 3119 bnx2x_free_skbs_cnic(bp); 3120 for_each_rx_queue(bp, i) 3121 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 3122 3123 bnx2x_free_fp_mem(bp); 3124 if (CNIC_LOADED(bp)) 3125 bnx2x_free_fp_mem_cnic(bp); 3126 3127 if (IS_PF(bp)) { 3128 if (CNIC_LOADED(bp)) 3129 bnx2x_free_mem_cnic(bp); 3130 } 3131 bnx2x_free_mem(bp); 3132 3133 bp->state = BNX2X_STATE_CLOSED; 3134 bp->cnic_loaded = false; 3135 3136 /* Clear driver version indication in shmem */ 3137 if (IS_PF(bp) && !BP_NOMCP(bp)) 3138 bnx2x_update_mng_version(bp); 3139 3140 /* Check if there are pending parity attentions.
If there are - set 3141 * RECOVERY_IN_PROGRESS. 3142 */ 3143 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { 3144 bnx2x_set_reset_in_progress(bp); 3145 3146 /* Set RESET_IS_GLOBAL if needed */ 3147 if (global) 3148 bnx2x_set_reset_global(bp); 3149 } 3150 3151 /* The last driver must disable a "close the gate" if there is no 3152 * parity attention or "process kill" pending. 3153 */ 3154 if (IS_PF(bp) && 3155 !bnx2x_clear_pf_load(bp) && 3156 bnx2x_reset_is_done(bp, BP_PATH(bp))) 3157 bnx2x_disable_close_the_gate(bp); 3158 3159 DP(NETIF_MSG_IFUP, "Ending NIC unload\n"); 3160 3161 return 0; 3162} 3163 3164int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) 3165{ 3166 u16 pmcsr; 3167 3168 /* If there is no power capability, silently succeed */ 3169 if (!bp->pdev->pm_cap) { 3170 BNX2X_DEV_INFO("No power capability. Breaking.\n"); 3171 return 0; 3172 } 3173 3174 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); 3175 3176 switch (state) { 3177 case PCI_D0: 3178 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, 3179 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3180 PCI_PM_CTRL_PME_STATUS)); 3181 3182 if (pmcsr & PCI_PM_CTRL_STATE_MASK) 3183 /* delay required during transition out of D3hot */ 3184 msleep(20); 3185 break; 3186 3187 case PCI_D3hot: 3188 /* If there are other clients above don't 3189 shut down the power */ 3190 if (atomic_read(&bp->pdev->enable_cnt) != 1) 3191 return 0; 3192 /* Don't shut down the power for emulation and FPGA */ 3193 if (CHIP_REV_IS_SLOW(bp)) 3194 return 0; 3195 3196 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3197 pmcsr |= 3; 3198 3199 if (bp->wol) 3200 pmcsr |= PCI_PM_CTRL_PME_ENABLE; 3201 3202 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, 3203 pmcsr); 3204 3205 /* No more memory access after this point until 3206 * device is brought back to D0. 3207 */ 3208 break; 3209 3210 default: 3211 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); 3212 return -EINVAL; 3213 } 3214 return 0; 3215} 3216 3217/* 3218 * net_device service functions 3219 */ 3220static int bnx2x_poll(struct napi_struct *napi, int budget) 3221{ 3222 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 3223 napi); 3224 struct bnx2x *bp = fp->bp; 3225 int rx_work_done; 3226 u8 cos; 3227 3228#ifdef BNX2X_STOP_ON_ERROR 3229 if (unlikely(bp->panic)) { 3230 napi_complete(napi); 3231 return 0; 3232 } 3233#endif 3234 for_each_cos_in_tx_queue(fp, cos) 3235 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) 3236 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); 3237 3238 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0; 3239 3240 if (rx_work_done < budget) { 3241 /* No need to update SB for FCoE L2 ring as long as 3242 * it's connected to the default SB and the SB 3243 * has been updated when NAPI was scheduled. 3244 */ 3245 if (IS_FCOE_FP(fp)) { 3246 napi_complete_done(napi, rx_work_done); 3247 } else { 3248 bnx2x_update_fpsb_idx(fp); 3249 /* bnx2x_has_rx_work() reads the status block, 3250 * thus we need to ensure that status block indices 3251 * have been actually read (bnx2x_update_fpsb_idx) 3252 * prior to this check (bnx2x_has_rx_work) so that 3253 * we won't write the "newer" value of the status block 3254 * to IGU (if there was a DMA right after 3255 * bnx2x_has_rx_work and if there is no rmb, the memory 3256 * reading (bnx2x_update_fpsb_idx) may be postponed 3257 * to right before bnx2x_ack_sb). 
In this case there 3258 * will never be another interrupt until there is 3259 * another update of the status block, while there 3260 * is still unhandled work. 3261 */ 3262 rmb(); 3263 3264 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 3265 if (napi_complete_done(napi, rx_work_done)) { 3266 /* Re-enable interrupts */ 3267 DP(NETIF_MSG_RX_STATUS, 3268 "Update index to %d\n", fp->fp_hc_idx); 3269 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 3270 le16_to_cpu(fp->fp_hc_idx), 3271 IGU_INT_ENABLE, 1); 3272 } 3273 } else { 3274 rx_work_done = budget; 3275 } 3276 } 3277 } 3278 3279 return rx_work_done; 3280} 3281 3282/* we split the first BD into headers and data BDs 3283 * to ease the pain of our fellow microcode engineers 3284 * we use one mapping for both BDs 3285 */ 3286static u16 bnx2x_tx_split(struct bnx2x *bp, 3287 struct bnx2x_fp_txdata *txdata, 3288 struct sw_tx_bd *tx_buf, 3289 struct eth_tx_start_bd **tx_bd, u16 hlen, 3290 u16 bd_prod) 3291{ 3292 struct eth_tx_start_bd *h_tx_bd = *tx_bd; 3293 struct eth_tx_bd *d_tx_bd; 3294 dma_addr_t mapping; 3295 int old_len = le16_to_cpu(h_tx_bd->nbytes); 3296 3297 /* first fix first BD */ 3298 h_tx_bd->nbytes = cpu_to_le16(hlen); 3299 3300 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n", 3301 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo); 3302 3303 /* now get a new data BD 3304 * (after the pbd) and fill it */ 3305 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 3306 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; 3307 3308 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), 3309 le32_to_cpu(h_tx_bd->addr_lo)) + hlen; 3310 3311 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 3312 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 3313 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); 3314 3315 /* this marks the BD as one that has no individual mapping */ 3316 tx_buf->flags |= BNX2X_TSO_SPLIT_BD; 3317 3318 DP(NETIF_MSG_TX_QUEUED, 3319 "TSO split data size is %d (%x:%x)\n", 3320 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); 3321 3322 /* update tx_bd */ 3323 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; 3324 3325 return bd_prod; 3326} 3327 3328#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) 3329#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) 3330static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) 3331{ 3332 __sum16 tsum = (__force __sum16) csum; 3333 3334 if (fix > 0) 3335 tsum = ~csum_fold(csum_sub((__force __wsum) csum, 3336 csum_partial(t_header - fix, fix, 0))); 3337 3338 else if (fix < 0) 3339 tsum = ~csum_fold(csum_add((__force __wsum) csum, 3340 csum_partial(t_header, -fix, 0))); 3341 3342 return bswab16(tsum); 3343} 3344 3345static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) 3346{ 3347 u32 rc; 3348 __u8 prot = 0; 3349 __be16 protocol; 3350 3351 if (skb->ip_summed != CHECKSUM_PARTIAL) 3352 return XMIT_PLAIN; 3353 3354 protocol = vlan_get_protocol(skb); 3355 if (protocol == htons(ETH_P_IPV6)) { 3356 rc = XMIT_CSUM_V6; 3357 prot = ipv6_hdr(skb)->nexthdr; 3358 } else { 3359 rc = XMIT_CSUM_V4; 3360 prot = ip_hdr(skb)->protocol; 3361 } 3362 3363 if (!CHIP_IS_E1x(bp) && skb->encapsulation) { 3364 if (inner_ip_hdr(skb)->version == 6) { 3365 rc |= XMIT_CSUM_ENC_V6; 3366 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3367 rc |= XMIT_CSUM_TCP; 3368 } else { 3369 rc |= XMIT_CSUM_ENC_V4; 3370 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) 3371 rc |= XMIT_CSUM_TCP; 3372 } 3373 } 3374 if (prot == IPPROTO_TCP) 3375 rc |= XMIT_CSUM_TCP; 3376 3377 if 
(skb_is_gso(skb)) { 3378 if (skb_is_gso_v6(skb)) { 3379 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); 3380 if (rc & XMIT_CSUM_ENC) 3381 rc |= XMIT_GSO_ENC_V6; 3382 } else { 3383 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); 3384 if (rc & XMIT_CSUM_ENC) 3385 rc |= XMIT_GSO_ENC_V4; 3386 } 3387 } 3388 3389 return rc; 3390} 3391 3392/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */ 3393#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4 3394 3395/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ 3396#define BNX2X_NUM_TSO_WIN_SUB_BDS 3 3397 3398#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) 3399/* check if packet requires linearization (packet is too fragmented) 3400 no need to check fragmentation if page size > 8K (there will be no 3401 violation to FW restrictions) */ 3402static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, 3403 u32 xmit_type) 3404{ 3405 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS; 3406 int to_copy = 0, hlen = 0; 3407 3408 if (xmit_type & XMIT_GSO_ENC) 3409 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS; 3410 3411 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) { 3412 if (xmit_type & XMIT_GSO) { 3413 unsigned short lso_mss = skb_shinfo(skb)->gso_size; 3414 int wnd_size = MAX_FETCH_BD - num_tso_win_sub; 3415 /* Number of windows to check */ 3416 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; 3417 int wnd_idx = 0; 3418 int frag_idx = 0; 3419 u32 wnd_sum = 0; 3420 3421 /* Headers length */ 3422 if (xmit_type & XMIT_GSO_ENC) 3423 hlen = (int)(skb_inner_transport_header(skb) - 3424 skb->data) + 3425 inner_tcp_hdrlen(skb); 3426 else 3427 hlen = (int)(skb_transport_header(skb) - 3428 skb->data) + tcp_hdrlen(skb); 3429 3430 /* Amount of data (w/o headers) on linear part of SKB*/ 3431 first_bd_sz = skb_headlen(skb) - hlen; 3432 3433 wnd_sum = first_bd_sz; 3434 3435 /* Calculate the first sum - it's special */ 3436 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) 3437 wnd_sum += 3438 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]); 3439 3440 /* If there was data on linear skb data - check it */ 3441 if (first_bd_sz > 0) { 3442 if (unlikely(wnd_sum < lso_mss)) { 3443 to_copy = 1; 3444 goto exit_lbl; 3445 } 3446 3447 wnd_sum -= first_bd_sz; 3448 } 3449 3450 /* Others are easier: run through the frag list and 3451 check all windows */ 3452 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { 3453 wnd_sum += 3454 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]); 3455 3456 if (unlikely(wnd_sum < lso_mss)) { 3457 to_copy = 1; 3458 break; 3459 } 3460 wnd_sum -= 3461 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]); 3462 } 3463 } else { 3464 /* in non-LSO too fragmented packet should always 3465 be linearized */ 3466 to_copy = 1; 3467 } 3468 } 3469 3470exit_lbl: 3471 if (unlikely(to_copy)) 3472 DP(NETIF_MSG_TX_QUEUED, 3473 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n", 3474 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", 3475 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); 3476 3477 return to_copy; 3478} 3479#endif 3480 3481/** 3482 * bnx2x_set_pbd_gso - update PBD in GSO case. 
3483 * 3484 * @skb: packet skb 3485 * @pbd: parse BD 3486 * @xmit_type: xmit flags 3487 */ 3488static void bnx2x_set_pbd_gso(struct sk_buff *skb, 3489 struct eth_tx_parse_bd_e1x *pbd, 3490 u32 xmit_type) 3491{ 3492 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 3493 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); 3494 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb)); 3495 3496 if (xmit_type & XMIT_GSO_V4) { 3497 pbd->ip_id = bswab16(ip_hdr(skb)->id); 3498 pbd->tcp_pseudo_csum = 3499 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, 3500 ip_hdr(skb)->daddr, 3501 0, IPPROTO_TCP, 0)); 3502 } else { 3503 pbd->tcp_pseudo_csum = 3504 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3505 &ipv6_hdr(skb)->daddr, 3506 0, IPPROTO_TCP, 0)); 3507 } 3508 3509 pbd->global_data |= 3510 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); 3511} 3512 3513/** 3514 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length 3515 * 3516 * @bp: driver handle 3517 * @skb: packet skb 3518 * @parsing_data: data to be updated 3519 * @xmit_type: xmit flags 3520 * 3521 * 57712/578xx related, when skb has encapsulation 3522 */ 3523static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, 3524 u32 *parsing_data, u32 xmit_type) 3525{ 3526 *parsing_data |= 3527 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) << 3528 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 3529 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; 3530 3531 if (xmit_type & XMIT_CSUM_TCP) { 3532 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) << 3533 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 3534 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; 3535 3536 return skb_inner_transport_header(skb) + 3537 inner_tcp_hdrlen(skb) - skb->data; 3538 } 3539 3540 /* We support checksum offload for TCP and UDP only. 3541 * No need to pass the UDP header length - it's a constant. 3542 */ 3543 return skb_inner_transport_header(skb) + 3544 sizeof(struct udphdr) - skb->data; 3545} 3546 3547/** 3548 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length 3549 * 3550 * @bp: driver handle 3551 * @skb: packet skb 3552 * @parsing_data: data to be updated 3553 * @xmit_type: xmit flags 3554 * 3555 * 57712/578xx related 3556 */ 3557static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, 3558 u32 *parsing_data, u32 xmit_type) 3559{ 3560 *parsing_data |= 3561 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << 3562 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 3563 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; 3564 3565 if (xmit_type & XMIT_CSUM_TCP) { 3566 *parsing_data |= ((tcp_hdrlen(skb) / 4) << 3567 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 3568 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; 3569 3570 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; 3571 } 3572 /* We support checksum offload for TCP and UDP only. 3573 * No need to pass the UDP header length - it's a constant. 
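 * (a UDP header is always sizeof(struct udphdr) == 8 bytes, so the L4
 * start offset written above is all the FW needs)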
3574 */ 3575 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data; 3576} 3577 3578/* set FW indication according to inner or outer protocols if tunneled */ 3579static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, 3580 struct eth_tx_start_bd *tx_start_bd, 3581 u32 xmit_type) 3582{ 3583 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 3584 3585 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6)) 3586 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; 3587 3588 if (!(xmit_type & XMIT_CSUM_TCP)) 3589 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; 3590} 3591 3592/** 3593 * bnx2x_set_pbd_csum - update PBD with checksum and return header length 3594 * 3595 * @bp: driver handle 3596 * @skb: packet skb 3597 * @pbd: parse BD to be updated 3598 * @xmit_type: xmit flags 3599 */ 3600static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, 3601 struct eth_tx_parse_bd_e1x *pbd, 3602 u32 xmit_type) 3603{ 3604 u8 hlen = (skb_network_header(skb) - skb->data) >> 1; 3605 3606 /* for now NS flag is not used in Linux */ 3607 pbd->global_data = 3608 cpu_to_le16(hlen | 3609 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << 3610 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 3611 3612 pbd->ip_hlen_w = (skb_transport_header(skb) - 3613 skb_network_header(skb)) >> 1; 3614 3615 hlen += pbd->ip_hlen_w; 3616 3617 /* We support checksum offload for TCP and UDP only */ 3618 if (xmit_type & XMIT_CSUM_TCP) 3619 hlen += tcp_hdrlen(skb) / 2; 3620 else 3621 hlen += sizeof(struct udphdr) / 2; 3622 3623 pbd->total_hlen_w = cpu_to_le16(hlen); 3624 hlen = hlen*2; 3625 3626 if (xmit_type & XMIT_CSUM_TCP) { 3627 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check); 3628 3629 } else { 3630 s8 fix = SKB_CS_OFF(skb); /* signed! 
*/ 3631 3632 DP(NETIF_MSG_TX_QUEUED, 3633 "hlen %d fix %d csum before fix %x\n", 3634 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); 3635 3636 /* HW bug: fixup the CSUM */ 3637 pbd->tcp_pseudo_csum = 3638 bnx2x_csum_fix(skb_transport_header(skb), 3639 SKB_CS(skb), fix); 3640 3641 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", 3642 pbd->tcp_pseudo_csum); 3643 } 3644 3645 return hlen; 3646} 3647 3648static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, 3649 struct eth_tx_parse_bd_e2 *pbd_e2, 3650 struct eth_tx_parse_2nd_bd *pbd2, 3651 u16 *global_data, 3652 u32 xmit_type) 3653{ 3654 u16 hlen_w = 0; 3655 u8 outerip_off, outerip_len = 0; 3656 3657 /* from outer IP to transport */ 3658 hlen_w = (skb_inner_transport_header(skb) - 3659 skb_network_header(skb)) >> 1; 3660 3661 /* transport len */ 3662 hlen_w += inner_tcp_hdrlen(skb) >> 1; 3663 3664 pbd2->fw_ip_hdr_to_payload_w = hlen_w; 3665 3666 /* outer IP header info */ 3667 if (xmit_type & XMIT_CSUM_V4) { 3668 struct iphdr *iph = ip_hdr(skb); 3669 u32 csum = (__force u32)(~iph->check) - 3670 (__force u32)iph->tot_len - 3671 (__force u32)iph->frag_off; 3672 3673 outerip_len = iph->ihl << 1; 3674 3675 pbd2->fw_ip_csum_wo_len_flags_frag = 3676 bswab16(csum_fold((__force __wsum)csum)); 3677 } else { 3678 pbd2->fw_ip_hdr_to_payload_w = 3679 hlen_w - ((sizeof(struct ipv6hdr)) >> 1); 3680 pbd_e2->data.tunnel_data.flags |= 3681 ETH_TUNNEL_DATA_IPV6_OUTER; 3682 } 3683 3684 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); 3685 3686 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); 3687 3688 /* inner IP header info */ 3689 if (xmit_type & XMIT_CSUM_ENC_V4) { 3690 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); 3691 3692 pbd_e2->data.tunnel_data.pseudo_csum = 3693 bswab16(~csum_tcpudp_magic( 3694 inner_ip_hdr(skb)->saddr, 3695 inner_ip_hdr(skb)->daddr, 3696 0, IPPROTO_TCP, 0)); 3697 } else { 3698 pbd_e2->data.tunnel_data.pseudo_csum = 3699 bswab16(~csum_ipv6_magic( 3700 &inner_ipv6_hdr(skb)->saddr, 3701 &inner_ipv6_hdr(skb)->daddr, 3702 0, IPPROTO_TCP, 0)); 3703 } 3704 3705 outerip_off = (skb_network_header(skb) - skb->data) >> 1; 3706 3707 *global_data |= 3708 outerip_off | 3709 (outerip_len << 3710 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | 3711 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << 3712 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT); 3713 3714 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { 3715 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1); 3716 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1; 3717 } 3718} 3719 3720static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data, 3721 u32 xmit_type) 3722{ 3723 struct ipv6hdr *ipv6; 3724 3725 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6))) 3726 return; 3727 3728 if (xmit_type & XMIT_GSO_ENC_V6) 3729 ipv6 = inner_ipv6_hdr(skb); 3730 else /* XMIT_GSO_V6 */ 3731 ipv6 = ipv6_hdr(skb); 3732 3733 if (ipv6->nexthdr == NEXTHDR_IPV6) 3734 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 3735} 3736 3737/* called with netif_tx_lock 3738 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 3739 * netif_wake_queue() 3740 */ 3741netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 3742{ 3743 struct bnx2x *bp = netdev_priv(dev); 3744 3745 struct netdev_queue *txq; 3746 struct bnx2x_fp_txdata *txdata; 3747 struct sw_tx_bd *tx_buf; 3748 struct eth_tx_start_bd *tx_start_bd, *first_bd; 3749 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 3750 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 3751 struct 
eth_tx_parse_bd_e2 *pbd_e2 = NULL; 3752 struct eth_tx_parse_2nd_bd *pbd2 = NULL; 3753 u32 pbd_e2_parsing_data = 0; 3754 u16 pkt_prod, bd_prod; 3755 int nbd, txq_index; 3756 dma_addr_t mapping; 3757 u32 xmit_type = bnx2x_xmit_type(bp, skb); 3758 int i; 3759 u8 hlen = 0; 3760 __le16 pkt_size = 0; 3761 struct ethhdr *eth; 3762 u8 mac_type = UNICAST_ADDRESS; 3763 3764#ifdef BNX2X_STOP_ON_ERROR 3765 if (unlikely(bp->panic)) 3766 return NETDEV_TX_BUSY; 3767#endif 3768 3769 txq_index = skb_get_queue_mapping(skb); 3770 txq = netdev_get_tx_queue(dev, txq_index); 3771 3772 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); 3773 3774 txdata = &bp->bnx2x_txq[txq_index]; 3775 3776 /* enable this debug print to view the transmission queue being used 3777 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", 3778 txq_index, fp_index, txdata_index); */ 3779 3780 /* enable this debug print to view the transmission details 3781 DP(NETIF_MSG_TX_QUEUED, 3782 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", 3783 txdata->cid, fp_index, txdata_index, txdata, fp); */ 3784 3785 if (unlikely(bnx2x_tx_avail(bp, txdata) < 3786 skb_shinfo(skb)->nr_frags + 3787 BDS_PER_TX_PKT + 3788 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { 3789 /* Handle special storage cases separately */ 3790 if (txdata->tx_ring_size == 0) { 3791 struct bnx2x_eth_q_stats *q_stats = 3792 bnx2x_fp_qstats(bp, txdata->parent_fp); 3793 q_stats->driver_filtered_tx_pkt++; 3794 dev_kfree_skb(skb); 3795 return NETDEV_TX_OK; 3796 } 3797 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; 3798 netif_tx_stop_queue(txq); 3799 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 3800 3801 return NETDEV_TX_BUSY; 3802 } 3803 3804 DP(NETIF_MSG_TX_QUEUED, 3805 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n", 3806 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 3807 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type, 3808 skb->len); 3809 3810 eth = (struct ethhdr *)skb->data; 3811 3812 /* set flag according to packet type (UNICAST_ADDRESS is default)*/ 3813 if (unlikely(is_multicast_ether_addr(eth->h_dest))) { 3814 if (is_broadcast_ether_addr(eth->h_dest)) 3815 mac_type = BROADCAST_ADDRESS; 3816 else 3817 mac_type = MULTICAST_ADDRESS; 3818 } 3819 3820#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) 3821 /* First, check if we need to linearize the skb (due to FW 3822 restrictions). No need to check fragmentation if page size > 8K 3823 (there will be no violation to FW restrictions) */ 3824 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { 3825 /* Statistics of linearization */ 3826 bp->lin_cnt++; 3827 if (skb_linearize(skb) != 0) { 3828 DP(NETIF_MSG_TX_QUEUED, 3829 "SKB linearization failed - silently dropping this SKB\n"); 3830 dev_kfree_skb_any(skb); 3831 return NETDEV_TX_OK; 3832 } 3833 } 3834#endif 3835 /* Map skb linear data for DMA */ 3836 mapping = dma_map_single(&bp->pdev->dev, skb->data, 3837 skb_headlen(skb), DMA_TO_DEVICE); 3838 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 3839 DP(NETIF_MSG_TX_QUEUED, 3840 "SKB mapping failed - silently dropping this SKB\n"); 3841 dev_kfree_skb_any(skb); 3842 return NETDEV_TX_OK; 3843 } 3844 /* 3845 Please read carefully. First we use one BD which we mark as start, 3846 then we have a parsing info BD (used for TSO or xsum), 3847 and only then we have the rest of the TSO BDs. 
3848 (don't forget to mark the last one as last, 3849 and to unmap only AFTER you write to the BD ...) 3850 And above all, all pdb sizes are in words - NOT DWORDS! 3851 */ 3852 3853 /* get current pkt produced now - advance it just before sending packet 3854 * since mapping of pages may fail and cause packet to be dropped 3855 */ 3856 pkt_prod = txdata->tx_pkt_prod; 3857 bd_prod = TX_BD(txdata->tx_bd_prod); 3858 3859 /* get a tx_buf and first BD 3860 * tx_start_bd may be changed during SPLIT, 3861 * but first_bd will always stay first 3862 */ 3863 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; 3864 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; 3865 first_bd = tx_start_bd; 3866 3867 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 3868 3869 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 3870 if (!(bp->flags & TX_TIMESTAMPING_EN)) { 3871 bp->eth_stats.ptp_skip_tx_ts++; 3872 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n"); 3873 } else if (bp->ptp_tx_skb) { 3874 bp->eth_stats.ptp_skip_tx_ts++; 3875 netdev_err_once(bp->dev, 3876 "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n"); 3877 } else { 3878 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3879 /* schedule check for Tx timestamp */ 3880 bp->ptp_tx_skb = skb_get(skb); 3881 bp->ptp_tx_start = jiffies; 3882 schedule_work(&bp->ptp_task); 3883 } 3884 } 3885 3886 /* header nbd: indirectly zero other flags! */ 3887 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; 3888 3889 /* remember the first BD of the packet */ 3890 tx_buf->first_bd = txdata->tx_bd_prod; 3891 tx_buf->skb = skb; 3892 tx_buf->flags = 0; 3893 3894 DP(NETIF_MSG_TX_QUEUED, 3895 "sending pkt %u @%p next_idx %u bd %u @%p\n", 3896 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); 3897 3898 if (skb_vlan_tag_present(skb)) { 3899 tx_start_bd->vlan_or_ethertype = 3900 cpu_to_le16(skb_vlan_tag_get(skb)); 3901 tx_start_bd->bd_flags.as_bitfield |= 3902 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 3903 } else { 3904 /* when transmitting in a vf, start bd must hold the ethertype 3905 * for fw to enforce it 3906 */ 3907 u16 vlan_tci = 0; 3908#ifndef BNX2X_STOP_ON_ERROR 3909 if (IS_VF(bp)) { 3910#endif 3911 /* Still need to consider inband vlan for enforced */ 3912 if (__vlan_get_tag(skb, &vlan_tci)) { 3913 tx_start_bd->vlan_or_ethertype = 3914 cpu_to_le16(ntohs(eth->h_proto)); 3915 } else { 3916 tx_start_bd->bd_flags.as_bitfield |= 3917 (X_ETH_INBAND_VLAN << 3918 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 3919 tx_start_bd->vlan_or_ethertype = 3920 cpu_to_le16(vlan_tci); 3921 } 3922#ifndef BNX2X_STOP_ON_ERROR 3923 } else { 3924 /* used by FW for packet accounting */ 3925 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); 3926 } 3927#endif 3928 } 3929 3930 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ 3931 3932 /* turn on parsing and get a BD */ 3933 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 3934 3935 if (xmit_type & XMIT_CSUM) 3936 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); 3937 3938 if (!CHIP_IS_E1x(bp)) { 3939 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; 3940 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 3941 3942 if (xmit_type & XMIT_CSUM_ENC) { 3943 u16 global_data = 0; 3944 3945 /* Set PBD in enc checksum offload case */ 3946 hlen = bnx2x_set_pbd_csum_enc(bp, skb, 3947 &pbd_e2_parsing_data, 3948 xmit_type); 3949 3950 /* turn on 2nd parsing and get a BD */ 3951 bd_prod = 
	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));

		if (xmit_type & XMIT_CSUM_ENC) {
			u16 global_data = 0;

			/* Set PBD in enc checksum offload case */
			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
						      &pbd_e2_parsing_data,
						      xmit_type);

			/* turn on 2nd parsing and get a BD */
			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;

			memset(pbd2, 0, sizeof(*pbd2));

			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
				(skb_inner_network_header(skb) -
				 skb->data) >> 1;

			if (xmit_type & XMIT_GSO_ENC)
				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
							  &global_data,
							  xmit_type);

			pbd2->global_data = cpu_to_le16(global_data);

			/* add additional parse BD indication to start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_PARSE_NBDS, 1);
			/* set encapsulation flag in start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_TUNNEL_EXIST, 1);

			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;

			nbd++;
		} else if (xmit_type & XMIT_CSUM) {
			/* Set PBD in checksum offload case w/o encapsulation */
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
		}

		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
		/* Add the MACs to the parsing BD if this is a VF or if
		 * Tx Switching is enabled.
		 */
		if (IS_VF(bp)) {
			/* override GRE parameters in BD */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);

			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
					      &pbd_e2->data.mac_addr.dst_mid,
					      &pbd_e2->data.mac_addr.dst_lo,
					      eth->h_dest);
		} else {
			if (bp->flags & TX_SWITCHING)
				bnx2x_set_fw_mac_addr(
						&pbd_e2->data.mac_addr.dst_hi,
						&pbd_e2->data.mac_addr.dst_mid,
						&pbd_e2->data.mac_addr.dst_lo,
						eth->h_dest);
#ifdef BNX2X_STOP_ON_ERROR
			/* Enforce security is always set in Stop on Error -
			 * source mac should be present in the parsing BD
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);
#endif
		}

		SET_FLAG(pbd_e2_parsing_data,
			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
	} else {
		u16 global_data = 0;
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

		SET_FLAG(global_data,
			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
		pbd_e1x->global_data |= cpu_to_le16(global_data);
	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED,
	   "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen)) {
			nbd++;
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod);
		}
		if (!CHIP_IS_E1x(bp))
			pbd_e2_parsing_data |=
				(skb_shinfo(skb)->gso_size <<
				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
				 ETH_TX_PARSE_BD_E2_LSO_MSS;
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}
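	/* For illustration: with a typical gso_size of 1448, the E2 branch
	 * above merges (1448 << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT), masked by
	 * ETH_TX_PARSE_BD_E2_LSO_MSS, into pbd_e2_parsing_data - i.e. the
	 * MSS is a bit-field within the parsing-BD word, which is written
	 * out only once below.
	 */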
	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			DP(NETIF_MSG_TX_QUEUED,
			   "Unable to map page - dropping packet...\n");

			/* we need to unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd needs to be properly updated
			 * before the call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;
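	/* A sketch of why the extra BD is counted (assuming the ring page
	 * layout used by this driver, where the last entry of each page is
	 * a next-page pointer rather than a packet BD):
	 *
	 *	| pkt BDs ... | next-page BD | pkt BDs ... |
	 *	                ^ consumed by FW too when the packet
	 *	                  straddles the page boundary
	 */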
	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However, we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in the non-LSO
	 * case, where we care much more about them.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2,
		   pbd_e2->data.mac_addr.dst_hi,
		   pbd_e2->data.mac_addr.dst_mid,
		   pbd_e2->data.mac_addr.dst_lo,
		   pbd_e2->data.mac_addr.src_hi,
		   pbd_e2->data.mac_addr.src_mid,
		   pbd_e2->data.mac_addr.src_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	/* make sure descriptor update is observed by HW */
	wmb();

	DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}

void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
{
	int mfw_vn = BP_FW_MB_IDX(bp);
	u32 tmp;

	/* If the shmem shouldn't affect configuration, reflect an identity
	 * (1:1) mapping
	 */
	if (!IS_MF_BD(bp)) {
		int i;

		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
			c2s_map[i] = i;
		*c2s_default = 0;

		return;
	}

	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
	c2s_map[0] = tmp & 0xff;
	c2s_map[1] = (tmp >> 8) & 0xff;
	c2s_map[2] = (tmp >> 16) & 0xff;
	c2s_map[3] = (tmp >> 24) & 0xff;

	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
	c2s_map[4] = tmp & 0xff;
	c2s_map[5] = (tmp >> 8) & 0xff;
	c2s_map[6] = (tmp >> 16) & 0xff;
	c2s_map[7] = (tmp >> 24) & 0xff;

	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
}
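/* Worked example (hypothetical values, for illustration only): on a PF with
 * 8 ETH queues and max_cos = 3, bnx2x_setup_tc() below ends up calling
 *
 *	netdev_set_tc_queue(dev, 0, 8, 0);
 *	netdev_set_tc_queue(dev, 1, 8, 8);
 *	netdev_set_tc_queue(dev, 2, 8, 16);
 *
 * i.e. every TC spans all ETH queues, and cos selects which bank of
 * transmission queues (cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) backs it.
 */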
/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @dev: net device to configure
 * @num_tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
	int cos, prio, count, offset;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested - reset any existing configuration */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
			  num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare the number of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
		return -EINVAL;
	}

	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		int outer_prio = c2s_map[prio];

		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping priority %d to tc %d\n",
		   outer_prio, bp->prio_to_cos[outer_prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi-class queue disc or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}

int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return bnx2x_setup_tc(dev, mqprio->num_tc);
}
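/* __bnx2x_setup_tc() is the driver's ndo_setup_tc implementation; the wiring
 * lives in the netdev ops table (a sketch, assuming the bnx2x_netdev_ops
 * definition in bnx2x_main.c):
 *
 *	static const struct net_device_ops bnx2x_netdev_ops = {
 *		...
 *		.ndo_setup_tc = __bnx2x_setup_tc,
 *		...
 *	};
 *
 * so an mqprio qdisc configured on the device lands here with
 * type == TC_SETUP_QDISC_MQPRIO.
 */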
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if (IS_MF_STORAGE_ONLY(bp)) {
		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return rc;
}

static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */

	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;
	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;
	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
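/* The shortcuts cached above let the hot path read a status-block index
 * without re-deciding between the E1x and E2 layouts every time, e.g.
 * (illustrative only):
 *
 *	le16_to_cpu(fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
 *
 * instead of dereferencing status_blk.e2_sb or status_blk.e1x_sb directly.
 */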
/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least the number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
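	/* Worked example with hypothetical numbers: if MAX_RX_AVAIL were 8192
	 * and 4 Rx queues are active, rx_ring_size starts at 2048; on an E3
	 * SGMII (1G) port it is cut to 204 and then raised back to at least
	 * MIN_RX_SIZE_TPA (or MIN_RX_SIZE_NONTPA when TPA is disabled), so
	 * the FW minimum always wins over the 1G reduction.
	 */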
	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp)) {
			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						    sizeof(struct host_hc_status_block_e2));
			if (!sb->e2_sb)
				goto alloc_mem_err;
		} else {
			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						     sizeof(struct host_hc_status_block_e1x));
			if (!sb->e1x_sb)
				goto alloc_mem_err;
		}
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough, so in
	 * these cases we disable the queue.
	 * The min size differs for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
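/* For illustration: if the rings above were sized for 4096 BDs but memory
 * pressure left bnx2x_alloc_rx_bds() with, say, only a few dozen, the
 * alloc_mem_err path compares what was achieved against the relevant
 * minimum (MIN_RX_SIZE_TPA vs MIN_RX_SIZE_NONTPA): below it the queue is
 * torn down and -ENOMEM returned, above it the load continues with the
 * shorter ring.
 */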
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non eth FPs next to last eth FP
			 * must be done in that order
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
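/* Worked example of the shrink path above (hypothetical numbers): with 8 ETH
 * queues requested and allocation failing at i == 6, delta == 2, so the ETH
 * fastpaths are compacted to 6 queues and the FCoE fastpath is moved from
 * index 8 down to index 6; num_queues then drops by delta, as logged by the
 * BNX2X_ERR above.
 */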
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is the maximum number of fast
	 * path IGU SBs plus the default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected activated PHY is always the one after swapping (in
	 * case PHY swapping is enabled). So when swapping is enabled, we
	 * need to reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, cannot change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (!bnx2x_mtu_allows_gro(new_mtu))
		dev->features &= ~NETIF_F_GRO_HW;

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return bnx2x_reload_if_running(dev);
}
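/* The dependency rules enforced by bnx2x_fix_features() below, summarized
 * (illustrative, derived from the code itself):
 *
 *	NETIF_F_LRO    - needs NETIF_F_RXCSUM; mutually exclusive with GRO_HW
 *	NETIF_F_GRO_HW - needs NETIF_F_GRO and an MTU accepted by
 *	                 bnx2x_mtu_allows_gro()
 *
 * and, when VFs are active, RXCSUM/LOOPBACK changes that would force a PF
 * reload are reverted to the current settings.
 */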
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require an internal reload of the PF in
		 * bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
		features &= ~NETIF_F_GRO_HW;
	if (features & NETIF_F_GRO_HW)
		features &= ~NETIF_F_LRO;

	return features;
}

int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non-SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* Don't care about GRO changes */
	changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			return rc ? rc : 1;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* We want the information of the dump logged,
	 * but calling bnx2x_panic() would kill all chances of recovery.
	 */
	if (!bp->panic)
#ifndef BNX2X_STOP_ON_ERROR
		bnx2x_panic_dump(bp, false);
#else
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

static int __maybe_unused bnx2x_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	rtnl_unlock();

	return 0;
}
static int __maybe_unused bnx2x_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
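/* Usage sketch (for illustration): contexts that cannot take rtnl_lock defer
 * work through this helper, e.g. the Tx watchdog above does
 *
 *	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 *
 * and the flag is consumed under rtnl by the sp_rtnl worker (assumed to be
 * bnx2x_sp_rtnl_task() in bnx2x_main.c).
 */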