// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define RX_IRQ_NO_PENDING	0
#define RX_IRQ_NO_COALESC	0
#define RX_IRQ_NO_LLI_TIMER	0
#define RX_IRQ_NO_CREDIT	0
#define RX_IRQ_NO_RESEND_TIMER	0
#define HINIC_RX_BUFFER_WRITE	16

#define HINIC_RX_IPV6_PKT	7
#define LRO_PKT_HDR_LEN_IPV4	66
#define LRO_PKT_HDR_LEN_IPV6	86
#define LRO_REPLENISH_THLD	256

#define LRO_PKT_HDR_LEN(cqe) \
	(HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
	 HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)

/**
 * hinic_rxq_clean_stats - Clean the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->pkts = 0;
	rxq_stats->bytes = 0;
	rxq_stats->errors = 0;
	rxq_stats->csum_errors = 0;
	rxq_stats->other_errors = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}

/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
		stats->pkts = rxq_stats->pkts;
		stats->bytes = rxq_stats->bytes;
		stats->errors = rxq_stats->csum_errors +
				rxq_stats->other_errors;
		stats->csum_errors = rxq_stats->csum_errors;
		stats->other_errors = rxq_stats->other_errors;
	} while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
}
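
/*
 * Reader note: the Rx counters are updated from NAPI softirq context
 * while readers such as .ndo_get_stats64 may run concurrently on
 * another CPU.  The u64_stats fetch/retry loop above yields a
 * consistent 64-bit snapshot on 32-bit architectures, where such reads
 * are not atomic; on 64-bit builds it is effectively a no-op.
 */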

/**
 * rxq_stats_init - Initialize the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic_rxq_clean_stats(rxq);
}

static void rx_csum(struct hinic_rxq *rxq, u32 status,
		    struct sk_buff *skb)
{
	struct net_device *netdev = rxq->netdev;
	u32 csum_err;

	csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (!csum_err) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/* packets the hardware did not check, or flagged with
		 * "other" errors, are not counted as checksum errors;
		 * CHECKSUM_NONE makes the stack verify them itself
		 */
		if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
				  HINIC_RX_CSUM_IPSU_OTHER_ERR)))
			rxq->rxq_stats.csum_errors++;
		skb->ip_summed = CHECKSUM_NONE;
	}
}

/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return the allocated skb, NULL on failure
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
				    dma_addr_t *dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct sk_buff *skb;
	dma_addr_t addr;
	int err;

	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
	if (!skb) {
		netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
		return NULL;
	}

	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
			      DMA_FROM_DEVICE);
	err = dma_mapping_error(&pdev->dev, addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
		goto err_rx_map;
	}

	*dma_addr = addr;
	return skb;

err_rx_map:
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
			 DMA_FROM_DEVICE);
}

/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	rx_unmap_skb(rxq, dma_addr);
	dev_kfree_skb_any(skb);
}

/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_sge sge;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u16 prod_idx;
	int i;

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

	/* Limit the allocation chunks */
	if (free_wqebbs > nic_dev->rx_weight)
		free_wqebbs = nic_dev->rx_weight;

	for (i = 0; i < free_wqebbs; i++) {
		skb = rx_alloc_skb(rxq, &dma_addr);
		if (!skb) {
			netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
			goto skb_out;
		}

		hinic_set_sge(&sge, dma_addr, skb->len);

		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
					  &prod_idx);
		if (!rq_wqe) {
			rx_free_skb(rxq, skb, dma_addr);
			goto skb_out;
		}

		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
	}

skb_out:
	if (i) {
		wmb();	/* write all the wqes before updating the PI */

		hinic_rq_update(rxq->rq, prod_idx);
	}

	return i;
}
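
/*
 * Ordering note: the wmb() in rx_alloc_pkts() orders the WQE stores
 * against the producer-index update in hinic_rq_update(), so the
 * device cannot observe a new PI before the matching WQEs are fully
 * written in memory.
 */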

/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_sge sge;
	u16 ci;

	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
	}
}

/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining length of the pkt, excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the rest of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
			     unsigned int left_pkt_len, u16 ci)
{
	struct sk_buff *skb, *curr_skb = head_skb;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int curr_len;
	struct hinic_sge sge;
	int num_wqes = 0;

	while (left_pkt_len > 0) {
		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						&skb, &ci);

		num_wqes++;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
			    left_pkt_len;

		left_pkt_len -= curr_len;

		__skb_put(skb, curr_len);

		if (curr_skb == head_skb)
			skb_shinfo(head_skb)->frag_list = skb;
		else
			curr_skb->next = skb;

		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		head_skb->truesize += skb->truesize;

		curr_skb = skb;
	}

	return num_wqes;
}
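
/*
 * Assembly note: a jumbo frame spans several Rx buffers.  The head skb
 * keeps the first HINIC_RX_BUF_SZ bytes linearly; each following buffer
 * is chained through skb_shinfo(head_skb)->frag_list, and the head's
 * len/data_len/truesize are grown so the stack sees one logical packet.
 */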

static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
			       struct sk_buff *skb)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_buf = nic_dev->lb_test_rx_buf;
	int lb_len = nic_dev->lb_pkt_len;
	int pkt_offset, frag_len, i;
	void *frag_data = NULL;

	if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
		nic_dev->lb_test_rx_idx = 0;
		netif_warn(nic_dev, drv, netdev, "Loopback test warning, received too many test packets\n");
	}

	if (skb->len != nic_dev->lb_pkt_len) {
		netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
		nic_dev->lb_test_rx_idx++;
		return;
	}

	pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
	frag_len = (int)skb_headlen(skb);
	memcpy(lb_buf + pkt_offset, skb->data, frag_len);
	pkt_offset += frag_len;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
		frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
		memcpy(lb_buf + pkt_offset, frag_data, frag_len);
		pkt_offset += frag_len;
	}
	nic_dev->lb_test_rx_idx++;
}

/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
	struct net_device *netdev = rxq->netdev;
	u64 pkt_len = 0, rx_bytes = 0;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_rq_wqe *rq_wqe;
	struct hinic_dev *nic_dev;
	unsigned int free_wqebbs;
	struct hinic_rq_cqe *cqe;
	int num_wqes, pkts = 0;
	struct hinic_sge sge;
	unsigned int status;
	struct sk_buff *skb;
	u32 offload_type;
	u16 ci, num_lro;
	u16 num_wqe = 0;
	u32 vlan_len;
	u16 vid;

	nic_dev = netdev_priv(netdev);

	while (pkts < budget) {
		num_wqes = 0;

		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
					   &ci);
		if (!rq_wqe)
			break;

		/* make sure we read rx_done before packet length */
		dma_rmb();

		cqe = rq->cqe[ci];
		status = be32_to_cpu(cqe->status);
		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		rx_csum(rxq, status, skb);

		prefetch(skb->data);

		pkt_len = sge.len;

		if (pkt_len <= HINIC_RX_BUF_SZ) {
			__skb_put(skb, pkt_len);
		} else {
			__skb_put(skb, HINIC_RX_BUF_SZ);
			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
						     HINIC_RX_BUF_SZ, ci);
		}

		hinic_rq_put_wqe(rq, ci,
				 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

		offload_type = be32_to_cpu(cqe->offload_type);
		vlan_len = be32_to_cpu(cqe->len);
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
			vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		if (unlikely(nic_dev->flags & HINIC_LP_TEST))
			hinic_copy_lp_data(nic_dev, skb);

		skb_record_rx_queue(skb, qp->q_id);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);

		pkts++;
		rx_bytes += pkt_len;

		num_lro = HINIC_GET_RX_NUM_LRO(status);
		if (num_lro) {
			/* count the headers of the coalesced segments so
			 * rx_bytes approximates the bytes on the wire
			 */
			rx_bytes += ((num_lro - 1) *
				     LRO_PKT_HDR_LEN(cqe));

			num_wqe +=
			(u16)(pkt_len >> rxq->rx_buff_shift) +
			((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
		}

		cqe->status = 0;

		/* LRO aggregates consume many Rx buffers; stop polling
		 * early so they can be replenished
		 */
		if (num_wqe >= LRO_REPLENISH_THLD)
			break;
	}

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
	if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
		rx_alloc_pkts(rxq);

	u64_stats_update_begin(&rxq->rxq_stats.syncp);
	rxq->rxq_stats.pkts += pkts;
	rxq->rxq_stats.bytes += rx_bytes;
	u64_stats_update_end(&rxq->rxq_stats.syncp);

	return pkts;
}
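
/*
 * NAPI/MSI-X handshake: rx_irq() masks the queue's vector (PF only) and
 * schedules NAPI; rx_poll() below re-enables the vector only after a
 * poll that did not exhaust its budget, so a busy queue keeps being
 * polled without further interrupts.
 */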

static int rx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq *rq = rxq->rq;
	int pkts;

	pkts = rxq_recv(rxq, budget);
	if (pkts >= budget)
		return budget;

	napi_complete(napi);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_ENABLE);

	return pkts;
}

static void rx_add_napi(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

	netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
	napi_enable(&rxq->napi);
}

static void rx_del_napi(struct hinic_rxq *rxq)
{
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}

static irqreturn_t rx_irq(int irq, void *data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_dev *nic_dev;

	/* Disable the interrupt until napi has completed */
	nic_dev = netdev_priv(rxq->netdev);
	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

	napi_schedule(&rxq->napi);
	return IRQ_HANDLED;
}

static int rx_request_irq(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(rq, struct hinic_qp, rq);

	rx_add_napi(rxq);

	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
			     RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
			     RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
			     RX_IRQ_NO_RESEND_TIMER);

	intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = rq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, rxq->netdev,
			  "Failed to set RX interrupt coalescing attribute\n");
		goto err_req_irq;
	}

	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
	if (err)
		goto err_req_irq;

	/* spread queue interrupts across the online CPUs */
	cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
	err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
	if (err)
		goto err_irq_affinity;

	return 0;

err_irq_affinity:
	free_irq(rq->irq, rxq);
err_req_irq:
	rx_del_napi(rxq);
	return err;
}

static void rx_free_irq(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;

	irq_set_affinity_hint(rq->irq, NULL);
	free_irq(rq->irq, rxq);
	rx_del_napi(rxq);
}

/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
	int err, pkts;

	rxq->netdev = netdev;
	rxq->rq = rq;
	rxq->buf_len = HINIC_RX_BUF_SZ;
	rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);

	rxq_stats_init(rxq);

	rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
				       "%s_rxq%d", netdev->name, qp->q_id);
	if (!rxq->irq_name)
		return -ENOMEM;

	pkts = rx_alloc_pkts(rxq);
	if (!pkts) {
		err = -ENOMEM;
		goto err_rx_pkts;
	}

	err = rx_request_irq(rxq);
	if (err) {
		netdev_err(netdev, "Failed to request Rx irq\n");
		goto err_req_rx_irq;
	}

	return 0;

err_req_rx_irq:
err_rx_pkts:
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
	return err;
}

/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
	struct net_device *netdev = rxq->netdev;

	rx_free_irq(rxq);

	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
}