/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include "i40iw.h"

/**
 * i40iw_arp_table - manage arp table
 * @iwdev: iwarp device
 * @ip_addr: ip address for device
 * @ipv4: flag indicating if ip_addr is an ipv4 address
 * @mac_addr: mac address ptr
 * @action: modify, delete or add
 */
int i40iw_arp_table(struct i40iw_device *iwdev,
		    u32 *ip_addr,
		    bool ipv4,
		    u8 *mac_addr,
		    u32 action)
{
	int arp_index;
	int err;
	u32 ip[4];

	if (ipv4) {
		memset(ip, 0, sizeof(ip));
		ip[0] = *ip_addr;
	} else {
		memcpy(ip, ip_addr, sizeof(ip));
	}

	for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
		if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
			break;
	switch (action) {
	case I40IW_ARP_ADD:
		if (arp_index != iwdev->arp_table_size)
			return -1;

		arp_index = 0;
		err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
					   iwdev->arp_table_size,
					   (u32 *)&arp_index,
					   &iwdev->next_arp_index);
		if (err)
			return err;

		memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
		ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
		break;
	case I40IW_ARP_RESOLVE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		break;
	case I40IW_ARP_DELETE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		memset(iwdev->arp_table[arp_index].ip_addr, 0,
		       sizeof(iwdev->arp_table[arp_index].ip_addr));
		eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
		i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
		break;
	default:
		return -1;
	}
	return arp_index;
}
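/*
 * Usage sketch (assumption, modeled on callers such as
 * i40iw_manage_arp_cache()): resolve first and add only on a miss. A
 * negative return means the lookup or allocation failed; otherwise the
 * arp table index is returned.
 *
 *	arp_index = i40iw_arp_table(iwdev, ip, true, mac, I40IW_ARP_RESOLVE);
 *	if (arp_index < 0)
 *		arp_index = i40iw_arp_table(iwdev, ip, true, mac, I40IW_ARP_ADD);
 */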
/**
 * i40iw_wr32 - write 32 bits to hw register
 * @hw: hardware information including registers
 * @reg: register offset
 * @value: value to write to register
 */
inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
{
	writel(value, hw->hw_addr + reg);
}

/**
 * i40iw_rd32 - read a 32 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
{
	return readl(hw->hw_addr + reg);
}

/**
 * i40iw_inetaddr_event - system notifier for ipv4 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int i40iw_inetaddr_event(struct notifier_block *notifier,
			 unsigned long event,
			 void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *event_netdev = ifa->ifa_dev->dev;
	struct net_device *netdev;
	struct net_device *upper_dev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr;
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	upper_dev = netdev_master_upper_dev_get(netdev);
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	if (upper_dev) {
		/* netdev is enslaved (e.g. to a bond); use the master's address */
		struct in_device *in;

		rcu_read_lock();
		in = __in_dev_get_rcu(upper_dev);

		local_ipaddr = 0;
		if (in) {
			struct in_ifaddr *ifa;

			ifa = rcu_dereference(in->ifa_list);
			if (ifa)
				local_ipaddr = ntohl(ifa->ifa_address);
		}

		rcu_read_unlock();
	} else {
		local_ipaddr = ntohl(ifa->ifa_address);
	}
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		fallthrough;
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:

		/* Just skip if no need to handle ARP cache */
		if (!local_ipaddr)
			break;

		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       &local_ipaddr,
				       true,
				       action);
		i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
				action == I40IW_ARP_ADD);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
/**
 * i40iw_inet6addr_event - system notifier for ipv6 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: interface address
 */
int i40iw_inet6addr_event(struct notifier_block *notifier,
			  unsigned long event,
			  void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *event_netdev = ifa->idev->dev;
	struct net_device *netdev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;
	u32 local_ipaddr6[4];
	u32 action = I40IW_ARP_ADD;

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
	switch (event) {
	case NETDEV_DOWN:
		action = I40IW_ARP_DELETE;
		fallthrough;
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		i40iw_manage_arp_cache(iwdev,
				       netdev->dev_addr,
				       local_ipaddr6,
				       false,
				       action);
		i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
				action == I40IW_ARP_ADD);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_net_event - system notifier for netevents
 * @notifier: not used
 * @event: event for notifier
 * @ptr: neighbor
 */
int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
{
	struct neighbour *neigh = ptr;
	struct i40iw_device *iwdev;
	struct i40iw_handler *iwhdl;
	__be32 *p;
	u32 local_ipaddr[4];

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		iwhdl = i40iw_find_netdev(neigh->dev);
		if (!iwhdl)
			return NOTIFY_DONE;
		iwdev = &iwhdl->device;
		if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
			return NOTIFY_DONE;
		p = (__be32 *)neigh->primary_key;
		i40iw_copy_ip_ntohl(local_ipaddr, p);
		if (neigh->nud_state & NUD_VALID) {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_ADD);
		} else {
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       local_ipaddr,
					       false,
					       I40IW_ARP_DELETE);
		}
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/**
 * i40iw_netdevice_event - system notifier for netdev events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: netdev
 */
int i40iw_netdevice_event(struct notifier_block *notifier,
			  unsigned long event,
			  void *ptr)
{
	struct net_device *event_netdev;
	struct net_device *netdev;
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;

	event_netdev = netdev_notifier_info_to_dev(ptr);

	hdl = i40iw_find_netdev(event_netdev);
	if (!hdl)
		return NOTIFY_DONE;

	iwdev = &hdl->device;
	if (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing)
		return NOTIFY_DONE;

	netdev = iwdev->ldev->netdev;
	if (netdev != event_netdev)
		return NOTIFY_DONE;

	iwdev->iw_status = 1;

	switch (event) {
	case NETDEV_DOWN:
		iwdev->iw_status = 0;
		fallthrough;
	case NETDEV_UP:
		i40iw_port_ibevent(iwdev);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
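/*
 * The handlers above are hooked up as notifier blocks elsewhere in the
 * driver (not in this file); a minimal sketch of the wiring, with the
 * block name assumed:
 *
 *	static struct notifier_block i40iw_inetaddr_notifier = {
 *		.notifier_call = i40iw_inetaddr_event
 *	};
 *	register_inetaddr_notifier(&i40iw_inetaddr_notifier);
 */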
/**
 * i40iw_get_cqp_request - get cqp struct
 * @cqp: device cqp ptr
 * @wait: cqp to be used in wait mode
 */
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
{
	struct i40iw_cqp_request *cqp_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cqp->req_lock, flags);
	if (!list_empty(&cqp->cqp_avail_reqs)) {
		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
					 struct i40iw_cqp_request, list);
		list_del_init(&cqp_request->list);
	}
	spin_unlock_irqrestore(&cqp->req_lock, flags);
	if (!cqp_request) {
		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = true;
			INIT_LIST_HEAD(&cqp_request->list);
			init_waitqueue_head(&cqp_request->waitq);
		}
	}
	if (!cqp_request) {
		i40iw_pr_err("CQP Request Fail: No Memory");
		return NULL;
	}

	if (wait) {
		atomic_set(&cqp_request->refcount, 2);
		cqp_request->waiting = true;
	} else {
		atomic_set(&cqp_request->refcount, 1);
	}
	return cqp_request;
}
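/*
 * Note: a waiting request starts with a refcount of 2 because two parties
 * drop a reference: the waiter in i40iw_wait_event() and the completion
 * path once the CQE is processed. The request therefore stays valid for
 * whichever side finishes last (see i40iw_put_cqp_request()).
 */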
/**
 * i40iw_free_cqp_request - free cqp request
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
	unsigned long flags;

	if (cqp_request->dynamic) {
		kfree(cqp_request);
	} else {
		cqp_request->request_done = false;
		cqp_request->callback_fcn = NULL;
		cqp_request->waiting = false;

		spin_lock_irqsave(&cqp->req_lock, flags);
		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
		spin_unlock_irqrestore(&cqp->req_lock, flags);
	}
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_put_cqp_request - dec ref count and free if 0
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
			   struct i40iw_cqp_request *cqp_request)
{
	if (atomic_dec_and_test(&cqp_request->refcount))
		i40iw_free_cqp_request(cqp, cqp_request);
}

/**
 * i40iw_free_pending_cqp_request - free pending cqp request objs
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
					   struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);

	if (cqp_request->waiting) {
		cqp_request->compl_info.error = true;
		cqp_request->request_done = true;
		wake_up(&cqp_request->waitq);
	}
	i40iw_put_cqp_request(cqp, cqp_request);
	wait_event_timeout(iwdev->close_wq,
			   !atomic_read(&cqp_request->refcount),
			   1000);
}

/**
 * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
 * @iwdev: iwarp device
 */
void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request = NULL;
	struct cqp_commands_info *pcmdinfo = NULL;
	u32 i, pending_work, wqe_idx;

	pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
	wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
	for (i = 0; i < pending_work; i++) {
		cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
		wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
	}

	while (!list_empty(&dev->cqp_cmd_head)) {
		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
		cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
		if (cqp_request)
			i40iw_free_pending_cqp_request(cqp, cqp_request);
	}
}

/**
 * i40iw_wait_event - wait for completion
 * @iwdev: iwarp device
 * @cqp_request: cqp request to wait
 */
static int i40iw_wait_event(struct i40iw_device *iwdev,
			    struct i40iw_cqp_request *cqp_request)
{
	struct cqp_commands_info *info = &cqp_request->info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_timeout cqp_timeout;
	bool cqp_error = false;
	int err_code = 0;

	memset(&cqp_timeout, 0, sizeof(cqp_timeout));
	cqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS];
	do {
		if (wait_event_timeout(cqp_request->waitq,
				       cqp_request->request_done, CQP_COMPL_WAIT_TIME))
			break;

		i40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev);

		if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
			continue;

		i40iw_pr_err("error cqp command 0x%x timed out", info->cqp_cmd);
		err_code = -ETIME;
		if (!iwdev->reset) {
			iwdev->reset = true;
			i40iw_request_reset(iwdev);
		}
		goto done;
	} while (1);
	cqp_error = cqp_request->compl_info.error;
	if (cqp_error) {
		i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
			     info->cqp_cmd, cqp_request->compl_info.maj_err_code,
			     cqp_request->compl_info.min_err_code);
		err_code = -EPROTO;
		goto done;
	}
done:
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return err_code;
}

/**
 * i40iw_handle_cqp_op - process cqp command
 * @iwdev: iwarp device
 * @cqp_request: cqp request to process
 */
enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
					   struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;
	struct cqp_commands_info *info = &cqp_request->info;
	int err_code = 0;

	if (iwdev->reset) {
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return I40IW_ERR_CQP_COMPL_ERROR;
	}

	status = i40iw_process_cqp_cmd(dev, info);
	if (status) {
		i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return status;
	}
	if (cqp_request->waiting)
		err_code = i40iw_wait_event(iwdev, cqp_request);
	if (err_code)
		status = I40IW_ERR_CQP_COMPL_ERROR;
	return status;
}
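/*
 * Typical posting pattern used by the command helpers later in this file
 * (sketch only; OP_FOO and the union member "foo" are placeholders):
 *
 *	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
 *	if (!cqp_request)
 *		return I40IW_ERR_NO_MEMORY;
 *	cqp_info = &cqp_request->info;
 *	cqp_info->cqp_cmd = OP_FOO;
 *	cqp_info->post_sq = 1;
 *	cqp_info->in.u.foo.scratch = (uintptr_t)cqp_request;
 *	status = i40iw_handle_cqp_op(iwdev, cqp_request);
 */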
/**
 * i40iw_add_devusecount - add dev refcount
 * @iwdev: dev for refcount
 */
void i40iw_add_devusecount(struct i40iw_device *iwdev)
{
	atomic64_inc(&iwdev->use_count);
}

/**
 * i40iw_rem_devusecount - decrement refcount for dev
 * @iwdev: device
 */
void i40iw_rem_devusecount(struct i40iw_device *iwdev)
{
	if (!atomic64_dec_and_test(&iwdev->use_count))
		return;
	wake_up(&iwdev->close_wq);
}

/**
 * i40iw_add_pdusecount - add pd refcount
 * @iwpd: pd for refcount
 */
void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
{
	atomic_inc(&iwpd->usecount);
}

/**
 * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
 * @iwpd: pd for refcount
 * @iwdev: iwarp device
 */
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
{
	if (!atomic_dec_and_test(&iwpd->usecount))
		return;
	i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
}

/**
 * i40iw_qp_add_ref - add refcount for qp
 * @ibqp: iwarp qp
 */
void i40iw_qp_add_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;

	refcount_inc(&iwqp->refcount);
}

/**
 * i40iw_qp_rem_ref - rem refcount for qp and free if 0
 * @ibqp: iwarp qp
 */
void i40iw_qp_rem_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp;
	struct i40iw_device *iwdev;
	u32 qp_num;
	unsigned long flags;

	iwqp = to_iwqp(ibqp);
	iwdev = iwqp->iwdev;
	spin_lock_irqsave(&iwdev->qptable_lock, flags);
	if (!refcount_dec_and_test(&iwqp->refcount)) {
		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
		return;
	}

	qp_num = iwqp->ibqp.qp_num;
	iwdev->qp_table[qp_num] = NULL;
	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
	complete(&iwqp->free_qp);
}

/**
 * i40iw_get_qp - get qp address
 * @device: iwarp device
 * @qpn: qp number
 */
struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
{
	struct i40iw_device *iwdev = to_iwdev(device);

	if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
		return NULL;

	return &iwdev->qp_table[qpn]->ibqp;
}

/**
 * i40iw_debug_buf - print debug msg and buffer if mask set
 * @dev: hardware control device structure
 * @mask: mask to compare if to print debug buffer
 * @desc: description of buffer being printed
 * @buf: points to buffer addr
 * @size: size of buffer to print (in bytes)
 */
void i40iw_debug_buf(struct i40iw_sc_dev *dev,
		     enum i40iw_debug_flag mask,
		     char *desc,
		     u64 *buf,
		     u32 size)
{
	u32 i;

	if (!(dev->debug_mask & mask))
		return;
	i40iw_debug(dev, mask, "%s\n", desc);
	i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
		    (unsigned long long)virt_to_phys(buf));

	for (i = 0; i < size; i += 8)
		i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
}

/**
 * i40iw_get_hw_addr - return hw addr
 * @par: points to shared dev
 */
u8 __iomem *i40iw_get_hw_addr(void *par)
{
	struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;

	return dev->hw->hw_addr;
}

/**
 * i40iw_remove_head - return head entry and remove from list
 * @list: list for entry
 */
void *i40iw_remove_head(struct list_head *list)
{
	struct list_head *entry;

	if (list_empty(list))
		return NULL;

	entry = (void *)list->next;
	list_del(entry);
	return (void *)entry;
}

/**
 * i40iw_allocate_dma_mem - Memory alloc helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
					      struct i40iw_dma_mem *mem,
					      u64 size,
					      u32 alignment)
{
	struct pci_dev *pcidev = hw->pcidev;

	if (!mem)
		return I40IW_ERR_PARAM;
	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (!mem->va)
		return I40IW_ERR_NO_MEMORY;
	return 0;
}

/**
 * i40iw_free_dma_mem - Memory free helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 */
void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
{
	struct pci_dev *pcidev = hw->pcidev;

	if (!mem || !mem->va)
		return;

	dma_free_coherent(&pcidev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	mem->va = NULL;
}
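/*
 * Usage sketch for the DMA helpers above (sizes illustrative): mem.va is
 * the CPU address and mem.pa the bus address handed to hardware.
 *
 *	struct i40iw_dma_mem mem;
 *
 *	if (i40iw_allocate_dma_mem(hw, &mem, 4096, 256))
 *		return I40IW_ERR_NO_MEMORY;
 *	... use mem.va / mem.pa ...
 *	i40iw_free_dma_mem(hw, &mem);
 */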
/**
 * i40iw_allocate_virt_mem - virtual memory alloc helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 */
enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
					       struct i40iw_virt_mem *mem,
					       u32 size)
{
	if (!mem)
		return I40IW_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40IW_ERR_NO_MEMORY;
}

/**
 * i40iw_free_virt_mem - virtual memory free helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 */
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
					   struct i40iw_virt_mem *mem)
{
	if (!mem)
		return I40IW_ERR_PARAM;
	/*
	 * mem->va points to the parent of mem, so both mem and mem->va
	 * can not be touched once mem->va is freed
	 */
	kfree(mem->va);
	return 0;
}

/**
 * i40iw_cqp_sds_cmd - create cqp command for sd
 * @dev: hardware control device structure
 * @sdinfo: information for sd cqp
 */
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
					 struct i40iw_update_sds_info *sdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
	       sizeof(cqp_info->in.u.update_pe_sds.info));
	cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
	cqp_info->post_sq = 1;
	cqp_info->in.u.update_pe_sds.dev = dev;
	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Update SD's fail");
	return status;
}

/**
 * i40iw_qp_suspend_resume - cqp command for suspend/resume
 * @dev: hardware control device structure
 * @qp: hardware control qp
 * @suspend: flag if suspend or resume
 */
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp_request *cqp_request;
	struct i40iw_sc_cqp *cqp = dev->cqp;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
	cqp_info->in.u.suspend_resume.cqp = cqp;
	cqp_info->in.u.suspend_resume.qp = qp;
	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
}

/**
 * i40iw_term_modify_qp - modify qp for term message
 * @qp: hardware control qp
 * @next_state: qp's next state
 * @term: terminate code
 * @term_len: length
 */
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
}
/**
 * i40iw_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 */
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
{
	struct i40iw_qp *iwqp;
	u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
	u8 hte = 0;
	bool first_time;
	unsigned long flags;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	spin_lock_irqsave(&iwqp->lock, flags);
	if (iwqp->hte_added) {
		iwqp->hte_added = 0;
		hte = 1;
	}
	first_time = !(qp->term_flags & I40IW_TERM_DONE);
	qp->term_flags |= I40IW_TERM_DONE;
	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (first_time) {
		if (!timeout_occurred)
			i40iw_terminate_del_timer(qp);
		else
			next_iwarp_state = I40IW_QP_STATE_CLOSING;

		i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
		i40iw_cm_disconn(iwqp);
	}
}

/**
 * i40iw_terminate_timeout - timeout happened
 * @t: timer_list pointer from which the iwarp qp is derived
 */
static void i40iw_terminate_timeout(struct timer_list *t)
{
	struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;

	i40iw_terminate_done(qp, 1);
	i40iw_qp_rem_ref(&iwqp->ibqp);
}

/**
 * i40iw_terminate_start_timer - start terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_qp_add_ref(&iwqp->ibqp);
	timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
	iwqp->terminate_timer.expires = jiffies + HZ;
	add_timer(&iwqp->terminate_timer);
}

/**
 * i40iw_terminate_del_timer - delete terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	if (del_timer(&iwqp->terminate_timer))
		i40iw_qp_rem_ref(&iwqp->ibqp);
}
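/*
 * The QP reference taken in i40iw_terminate_start_timer() is owned by the
 * pending timer: it is dropped either in i40iw_terminate_timeout() when
 * the timer fires, or in i40iw_terminate_del_timer() when the timer is
 * cancelled before firing, so the qp cannot go away while a terminate
 * timer is armed.
 */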
/**
 * i40iw_cqp_generic_worker - generic worker for cqp
 * @work: work pointer
 */
static void i40iw_cqp_generic_worker(struct work_struct *work)
{
	struct i40iw_virtchnl_work_info *work_info =
	    &((struct virtchnl_work *)work)->work_info;

	if (work_info->worker_vf_dev)
		work_info->callback_fcn(work_info->worker_vf_dev);
}

/**
 * i40iw_cqp_spawn_worker - spawn worker thread
 * @dev: device struct pointer
 * @work_info: work request info
 * @iw_vf_idx: virtual function index
 */
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
			    struct i40iw_virtchnl_work_info *work_info,
			    u32 iw_vf_idx)
{
	struct virtchnl_work *work;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	work = &iwdev->virtchnl_w[iw_vf_idx];
	memcpy(&work->work_info, work_info, sizeof(*work_info));
	INIT_WORK(&work->work, i40iw_cqp_generic_worker);
	queue_work(iwdev->virtchnl_wq, &work->work);
}

/**
 * i40iw_cqp_manage_hmc_fcn_worker - worker to handle hmc function callback
 * @work: work pointer for hmc info
 */
static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
{
	struct i40iw_cqp_request *cqp_request =
	    ((struct virtchnl_work *)work)->cqp_request;
	struct i40iw_ccq_cqe_info ccq_cqe_info;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
	    &cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;

	ccq_cqe_info.cqp = NULL;
	ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
	ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
	ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
	ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
	ccq_cqe_info.scratch = 0;
	ccq_cqe_info.error = cqp_request->compl_info.error;
	hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
				 hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
 * @cqp_request: cqp request info struct for hmc fun
 * @unused: unused param of callback
 */
static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
					      u32 unused)
{
	struct virtchnl_work *work;
	struct i40iw_hmc_fcn_info *hmcfcninfo =
	    &cqp_request->info.in.u.manage_hmc_pm.info;
	struct i40iw_device *iwdev =
	    (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;

	if (hmcfcninfo && hmcfcninfo->callback_fcn) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
		atomic_inc(&cqp_request->refcount);
		work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
		work->cqp_request = cqp_request;
		INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
		queue_work(iwdev->virtchnl_wq, &work->work);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
	} else {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
	}
}

/**
 * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
 * @dev: hardware control device structure
 * @hmcfcninfo: info for hmc
 */
enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
						    struct i40iw_hmc_fcn_info *hmcfcninfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
	cqp_request->param = hmcfcninfo;
	memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
	       sizeof(*hmcfcninfo));
	cqp_info->in.u.manage_hmc_pm.dev = dev;
	cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage HMC fail");
	return status;
}
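/*
 * Flow note (inferred from the code above): the CQP completion invokes
 * i40iw_cqp_manage_hmc_fcn_callback(), which takes an extra reference on
 * the request and defers the user callback to the virtchnl workqueue;
 * i40iw_cqp_manage_hmc_fcn_worker() then runs it outside of completion
 * context and drops that reference.
 */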
/**
 * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
 * @dev: hardware control device structure
 * @values_mem: buffer for fpm
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
						      struct i40iw_dma_mem *values_mem,
						      u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Query FPM fail");
	return status;
}

/**
 * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
 * @dev: hardware control device structure
 * @values_mem: buffer with fpm values
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
						       struct i40iw_dma_mem *values_mem,
						       u8 hmc_fn_id)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	cqp_request->param = NULL;
	cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
	cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
	cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
	cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
	cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Commit FPM fail");
	return status;
}

/**
 * i40iw_vf_wait_vchnl_resp - wait for channel msg
 * @dev: function's device struct
 */
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev = dev->back_dev;
	int timeout_ret;

	i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
		    __func__, __LINE__, dev, iwdev);

	atomic_set(&iwdev->vchnl_msgs, 2);
	timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
					 (atomic_read(&iwdev->vchnl_msgs) == 1),
					 I40IW_VCHNL_EVENT_TIMEOUT);
	atomic_dec(&iwdev->vchnl_msgs);
	if (!timeout_ret) {
		i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
		atomic_set(&iwdev->vchnl_msgs, 0);
		dev->vchnl_up = false;
		return I40IW_ERR_TIMEOUT;
	}
	wake_up(&dev->vf_reqs);
	return 0;
}
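/*
 * Handshake note: vchnl_msgs acts as a latch. The waiter sets it to 2,
 * the response path (outside this file) is presumably what decrements it
 * to 1 and wakes vchnl_waitq, and the waiter's own decrement brings it to
 * 0; on timeout it is forced to 0 and the channel is marked down.
 */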
/**
 * i40iw_cqp_cq_create_cmd - create a cq for the cqp
 * @dev: device pointer
 * @cq: pointer to created cq
 */
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Create CQ fail");

	return status;
}

/**
 * i40iw_cqp_qp_create_cmd - create a qp for the cqp
 * @dev: device pointer
 * @qp: pointer to created qp
 */
enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
					       struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_create_qp_info *qp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP create fail");
	return status;
}

/**
 * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
 * @dev: device pointer
 * @cq: pointer to cq
 */
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_cq_wq_destroy(iwdev, cq);
}

/**
 * i40iw_cqp_qp_destroy_cmd - destroy the cqp
 * @dev: device pointer
 * @qp: pointer to qp
 */
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));

	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP QP_DESTROY fail");
}
/**
 * i40iw_ieq_mpa_crc_ae - generate AE for crc error
 * @dev: hardware control device structure
 * @qp: hardware control qp
 */
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_gen_ae_info info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
	info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
	info.ae_source = I40IW_AE_SOURCE_RQ;
	i40iw_gen_ae(iwdev, qp, &info, false);
}

/**
 * i40iw_init_hash_desc - initialize hash for crc calculation
 * @desc: address of ptr to be set to the allocated shash descriptor
 */
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
{
	struct crypto_shash *tfm;
	struct shash_desc *tdesc;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return I40IW_ERR_MPA_CRC;

	tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!tdesc) {
		crypto_free_shash(tfm);
		return I40IW_ERR_MPA_CRC;
	}
	tdesc->tfm = tfm;
	*desc = tdesc;

	return 0;
}

/**
 * i40iw_free_hash_desc - free hash desc
 * @desc: to be freed
 */
void i40iw_free_hash_desc(struct shash_desc *desc)
{
	if (desc) {
		crypto_free_shash(desc->tfm);
		kfree(desc);
	}
}

/**
 * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
 * @dev: hardware control device structure
 * @mem: buffer ptr for fpm to be allocated
 * @return: memory allocation status
 */
enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
						 struct i40iw_dma_mem *mem)
{
	enum i40iw_status_code status;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	return status;
}

/**
 * i40iw_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: desc for hash
 * @addr: address of buffer for crc
 * @length: length of buffer
 * @value: value to be compared
 */
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
					      void *addr,
					      u32 length,
					      u32 value)
{
	u32 crc = 0;
	int ret;
	enum i40iw_status_code ret_code = 0;

	crypto_shash_init(desc);
	ret = crypto_shash_update(desc, addr, length);
	if (!ret)
		crypto_shash_final(desc, (u8 *)&crc);
	if (crc != value) {
		i40iw_pr_err("mpa crc check fail\n");
		ret_code = I40IW_ERR_MPA_CRC;
	}
	return ret_code;
}
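/*
 * Note: the shash allocated by i40iw_init_hash_desc() is "crc32c", so the
 * 4-byte digest above is the CRC32C of the MPA frame. The return codes of
 * crypto_shash_init()/crypto_shash_final() are not checked; the compare
 * against the received CRC is the only pass/fail signal.
 */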
/**
 * i40iw_ieq_get_qp - get qp based on quad in puda buffer
 * @dev: hardware control device structure
 * @buf: receive puda buffer on exception q
 */
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
				     struct i40iw_puda_buf *buf)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_node *cm_node;
	u32 loc_addr[4], rem_addr[4];
	u16 loc_port, rem_port;
	struct ipv6hdr *ip6h;
	struct iphdr *iph = (struct iphdr *)buf->iph;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

	if (iph->version == 4) {
		memset(loc_addr, 0, sizeof(loc_addr));
		loc_addr[0] = ntohl(iph->daddr);
		memset(rem_addr, 0, sizeof(rem_addr));
		rem_addr[0] = ntohl(iph->saddr);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
	}
	loc_port = ntohs(tcph->dest);
	rem_port = ntohs(tcph->source);

	cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
				  loc_addr, false, true);
	if (!cm_node)
		return NULL;
	iwqp = cm_node->iwqp;
	return &iwqp->sc_qp;
}

/**
 * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @length: length of buffer
 * @seqnum: seq number for tcp
 */
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
{
	struct tcphdr *tcph;
	struct iphdr *iph;
	u16 iphlen;
	u16 packetsize;
	u8 *addr = (u8 *)buf->mem.va;

	iphlen = (buf->ipv4) ? 20 : 40;
	iph = (struct iphdr *)(addr + buf->maclen);
	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
	packetsize = length + buf->tcphlen + iphlen;

	iph->tot_len = htons(packetsize);
	tcph->seq = htonl(seqnum);
}

/**
 * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: to get information
 * @buf: puda buffer
 */
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
						 struct i40iw_puda_buf *buf)
{
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	u16 iphlen;
	u16 pkt_len;
	u8 *mem = (u8 *)buf->mem.va;
	struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;

	if (ethh->h_proto == htons(ETH_P_8021Q)) {
		info->vlan_valid = true;
		buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
	}
	buf->maclen = (info->vlan_valid) ? 18 : 14;
	iphlen = (info->l3proto) ? 40 : 20;
	buf->ipv4 = (info->l3proto) ? false : true;
	buf->iph = mem + buf->maclen;
	iph = (struct iphdr *)buf->iph;

	buf->tcph = buf->iph + iphlen;
	tcph = (struct tcphdr *)buf->tcph;

	if (buf->ipv4) {
		pkt_len = ntohs(iph->tot_len);
	} else {
		ip6h = (struct ipv6hdr *)buf->iph;
		pkt_len = ntohs(ip6h->payload_len) + iphlen;
	}

	buf->totallen = pkt_len + buf->maclen;

	if (info->payload_len < buf->totallen) {
		i40iw_pr_err("payload_len = 0x%x totallen expected 0x%x\n",
			     info->payload_len, buf->totallen);
		return I40IW_ERR_INVALID_SIZE;
	}

	buf->tcphlen = (tcph->doff) << 2;
	buf->datalen = pkt_len - iphlen - buf->tcphlen;
	buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
	buf->seqnum = ntohl(tcph->seq);
	return 0;
}
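/*
 * Resulting buffer layout for an untagged IPv4 frame (sketch):
 *
 *	mem.va: [ eth 14 | ip 20 | tcp tcphlen | payload datalen ]
 *
 * buf->iph points at mem + maclen, buf->data at the payload (or NULL if
 * there is none), and buf->hdrlen = maclen + iphlen + tcphlen.
 */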
/**
 * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
 * @t: timer_list pointer from which the vsi pestat is derived
 */
static void i40iw_hw_stats_timeout(struct timer_list *t)
{
	struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
							 stats_timer);
	struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;
	struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
	struct i40iw_vsi_pestat *vf_devstat = NULL;
	u16 iw_vf_idx;
	unsigned long flags;

	/* PF */
	i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);

	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
		spin_lock_irqsave(&pf_devstat->lock, flags);
		if (pf_dev->vf_dev[iw_vf_idx]) {
			if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
				vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
				i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
			}
		}
		spin_unlock_irqrestore(&pf_devstat->lock, flags);
	}

	mod_timer(&pf_devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_start_timer - Start periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);
	mod_timer(&devstat->stats_timer,
		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_stop_timer - Delete periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
{
	struct i40iw_vsi_pestat *devstat = vsi->pestat;

	del_timer_sync(&devstat->stats_timer);
}