/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>

#include "i40iw.h"
#include "i40iw_register.h"
#include <net/netevent.h>
#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
#define CLIENT_IW_INTERFACE_VERSION_BUILD 00

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 123
#define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "."		\
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
		 "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32, default=32");

static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp (1 or 2)");
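/*
 * Illustrative example (not part of the driver logic): loading the module
 * with VF RDMA resources distributed evenly and a reduced VF count could
 * look roughly like
 *
 *	modprobe i40iw resource_profile=2 max_rdma_vfs=16
 *
 * With resource_profile=0 (the default), max_rdma_vfs is effectively
 * ignored and VF RDMA support is disabled; see i40iw_setup_init_state().
 */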
MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id, u8 *msg, u16 len);

static struct notifier_block i40iw_inetaddr_notifier = {
	.notifier_call = i40iw_inetaddr_event
};

static struct notifier_block i40iw_inetaddr6_notifier = {
	.notifier_call = i40iw_inet6addr_event
};

static struct notifier_block i40iw_net_notifier = {
	.notifier_call = i40iw_net_event
};

static struct notifier_block i40iw_netdevice_notifier = {
	.notifier_call = i40iw_netdevice_event
};

/**
 * i40iw_find_i40e_handler - find a handler given a client info
 * @ldev: pointer to a client info
 */
static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == ldev->netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_find_netdev - find a handler given a netdev
 * @netdev: pointer to net_device
 */
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
static void i40iw_add_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_add(&hdl->list, &i40iw_handlers);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
}

/**
 * i40iw_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
static int i40iw_del_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return 0;
}

/**
 * i40iw_enable_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	      (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
}
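/*
 * Interrupt handling note: the hard IRQ handlers below only schedule a
 * tasklet; all AEQ/CEQ processing happens in the tasklet (bottom half).
 * When MSI-X vectors are scarce (iwdev->msix_shared), vector 0 services
 * both the AEQ and CEQ 0 via i40iw_dpc(); otherwise each CEQ gets its
 * own vector and runs i40iw_ceq_dpc().
 */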
/**
 * i40iw_dpc - tasklet for aeq and ceq 0
 * @t: pointer to the tasklet_struct of the iwarp device
 */
static void i40iw_dpc(struct tasklet_struct *t)
{
	struct i40iw_device *iwdev = from_tasklet(iwdev, t, dpc_tasklet);

	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}

/**
 * i40iw_ceq_dpc - dpc handler for CEQ
 * @t: pointer to the tasklet_struct of the ceq
 */
static void i40iw_ceq_dpc(struct tasklet_struct *t)
{
	struct i40iw_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
	struct i40iw_device *iwdev = iwceq->iwdev;

	i40iw_process_ceq(iwdev, iwceq);
	i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
}

/**
 * i40iw_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: iwarp device
 */
static irqreturn_t i40iw_irq_handler(int irq, void *data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	tasklet_schedule(&iwdev->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_destroy_cqp - destroy control qp
 * @iwdev: iwarp device
 * @free_hwcqp: true if the hw cqp should be destroyed
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	if (free_hwcqp)
		dev->cqp_ops->cqp_destroy(dev->cqp);

	i40iw_cleanup_pending_cqp_op(iwdev);

	i40iw_free_dma_mem(dev->hw, &cqp->sq);
	kfree(cqp->scratch_array);
	iwdev->cqp.scratch_array = NULL;

	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}

/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}

/**
 * i40iw_destroy_aeq - destroy aeq
 * @iwdev: iwarp device
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;

	if (!iwdev->msix_shared)
		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
	if (iwdev->reset)
		goto exit;

	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
		status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
	if (status)
		i40iw_pr_err("destroy aeq failed %d\n", status);

exit:
	i40iw_free_dma_mem(dev->hw, &aeq->mem);
}
/**
 * i40iw_destroy_ceq - destroy ceq
 * @iwdev: iwarp device
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
			      struct i40iw_ceq *iwceq)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	if (iwdev->reset)
		goto exit;

	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		i40iw_pr_err("ceq destroy command failed %d\n", status);
		goto exit;
	}

	status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		i40iw_pr_err("ceq destroy completion failed %d\n", status);
exit:
	i40iw_free_dma_mem(dev->hw, &iwceq->mem);
}

/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq);
		iwceq++;
		i++;
	}

	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq);
	}

	iwdev->sc_dev.ceq_valid = false;
}

/**
 * i40iw_destroy_ccq - destroy control cq
 * @iwdev: iwarp device
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ccq *ccq = &iwdev->ccq;
	enum i40iw_status_code status = 0;

	if (!iwdev->reset)
		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
	if (status)
		i40iw_pr_err("ccq destroy failed %d\n", status);
	i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
}

/* types of hmc objects */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};

/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: pointer to the HMC configuration information
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
 */
static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
					 enum i40iw_hmc_rsrc_type obj_type,
					 struct i40iw_hmc_info *hmc_info,
					 bool is_pf,
					 bool reset)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = is_pf;
	if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
		i40iw_pr_err("del obj of type %d failed\n", obj_type);
}
/**
 * i40iw_del_hmc_objects - remove all device hmc objects
 * @dev: hardware control device structure
 * @hmc_info: hmc_info to free
 * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
 *	   by PF on behalf of VF
 * @reset: true if called before reset
 */
static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
				  struct i40iw_hmc_info *hmc_info,
				  bool is_pf,
				  bool reset)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
		i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
}

/**
 * i40iw_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 */
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;

	if (iwceq->irq != irq)
		i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}

/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		info.add_sd_cnt = 0;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));

	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}
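/*
 * Illustrative example for i40iw_obj_aligned_mem() below (not executed):
 * with obj_next.va at, say, 0x1010 and mask = 0x3f (64-byte alignment),
 * ALIGN(0x1010, 0x40) yields newva = 0x1040, so extra = 0x30 and both
 * memptr->va and memptr->pa are advanced 0x30 bytes past obj_next.
 */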
/**
 * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
 * @iwdev: iwarp device
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
					     struct i40iw_dma_mem *memptr,
					     u32 size,
					     u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

	va = (unsigned long)iwdev->obj_next.va;
	newva = va;
	if (mask)
		newva = ALIGN(va, (mask + 1));
	extra = newva - va;
	memptr->va = (u8 *)va + extra;
	memptr->pa = iwdev->obj_next.pa + extra;
	memptr->size = size;
	if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
		return I40IW_ERR_NO_MEMORY;

	iwdev->obj_next.va = memptr->va + size;
	iwdev->obj_next.pa = memptr->pa + size;
	return 0;
}

/**
 * i40iw_create_cqp - create control qp
 * @iwdev: iwarp device
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
	struct i40iw_dma_mem mem;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp_init_info cqp_init_info;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	u16 maj_err, min_err;
	int i;

	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return I40IW_ERR_NO_MEMORY;
	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		kfree(cqp->cqp_requests);
		return I40IW_ERR_NO_MEMORY;
	}
	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	memset(&cqp_init_info, 0, sizeof(cqp_init_info));
	status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
					(sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
					I40IW_CQP_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
				       I40IW_HOST_CTX_ALIGNMENT_MASK);
	if (status)
		goto exit;
	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = iwdev->resource_profile;
	cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
	cqp_init_info.scratch_array = cqp->scratch_array;
	status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		i40iw_pr_err("cqp init status %d\n", status);
		goto exit;
	}
	status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
			     status, maj_err, min_err);
		goto exit;
	}
	spin_lock_init(&cqp->req_lock);
	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
	/* init the waitq of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	return 0;
exit:
	/* clean up the created resources */
	i40iw_destroy_cqp(iwdev, false);
	return status;
}
/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;
	struct i40iw_ccq_init_info info;
	struct i40iw_ccq *ccq = &iwdev->ccq;

	memset(&info, 0, sizeof(info));
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
					ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
				       I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
	return status;
}
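/*
 * MSI-X assignment note: vector 0 always carries the AEQ. When the vector
 * count does not exceed the number of online CPUs (msix_shared), CEQ 0
 * piggybacks on vector 0 and the remaining CEQs use vectors 1..N;
 * otherwise CEQ i uses vector i + 1. i40iw_configure_ceq_vector() below
 * wires up the handler and cpu affinity accordingly.
 */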
/**
 * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
 * @iwdev: iwarp device
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 * @msix_vec: interrupt vector information
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
							 struct i40iw_ceq *iwceq,
							 u32 ceq_id,
							 struct i40iw_msix_vector *msix_vec)
{
	enum i40iw_status_code status;

	if (iwdev->msix_shared && !ceq_id) {
		tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc);
		status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
	} else {
		tasklet_setup(&iwceq->dpc_tasklet, i40iw_ceq_dpc);
		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
	}

	cpumask_clear(&msix_vec->mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
	irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);

	if (status) {
		i40iw_pr_err("ceq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}
	msix_vec->ceq_id = ceq_id;

	return 0;
}

/**
 * i40iw_create_ceq - create completion event queue
 * @iwdev: iwarp device
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
					       struct i40iw_ceq *iwceq,
					       u32 ceq_id)
{
	enum i40iw_status_code status;
	struct i40iw_ceq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u64 scratch;

	memset(&info, 0, sizeof(info));
	info.ceq_id = ceq_id;
	iwceq->iwdev = iwdev;
	iwceq->mem.size = sizeof(struct i40iw_ceqe) *
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
					I40IW_CEQ_ALIGNMENT);
	if (status)
		goto exit;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;

	info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
	status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
	if (!status)
		status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);

exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &iwceq->mem);
	return status;
}

void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	ldev->ops->request_reset(ldev, iwdev->client, 1);
}

/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}

		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
exit:
	if (status && !iwdev->ceqs_count) {
		kfree(iwdev->ceqlist);
		iwdev->ceqlist = NULL;
		return status;
	}
	iwdev->sc_dev.ceq_valid = true;
	return 0;
}

/**
 * i40iw_configure_aeq_vector - set up the msix vector for aeq
 * @iwdev: iwarp device
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
{
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
	u32 ret = 0;

	if (!iwdev->msix_shared) {
		tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc);
		ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
	}
	if (ret) {
		i40iw_pr_err("aeq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}

	return 0;
}
/**
 * i40iw_create_aeq - create async event queue
 * @iwdev: iwarp device
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	struct i40iw_aeq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	u64 scratch = 0;
	u32 aeq_size;

	aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	memset(&info, 0, sizeof(info));
	aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
	status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
					I40IW_AEQ_ALIGNMENT);
	if (status)
		goto exit;

	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto exit;
	status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
	if (!status)
		status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &aeq->mem);
	return status;
}

/**
 * i40iw_setup_aeq - set up the device aeq
 * @iwdev: iwarp device
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	status = i40iw_create_aeq(iwdev);
	if (status)
		return status;

	status = i40iw_configure_aeq_vector(iwdev);
	if (status) {
		i40iw_destroy_aeq(iwdev);
		return status;
	}

	if (!iwdev->msix_shared)
		i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
	return 0;
}

/**
 * i40iw_initialize_ilq - create iwarp local queue for cm
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
	info.cq_id = 1;
	info.qp_id = 0;
	info.count = 1;
	info.pd_id = 1;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 1024;
	info.tx_buf_cnt = 16384;
	info.receive = i40iw_receive_ilq;
	info.xmit_complete = i40iw_free_sqbuf;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ilq create fail\n");
	return status;
}

/**
 * i40iw_initialize_ieq - create iwarp exception queue
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
	info.cq_id = 2;
	info.qp_id = iwdev->vsi.exception_lan_queue;
	info.count = 1;
	info.pd_id = 2;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
	info.tx_buf_cnt = 4096;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ieq create fail\n");
	return status;
}

/**
 * i40iw_reinitialize_ieq - destroy and re-create ieq
 * @dev: hardware control device structure
 */
void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
	if (i40iw_initialize_ieq(iwdev)) {
		iwdev->reset = true;
		i40iw_request_reset(iwdev);
	}
}
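/*
 * HMC background note: the Host Memory Cache backs hardware objects (QPs,
 * CQs, MRs, etc., see iw_hmc_obj_types[]) with host memory.
 * i40iw_hmc_setup() below first sizes the function's private memory space
 * (FPM) and then instantiates the objects via i40iw_create_hmc_objs().
 */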
/**
 * i40iw_hmc_setup - create hmc objects for the device
 * @iwdev: iwarp device
 *
 * Set up the device private memory space for the number and size of
 * the hmc objects and create the objects
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;

	iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
	status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
	if (status)
		goto exit;
	status = i40iw_create_hmc_objs(iwdev, true);
	if (status)
		goto exit;
	iwdev->init_state = HMC_OBJS_CREATED;
exit:
	return status;
}

/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}

/**
 * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
 * @iwdev: iwarp device
 * @idx: the index of the mac ip address to delete
 */
static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return;
	}
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
	cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
}

/**
 * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
 * @iwdev: iwarp device
 * @mac_addr: pointer to mac address
 * @idx: the index of the mac ip address to add
 */
static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
							 u8 *mac_addr,
							 u8 idx)
{
	struct i40iw_local_mac_ipaddr_entry_info *info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	cqp_info = &cqp_request->info;

	cqp_info->post_sq = 1;
	info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
	ether_addr_copy(info->mac_addr, mac_addr);
	info->entry_idx = idx;
	cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
	return status;
}
/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	/* increment refcount, because we need the cqp request ret value */
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}

/**
 * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
 * @iwdev: iwarp device
 * @macaddr: pointer to mac address
 *
 * Allocate a mac ip address entry and add it to the hw table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
							 u8 *macaddr)
{
	enum i40iw_status_code status;

	status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
	if (!status) {
		status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
						    (u8)iwdev->mac_ip_table_idx);
		if (status)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}

/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}
/**
 * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
{
	struct net_device *dev;
	struct in_device *idev;
	u32 ip_addr;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		     (dev == iwdev->netdev)) && (READ_ONCE(dev->flags) & IFF_UP)) {
			const struct in_ifaddr *ifa;

			idev = __in_dev_get_rcu(dev);
			if (!idev)
				continue;
			in_dev_for_each_ifa_rcu(ifa, idev) {
				i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
					    "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
					    rdma_vlan_dev_vlan_id(dev), dev->dev_addr);

				ip_addr = ntohl(ifa->ifa_address);
				i40iw_manage_arp_cache(iwdev,
						       dev->dev_addr,
						       &ip_addr,
						       true,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * i40iw_add_mac_ip - add mac and ip addresses
 * @iwdev: iwarp device
 *
 * Create and add a mac ip address entry to the hw table and
 * ipv4/ipv6 addresses to the arp cache
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
{
	struct net_device *netdev = iwdev->netdev;
	enum i40iw_status_code status;

	status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
	if (status)
		return status;
	i40iw_add_ipv4_addr(iwdev);
	i40iw_add_ipv6_addr(iwdev);
	return 0;
}

/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		msleep(1000);
		retrycount++;
	} while (retrycount < 14);
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}
/**
 * i40iw_initialize_dev - initialize device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_device_init_info info;
	struct i40iw_vsi_init_info vsi_info;
	struct i40iw_dma_mem mem;
	struct i40iw_l2params l2params;
	u32 size;
	struct i40iw_vsi_stats_info stats_info;
	u16 last_qset = I40IW_NO_QSET;
	u16 qset;
	u32 i;

	memset(&l2params, 0, sizeof(l2params));
	memset(&info, 0, sizeof(info));
	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
	       (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!iwdev->hmc_info_mem)
		return I40IW_ERR_NO_MEMORY;

	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
	dev->hmc_info = &iwdev->hw.hmc;
	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
				       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;
	info.hmc_fn_id = ldev->fid;
	info.is_pf = (ldev->ftype) ? false : true;
	info.bar0 = ldev->hw_addr;
	info.hw = &iwdev->hw;
	info.debug_mask = debug;
	l2params.mtu =
		(ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
		qset = ldev->params.qos.prio_qos[i].qs_handle;
		l2params.qs_handle_list[i] = qset;
		if (last_qset == I40IW_NO_QSET)
			last_qset = qset;
		else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
			iwdev->dcb = true;
	}
	i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
	info.vchnl_send = i40iw_virtchnl_send;
	status = i40iw_device_init(&iwdev->sc_dev, &info);

	if (status)
		goto error;
	memset(&vsi_info, 0, sizeof(vsi_info));
	vsi_info.dev = &iwdev->sc_dev;
	vsi_info.back_vsi = (void *)iwdev;
	vsi_info.params = &l2params;
	vsi_info.exception_lan_queue = 1;
	i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);

	if (dev->is_pf) {
		memset(&stats_info, 0, sizeof(stats_info));
		stats_info.fcn_id = ldev->fid;
		stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
		if (!stats_info.pestat) {
			status = I40IW_ERR_NO_MEMORY;
			goto error;
		}
		stats_info.stats_initialize = true;
		i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
	}
	return status;
error:
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
	return status;
}

/**
 * i40iw_register_notifiers - register tcp ip notifiers
 */
static void i40iw_register_notifiers(void)
{
	register_inetaddr_notifier(&i40iw_inetaddr_notifier);
	register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	register_netevent_notifier(&i40iw_net_notifier);
	register_netdevice_notifier(&i40iw_netdevice_notifier);
}

/**
 * i40iw_unregister_notifiers - unregister tcp ip notifiers
 */
static void i40iw_unregister_notifiers(void)
{
	unregister_netevent_notifier(&i40iw_net_notifier);
	unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
	unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}
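/*
 * Layout note for the allocation below: the msix table and the qvlist
 * handed back to the lan driver share a single buffer. iw_msixtbl[]
 * occupies the front and struct i40e_qvlist_info (whose qv_info[] array
 * absorbs the remaining space, one entry per vector) immediately
 * follows it, at &iw_msixtbl[msix_count].
 */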
/**
 * i40iw_save_msix_info - copy msix vector information to iwarp device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate iwdev msix table and copy the ldev msix info to the table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	struct i40e_qvlist_info *iw_qvlist;
	struct i40e_qv_info *iw_qvinfo;
	u32 ceq_idx;
	u32 i;
	u32 size;

	if (!ldev->msix_count) {
		i40iw_pr_err("No MSI-X vectors\n");
		return I40IW_ERR_CONFIG;
	}

	iwdev->msix_count = ldev->msix_count;

	size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
	size += sizeof(struct i40e_qvlist_info);
	/* qvlist_info already holds one qv_info entry */
	size += sizeof(struct i40e_qv_info) * (iwdev->msix_count - 1);
	iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
	if (!iwdev->iw_msixtbl)
		return I40IW_ERR_NO_MEMORY;

	iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
	iw_qvlist = iwdev->iw_qvlist;
	iw_qvinfo = iw_qvlist->qv_info;
	iw_qvlist->num_vectors = iwdev->msix_count;
	if (iwdev->msix_count <= num_online_cpus())
		iwdev->msix_shared = true;
	for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
		iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
		iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
		iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
		if (i == 0) {
			iw_qvinfo->aeq_idx = 0;
			if (iwdev->msix_shared)
				iw_qvinfo->ceq_idx = ceq_idx++;
			else
				iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
		} else {
			iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
			iw_qvinfo->ceq_idx = ceq_idx++;
		}
		iw_qvinfo->itr_idx = 3;
		iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
	}
	return 0;
}
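/*
 * Teardown note: iwdev->init_state records how far i40iw_open() got.
 * The switch in i40iw_deinit_device() below intentionally falls through
 * from the last state reached down to INITIAL_STATE, so resources are
 * released in exactly the reverse order of their creation.
 */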
/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);

	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		fallthrough;
	case IP_ADDR_REGISTERED:
		if (!iwdev->reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		fallthrough;
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		fallthrough;
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev);
		fallthrough;
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev);
		fallthrough;
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
		fallthrough;
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
		fallthrough;
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev);
		fallthrough;
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
		fallthrough;
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, true);
		fallthrough;
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}

/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	memcpy(&hdl->ldev, ldev, sizeof(*ldev));

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		return status;
	iwdev->hw.pcidev = ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}

/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}
/**
 * i40iw_open - client interface operation open for iwarp/uda device
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Called by the lan driver during the processing of client register
 * Create device resources, set up queues, pble and hmc objects and
 * register the device with the ib verbs interface
 * Return 0 if successful, otherwise return error
 */
static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
{
	struct i40iw_device *iwdev;
	struct i40iw_sc_dev *dev;
	enum i40iw_status_code status;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_netdev(ldev->netdev);
	if (hdl)
		return 0;

	hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
	if (!hdl)
		return -ENOMEM;
	iwdev = &hdl->device;
	iwdev->hdl = hdl;
	dev = &iwdev->sc_dev;
	if (i40iw_setup_cm_core(iwdev)) {
		kfree(iwdev->hdl);
		return -ENOMEM;
	}

	dev->back_dev = (void *)iwdev;
	iwdev->ldev = &hdl->ldev;
	iwdev->client = client;
	mutex_init(&iwdev->pbl_mutex);
	i40iw_add_handler(hdl);

	do {
		status = i40iw_setup_init_state(hdl, ldev, client);
		if (status)
			break;
		iwdev->init_state = INITIAL_STATE;
		if (dev->is_pf)
			i40iw_wait_pe_ready(dev->hw);
		status = i40iw_create_cqp(iwdev);
		if (status)
			break;
		iwdev->init_state = CQP_CREATED;
		status = i40iw_hmc_setup(iwdev);
		if (status)
			break;
		status = i40iw_create_ccq(iwdev);
		if (status)
			break;
		iwdev->init_state = CCQ_CREATED;
		status = i40iw_initialize_ilq(iwdev);
		if (status)
			break;
		iwdev->init_state = ILQ_CREATED;
		status = i40iw_initialize_ieq(iwdev);
		if (status)
			break;
		iwdev->init_state = IEQ_CREATED;
		status = i40iw_setup_aeq(iwdev);
		if (status)
			break;
		iwdev->init_state = AEQ_CREATED;
		status = i40iw_setup_ceqs(iwdev, ldev);
		if (status)
			break;

		status = i40iw_get_rdma_features(dev);
		if (status)
			dev->feature_info[I40IW_FEATURE_FW_INFO] =
				I40IW_FW_VER_DEFAULT;

		iwdev->init_state = CEQ_CREATED;
		status = i40iw_initialize_hw_resources(iwdev);
		if (status)
			break;
		i40iw_get_used_rsrc(iwdev);
		dev->ccq_ops->ccq_arm(dev->ccq);
		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
		if (status)
			break;
		iwdev->init_state = PBLE_CHUNK_MEM;
		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
		status = i40iw_add_mac_ip(iwdev);
		if (status)
			break;
		iwdev->init_state = IP_ADDR_REGISTERED;
		if (i40iw_register_rdma_device(iwdev)) {
			i40iw_pr_err("register rdma device fail\n");
			break;
		}

		iwdev->init_state = RDMA_DEV_REGISTERED;
		iwdev->iw_status = 1;
		i40iw_port_ibevent(iwdev);
		iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
		if (!iwdev->param_wq)
			break;
		i40iw_pr_info("i40iw_open completed\n");
		return 0;
	} while (0);

	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
	i40iw_deinit_device(iwdev);
	return -ERESTART;
}
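/*
 * L2 parameter updates from the lan driver are applied asynchronously:
 * i40iw_l2param_change() below snapshots the new qs handles and mtu into
 * an l2params_work item and queues it on the ordered param_wq, where
 * i40iw_l2params_worker() applies it. The params_busy counter causes
 * updates that arrive while a previous one is still pending to be dropped.
 */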
/**
 * i40iw_l2params_worker - worker for l2 params change
 * @work: work pointer for l2 params
 */
static void i40iw_l2params_worker(struct work_struct *work)
{
	struct l2params_work *dwork =
	    container_of(work, struct l2params_work, work);
	struct i40iw_device *iwdev = dwork->iwdev;

	i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
	atomic_dec(&iwdev->params_busy);
	kfree(dwork);
}

/**
 * i40iw_l2param_change - handle qs handles for qos and mss change
 * @ldev: lan device information
 * @client: client for parameter change
 * @params: new parameters from L2
 */
static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
				 struct i40e_params *params)
{
	struct i40iw_handler *hdl;
	struct i40iw_l2params *l2params;
	struct l2params_work *work;
	struct i40iw_device *iwdev;
	int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;

	if (atomic_read(&iwdev->params_busy))
		return;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return;

	atomic_inc(&iwdev->params_busy);

	work->iwdev = iwdev;
	l2params = &work->l2params;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
		l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;

	l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;

	INIT_WORK(&work->work, i40iw_l2params_worker);
	queue_work(iwdev->param_wq, &work->work);
}

/**
 * i40iw_close - client interface operation close for iwarp/uda device
 * @ldev: lan device information
 * @client: client to close
 * @reset: true if called before reset
 *
 * Called by the lan driver during the processing of client unregister
 * Destroy and clean up the driver resources
 */
static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
{
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;
	iwdev->closing = true;

	if (reset)
		iwdev->reset = true;

	i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
	destroy_workqueue(iwdev->virtchnl_wq);
	i40iw_deinit_device(iwdev);
}

/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
				  sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}

/**
 * i40iw_close - client interface operation close for iwarp/uda device
 * @ldev: lan device information
 * @client: client to close
 * @reset: true if called before reset
 *
 * Called by the lan driver during the processing of client unregister
 * Destroy and clean up the driver resources
 */
static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
{
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;
	iwdev->closing = true;

	if (reset)
		iwdev->reset = true;

	i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
	destroy_workqueue(iwdev->virtchnl_wq);
	i40iw_deinit_device(iwdev);
}

/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
				  sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}

/**
 * i40iw_vf_enable - enable a number of VFs
 * @ldev: lan device information
 * @client: client interface instance
 * @num_vfs: number of VFs for the PF
 *
 * Called when the number of VFs changes
 */
static void i40iw_vf_enable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 num_vfs)
{
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
		hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
	else
		hdl->device.max_enabled_vfs = num_vfs;
}

/**
 * i40iw_vf_capable - check if VF capable
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Return 1 if a VF slot is available or if VF is already RDMA enabled
 * Return 0 otherwise
 */
static int i40iw_vf_capable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	unsigned int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return 0;

	dev = &hdl->device.sc_dev;

	for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
			return 1;
	}

	return 0;
}

/**
 * i40iw_virtchnl_receive - receive a message through the virtual channel
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id associated with the message
 * @msg: message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel receive operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static int i40iw_virtchnl_receive(struct i40e_info *ldev,
				  struct i40e_client *client,
				  u32 vf_id,
				  u8 *msg,
				  u16 len)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_device *iwdev;
	int ret_code = I40IW_NOT_SUPPORTED;

	if (!len || !msg)
		return I40IW_ERR_PARAM;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return I40IW_ERR_PARAM;

	dev = &hdl->device.sc_dev;
	iwdev = dev->back_dev;

	if (dev->vchnl_if.vchnl_recv) {
		ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
		if (!dev->is_pf) {
			atomic_dec(&iwdev->vchnl_msgs);
			wake_up(&iwdev->vchnl_waitq);
		}
	}
	return ret_code;
}

/**
 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device
 *
 * Wait until the virtual channel is clear
 * before sending the next message
 *
 * Returns false if error
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	wait_queue_entry_t wait;

	iwdev = dev->back_dev;

	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */

	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);

	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;

	remove_wait_queue(&dev->vf_reqs, &wait);

	return dev->vchnl_up;
}
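
/*
 * i40iw_vf_clear_to_send() above and i40iw_virtchnl_send() below are the
 * two halves of the PF/VF message path: a sender is expected to wait for
 * outstanding messages to drain (vchnl_msgs reaching zero) before handing
 * the next buffer to the LAN driver's virtchnl_send operation.  A timeout
 * while waiting marks dev->vchnl_up false so the caller can treat the
 * channel as down.
 */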

/**
 * i40iw_virtchnl_send - send a message through the virtual channel
 * @dev: iwarp device
 * @vf_id: virtual function id associated with the message
 * @msg: virtual channel message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel send operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id,
						  u8 *msg,
						  u16 len)
{
	struct i40iw_device *iwdev;
	struct i40e_info *ldev;

	if (!dev || !dev->back_dev)
		return I40IW_ERR_BAD_PTR;

	iwdev = dev->back_dev;
	ldev = iwdev->ldev;

	if (ldev && ldev->ops && ldev->ops->virtchnl_send)
		return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
	return I40IW_ERR_BAD_PTR;
}

/* client interface functions */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};

/**
 * i40iw_init_module - driver initialization function
 *
 * First function to call when the driver is loaded
 * Register the driver as i40e client and port mapper client
 */
static int __init i40iw_init_module(void)
{
	int ret;

	memset(&i40iw_client, 0, sizeof(i40iw_client));
	i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
	i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
	i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
	i40iw_client.ops = &i40e_ops;
	memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
	i40iw_client.type = I40E_CLIENT_IWARP;
	spin_lock_init(&i40iw_handler_lock);
	ret = i40e_register_client(&i40iw_client);
	if (ret)
		return ret;
	/* register the notifiers only after client registration succeeds,
	 * so a failed module load does not leave them installed
	 */
	i40iw_register_notifiers();

	return 0;
}

/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	i40iw_unregister_notifiers();
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);