Lines Matching defs:iwdev
61 struct i40iw_device *iwdev = to_iwdev(ibdev);
66 ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
67 props->fw_ver = i40iw_fw_major_ver(&iwdev->sc_dev) << 32 |
68 i40iw_fw_minor_ver(&iwdev->sc_dev);
69 props->device_cap_flags = iwdev->device_cap_flags;
70 props->vendor_id = iwdev->ldev->pcidev->vendor;
71 props->vendor_part_id = iwdev->ldev->pcidev->device;
72 props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
74 props->max_qp = iwdev->max_qp - iwdev->used_qps;
78 props->max_cq = iwdev->max_cq - iwdev->used_cqs;
79 props->max_cqe = iwdev->max_cqe;
80 props->max_mr = iwdev->max_mr - iwdev->used_mrs;
81 props->max_pd = iwdev->max_pd - iwdev->used_pds;
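
The query_device hits above show two conventions worth keeping in mind: lines 67-68 pack the firmware major version into the upper 32 bits of fw_ver and the minor into the lower 32, and lines 74-81 report each capability net of resources already in use. A minimal user-space sketch of the same arithmetic; the helper names pack_fw_ver and avail are illustrative, not part of the driver (in the driver, the i40iw_fw_*_ver helpers already return u64, which keeps the 32-bit shift defined):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack major/minor as lines 67-68 do: major in bits 63:32, minor in 31:0.
     * With 32-bit inputs the cast is required; shifting a 32-bit value by 32
     * is undefined in C. */
    static uint64_t pack_fw_ver(uint32_t major, uint32_t minor)
    {
            return ((uint64_t)major << 32) | minor;
    }

    /* Lines 74-81: capabilities are reported net of current usage. */
    static uint32_t avail(uint32_t max, uint32_t used)
    {
            return max - used;
    }

    int main(void)
    {
            uint64_t fw_ver = pack_fw_ver(2, 0);

            printf("fw_ver=0x%016llx major=%llu minor=%llu\n",
                   (unsigned long long)fw_ver,
                   (unsigned long long)(fw_ver >> 32),
                   (unsigned long long)(fw_ver & 0xffffffffULL));
            printf("max_qp reported: %u\n", avail(1024, 17));
            return 0;
    }
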
122 struct i40iw_device *iwdev = to_iwdev(ibdev);
135 uresp.max_qps = iwdev->max_qp;
136 uresp.max_pds = iwdev->max_pd;
137 uresp.wq_size = iwdev->max_qp_wr * 2;
140 ucontext->iwdev = iwdev;
176 dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
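
Line 176 comes from the driver's mmap handler: the doorbell page sits at a fixed offset into PCI BAR 0, and the computed physical address is what gets mapped into the user process. A sketch of how such an address is typically handed back through rdma_user_mmap_io() (signature as in recent kernels); the function body below is illustrative, not the driver's exact code:

    /* Map the doorbell page computed at line 176 into user space.
     * rdma_user_mmap_io() takes a pfn, so shift the physical address down. */
    static int i40iw_mmap_sketch(struct ib_ucontext *context,
                                 struct vm_area_struct *vma)
    {
            struct i40iw_ucontext *ucontext = to_ucontext(context);
            u64 dbaddr;

            dbaddr = I40IW_DB_ADDR_OFFSET +
                     pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);

            return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT,
                                     PAGE_SIZE,
                                     pgprot_noncached(vma->vm_page_prot),
                                     NULL);
    }
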
184 * @iwdev: iwarp device
187 static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
196 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
208 cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
211 status = i40iw_handle_cqp_op(iwdev, cqp_request);
216 i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
221 * @iwdev: iwarp device
224 static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
233 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
244 cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
247 status = i40iw_handle_cqp_op(iwdev, cqp_request);
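
The two push-page helpers above (lines 187-216 and 224-247) both drive the Control QP, and the same four-step pattern recurs at lines 1023-1033, 1504-1522, 1674-1708 and 2002-2021: get a request, fill cqp_info, submit, release. A condensed sketch of that lifecycle against the driver's own types (error handling and the op-specific fields trimmed):

    /* The recurring CQP request pattern in this listing. */
    static void cqp_op_sketch(struct i40iw_device *iwdev, bool wait)
    {
            struct i40iw_cqp_request *cqp_request;
            struct cqp_commands_info *cqp_info;

            /* 'wait' selects a blocking request the caller sleeps on
             * (line 196) versus a fire-and-forget one (line 233). */
            cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
            if (!cqp_request)
                    return;

            cqp_info = &cqp_request->info;
            /* ... set cqp_info->cqp_cmd and fill cqp_info->in.u.<op> ... */

            if (i40iw_handle_cqp_op(iwdev, cqp_request))
                    i40iw_pr_err("CQP op failed\n");

            /* Waiting callers drop their reference themselves, as the alloc
             * path does at line 216; async requests are released when the
             * completion fires. */
            if (wait)
                    i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
    }
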
262 struct i40iw_device *iwdev = to_iwdev(pd->device);
263 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
269 if (iwdev->closing)
272 err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
273 iwdev->max_pd, &pd_id, &iwdev->next_pd);
299 i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
311 struct i40iw_device *iwdev = to_iwdev(ibpd->device);
313 i40iw_rem_pdusecount(iwpd, iwdev);
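
The PD paths at lines 272-273 and 299 show the driver's generic ID allocator, which QPs (584-585), CQs (1090-1092) and MRs (1260-1261) reuse: a bitmap of allocated IDs plus a rotating next-index hint. In the driver this is a spinlock-protected scan over a kernel bitmap; below is a simplified, runnable user-space analogue (the names alloc_resource/free_resource are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_RES 64

    static unsigned long long allocated;   /* one bit per resource id */
    static unsigned int next_hint;         /* analogue of iwdev->next_pd etc. */

    static bool alloc_resource(unsigned int max, unsigned int *id,
                               unsigned int *next)
    {
            unsigned int i, probe;

            /* Start scanning at the hint so ids rotate instead of being
             * reused immediately after a free. */
            for (i = 0; i < max; i++) {
                    probe = (*next + i) % max;
                    if (!(allocated & (1ULL << probe))) {
                            allocated |= 1ULL << probe;
                            *id = probe;
                            *next = (probe + 1) % max;
                            return true;
                    }
            }
            return false;   /* pool exhausted */
    }

    static void free_resource(unsigned int id)
    {
            allocated &= ~(1ULL << id);
    }

    int main(void)
    {
            unsigned int id;

            if (alloc_resource(MAX_RES, &id, &next_hint))
                    printf("got id %u\n", id);
            free_resource(id);
            return 0;
    }
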
340 * @iwdev: iwarp device
347 struct i40iw_device *iwdev = iwqp->iwdev;
350 i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
351 i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
353 i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
355 i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
356 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
357 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
383 struct i40iw_device *iwdev = iwqp->iwdev;
404 i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
405 i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
407 i40iw_rem_devusecount(iwdev);
418 static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
440 * @iwdev: iwarp device
444 static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
476 status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
509 struct i40iw_device *iwdev = to_iwdev(ibpd->device);
510 struct i40iw_cqp *iwcqp = &iwdev->cqp;
522 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
532 if (iwdev->closing)
551 init_info.vsi = &iwdev->vsi;
566 iwqp->iwdev = iwdev;
584 err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
585 &qp_num, &iwdev->next_qp);
609 if (iwdev->push_mode)
610 i40iw_alloc_push_page(iwdev, qp);
638 err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
640 err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
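
Lines 638-640 are the fork in QP creation between user and kernel queue memory: a QP created with udata is backed by memory the user registered, while a kernel QP gets DMA queue memory allocated by the driver itself (line 476). Condensed and lightly annotated; the exact surrounding control flow is abridged:

    if (udata)      /* user-space consumer: queues live in user memory */
            err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
    else            /* kernel consumer: driver allocates DMA queue memory */
            err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
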
697 ret = i40iw_handle_cqp_op(iwdev, cqp_request);
707 iwdev->qp_table[qp_num] = iwqp;
709 i40iw_add_devusecount(iwdev);
770 * @iwdev: iwarp device
775 void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
783 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
794 if (!i40iw_handle_cqp_op(iwdev, cqp_request))
807 i40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false);
826 struct i40iw_device *iwdev = iwqp->iwdev;
842 if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
910 iwdev->iw_status &&
945 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
963 i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
1000 * @iwdev: iwarp device
1003 static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
1008 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
1009 i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
1014 * @iwdev: iwarp device
1017 void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
1023 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1033 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1046 struct i40iw_device *iwdev;
1050 iwdev = to_iwdev(ib_cq->device);
1052 i40iw_cq_wq_destroy(iwdev, cq);
1053 cq_free_resources(iwdev, iwcq);
1054 i40iw_rem_devusecount(iwdev);
1069 struct i40iw_device *iwdev = to_iwdev(ibdev);
1074 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1084 if (iwdev->closing)
1087 if (entries > iwdev->max_cqe)
1090 err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
1091 iwdev->max_cq, &cq_num,
1092 &iwdev->next_cq);
1105 if (attr->comp_vector < iwdev->ceqs_count)
1169 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1180 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1200 i40iw_add_devusecount(iwdev);
1204 i40iw_cq_wq_destroy(iwdev, cq);
1206 cq_free_resources(iwdev, iwcq);
1227 * @iwdev: iwarp device
1230 static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
1234 stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1235 i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
1236 i40iw_rem_devusecount(iwdev);
1241 * @iwdev: iwarp device
1243 static u32 i40iw_create_stag(struct i40iw_device *iwdev)
1256 driver_key = random & ~iwdev->mr_stagmask;
1257 next_stag_index = (random & iwdev->mr_stagmask) >> 8;
1258 next_stag_index %= iwdev->max_mr;
1260 ret = i40iw_alloc_resource(iwdev,
1261 iwdev->allocated_mrs, iwdev->max_mr,
1267 i40iw_add_devusecount(iwdev);
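
The STag helpers at lines 1230-1267 treat a 32-bit STag as an index field selected by mr_stagmask plus a driver key in the remaining bits: line 1234 recovers the index with a mask-and-shift, and lines 1256-1258 derive the key and a starting index from a random seed. A runnable demonstration of that bit layout; the mask, seed and max_mr values are hypothetical, and the 8-bit shift is inferred from the literal >> 8 at line 1257:

    #include <inttypes.h>
    #include <stdio.h>

    #define STAG_IDX_SHIFT 8   /* assumed; consistent with >> 8 at line 1257 */

    int main(void)
    {
            uint32_t mr_stagmask = 0x03ffff00;   /* hypothetical index bits */
            uint32_t random = 0x9e3779b9;        /* stand-in random seed */
            uint32_t max_mr = 1024;              /* stand-in for iwdev->max_mr */

            /* Lines 1256-1258: driver key from the bits outside the mask,
             * starting index from the bits inside it, wrapped to max_mr. */
            uint32_t driver_key = random & ~mr_stagmask;
            uint32_t next_stag_index =
                    ((random & mr_stagmask) >> STAG_IDX_SHIFT) % max_mr;

            /* Compose a stag from that index, then recover the index the way
             * i40iw_free_stag() does at line 1234. */
            uint32_t stag = (next_stag_index << STAG_IDX_SHIFT) |
                            (driver_key & 0xff);
            uint32_t stag_idx = (stag & mr_stagmask) >> STAG_IDX_SHIFT;

            printf("driver_key=0x%" PRIx32 " stag=0x%" PRIx32
                   " idx=%" PRIu32 "\n", driver_key, stag, stag_idx);
            return 0;
    }
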
1372 * @iwdev: iwarp device
1376 static int i40iw_setup_pbles(struct i40iw_device *iwdev,
1388 mutex_lock(&iwdev->pbl_mutex);
1389 status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1390 mutex_unlock(&iwdev->pbl_mutex);
1412 * @iwdev: iwarp device
1417 static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
1436 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1441 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1482 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1491 * @iwdev: iwarp device
1494 static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
1504 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1519 cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
1522 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1540 struct i40iw_device *iwdev = to_iwdev(pd->device);
1552 stag = i40iw_create_stag(iwdev);
1570 mutex_lock(&iwdev->pbl_mutex);
1571 status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1572 mutex_unlock(&iwdev->pbl_mutex);
1578 err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
1585 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1587 i40iw_free_stag(iwdev, stag);
1656 * @iwdev: iwarp device
1660 static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
1674 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1705 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
1708 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1733 struct i40iw_device *iwdev = to_iwdev(pd->device);
1751 if (iwdev->closing)
1792 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1802 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1815 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1822 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1828 stag = i40iw_create_stag(iwdev);
1838 err = i40iw_hwreg_mr(iwdev, iwmr, access);
1840 i40iw_free_stag(iwdev, stag);
1856 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1877 struct i40iw_device *iwdev = to_iwdev(pd->device);
1894 stag = i40iw_create_stag(iwdev);
1906 status = i40iw_hwreg_mr(iwdev, iwmr, access);
1908 i40iw_free_stag(iwdev, stag);
1974 struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
1997 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2002 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
2019 cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
2021 status = i40iw_handle_cqp_op(iwdev, cqp_request);
2024 i40iw_rem_pdusecount(iwpd, iwdev);
2025 i40iw_free_stag(iwdev, iwmr->stag);
2027 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2040 u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
2221 struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
2519 struct i40iw_device *iwdev = to_iwdev(dev);
2522 i40iw_fw_major_ver(&iwdev->sc_dev),
2523 i40iw_fw_minor_ver(&iwdev->sc_dev));
2534 struct i40iw_device *iwdev = to_iwdev(ibdev);
2535 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2565 struct i40iw_device *iwdev = to_iwdev(ibdev);
2566 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2567 struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
2594 struct i40iw_device *iwdev = to_iwdev(ibdev);
2597 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
2651 * @iwdev: iwarp device
2653 static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
2656 struct net_device *netdev = iwdev->netdev;
2657 struct pci_dev *pcidev = iwdev->hw.pcidev;
2661 i40iw_pr_err("iwdev == NULL\n");
2664 iwdev->iwibdev = iwibdev;
2665 iwibdev->iwdev = iwdev;
2692 iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
2703 * @iwdev: iwarp device
2705 void i40iw_port_ibevent(struct i40iw_device *iwdev)
2707 struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
2712 event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2723 wait_event_timeout(iwibdev->iwdev->close_wq,
2724 !atomic64_read(&iwibdev->iwdev->use_count),
2731 * @iwdev: iwarp device
2733 int i40iw_register_rdma_device(struct i40iw_device *iwdev)
2738 iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
2739 if (!iwdev->iwibdev)
2741 iwibdev = iwdev->iwibdev;
2743 ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
2747 dma_set_max_seg_size(&iwdev->hw.pcidev->dev, UINT_MAX);
2748 ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", &iwdev->hw.pcidev->dev);
2754 ib_dealloc_device(&iwdev->iwibdev->ibdev);
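
The fragments from lines 2733-2754 reassemble into the registration sequence: build the ib_device, bind it to the netdev, relax the DMA segment limit, then register; on failure the partially built device is freed with ib_dealloc_device(). A condensed sketch of that flow (error paths abridged; not necessarily the exact function body):

    static int register_sketch(struct i40iw_device *iwdev)
    {
            struct i40iw_ib_device *iwibdev;
            int ret;

            iwdev->iwibdev = i40iw_init_rdma_device(iwdev);   /* line 2738 */
            if (!iwdev->iwibdev)
                    return -ENOMEM;
            iwibdev = iwdev->iwibdev;

            ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
            if (ret)
                    goto error;

            dma_set_max_seg_size(&iwdev->hw.pcidev->dev, UINT_MAX);
            ret = ib_register_device(&iwibdev->ibdev, "i40iw%d",
                                     &iwdev->hw.pcidev->dev);
            if (ret)
                    goto error;

            return 0;
    error:
            ib_dealloc_device(&iwdev->iwibdev->ibdev);        /* line 2754 */
            return ret;
    }
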