Lines matching refs:iwdev (each entry is the source line number followed by the matching line)
15 struct irdma_device *iwdev = to_iwdev(ibdev);
16 struct irdma_pci_f *rf = iwdev->rf;
17 struct pci_dev *pcidev = iwdev->rf->pcidev;
25 iwdev->netdev->dev_addr);
75 struct irdma_device *iwdev = to_iwdev(ibdev);
76 struct net_device *netdev = iwdev->netdev;
106 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
128 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
129 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
187 ibdev_dbg(&ucontext->iwdev->ibdev,
194 ibdev_dbg(&ucontext->iwdev->ibdev,
199 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
217 ibdev_dbg(&ucontext->iwdev->ibdev,
233 struct irdma_device *iwdev = iwqp->iwdev;
237 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
249 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
252 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
254 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
259 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
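The matches at lines 237-259 trace the control-path (CQP) request lifecycle that recurs throughout this listing (push pages, MW/STAG ops, MR registration, multicast). A minimal hedged sketch of that pattern, using the push-page op as the example and assuming the cqp_request->info layout from the matches above; it is a condensation, not a verbatim copy of the driver:

    /* hypothetical helper condensing the pattern at lines 237-259 */
    static int example_cqp_push_page_op(struct irdma_device *iwdev)
    {
        struct irdma_cqp_request *cqp_request;
        struct cqp_cmds_info *cqp_info;
        int status;

        /* take a referenced request from the CQP pool; true = may sleep */
        cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
        if (!cqp_request)
            return -ENOMEM;

        cqp_info = &cqp_request->info;
        /* fill the op-specific input; here the push-page management op */
        cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;

        /* post the command to the control QP and wait for its completion */
        status = irdma_handle_cqp_op(iwdev->rf, cqp_request);

        /* drop the reference taken at allocation, success or not */
        irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);

        return status;
    }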
276 struct irdma_device *iwdev = to_iwdev(ibdev);
280 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
292 ucontext->iwdev = iwdev;
304 uresp.max_qps = iwdev->rf->max_qp;
305 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
306 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
312 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
350 ibdev_err(&iwdev->ibdev,
376 struct irdma_device *iwdev = to_iwdev(pd->device);
377 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
378 struct irdma_pci_f *rf = iwdev->rf;
423 struct irdma_device *iwdev = to_iwdev(ibpd->device);
425 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
484 struct irdma_device *iwdev = ucontext->iwdev;
488 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
520 struct irdma_device *iwdev = iwqp->iwdev;
541 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
551 * @iwdev: irdma device
555 static void irdma_setup_virt_qp(struct irdma_device *iwdev,
577 * @iwdev: iwarp device
583 struct irdma_device *iwdev,
598 ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n");
613 ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
650 irdma_setup_virt_qp(iwdev, iwqp, info);
657 * @iwdev: iwarp device
662 static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
703 mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
733 struct irdma_pci_f *rf = iwqp->iwdev->rf;
763 struct irdma_device *iwdev = iwqp->iwdev;
764 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
769 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
770 udp_info->cwnd = iwdev->roce_cwnd;
776 ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
784 roce_info->ack_credits = iwdev->roce_ackcreds;
803 struct irdma_device *iwdev = iwqp->iwdev;
804 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
808 ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
831 struct irdma_device *iwdev)
833 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
846 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
881 struct irdma_device *iwdev = to_iwdev(ibpd->device);
882 struct irdma_pci_f *rf = iwdev->rf;
893 err_code = irdma_validate_qp_attrs(init_attr, iwdev);
901 init_info.vsi = &iwdev->vsi;
913 iwqp->iwdev = iwdev;
947 if (!rdma_protocol_roce(&iwdev->ibdev, 1))
955 err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
960 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
964 ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
968 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
989 ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
997 if (rdma_protocol_roce(&iwdev->ibdev, 1))
1012 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1013 if (dev->ws_add(&iwdev->vsi, 0)) {
1028 if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
1039 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
1163 if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
1167 ibdev_warn(&iwqp->iwdev->ibdev,
1190 struct irdma_device *iwdev = iwqp->iwdev;
1191 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1285 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
1329 irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
1335 ibdev_err(&iwdev->ibdev,
1348 ibdev_err(&iwdev->ibdev,
1369 ibdev_dbg(&iwdev->ibdev,
1378 ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1420 if (iwdev->push_mode && udata &&
1478 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1519 ibdev_dbg(&iwdev->ibdev,
1546 struct irdma_device *iwdev = iwqp->iwdev;
1547 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1573 ibdev_dbg(&iwdev->ibdev,
1594 if (iwdev->push_mode && udata &&
1671 iwdev->iw_status &&
1707 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1731 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1735 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1744 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1766 ibdev_dbg(&iwdev->ibdev,
1818 * @iwdev: irdma device
1822 struct irdma_device *iwdev,
1835 queue_work(iwdev->cleanup_wq, &cq_buf->work);
1849 struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1861 irdma_process_resize_list(iwcq, iwdev, NULL);
1867 irdma_cq_wq_destroy(iwdev->rf, cq);
1872 irdma_cq_free_rsrc(iwdev->rf, iwcq);
1896 struct irdma_device *iwdev;
1902 iwdev = to_iwdev(ibcq->device);
1903 rf = iwdev->rf;
2047 struct irdma_device *iwdev = to_iwdev(ibdev);
2048 struct irdma_pci_f *rf = iwdev->rf;
2089 info.vsi = &iwdev->vsi;
2192 ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2221 ibdev_dbg(&iwdev->ibdev,
2264 * @iwdev: irdma device
2267 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2271 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2272 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2277 * @iwdev: irdma device
2279 static u32 irdma_create_stag(struct irdma_device *iwdev)
2292 driver_key = random & ~iwdev->rf->mr_stagmask;
2293 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2294 next_stag_index %= iwdev->rf->max_mr;
2296 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2297 iwdev->rf->max_mr, &stag_index,
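Lines 2271-2272 and 2292-2297 together show how a STAG is split: the bits outside mr_stagmask act as a driver key, while the masked bits carry the index allocated from allocated_mrs. A hedged sketch of that split; the entropy source and the final STAG assembly are assumptions, only the mask/shift arithmetic is taken from the matches above:

    static u32 example_stag_index_from_random(struct irdma_device *iwdev)
    {
        u32 random = get_random_u32();   /* assumed entropy source */
        u32 driver_key = random & ~iwdev->rf->mr_stagmask;
        u32 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;

        /* keep the starting search hint inside the MR resource range */
        next_stag_index %= iwdev->rf->max_mr;

        /*
         * irdma_alloc_rsrc() then scans allocated_mrs from this hint;
         * freeing later reverses the split (see lines 2271-2272):
         *   stag_idx = (stag & mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
         *   irdma_free_rsrc(rf, rf->allocated_mrs, stag_idx);
         */
        (void)driver_key;
        return next_stag_index;
    }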
2452 * @iwdev: irdma device
2457 static int irdma_handle_q_mem(struct irdma_device *iwdev,
2472 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2520 ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2525 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2534 * @iwdev: irdma device
2537 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
2545 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2561 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2563 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2564 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2576 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2581 stag = irdma_create_stag(iwdev);
2588 err_code = irdma_hw_alloc_mw(iwdev, iwmr);
2590 irdma_free_stag(iwdev, stag);
2606 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2611 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2623 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2625 irdma_handle_cqp_op(iwdev->rf, cqp_request);
2626 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2627 irdma_free_stag(iwdev, iwmr->stag);
2634 * @iwdev: irdma device
2637 static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
2647 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2662 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2664 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2665 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2679 struct irdma_device *iwdev = to_iwdev(pd->device);
2690 stag = irdma_create_stag(iwdev);
2708 err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
2713 err_code = irdma_hw_alloc_stag(iwdev, iwmr);
2721 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2723 irdma_free_stag(iwdev, stag);
2778 * @iwdev: irdma device
2782 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2794 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2828 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2830 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2831 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2838 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2846 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2854 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2859 stag = irdma_create_stag(iwdev);
2868 err = irdma_hwreg_mr(iwdev, iwmr, access);
2875 irdma_free_stag(iwdev, stag);
2879 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2888 struct irdma_device *iwdev = to_iwdev(pd->device);
2906 iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K;
2930 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2939 if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
2949 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
2967 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2976 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2983 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
3011 struct irdma_device *iwdev = to_iwdev(pd->device);
3017 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3026 ibdev_dbg(&iwdev->ibdev,
3078 struct irdma_device *iwdev = to_iwdev(pd->device);
3083 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3089 ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
3125 struct irdma_device *iwdev = to_iwdev(pd->device);
3141 stag = irdma_create_stag(iwdev);
3155 ret = irdma_hwreg_mr(iwdev, iwmr, access);
3157 irdma_free_stag(iwdev, stag);
3224 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3244 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3259 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3261 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3262 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3266 irdma_free_stag(iwdev, iwmr->stag);
3269 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
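The dereg path matched at lines 3244-3269 keeps a fixed teardown order: invalidate the STAG in hardware through a CQP dealloc command first, then return the STAG index, then release any PBLE backing. A hypothetical condensation of that order; the struct types are assumptions and the CQP request handling is the same pattern sketched earlier:

    static void example_dereg_mr_teardown(struct irdma_device *iwdev,
                                          struct irdma_mr *iwmr,
                                          struct irdma_pble_alloc *palloc)
    {
        /* 1. HW side first: CQP dealloc_stag invalidates the STAG
         *    (alloc/handle/put of the cqp_request elided, see above) */

        /* 2. return the STAG index to the allocated_mrs bitmap */
        irdma_free_stag(iwdev, iwmr->stag);

        /* 3. finally release the PBLE backing, if one was allocated */
        irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
    }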
3297 dev = &iwqp->iwdev->rf->sc_dev;
3422 ibdev_dbg(&iwqp->iwdev->ibdev,
3439 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3474 ibdev_dbg(&iwqp->iwdev->ibdev,
3485 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3634 struct irdma_device *iwdev;
3640 iwdev = to_iwdev(iwcq->ibcq.device);
3695 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3698 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3705 ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
3861 struct irdma_device *iwdev = to_iwdev(dev);
3864 irdma_fw_major_ver(&iwdev->rf->sc_dev),
3865 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
3876 struct irdma_device *iwdev = to_iwdev(ibdev);
3877 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
3897 struct irdma_device *iwdev = to_iwdev(ibdev);
3898 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
3900 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
3901 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
3903 irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
3920 struct irdma_device *iwdev = to_iwdev(ibdev);
3923 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
3972 * @iwdev: irdma device
3978 static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
3985 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3994 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3995 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3996 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
4043 struct irdma_device *iwdev = iwqp->iwdev;
4044 struct irdma_pci_f *rf = iwdev->rf;
4064 ibdev_dbg(&iwdev->ibdev,
4073 ibdev_dbg(&iwdev->ibdev,
4119 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
4141 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4144 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4183 struct irdma_device *iwdev = iwqp->iwdev;
4184 struct irdma_pci_f *rf = iwdev->rf;
4203 ibdev_dbg(&iwdev->ibdev,
4213 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4216 ibdev_dbg(&iwdev->ibdev,
4234 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4237 ibdev_dbg(&iwdev->ibdev,
4246 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
4248 struct irdma_pci_f *rf = iwdev->rf;
4260 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
4273 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out");
4281 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
4292 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4293 struct irdma_pci_f *rf = iwdev->rf;
4302 sc_ah->ah_info.vsi = &iwdev->vsi;
4350 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
4356 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
4373 * @iwdev: irdma device
4378 static bool irdma_ah_exists(struct irdma_device *iwdev,
4387 hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
4409 struct irdma_device *iwdev = to_iwdev(ibah->device);
4413 mutex_lock(&iwdev->ah_tbl_lock);
4415 mutex_unlock(&iwdev->ah_tbl_lock);
4420 mutex_unlock(&iwdev->ah_tbl_lock);
4423 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
4426 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
4446 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4457 mutex_lock(&iwdev->ah_tbl_lock);
4458 if (!irdma_ah_exists(iwdev, ah)) {
4459 err = irdma_create_hw_ah(iwdev, ah, true);
4461 mutex_unlock(&iwdev->ah_tbl_lock);
4473 hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
4477 mutex_unlock(&iwdev->ah_tbl_lock);
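Lines 4409-4477 cover the cached-AH path: user AH creation takes ah_tbl_lock, probes ah_hash_tbl for an existing parent AH, and only issues the hardware create when no match is found. A hedged sketch of that lookup-or-create step; the hash key derivation, parent/child bookkeeping, and struct layouts are assumptions not visible in these matches:

    /* hypothetical helper condensing the pattern at lines 4457-4477 */
    static int example_get_or_create_ah(struct irdma_device *iwdev,
                                        struct irdma_ah *ah,
                                        struct irdma_ah *parent_ah, u32 key)
    {
        int err = 0;

        mutex_lock(&iwdev->ah_tbl_lock);
        if (!irdma_ah_exists(iwdev, ah)) {
            /* no cached AH with this key: create one in HW (may sleep) */
            err = irdma_create_hw_ah(iwdev, ah, true);
            if (!err)
                /* publish the parent AH so later requests can reuse it */
                hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
        }
        mutex_unlock(&iwdev->ah_tbl_lock);

        return err;
    }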
4499 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4505 err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
4612 * @iwdev: irdma device
4614 static void irdma_init_roce_device(struct irdma_device *iwdev)
4616 iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
4617 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4618 iwdev->netdev->dev_addr);
4619 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
4624 * @iwdev: irdma device
4626 static void irdma_init_iw_device(struct irdma_device *iwdev)
4628 struct net_device *netdev = iwdev->netdev;
4630 iwdev->ibdev.node_type = RDMA_NODE_RNIC;
4631 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4633 memcpy(iwdev->ibdev.iw_ifname, netdev->name,
4634 sizeof(iwdev->ibdev.iw_ifname));
4635 ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
4640 * @iwdev: irdma device
4642 static void irdma_init_rdma_device(struct irdma_device *iwdev)
4644 struct pci_dev *pcidev = iwdev->rf->pcidev;
4646 if (iwdev->roce_mode)
4647 irdma_init_roce_device(iwdev);
4649 irdma_init_iw_device(iwdev);
4651 iwdev->ibdev.phys_port_cnt = 1;
4652 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
4653 iwdev->ibdev.dev.parent = &pcidev->dev;
4654 ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
4659 * @iwdev: irdma device
4661 void irdma_port_ibevent(struct irdma_device *iwdev)
4665 event.device = &iwdev->ibdev;
4668 iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4675 * @iwdev: irdma device
4677 void irdma_ib_unregister_device(struct irdma_device *iwdev)
4679 iwdev->iw_status = 0;
4680 irdma_port_ibevent(iwdev);
4681 ib_unregister_device(&iwdev->ibdev);
4686 * @iwdev: irdma device
4688 int irdma_ib_register_device(struct irdma_device *iwdev)
4692 irdma_init_rdma_device(iwdev);
4694 ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
4697 dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
4698 ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
4702 iwdev->iw_status = 1;
4703 irdma_port_ibevent(iwdev);
4709 ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
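Lines 4688-4709 show the registration order: populate the ib_device, bind it to the netdev, relax the DMA segment limit, register with the RDMA core, and only then raise iw_status and fire the port event. A hedged reconstruction from those matches; label names and exact error handling are assumptions:

    static int example_register(struct irdma_device *iwdev)
    {
        int ret;

        irdma_init_rdma_device(iwdev);   /* fill node type, ops, parent dev */

        ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
        if (ret)
            goto error;

        dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
        ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
        if (ret)
            goto error;

        /* mark the port up and broadcast IB_EVENT_PORT_ACTIVE */
        iwdev->iw_status = 1;
        irdma_port_ibevent(iwdev);

        return 0;

    error:
        ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
        return ret;
    }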
4723 struct irdma_device *iwdev = to_iwdev(ibdev);
4725 irdma_rt_deinit_hw(iwdev);
4726 irdma_ctrl_deinit_hw(iwdev->rf);
4727 kfree(iwdev->rf);