Lines matching refs: iwdev

225 struct irdma_device *iwdev = rf->iwdev;
240 ibdev_dbg(&iwdev->ibdev,
252 atomic_dec(&iwdev->vsi.qp_suspend_reqs);
253 wake_up(&iwdev->suspend_wq);
256 ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
323 if (iwqp->iwdev->vsi.tc_change_pending) {
325 wake_up(&iwqp->iwdev->suspend_wq);
329 wake_up(&iwqp->iwdev->suspend_wq);
339 ibdev_err(&iwdev->ibdev,
394 ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
396 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
473 * Allocate iwdev msix table and copy the msix info to the table
1083 * @iwdev: irdma device
1088 static int irdma_alloc_set_mac(struct irdma_device *iwdev)
1092 status = irdma_alloc_local_mac_entry(iwdev->rf,
1093 &iwdev->mac_ip_table_idx);
1095 status = irdma_add_local_mac_entry(iwdev->rf,
1096 (const u8 *)iwdev->netdev->dev_addr,
1097 (u8)iwdev->mac_ip_table_idx);
1099 irdma_del_local_mac_entry(iwdev->rf,
1100 (u8)iwdev->mac_ip_table_idx);
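
The irdma_alloc_set_mac() matches above show the driver reserving a local MAC table entry, programming the netdev MAC address into it, and releasing the entry again if programming fails. A minimal user-space sketch of that reserve/program/roll-back pattern; every helper below is a stand-in, not the driver API:

#include <stdio.h>

static int table_idx;   /* stands in for iwdev->mac_ip_table_idx */

/* Stand-ins for the allocate/add/delete MAC-entry helpers; 0 means success,
 * as in the driver. */
static int alloc_local_mac_entry(int *idx)
{
        *idx = 0;
        return 0;
}

static int add_local_mac_entry(const unsigned char *mac, int idx)
{
        (void)mac;
        printf("programming MAC into entry %d\n", idx);
        return 0;
}

static void del_local_mac_entry(int idx)
{
        printf("releasing entry %d\n", idx);
}

/* Mirrors the flow visible above: reserve an entry, program the MAC into it,
 * and release the entry again if programming fails. */
static int alloc_set_mac(const unsigned char *mac)
{
        int status = alloc_local_mac_entry(&table_idx);

        if (status)
                return status;
        status = add_local_mac_entry(mac, table_idx);
        if (status)
                del_local_mac_entry(table_idx);
        return status;
}

int main(void)
{
        const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

        return alloc_set_mac(mac);
}
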
1140 ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
1170 ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
1259 ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
1313 ibdev_dbg(&rf->iwdev->ibdev,
1469 * @iwdev: irdma device
1473 static int irdma_initialize_ilq(struct irdma_device *iwdev)
1484 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1490 status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1492 ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
1499 * @iwdev: irdma device
1503 static int irdma_initialize_ieq(struct irdma_device *iwdev)
1510 info.qp_id = iwdev->vsi.exception_lan_q;
1514 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1516 info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
1518 status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1520 ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
1531 struct irdma_device *iwdev = vsi->back_vsi;
1532 struct irdma_pci_f *rf = iwdev->rf;
1535 if (irdma_initialize_ieq(iwdev)) {
1536 iwdev->rf->reset = true;
1597 * Allocate memory for the hmc objects and initialize iwdev
1655 * @iwdev: irdma device
1660 void irdma_rt_deinit_hw(struct irdma_device *iwdev)
1662 ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
1664 switch (iwdev->init_state) {
1666 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1667 irdma_del_local_mac_entry(iwdev->rf,
1668 (u8)iwdev->mac_ip_table_idx);
1674 if (!iwdev->roce_mode)
1675 irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
1676 iwdev->rf->reset);
1679 if (!iwdev->roce_mode)
1680 irdma_puda_dele_rsrc(&iwdev->vsi,
1682 iwdev->rf->reset);
1685 ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
1689 irdma_cleanup_cm_core(&iwdev->cm_core);
1690 if (iwdev->vsi.pestat) {
1691 irdma_vsi_stats_free(&iwdev->vsi);
1692 kfree(iwdev->vsi.pestat);
1694 if (iwdev->cleanup_wq)
1695 destroy_workqueue(iwdev->cleanup_wq);
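
The irdma_rt_deinit_hw() matches above show teardown keyed off iwdev->init_state: the switch cases fall through so that unwinding a later stage also unwinds every earlier one, and the CM-core cleanup runs after the switch regardless of how far init got. A minimal sketch of that pattern, assuming a reduced set of stages (the driver's enum has more members, such as CEQS_CREATED, PBLE_CHUNK_MEM and AEQ_CREATED):

#include <stdio.h>

enum init_state {
        INVALID_STATE = 0,
        ILQ_CREATED,
        IEQ_CREATED,
        IP_ADDR_REGISTERED,
};

static void rt_deinit_hw(enum init_state state)
{
        switch (state) {
        case IP_ADDR_REGISTERED:
                puts("del local MAC entry");
                /* fall through */
        case IEQ_CREATED:
                puts("free IEQ PUDA resources");
                /* fall through */
        case ILQ_CREATED:
                puts("free ILQ PUDA resources");
                break;
        default:
                fprintf(stderr, "bad init_state = %d\n", state);
                break;
        }
        /* runs regardless of how far init got, like the CM-core cleanup and
         * workqueue destruction after the switch in the driver */
        puts("cleanup CM core");
}

int main(void)
{
        rt_deinit_hw(IEQ_CREATED);      /* unwinds IEQ, then ILQ, then CM core */
        return 0;
}
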
1734 * @iwdev: irdma device
1738 static void irdma_get_used_rsrc(struct irdma_device *iwdev)
1740 iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
1741 iwdev->rf->max_pd);
1742 iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
1743 iwdev->rf->max_qp);
1744 iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
1745 iwdev->rf->max_cq);
1746 iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
1747 iwdev->rf->max_mr);
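
irdma_get_used_rsrc() derives the in-use PD/QP/CQ/MR counts from find_first_zero_bit(); this appears to rely on resource IDs having been handed out contiguously from the low end of each allocation bitmap at this point, so the index of the first clear bit doubles as the number of objects in use. A small self-contained sketch of that idea, where first_zero_bit() is a stand-in for the kernel helper:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Stand-in for find_first_zero_bit(): returns the index of the first clear
 * bit, or nbits if every bit is set. */
static unsigned int first_zero_bit(const unsigned long *map, unsigned int nbits)
{
        unsigned int i;

        for (i = 0; i < nbits; i++)
                if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
                        return i;
        return nbits;
}

int main(void)
{
        /* QP IDs 0..4 allocated contiguously from the low end, so the first
         * clear bit (5) is also the in-use count. */
        unsigned long allocated_qps[1] = { 0x1f };

        printf("used_qps = %u\n", first_zero_bit(allocated_qps, BITS_PER_LONG));
        return 0;
}
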
1781 ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
1788 * @iwdev: irdma device
1794 int irdma_rt_init_hw(struct irdma_device *iwdev,
1797 struct irdma_pci_f *rf = iwdev->rf;
1804 vsi_info.back_vsi = iwdev;
1806 vsi_info.pf_data_vsi_num = iwdev->vsi_num;
1810 irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
1812 status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1818 irdma_cleanup_cm_core(&iwdev->cm_core);
1822 status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
1824 irdma_cleanup_cm_core(&iwdev->cm_core);
1830 if (!iwdev->roce_mode) {
1831 status = irdma_initialize_ilq(iwdev);
1834 iwdev->init_state = ILQ_CREATED;
1835 status = irdma_initialize_ieq(iwdev);
1838 iwdev->init_state = IEQ_CREATED;
1841 status = irdma_setup_ceqs(rf, &iwdev->vsi);
1845 iwdev->init_state = CEQS_CREATED;
1854 iwdev->init_state = PBLE_CHUNK_MEM;
1862 iwdev->init_state = AEQ_CREATED;
1866 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1867 irdma_alloc_set_mac(iwdev);
1868 irdma_add_ip(iwdev);
1869 iwdev->init_state = IP_ADDR_REGISTERED;
1874 iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
1876 if (!iwdev->cleanup_wq)
1878 irdma_get_used_rsrc(iwdev);
1879 init_waitqueue_head(&iwdev->suspend_wq);
1885 status, iwdev->init_state);
1886 irdma_rt_deinit_hw(iwdev);
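
The irdma_rt_init_hw() matches show iwdev->init_state being advanced after each stage that succeeds (ILQ_CREATED, IEQ_CREATED, CEQS_CREATED, ..., IP_ADDR_REGISTERED), and any failure hands the partially initialized device to irdma_rt_deinit_hw() so only the completed stages are unwound. A compressed sketch of that bookkeeping, assuming a reduced set of stages and goto-based unwinding in place of the driver's actual control flow; all helpers are stand-ins:

#include <stdio.h>

enum init_state {
        INVALID_STATE = 0,
        ILQ_CREATED,
        IEQ_CREATED,
        IP_ADDR_REGISTERED,
};

struct fake_dev {
        enum init_state init_state;
};

/* Stage stand-ins; each returns 0 on success like the driver helpers. */
static int initialize_ilq(struct fake_dev *dev) { (void)dev; return 0; }
static int initialize_ieq(struct fake_dev *dev) { (void)dev; return 0; }
static int register_ip(struct fake_dev *dev)    { (void)dev; return 0; }

static void rt_deinit_hw(struct fake_dev *dev)
{
        fprintf(stderr, "unwinding from state %d\n", dev->init_state);
}

static int rt_init_hw(struct fake_dev *dev)
{
        int status;

        status = initialize_ilq(dev);
        if (status)
                goto err;
        dev->init_state = ILQ_CREATED;

        status = initialize_ieq(dev);
        if (status)
                goto err;
        dev->init_state = IEQ_CREATED;

        status = register_ip(dev);
        if (status)
                goto err;
        dev->init_state = IP_ADDR_REGISTERED;
        return 0;

err:
        /* init_state tells the deinit path exactly which stages completed */
        rt_deinit_hw(dev);
        return status;
}

int main(void)
{
        struct fake_dev dev = { .init_state = INVALID_STATE };

        return rt_init_hw(&dev);
}
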
2097 ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
2186 irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2289 * @iwdev: irdma device
2293 static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
2301 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2312 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2314 ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
2317 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2318 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
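
irdma_cqp_manage_apbvt_cmd() above (and irdma_manage_qhash() further down) follow the same CQP request lifecycle: allocate a request, fill in the op-specific info, submit it with irdma_handle_cqp_op(), then drop the reference with irdma_put_cqp_request(). A toy sketch of that lifecycle; the names mirror the driver's, but the types and signatures are simplified stand-ins:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy request; the real struct irdma_cqp_request carries a refcount and the
 * cqp_info union that the caller fills in. */
struct cqp_request {
        int add_port;
        unsigned short port;
};

static struct cqp_request *alloc_and_get_cqp_request(void)
{
        return calloc(1, sizeof(struct cqp_request));
}

static int handle_cqp_op(struct cqp_request *req)
{
        printf("%s APBVT entry, port=0x%04x\n",
               req->add_port ? "add" : "delete", req->port);
        return 0;
}

static void put_cqp_request(struct cqp_request *req)
{
        free(req);      /* drops the reference taken at allocation */
}

static int cqp_manage_apbvt_cmd(unsigned short port, int add_port)
{
        struct cqp_request *cqp_request;
        int status;

        cqp_request = alloc_and_get_cqp_request();
        if (!cqp_request)
                return -ENOMEM;

        cqp_request->add_port = add_port;
        cqp_request->port = port;
        status = handle_cqp_op(cqp_request);
        put_cqp_request(cqp_request);
        return status;
}

int main(void)
{
        return cqp_manage_apbvt_cmd(0x1234, 1);
}
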
2325 * @iwdev: irdma device
2328 struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
2330 struct irdma_cm_core *cm_core = &iwdev->cm_core;
2352 if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
2362 * @iwdev: irdma device
2365 void irdma_del_apbvt(struct irdma_device *iwdev,
2368 struct irdma_cm_core *cm_core = &iwdev->cm_core;
2382 irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
2450 * @iwdev: irdma device
2457 int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
2463 struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
2476 info->vsi = &iwdev->vsi;
2487 ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
2512 ibdev_dbg(&iwdev->ibdev,
2520 ibdev_dbg(&iwdev->ibdev,
2528 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2532 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2570 ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
2659 ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
2675 ibdev_dbg(&rf->iwdev->ibdev,
2720 struct irdma_pci_f *rf = iwqp->iwdev->rf;
2750 queue_delayed_work(iwqp->iwdev->cleanup_wq,