Lines matching defs: iwqp
227 * @iwqp: qp pointer
229 static void irdma_alloc_push_page(struct irdma_qp *iwqp)
233 struct irdma_device *iwdev = iwqp->iwdev;
234 struct irdma_sc_qp *qp = &iwqp->sc_qp;
454 * @iwqp: qp ptr (user or kernel)
457 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
463 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
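
The fragment above suggests irdma_clean_cqes is a thin wrapper that drains stale CQEs for this QP via the user-kernel library. A minimal sketch, assuming the ukcq derivation and a per-CQ lock (both are assumptions; only the irdma_uk_clean_cq call is in the listing):

    static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
    {
            struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;  /* assumed derivation */
            unsigned long flags;

            spin_lock_irqsave(&iwcq->lock, flags);          /* assumed: CQ lock guards the clean */
            irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
            spin_unlock_irqrestore(&iwcq->lock, flags);
    }
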
467 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
469 if (iwqp->push_db_mmap_entry) {
470 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
471 iwqp->push_db_mmap_entry = NULL;
473 if (iwqp->push_wqe_mmap_entry) {
474 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
475 iwqp->push_wqe_mmap_entry = NULL;
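
Joined together, the push-mmap teardown fragments reconstruct to a complete helper; only the braces are added here:

    static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
    {
            if (iwqp->push_db_mmap_entry) {
                    rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
                    iwqp->push_db_mmap_entry = NULL;
            }

            if (iwqp->push_wqe_mmap_entry) {
                    rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
                    iwqp->push_wqe_mmap_entry = NULL;
            }
    }
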
480 struct irdma_qp *iwqp,
492 bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
493 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
496 if (!iwqp->push_wqe_mmap_entry)
501 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
504 if (!iwqp->push_db_mmap_entry) {
505 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
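
Note the unwind on the second insert: if the doorbell entry cannot be created, the already-inserted WQE entry is removed so nothing leaks. A sketch of that tail, assuming the trailing arguments (mmap flag and key out-parameter) and the errno, none of which appear in the listing:

    iwqp->push_db_mmap_entry =
            irdma_user_mmap_entry_insert(ucontext, bar_off,
                                         IRDMA_MMAP_IO_NC,        /* assumed flag */
                                         push_db_mmap_key);       /* assumed out-param */
    if (!iwqp->push_db_mmap_entry) {
            rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); /* unwind first insert */
            return -ENOMEM;                                         /* assumed errno */
    }

    return 0;
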
519 struct irdma_qp *iwqp = to_iwqp(ibqp);
520 struct irdma_device *iwdev = iwqp->iwdev;
522 iwqp->sc_qp.qp_uk.destroy_pending = true;
524 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
525 irdma_modify_qp_to_err(&iwqp->sc_qp);
527 if (!iwqp->user_mode)
528 cancel_delayed_work_sync(&iwqp->dwork_flush);
530 if (!iwqp->user_mode) {
531 if (iwqp->iwscq) {
532 irdma_clean_cqes(iwqp, iwqp->iwscq);
533 if (iwqp->iwrcq != iwqp->iwscq)
534 irdma_clean_cqes(iwqp, iwqp->iwrcq);
538 irdma_qp_rem_ref(&iwqp->ibqp);
539 wait_for_completion(&iwqp->free_qp);
540 irdma_free_lsmm_rsrc(iwqp);
541 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
543 irdma_remove_push_mmap_entries(iwqp);
544 irdma_free_qp_rsrc(iwqp);
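
Read in sequence, the destroy-path fragments imply a strict teardown order. The sketch below only annotates the lines already listed:

    iwqp->sc_qp.qp_uk.destroy_pending = true;           /* stop new work first */

    if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
            irdma_modify_qp_to_err(&iwqp->sc_qp);        /* force the QP out of RTS */

    if (!iwqp->user_mode)
            cancel_delayed_work_sync(&iwqp->dwork_flush); /* no flush worker racing teardown */

    if (!iwqp->user_mode) {
            if (iwqp->iwscq) {
                    irdma_clean_cqes(iwqp, iwqp->iwscq); /* drop stale CQEs */
                    if (iwqp->iwrcq != iwqp->iwscq)
                            irdma_clean_cqes(iwqp, iwqp->iwrcq);
            }
    }

    irdma_qp_rem_ref(&iwqp->ibqp);
    wait_for_completion(&iwqp->free_qp);                /* wait out the last reference */
    irdma_free_lsmm_rsrc(iwqp);
    irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);

    irdma_remove_push_mmap_entries(iwqp);
    irdma_free_qp_rsrc(iwqp);                           /* finally release memory */
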
552 * @iwqp: qp ptr
556 struct irdma_qp *iwqp,
559 struct irdma_pbl *iwpbl = iwqp->iwpbl;
562 iwqp->page = qpmr->sq_page;
578 * @iwqp: qp ptr (user or kernel)
584 struct irdma_qp *iwqp,
602 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
603 iwqp->user_mode = 1;
607 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
611 if (!iwqp->iwpbl) {
623 * iwqp->max_send_wr/max_recv_wr in the kernel.
625 iwqp->max_send_wr = init_attr->cap.max_send_wr;
626 iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
642 iwqp->max_send_wr =
644 iwqp->max_recv_wr =
650 irdma_setup_virt_qp(iwdev, iwqp, info);
658 * @iwqp: qp ptr (user or kernel)
663 struct irdma_qp *iwqp,
667 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
682 iwqp->kqp.sq_wrid_mem =
683 kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
684 if (!iwqp->kqp.sq_wrid_mem)
687 iwqp->kqp.rq_wrid_mem =
688 kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
690 if (!iwqp->kqp.rq_wrid_mem) {
691 kfree(iwqp->kqp.sq_wrid_mem);
692 iwqp->kqp.sq_wrid_mem = NULL;
696 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
697 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
706 kfree(iwqp->kqp.sq_wrid_mem);
707 iwqp->kqp.sq_wrid_mem = NULL;
708 kfree(iwqp->kqp.rq_wrid_mem);
709 iwqp->kqp.rq_wrid_mem = NULL;
723 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
724 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
725 init_attr->cap.max_send_wr = iwqp->max_send_wr;
726 init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
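
The capacity reported back through init_attr is derived from ring depth, reserved slots, and the per-WR quanta shift. A standalone illustration of the arithmetic; every constant value below is made up for the example, not taken from the driver:

    #include <stdio.h>

    int main(void)
    {
            unsigned int sq_depth = 128;    /* ring slots (quanta), hypothetical */
            unsigned int sq_rsvd  = 2;      /* reserved slots, hypothetical      */
            unsigned int sq_shift = 2;      /* one WR occupies 1 << shift quanta */

            /* mirrors: max_send_wr = (sq_depth - IRDMA_SQ_RSVD) >> sq_shift */
            printf("max_send_wr = %u\n", (sq_depth - sq_rsvd) >> sq_shift);
            return 0;
    }
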
731 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
733 struct irdma_pci_f *rf = iwqp->iwdev->rf;
752 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
760 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
763 struct irdma_device *iwdev = iwqp->iwdev;
768 udp_info = &iwqp->udp_info;
775 roce_info = &iwqp->roce_info;
788 if (!iwqp->user_mode) {
795 ctx_info->roce_info = &iwqp->roce_info;
796 ctx_info->udp_info = &iwqp->udp_info;
797 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
800 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
803 struct irdma_device *iwdev = iwqp->iwdev;
807 iwarp_info = &iwqp->iwarp_info;
817 if (!iwqp->user_mode) {
824 ctx_info->iwarp_info = &iwqp->iwarp_info;
826 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
862 struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
864 irdma_generate_flush_completions(iwqp);
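
The flush worker fragment shows the usual delayed-work idiom: recover the owning QP with container_of, then generate software completions. A sketch, assuming the standard work_struct entry point (the name irdma_flush_worker comes from the INIT_DELAYED_WORK line later in this listing):

    static void irdma_flush_worker(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work); /* assumed entry shape */
            struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp,
                                                 dwork_flush);

            irdma_generate_flush_completions(iwqp);
    }
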
883 struct irdma_qp *iwqp = to_iwqp(ibqp);
909 qp = &iwqp->sc_qp;
910 qp->qp_uk.back_qp = iwqp;
913 iwqp->iwdev = iwdev;
914 iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
916 iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
917 iwqp->q2_ctx_mem.size,
918 &iwqp->q2_ctx_mem.pa,
920 if (!iwqp->q2_ctx_mem.va)
923 init_info.q2 = iwqp->q2_ctx_mem.va;
924 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
936 iwqp->iwpd = iwpd;
937 iwqp->ibqp.qp_num = qp_num;
938 qp = &iwqp->sc_qp;
939 iwqp->iwscq = to_iwcq(init_attr->send_cq);
940 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
941 iwqp->host_ctx.va = init_info.host_ctx;
942 iwqp->host_ctx.pa = init_info.host_ctx_pa;
943 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
949 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
950 init_waitqueue_head(&iwqp->waitq);
951 init_waitqueue_head(&iwqp->mod_qp_waitq);
955 err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
958 INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
960 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
993 ctx_info = &iwqp->ctx_info;
994 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
995 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
998 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
1000 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
1002 err_code = irdma_cqp_create_qp_cmd(iwqp);
1006 refcount_set(&iwqp->refcnt, 1);
1007 spin_lock_init(&iwqp->lock);
1008 spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
1009 iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1010 rf->qp_table[qp_num] = iwqp;
1014 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
1019 irdma_qp_add_qos(&iwqp->sc_qp);
1040 irdma_destroy_qp(&iwqp->ibqp, udata);
1045 init_completion(&iwqp->free_qp);
1049 irdma_free_qp_rsrc(iwqp);
1053 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
1057 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
1058 if (iwqp->roce_info.wr_rdresp_en) {
1062 if (iwqp->roce_info.rd_en)
1064 if (iwqp->roce_info.bind_en)
1067 if (iwqp->iwarp_info.wr_rdresp_en) {
1071 if (iwqp->iwarp_info.rd_en)
1073 if (iwqp->iwarp_info.bind_en)
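
This accessor translates per-protocol enable bits into IB access flags. A sketch of the RoCE branch; the specific IB_ACCESS_* mapping is an assumption, since the listing only shows the condition lines:

    static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
    {
            int acc_flags = 0;

            if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
                    if (iwqp->roce_info.wr_rdresp_en)       /* assumed flag mapping */
                            acc_flags |= IB_ACCESS_LOCAL_WRITE |
                                         IB_ACCESS_REMOTE_WRITE;
                    if (iwqp->roce_info.rd_en)
                            acc_flags |= IB_ACCESS_REMOTE_READ;
                    if (iwqp->roce_info.bind_en)
                            acc_flags |= IB_ACCESS_MW_BIND;
            } else {
                    /* iWARP mirrors the same checks on iwqp->iwarp_info */
            }

            return acc_flags;
    }
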
1089 struct irdma_qp *iwqp = to_iwqp(ibqp);
1090 struct irdma_sc_qp *qp = &iwqp->sc_qp;
1095 attr->qp_state = iwqp->ibqp_state;
1096 attr->cur_qp_state = iwqp->ibqp_state;
1097 attr->cap.max_send_wr = iwqp->max_send_wr;
1098 attr->cap.max_recv_wr = iwqp->max_recv_wr;
1102 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
1105 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
1106 attr->qkey = iwqp->roce_info.qkey;
1107 attr->rq_psn = iwqp->udp_info.epsn;
1108 attr->sq_psn = iwqp->udp_info.psn_nxt;
1109 attr->dest_qp_num = iwqp->roce_info.dest_qp;
1110 attr->pkey_index = iwqp->roce_info.p_key;
1111 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
1112 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
1113 attr->max_rd_atomic = iwqp->roce_info.ord_size;
1114 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
1117 init_attr->event_handler = iwqp->ibqp.event_handler;
1118 init_attr->qp_context = iwqp->ibqp.qp_context;
1119 init_attr->send_cq = iwqp->ibqp.send_cq;
1120 init_attr->recv_cq = iwqp->ibqp.recv_cq;
1161 static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
1163 if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
1164 !iwqp->suspend_pending,
1166 iwqp->suspend_pending = false;
1167 ibdev_warn(&iwqp->iwdev->ibdev,
1169 iwqp->ibqp.qp_num, iwqp->last_aeq);
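
Assembled from the fragments, the suspend wait is a bounded wait_event_timeout on the device-wide suspend_wq, keyed on the per-QP suspend_pending flag. A sketch; the timeout constant's name, the warning text, and the errno are assumptions:

    static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
    {
            if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
                                    !iwqp->suspend_pending,
                                    msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) { /* assumed constant */
                    iwqp->suspend_pending = false;
                    ibdev_warn(&iwqp->iwdev->ibdev,
                               "modify_qp timed out waiting for suspend, qp_id = %u, last_aeq = 0x%x\n",
                               iwqp->ibqp.qp_num, iwqp->last_aeq);
                    return -EBUSY;  /* assumed errno */
            }

            return 0;
    }
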
1189 struct irdma_qp *iwqp = to_iwqp(ibqp);
1190 struct irdma_device *iwdev = iwqp->iwdev;
1202 ctx_info = &iwqp->ctx_info;
1203 roce_info = &iwqp->roce_info;
1204 udp_info = &iwqp->udp_info;
1251 struct irdma_av *av = &iwqp->roce_ah.av;
1257 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1266 irdma_qp_rem_qos(&iwqp->sc_qp);
1267 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1268 if (iwqp->sc_qp.vsi->dscp_mode)
1270 iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
1280 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1282 iwqp->sc_qp.user_pri = ctx_info->user_pri;
1283 irdma_qp_add_qos(&iwqp->sc_qp);
1367 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1372 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1374 spin_lock_irqsave(&iwqp->lock, flags);
1376 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1377 iwqp->ibqp.qp_type, attr_mask)) {
1379 iwqp->ibqp.qp_num, iwqp->ibqp_state,
1384 info.curr_iwarp_state = iwqp->iwarp_state;
1388 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1393 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1399 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1409 if (iwqp->ibqp_state < IB_QPS_RTR ||
1410 iwqp->ibqp_state == IB_QPS_ERR) {
1421 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1423 spin_unlock_irqrestore(&iwqp->lock, flags);
1424 irdma_alloc_push_page(iwqp);
1425 spin_lock_irqsave(&iwqp->lock, flags);
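
Note the lock juggling here: irdma_alloc_push_page issues a CQP command and may sleep, so the QP spinlock is dropped for the call and retaken afterwards. Annotating the three lines above:

    spin_unlock_irqrestore(&iwqp->lock, flags);
    irdma_alloc_push_page(iwqp);        /* may sleep: CQP command to the hardware */
    spin_lock_irqsave(&iwqp->lock, flags);
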
1429 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1432 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1439 iwqp->suspend_pending = true;
1444 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1445 spin_unlock_irqrestore(&iwqp->lock, flags);
1451 irdma_flush_wqes(iwqp,
1467 iwqp->ibqp_state = attr->qp_state;
1470 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1471 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1472 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1473 spin_unlock_irqrestore(&iwqp->lock, flags);
1478 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1481 ret = irdma_wait_for_suspend(iwqp);
1485 spin_lock_irqsave(&iwqp->lock, flags);
1486 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1487 iwqp->iwarp_state = info.next_iwarp_state;
1488 iwqp->ibqp_state = attr->qp_state;
1490 if (iwqp->ibqp_state > IB_QPS_RTS &&
1491 !iwqp->flush_issued) {
1492 spin_unlock_irqrestore(&iwqp->lock, flags);
1493 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1496 iwqp->flush_issued = 1;
1498 spin_unlock_irqrestore(&iwqp->lock, flags);
1501 iwqp->ibqp_state = attr->qp_state;
1508 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1509 !iwqp->push_wqe_mmap_entry &&
1510 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1513 uresp.push_offset = iwqp->sc_qp.push_offset;
1518 irdma_remove_push_mmap_entries(iwqp);
1528 spin_unlock_irqrestore(&iwqp->lock, flags);
1545 struct irdma_qp *iwqp = to_iwqp(ibqp);
1546 struct irdma_device *iwdev = iwqp->iwdev;
1569 ctx_info = &iwqp->ctx_info;
1570 offload_info = &iwqp->iwarp_info;
1571 tcp_info = &iwqp->tcp_info;
1572 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1576 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1577 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1579 spin_lock_irqsave(&iwqp->lock, flags);
1581 info.curr_iwarp_state = iwqp->iwarp_state;
1585 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1590 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1595 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1597 spin_unlock_irqrestore(&iwqp->lock, flags);
1598 irdma_alloc_push_page(iwqp);
1599 spin_lock_irqsave(&iwqp->lock, flags);
1603 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1604 !iwqp->cm_id) {
1610 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1611 iwqp->hte_added = 1;
1619 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1624 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1625 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1630 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1639 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1649 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1650 spin_unlock_irqrestore(&iwqp->lock, flags);
1656 irdma_flush_wqes(iwqp,
1664 if (iwqp->sc_qp.term_flags) {
1665 spin_unlock_irqrestore(&iwqp->lock, flags);
1666 irdma_terminate_del_timer(&iwqp->sc_qp);
1667 spin_lock_irqsave(&iwqp->lock, flags);
1670 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1672 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1685 iwqp->ibqp_state = attr->qp_state;
1698 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1699 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1700 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1702 spin_unlock_irqrestore(&iwqp->lock, flags);
1707 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1711 spin_lock_irqsave(&iwqp->lock, flags);
1712 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1713 iwqp->iwarp_state = info.next_iwarp_state;
1714 iwqp->ibqp_state = attr->qp_state;
1716 spin_unlock_irqrestore(&iwqp->lock, flags);
1719 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1721 if (iwqp->hw_tcp_state) {
1722 spin_lock_irqsave(&iwqp->lock, flags);
1723 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1724 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1725 spin_unlock_irqrestore(&iwqp->lock, flags);
1727 irdma_cm_disconn(iwqp);
1733 if (iwqp->cm_node) {
1734 refcount_inc(&iwqp->cm_node->refcnt);
1736 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1737 if (iwqp->cm_id && close_timer_started == 1)
1738 irdma_schedule_cm_timer(iwqp->cm_node,
1739 (struct irdma_puda_buf *)iwqp,
1742 irdma_rem_ref_cm_node(iwqp->cm_node);
1754 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1755 !iwqp->push_wqe_mmap_entry &&
1756 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1759 uresp.push_offset = iwqp->sc_qp.push_offset;
1765 irdma_remove_push_mmap_entries(iwqp);
1774 spin_unlock_irqrestore(&iwqp->lock, flags);
3286 struct irdma_qp *iwqp;
3295 iwqp = to_iwqp(ibqp);
3296 ukqp = &iwqp->sc_qp.qp_uk;
3297 dev = &iwqp->iwdev->rf->sc_dev;
3299 spin_lock_irqsave(&iwqp->lock, flags);
3304 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
3336 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3337 iwqp->ibqp.qp_type == IB_QPT_GSI) {
3416 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
3422 ibdev_dbg(&iwqp->iwdev->ibdev,
3433 if (!iwqp->flush_issued) {
3434 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
3436 spin_unlock_irqrestore(&iwqp->lock, flags);
3438 spin_unlock_irqrestore(&iwqp->lock, flags);
3439 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3458 struct irdma_qp *iwqp;
3464 iwqp = to_iwqp(ibqp);
3465 ukqp = &iwqp->sc_qp.qp_uk;
3467 spin_lock_irqsave(&iwqp->lock, flags);
3474 ibdev_dbg(&iwqp->iwdev->ibdev,
3483 spin_unlock_irqrestore(&iwqp->lock, flags);
3484 if (iwqp->flush_issued)
3485 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
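
Both post paths end with the same deferred-flush kick: once the lock is released, if the QP has already been flushed the delayed work is (re)armed so software completions surface without hardware help. A sketch of the recv-side tail; the delay constant's name is an assumption:

    spin_unlock_irqrestore(&iwqp->lock, flags);
    if (iwqp->flush_issued)
            mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
                             msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); /* assumed constant */
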
4042 struct irdma_qp *iwqp = to_iwqp(ibqp);
4043 struct irdma_device *iwdev = iwqp->iwdev;
4121 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
4134 mcg_info.qp_id = iwqp->ibqp.qp_num;
4182 struct irdma_qp *iwqp = to_iwqp(ibqp);
4183 struct irdma_device *iwdev = iwqp->iwdev;
4208 mcg_info.qp_id = iwqp->ibqp.qp_num;