Lines Matching defs:iwqp

Cross-reference listing for the symbol iwqp (struct i40iw_qp, the per-QP object of the Linux i40iw driver for Intel X722 iWARP). The number at the start of each entry is the line number in the original source file; the rest of the entry is the matching source line, quoted verbatim.

341  * @iwqp: qp ptr (user or kernel)
344 void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
346 struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
347 struct i40iw_device *iwdev = iwqp->iwdev;
348 u32 qp_num = iwqp->ibqp.qp_num;
350 i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
351 i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
356 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
357 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
358 kfree(iwqp->kqp.wrid_mem);
359 iwqp->kqp.wrid_mem = NULL;
360 kfree(iwqp);
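
The lines above (341-360) are the QP teardown helper: detach the QP from the IEQ (inline exception queue), return its push page, free the Q2/context DMA buffer and the kernel-mode queue DMA memory, release the work-request ID tracking array, and finally free the iwqp itself. Hardware-visible resources go first, host-only bookkeeping last. Below is a minimal sketch of that release-in-reverse-order pattern; toy_qp, hw_detach() and dma_buf_free() are invented stand-ins for the driver's types and helpers.

    #include <stdlib.h>

    /* Hypothetical stand-ins for the driver's IEQ/DMA helpers. */
    struct toy_qp { void *dma_ctx; void *kqueue_mem; void *wrid_mem; };
    void hw_detach(struct toy_qp *qp);
    void dma_buf_free(void *buf);

    static void toy_free_qp_resources(struct toy_qp *qp)
    {
        hw_detach(qp);                /* quiesce hardware references first */
        dma_buf_free(qp->dma_ctx);    /* then device-visible buffers       */
        dma_buf_free(qp->kqueue_mem);
        free(qp->wrid_mem);           /* host-only bookkeeping             */
        qp->wrid_mem = NULL;          /* guard against double free         */
        free(qp);                     /* the QP object itself, last        */
    }
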
365 * @iwqp: qp ptr (user or kernel)
368 static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
372 ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
381 struct i40iw_qp *iwqp = to_iwqp(ibqp);
383 struct i40iw_device *iwdev = iwqp->iwdev;
387 iwqp->destroyed = 1;
389 if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
390 i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
392 if (!iwqp->user_mode) {
393 if (iwqp->iwscq) {
394 i40iw_clean_cqes(iwqp, iwqp->iwscq);
395 if (iwqp->iwrcq != iwqp->iwscq)
396 i40iw_clean_cqes(iwqp, iwqp->iwrcq);
401 i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
402 i40iw_qp_rem_ref(&iwqp->ibqp);
403 wait_for_completion(&iwqp->free_qp);
404 i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
405 i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
406 i40iw_free_qp_resources(iwqp);
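
Lines 365-406 cover teardown from the verb side. i40iw_clean_cqes (365-372) scrubs a QP's stale entries out of a CQ, and i40iw_destroy_qp (381-406) drives the sequence: flag the QP destroyed, push it to the error state if it never reached RTS, clean the send CQ and, only when distinct, the receive CQ, drop the last reference, wait on free_qp, then issue the CQP destroy command and release the PD use count and memory. From user space this is all behind one verb; a minimal libibverbs sketch:

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Destroying a QP from user space: the provider (here i40iw)
     * runs the kernel-side sequence shown above on our behalf. */
    static int teardown(struct ibv_qp *qp)
    {
        int rc = ibv_destroy_qp(qp);   /* returns 0, or an errno value */
        if (rc)
            fprintf(stderr, "ibv_destroy_qp: %d\n", rc);
        return rc;
    }
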
419 struct i40iw_qp *iwqp,
422 struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
425 iwqp->page = qpmr->sq_page;
441 * @iwqp: qp ptr (user or kernel)
445 struct i40iw_qp *iwqp,
448 struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
465 iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
467 ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
494 ukinfo->qp_id = iwqp->ibqp.qp_num;
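
Lines 419-494 are the two provisioning paths. For a user-mode QP (419-425) the driver only records the SQ page out of the user's registered QP memory (qpmr->sq_page). For a kernel-mode QP (441-494) it carves the rings out of a DMA allocation and builds a work-request ID tracking array (wrid_mem) so completions can be matched back to the wr_id the caller posted. A sketch of that bookkeeping array; the struct layout and sizing here are illustrative, the driver's real element type being struct i40iw_sq_uk_wr_trk_info (line 467):

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical per-WQE tracking slot. */
    struct trk_info {
        uint64_t wr_id;    /* caller's cookie, echoed in the CQE */
        uint32_t wr_len;   /* payload length for the completion  */
    };

    /* One slot per SQ WQE so a completion index maps back to wr_id.
     * The driver sizes this from the SQ ring depth. */
    static struct trk_info *alloc_wrid_array(uint32_t sq_depth)
    {
        return calloc(sq_depth, sizeof(struct trk_info));
    }
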
511 struct i40iw_qp *iwqp;
558 iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
559 if (!iwqp)
562 qp = &iwqp->sc_qp;
563 qp->back_qp = (void *)iwqp;
566 iwqp->iwdev = iwdev;
567 iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
570 &iwqp->q2_ctx_mem,
578 init_info.q2 = iwqp->q2_ctx_mem.va;
579 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
591 iwqp->iwpd = iwpd;
592 iwqp->ibqp.qp_num = qp_num;
593 qp = &iwqp->sc_qp;
594 iwqp->iwscq = to_iwcq(init_attr->send_cq);
595 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
597 iwqp->host_ctx.va = init_info.host_ctx;
598 iwqp->host_ctx.pa = init_info.host_ctx_pa;
599 iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
602 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
603 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
617 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
618 iwqp->user_mode = 1;
636 memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
638 err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
640 err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
655 ctx_info = &iwqp->ctx_info;
656 iwarp_info = &iwqp->iwarp_info;
659 if (!iwqp->user_mode) {
667 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
668 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
676 ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
677 (u64 *)iwqp->host_ctx.va,
704 refcount_set(&iwqp->refcount, 1);
705 spin_lock_init(&iwqp->lock);
706 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
707 iwdev->qp_table[qp_num] = iwqp;
708 i40iw_add_pdusecount(iwqp->iwpd);
719 i40iw_destroy_qp(&iwqp->ibqp, udata);
724 init_completion(&iwqp->sq_drained);
725 init_completion(&iwqp->rq_drained);
726 init_completion(&iwqp->free_qp);
728 return &iwqp->ibqp;
730 i40iw_free_qp_resources(iwqp);
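
Lines 511-730 are QP creation end to end: allocate the iwqp, link the hardware QP back to it through back_qp, allocate the Q2/context DMA area, assign the QP number, bind the PD and the send/receive CQs, record the host context buffer, then branch on user versus kernel mode (copying in the user's completion context and PBL, or building the kernel rings), program the hardware context via qp_setctx, take the initial reference, publish the QP in qp_table, and initialize the drain and free completions before returning &iwqp->ibqp. The user-space trigger for all of this is ibv_create_qp; a compilable sketch, with illustrative capacities and a shared CQ:

    #include <string.h>
    #include <infiniband/verbs.h>

    /* Minimal QP creation against an existing PD and CQ. */
    static struct ibv_qp *make_qp(struct ibv_pd *pd, struct ibv_cq *cq)
    {
        struct ibv_qp_init_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.send_cq = cq;                /* may also be two distinct CQs */
        attr.recv_cq = cq;
        attr.qp_type = IBV_QPT_RC;        /* iWARP exposes RC semantics   */
        attr.sq_sig_all = 1;              /* -> iwqp->sig_all at line 706 */
        attr.cap.max_send_wr = 32;
        attr.cap.max_recv_wr = 32;
        attr.cap.max_send_sge = 1;
        attr.cap.max_recv_sge = 1;

        return ibv_create_qp(pd, &attr);  /* NULL on failure, errno set   */
    }
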
746 struct i40iw_qp *iwqp = to_iwqp(ibqp);
747 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
749 attr->qp_state = iwqp->ibqp_state;
758 init_attr->event_handler = iwqp->ibqp.event_handler;
759 init_attr->qp_context = iwqp->ibqp.qp_context;
760 init_attr->send_cq = iwqp->ibqp.send_cq;
761 init_attr->recv_cq = iwqp->ibqp.recv_cq;
762 init_attr->srq = iwqp->ibqp.srq;
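
Lines 746-762 are the query verb: it reports the cached ibqp_state and hands back the creation-time attributes (event handler, context, CQs, SRQ). The matching user-space call:

    #include <infiniband/verbs.h>

    /* Read back the current state; attr_mask picks which fields the
     * provider must fill (state alone is enough here). */
    static int qp_state(struct ibv_qp *qp, enum ibv_qp_state *state)
    {
        struct ibv_qp_attr attr;
        struct ibv_qp_init_attr init_attr;
        int rc = ibv_query_qp(qp, &attr, IBV_QP_STATE, &init_attr);

        if (!rc)
            *state = attr.qp_state;
        return rc;
    }
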
771 * @iwqp: qp ptr (user or kernel)
775 void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
792 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
799 if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
800 i40iw_send_reset(iwqp->cm_node);
807 i40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false);
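
Lines 771-807 post the actual modify command to the control QP (CQP). Two exception outcomes are visible in the listing: a TCP reset is sent to the peer when the QP sat in the idle state (line 800), and i40iw_gen_ae raises an asynchronous event when the command cannot complete normally (line 807). Reduced to its shape, this is "build a request, submit it to a command channel, map failures onto connection teardown". In the sketch below, submit_cmd(), send_tcp_reset() and raise_async_event() are hypothetical stand-ins, and the branch placement is illustrative rather than a claim about the driver's exact control flow:

    /* Sketch only: all helpers are hypothetical. */
    struct modify_req { int next_state; int reset_requested; };

    int  submit_cmd(struct modify_req *req);
    void send_tcp_reset(void);
    void raise_async_event(void);

    static void hw_modify(struct modify_req *req, int qp_is_idle)
    {
        if (submit_cmd(req)) {
            raise_async_event();   /* command failed: surface an AE */
            return;
        }
        /* Reset requested while the QP sat idle: fall back to a
         * plain TCP reset toward the peer (cf. lines 799-800). */
        if (req->reset_requested && qp_is_idle)
            send_tcp_reset();
    }
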
825 struct i40iw_qp *iwqp = to_iwqp(ibqp);
826 struct i40iw_device *iwdev = iwqp->iwdev;
836 ctx_info = &iwqp->ctx_info;
837 iwarp_info = &iwqp->iwarp_info;
839 spin_lock_irqsave(&iwqp->lock, flags);
850 if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
854 if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
860 if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
861 (!iwqp->cm_id)) {
867 iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
868 iwqp->hte_added = 1;
876 if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
880 if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
881 (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
885 if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
893 if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
902 if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
906 if (iwqp->sc_qp.term_flags)
907 i40iw_terminate_del_timer(&iwqp->sc_qp);
909 if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
911 (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
923 iwqp->ibqp_state = attr->qp_state;
937 if (iwqp->user_mode) {
948 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
949 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
950 ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
951 (u64 *)iwqp->host_ctx.va,
960 spin_unlock_irqrestore(&iwqp->lock, flags);
963 i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
965 spin_lock_irqsave(&iwqp->lock, flags);
966 iwqp->iwarp_state = info.next_iwarp_state;
967 spin_unlock_irqrestore(&iwqp->lock, flags);
970 if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
972 if (iwqp->cm_id && iwqp->hw_tcp_state) {
973 spin_lock_irqsave(&iwqp->lock, flags);
974 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
975 iwqp->last_aeq = I40IW_AE_RESET_SENT;
976 spin_unlock_irqrestore(&iwqp->lock, flags);
977 i40iw_cm_disconn(iwqp);
980 spin_lock_irqsave(&iwqp->lock, flags);
981 if (iwqp->cm_id) {
982 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
983 iwqp->cm_id->add_ref(iwqp->cm_id);
984 i40iw_schedule_cm_timer(iwqp->cm_node,
985 (struct i40iw_puda_buf *)iwqp,
989 spin_unlock_irqrestore(&iwqp->lock, flags);
994 spin_unlock_irqrestore(&iwqp->lock, flags);
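
Lines 825-994 are the verb-level state machine. Under iwqp->lock it validates each requested transition against the current iwarp_state (RTS, for instance, is refused without a connected cm_id), records the new ibqp_state, reprograms the hardware context for kernel-mode QPs, and then, outside the lock, pushes the change down through i40iw_hw_modify_qp. Transitions past RTS additionally kick the disconnect machinery: mark the TCP state closed and call i40iw_cm_disconn, or arm the close timer while holding a cm_id reference. In user space the one transition an iWARP application commonly drives by hand is the move to error (connection setup itself goes through the connection manager):

    #include <string.h>
    #include <infiniband/verbs.h>

    /* Force a QP to ERR: outstanding WRs complete with flush status,
     * the usual prelude to draining and destroying the QP. */
    static int qp_to_error(struct ibv_qp *qp)
    {
        struct ibv_qp_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IBV_QPS_ERR;
        return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
    }
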
1634 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1635 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1638 wait_for_completion(&iwqp->sq_drained);
1647 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1648 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1651 wait_for_completion(&iwqp->rq_drained);
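
Lines 1634-1651 are the provider's drain hooks, the callbacks behind the RDMA core's ib_drain_sq()/ib_drain_rq(): each simply parks on a completion (sq_drained, rq_drained) that the CQ poll path signals once the flushed ring runs dry (lines 2389-2394 below). The handshake, sketched in user-space C with a pthread condition variable standing in for the kernel's struct completion:

    #include <pthread.h>
    #include <stdbool.h>

    /* Fields assumed initialized (PTHREAD_MUTEX_INITIALIZER etc.). */
    struct drain_gate {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            drained;
    };

    /* Drain verb side: park until the poll path signals (cf. 1638/1651). */
    static void wait_drained(struct drain_gate *g)
    {
        pthread_mutex_lock(&g->lock);
        while (!g->drained)
            pthread_cond_wait(&g->cond, &g->lock);
        pthread_mutex_unlock(&g->lock);
    }

    /* CQ poll side: the flushed ring ran empty (cf. 2392/2394). */
    static void signal_drained(struct drain_gate *g)
    {
        pthread_mutex_lock(&g->lock);
        g->drained = true;
        pthread_cond_signal(&g->cond);
        pthread_mutex_unlock(&g->lock);
    }
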
2104 struct i40iw_qp *iwqp;
2112 iwqp = (struct i40iw_qp *)ibqp;
2113 ukqp = &iwqp->sc_qp.qp_uk;
2115 spin_lock_irqsave(&iwqp->lock, flags);
2117 if (iwqp->flush_issued) {
2126 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2221 struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
2243 ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
2265 spin_unlock_irqrestore(&iwqp->lock, flags);
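
Lines 2104-2265 are the send path: take iwqp->lock, refuse new work once a flush has been issued, translate each work request into a device WQE (a completion is requested per WR via IB_SEND_SIGNALED or QP-wide via sig_all), and route fast memory registration through the privileged QP ops. A user-space counterpart posting one signaled SEND; the buffer, MR and wr_id parameters are the caller's:

    #include <string.h>
    #include <stdint.h>
    #include <infiniband/verbs.h>

    /* Post a single signaled SEND of a registered buffer. */
    static int post_one_send(struct ibv_qp *qp, struct ibv_mr *mr,
                             void *buf, uint32_t len, uint64_t wr_id)
    {
        struct ibv_sge sge = {
            .addr   = (uintptr_t)buf,
            .length = len,
            .lkey   = mr->lkey,
        };
        struct ibv_send_wr wr, *bad_wr = NULL;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id      = wr_id;              /* echoed back in the CQE */
        wr.sg_list    = &sge;
        wr.num_sge    = 1;
        wr.opcode     = IBV_WR_SEND;
        wr.send_flags = IBV_SEND_SIGNALED;  /* request a completion   */

        return ibv_post_send(qp, &wr, &bad_wr);
    }
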
2279 struct i40iw_qp *iwqp;
2287 iwqp = (struct i40iw_qp *)ibqp;
2288 ukqp = &iwqp->sc_qp.qp_uk;
2291 spin_lock_irqsave(&iwqp->lock, flags);
2293 if (iwqp->flush_issued) {
2316 spin_unlock_irqrestore(&iwqp->lock, flags);
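
Lines 2279-2316 mirror that for receives: same lock, same flush_issued gate, then the scatter list goes to the receive ring. The user-space counterpart:

    #include <string.h>
    #include <stdint.h>
    #include <infiniband/verbs.h>

    /* Post one receive buffer; it completes when a message lands. */
    static int post_one_recv(struct ibv_qp *qp, struct ibv_mr *mr,
                             void *buf, uint32_t len, uint64_t wr_id)
    {
        struct ibv_sge sge = {
            .addr   = (uintptr_t)buf,
            .length = len,
            .lkey   = mr->lkey,
        };
        struct ibv_recv_wr wr, *bad_wr = NULL;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id   = wr_id;
        wr.sg_list = &sge;
        wr.num_sge = 1;

        return ibv_post_recv(qp, &wr, &bad_wr);
    }
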
2336 struct i40iw_qp *iwqp;
2389 iwqp = (struct i40iw_qp *)qp->back_qp;
2390 if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
2392 complete(&iwqp->sq_drained);
2394 complete(&iwqp->rq_drained);
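
Lines 2336-2394 are in the completion-polling path: once a QP has moved past RTS (it is flushing or gone), the poller signals sq_drained or rq_drained when the corresponding ring has no more work, releasing the waiters from lines 1638 and 1651. What user space observes at that point is flush-status completions; a sketch that polls them out until the CQ is momentarily empty:

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Drain a CQ after the QP entered the error state: flushed WRs
     * complete with IBV_WC_WR_FLUSH_ERR. */
    static int drain_flushed(struct ibv_cq *cq)
    {
        struct ibv_wc wc;
        int n, flushed = 0;

        while ((n = ibv_poll_cq(cq, 1, &wc)) > 0) {
            if (wc.status == IBV_WC_WR_FLUSH_ERR)
                flushed++;
        }
        if (n < 0)
            return n;          /* poll error */
        printf("%d flushed completions\n", flushed);
        return 0;
    }
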