Lines matching refs:qp (identifier cross-reference; each matched line below is prefixed with its line number in the source file)
98 static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
100 qp->resp.res_head = 0;
101 qp->resp.res_tail = 0;
102 qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
104 if (!qp->resp.resources)
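These fragments line up with the soft-RoCE driver's QP code (rxe_qp.c under drivers/infiniband/sw/rxe in the Linux tree); because only lines referencing qp are matched, every function below appears with gaps. For orientation, a sketch of the full allocator with the elided return paths filled in as assumptions:

	static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
	{
		qp->resp.res_head = 0;
		qp->resp.res_tail = 0;
		/* kcalloc() zeroes the array, so each resp_res starts out unused */
		qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

		if (!qp->resp.resources)
			return -ENOMEM;	/* assumed: not among the matched lines */

		return 0;		/* assumed */
	}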
110 static void free_rd_atomic_resources(struct rxe_qp *qp)
112 if (qp->resp.resources) {
115 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
116 struct resp_res *res = &qp->resp.resources[i];
118 free_rd_atomic_resource(qp, res);
120 kfree(qp->resp.resources);
121 qp->resp.resources = NULL;
125 void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
136 static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
141 if (qp->resp.resources) {
142 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
143 res = &qp->resp.resources[i];
144 free_rd_atomic_resource(qp, res);
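free_rd_atomic_resources() and cleanup_rd_atomic_resources() evidently walk the same array through free_rd_atomic_resource(), which (line 125) presumably releases whatever the slot holds for an outstanding RDMA read or atomic. Judging by the matched lines, the cleanup variant differs only in leaving the array allocated; a sketch under that reading, with the local declarations being assumptions:

	static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
	{
		int i;
		struct resp_res *res;

		/* release what each slot holds, but keep the backing array;
		 * free_rd_atomic_resources() additionally kfree()s it and
		 * clears the pointer (lines 120-121)
		 */
		if (qp->resp.resources) {
			for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
				res = &qp->resp.resources[i];
				free_rd_atomic_resource(qp, res);
			}
		}
	}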
149 static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
155 qp->sq_sig_type = init->sq_sig_type;
156 qp->attr.path_mtu = 1;
157 qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
159 qpn = qp->pelem.index;
164 qp->ibqp.qp_num = 0;
166 qp->attr.port_num = init->port_num;
170 qp->ibqp.qp_num = 1;
172 qp->attr.port_num = init->port_num;
176 qp->ibqp.qp_num = qpn;
180 INIT_LIST_HEAD(&qp->grp_list);
182 skb_queue_head_init(&qp->send_pkts);
184 spin_lock_init(&qp->grp_lock);
185 spin_lock_init(&qp->state_lock);
187 spin_lock_init(&qp->req.task.state_lock);
188 spin_lock_init(&qp->resp.task.state_lock);
189 spin_lock_init(&qp->comp.task.state_lock);
191 spin_lock_init(&qp->sq.sq_lock);
192 spin_lock_init(&qp->rq.producer_lock);
193 spin_lock_init(&qp->rq.consumer_lock);
195 skb_queue_head_init(&qp->req_pkts);
196 skb_queue_head_init(&qp->resp_pkts);
198 atomic_set(&qp->ssn, 0);
199 atomic_set(&qp->skb_out, 0);
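The three qp_num assignments at lines 164, 170 and 176 are the usual special-casing of the management QPs: QP0 (SMI) and QP1 (GSI) get their fixed well-known numbers, while everything else keeps the pool index read into qpn at line 159. A plausible shape for the elided selection logic; the switch framing is an assumption:

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;		/* well-known QP0 */
		qp->attr.port_num = init->port_num;
		break;
	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;		/* well-known QP1 */
		qp->attr.port_num = init->port_num;
		break;
	default:
		qp->ibqp.qp_num = qpn;		/* pool-allocated index */
		break;
	}

The SMI/GSI cases presumably also record the backing pool index in the port structure, but such lines carry no qp token and so would not be matched.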
202 static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
209 err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
212 qp->sk->sk->sk_user_data = qp;
221 qp->src_port = RXE_ROCE_V2_SPORT +
222 (hash_32_generic(qp_num(qp), 14) & 0x3fff);
223 qp->sq.max_wr = init->cap.max_send_wr;
228 qp->sq.max_sge = init->cap.max_send_sge =
230 qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
233 qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
234 if (!qp->sq.queue)
238 qp->sq.queue->buf, qp->sq.queue->buf_size,
239 &qp->sq.queue->ip);
242 vfree(qp->sq.queue->buf);
243 kfree(qp->sq.queue);
244 qp->sq.queue = NULL;
248 qp->req.wqe_index = producer_index(qp->sq.queue);
249 qp->req.state = QP_STATE_RESET;
250 qp->req.opcode = -1;
251 qp->comp.opcode = -1;
253 rxe_init_task(&qp->req.task, qp, rxe_requester);
254 rxe_init_task(&qp->comp.task, qp, rxe_completer);
256 qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
258 timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
259 timer_setup(&qp->retrans_timer, retransmit_timer, 0);
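Lines 221-222 derive the UDP source port by folding the QPN through a 14-bit hash above RXE_ROCE_V2_SPORT, presumably to spread different QPs across ECMP paths. The send WQE sizing between lines 223 and 233 is elided; given that line 230 assigns wqe_size to max_inline, the computation plausibly sizes the per-WQE payload area for whichever is larger, the SGE array or the inline data. The max_t() expression below is an assumption:

	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
			wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	/* add room for the WQE header itself before sizing the ring */
	wqe_size += sizeof(struct rxe_send_wqe);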
264 static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
272 if (!qp->srq) {
273 qp->rq.max_wr = init->cap.max_recv_wr;
274 qp->rq.max_sge = init->cap.max_recv_sge;
276 wqe_size = rcv_wqe_size(qp->rq.max_sge);
278 pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
279 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
281 qp->rq.queue = rxe_queue_init(rxe,
282 &qp->rq.max_wr,
284 if (!qp->rq.queue)
288 qp->rq.queue->buf, qp->rq.queue->buf_size,
289 &qp->rq.queue->ip);
291 vfree(qp->rq.queue->buf);
292 kfree(qp->rq.queue);
293 qp->rq.queue = NULL;
298 rxe_init_task(&qp->resp.task, qp, rxe_responder);
300 qp->resp.opcode = OPCODE_NONE;
301 qp->resp.msn = 0;
302 qp->resp.state = QP_STATE_RESET;
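Lines 288-289 look like the tail of a do_mmap_info() call that exposes the receive ring to user space, mirroring the send-queue setup at lines 238-244. A sketch of the call and its unwind; the uresp->rq_mi field name is an assumption:

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			/* undo rxe_queue_init() from lines 281-282 */
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}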
307 /* called by the create qp verb */
308 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
325 qp->pd = pd;
326 qp->rcq = rcq;
327 qp->scq = scq;
328 qp->srq = srq;
330 rxe_qp_init_misc(rxe, qp, init);
332 err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
336 err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
340 qp->attr.qp_state = IB_QPS_RESET;
341 qp->valid = 1;
346 rxe_queue_cleanup(qp->sq.queue);
348 qp->pd = NULL;
349 qp->rcq = NULL;
350 qp->scq = NULL;
351 qp->srq = NULL;
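Lines 346-351 are the failure tail of rxe_qp_from_init(): the usual goto-label unwind would release the send queue built by rxe_qp_init_req() before clearing the pointers set at lines 325-328. A sketch of that tail; the label names are assumptions, and any reference drops on the pd/cq/srq locals would not be matched above because those calls carry no qp token:

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);	/* undo rxe_qp_init_req() */
err1:
	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	return err;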
362 /* called by the query qp verb */
363 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
365 init->event_handler = qp->ibqp.event_handler;
366 init->qp_context = qp->ibqp.qp_context;
367 init->send_cq = qp->ibqp.send_cq;
368 init->recv_cq = qp->ibqp.recv_cq;
369 init->srq = qp->ibqp.srq;
371 init->cap.max_send_wr = qp->sq.max_wr;
372 init->cap.max_send_sge = qp->sq.max_sge;
373 init->cap.max_inline_data = qp->sq.max_inline;
375 if (!qp->srq) {
376 init->cap.max_recv_wr = qp->rq.max_wr;
377 init->cap.max_recv_sge = qp->rq.max_sge;
380 init->sq_sig_type = qp->sq_sig_type;
382 init->qp_type = qp->ibqp.qp_type;
388 /* called by the modify qp verb, this routine checks all the parameters before
391 int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
395 attr->cur_qp_state : qp->attr.qp_state;
399 if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
400 pr_warn("invalid mask or state for qp\n");
406 if (qp->req.state == QP_STATE_DRAIN &&
419 if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
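Line 395 is the tail of a ternary that picks the current state; together with line 399 the guard presumably reads as below, letting the core IB state machine validate the requested transition before any individual attribute is range-checked. The new_state derivation and the goto target are assumptions:

	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}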
476 /* move the qp to the reset state */
477 static void rxe_qp_reset(struct rxe_qp *qp)
480 rxe_disable_task(&qp->resp.task);
483 if (qp->sq.queue) {
484 if (qp_type(qp) == IB_QPT_RC)
485 rxe_disable_task(&qp->comp.task);
486 rxe_disable_task(&qp->req.task);
489 /* move qp to the reset state */
490 qp->req.state = QP_STATE_RESET;
491 qp->resp.state = QP_STATE_RESET;
496 __rxe_do_task(&qp->resp.task);
498 if (qp->sq.queue) {
499 __rxe_do_task(&qp->comp.task);
500 __rxe_do_task(&qp->req.task);
501 rxe_queue_reset(qp->sq.queue);
505 atomic_set(&qp->ssn, 0);
506 qp->req.opcode = -1;
507 qp->req.need_retry = 0;
508 qp->req.noack_pkts = 0;
509 qp->resp.msn = 0;
510 qp->resp.opcode = -1;
511 qp->resp.drop_msg = 0;
512 qp->resp.goto_error = 0;
513 qp->resp.sent_psn_nak = 0;
515 if (qp->resp.mr) {
516 rxe_drop_ref(qp->resp.mr);
517 qp->resp.mr = NULL;
520 cleanup_rd_atomic_resources(qp);
523 rxe_enable_task(&qp->resp.task);
525 if (qp->sq.queue) {
526 if (qp_type(qp) == IB_QPT_RC)
527 rxe_enable_task(&qp->comp.task);
529 rxe_enable_task(&qp->req.task);
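rxe_qp_reset() follows a disable, flush, reinitialize, re-enable pattern; the completer task exists only for RC QPs, hence the qp_type() checks. An annotated outline of the sequence, with the phase comments being mine:

	/* phase 1: stop the tasklets so nothing runs mid-reset */
	rxe_disable_task(&qp->resp.task);
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* phase 2: mark both halves RESET, then run each state machine
	 * once by hand so it flushes in-flight work, and reset the ring
	 */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;
	__rxe_do_task(&qp->resp.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* phase 3: zero the PSN/opcode bookkeeping, drop the cached MR
	 * and rd/atomic resources (lines 505-520), then re-enable tasks
	 */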
534 static void rxe_qp_drain(struct rxe_qp *qp)
536 if (qp->sq.queue) {
537 if (qp->req.state != QP_STATE_DRAINED) {
538 qp->req.state = QP_STATE_DRAIN;
539 if (qp_type(qp) == IB_QPT_RC)
540 rxe_run_task(&qp->comp.task, 1);
542 __rxe_do_task(&qp->comp.task);
543 rxe_run_task(&qp->req.task, 1);
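The gap at line 541 contains no qp reference, so it is almost certainly a bare else: for non-RC QPs there is no scheduled completer, and the completion work runs synchronously. A reconstruction under that assumption:

	static void rxe_qp_drain(struct rxe_qp *qp)
	{
		if (qp->sq.queue) {
			if (qp->req.state != QP_STATE_DRAINED) {
				qp->req.state = QP_STATE_DRAIN;
				if (qp_type(qp) == IB_QPT_RC)
					rxe_run_task(&qp->comp.task, 1);
				else	/* assumed: line 541 */
					__rxe_do_task(&qp->comp.task);
				rxe_run_task(&qp->req.task, 1);
			}
		}
	}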
548 /* move the qp to the error state */
549 void rxe_qp_error(struct rxe_qp *qp)
551 qp->req.state = QP_STATE_ERROR;
552 qp->resp.state = QP_STATE_ERROR;
553 qp->attr.qp_state = IB_QPS_ERR;
556 rxe_run_task(&qp->resp.task, 1);
558 if (qp_type(qp) == IB_QPT_RC)
559 rxe_run_task(&qp->comp.task, 1);
561 __rxe_do_task(&qp->comp.task);
562 rxe_run_task(&qp->req.task, 1);
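The error path has the same RC/non-RC split as the drain path; line 560 is presumably another bare else, giving:

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else	/* assumed: line 560 */
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);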
565 /* called by the modify qp verb */
566 int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
575 qp->attr.max_rd_atomic = max_rd_atomic;
576 atomic_set(&qp->req.rd_atomic, max_rd_atomic);
583 qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
585 free_rd_atomic_resources(qp);
587 err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
593 qp->attr.cur_qp_state = attr->qp_state;
596 qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;
599 qp->attr.qp_access_flags = attr->qp_access_flags;
602 qp->attr.pkey_index = attr->pkey_index;
605 qp->attr.port_num = attr->port_num;
608 qp->attr.qkey = attr->qkey;
611 rxe_init_av(&attr->ah_attr, &qp->pri_av);
614 rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
615 qp->attr.alt_port_num = attr->alt_port_num;
616 qp->attr.alt_pkey_index = attr->alt_pkey_index;
617 qp->attr.alt_timeout = attr->alt_timeout;
621 qp->attr.path_mtu = attr->path_mtu;
622 qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
626 qp->attr.timeout = attr->timeout;
628 qp->qp_timeout_jiffies = 0;
633 qp->qp_timeout_jiffies = j ? j : 1;
638 qp->attr.retry_cnt = attr->retry_cnt;
639 qp->comp.retry_cnt = attr->retry_cnt;
640 pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
645 qp->attr.rnr_retry = attr->rnr_retry;
646 qp->comp.rnr_retry = attr->rnr_retry;
647 pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
652 qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
653 qp->resp.psn = qp->attr.rq_psn;
654 pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
655 qp->resp.psn);
659 qp->attr.min_rnr_timer = attr->min_rnr_timer;
660 pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
665 qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
666 qp->req.psn = qp->attr.sq_psn;
667 qp->comp.psn = qp->attr.sq_psn;
668 pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
672 qp->attr.path_mig_state = attr->path_mig_state;
675 qp->attr.dest_qp_num = attr->dest_qp_num;
678 qp->attr.qp_state = attr->qp_state;
682 pr_debug("qp#%d state -> RESET\n", qp_num(qp));
683 rxe_qp_reset(qp);
687 pr_debug("qp#%d state -> INIT\n", qp_num(qp));
688 qp->req.state = QP_STATE_INIT;
689 qp->resp.state = QP_STATE_INIT;
693 pr_debug("qp#%d state -> RTR\n", qp_num(qp));
694 qp->resp.state = QP_STATE_READY;
698 pr_debug("qp#%d state -> RTS\n", qp_num(qp));
699 qp->req.state = QP_STATE_READY;
703 pr_debug("qp#%d state -> SQD\n", qp_num(qp));
704 rxe_qp_drain(qp);
708 pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
713 pr_debug("qp#%d state -> ERR\n", qp_num(qp));
714 rxe_qp_error(qp);
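Lines 626-633 convert the IBTA local-ACK-timeout code into jiffies. The spec defines the timeout as 4.096 us * 2^timeout, i.e. 4096 ns shifted left by the attribute value, so the elided middle plausibly reads as below; the nsecs_to_jiffies() form is an assumption:

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;	/* 0 = never time out */
		} else {
			/* 4.096us * 2^timeout == 4096ns << timeout */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			/* round sub-jiffy results up so the timer still fires */
			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}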
722 /* called by the query qp verb */
723 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
725 *attr = qp->attr;
727 attr->rq_psn = qp->resp.psn;
728 attr->sq_psn = qp->req.psn;
730 attr->cap.max_send_wr = qp->sq.max_wr;
731 attr->cap.max_send_sge = qp->sq.max_sge;
732 attr->cap.max_inline_data = qp->sq.max_inline;
734 if (!qp->srq) {
735 attr->cap.max_recv_wr = qp->rq.max_wr;
736 attr->cap.max_recv_sge = qp->rq.max_sge;
739 rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
740 rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
742 if (qp->req.state == QP_STATE_DRAIN) {
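The lines following 742 presumably report the SQ drain status to the caller; applications typically poll sq_draining in a loop, which would explain a voluntary reschedule here. A sketch, with cond_resched() being an assumption:

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* pollers spin on this flag; yield the CPU to the drain */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}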
758 /* called by the destroy qp verb */
759 void rxe_qp_destroy(struct rxe_qp *qp)
761 qp->valid = 0;
762 qp->qp_timeout_jiffies = 0;
763 rxe_cleanup_task(&qp->resp.task);
765 if (qp_type(qp) == IB_QPT_RC) {
766 del_timer_sync(&qp->retrans_timer);
767 del_timer_sync(&qp->rnr_nak_timer);
770 rxe_cleanup_task(&qp->req.task);
771 rxe_cleanup_task(&qp->comp.task);
774 if (qp->req.task.func)
775 __rxe_do_task(&qp->req.task);
777 if (qp->sq.queue) {
778 __rxe_do_task(&qp->comp.task);
779 __rxe_do_task(&qp->req.task);
783 /* called when the last reference to the qp is dropped */
786 struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
788 rxe_drop_all_mcast_groups(qp);
790 if (qp->sq.queue)
791 rxe_queue_cleanup(qp->sq.queue);
793 if (qp->srq)
794 rxe_drop_ref(qp->srq);
796 if (qp->rq.queue)
797 rxe_queue_cleanup(qp->rq.queue);
799 if (qp->scq)
800 rxe_drop_ref(qp->scq);
801 if (qp->rcq)
802 rxe_drop_ref(qp->rcq);
803 if (qp->pd)
804 rxe_drop_ref(qp->pd);
806 if (qp->resp.mr) {
807 rxe_drop_ref(qp->resp.mr);
808 qp->resp.mr = NULL;
811 free_rd_atomic_resources(qp);
813 if (qp->sk) {
814 if (qp_type(qp) == IB_QPT_RC)
815 sk_dst_reset(qp->sk->sk);
817 kernel_sock_shutdown(qp->sk, SHUT_RDWR);
818 sock_release(qp->sk);
822 /* called when the last reference to the qp is dropped */
825 struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
827 execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
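The last two fragments are the pool destructor pair: rxe_qp_do_cleanup() is a work function (per the container_of over cleanup_work.work at line 786) that performs the teardown listed above, and the wrapper at line 825 defers it through execute_in_process_context() because socket release can sleep. A sketch of the wrapper, assuming it is the pool's rxe_qp_cleanup() hook and that the argument is the rxe_pool_entry backing the pelem member:

	void rxe_qp_cleanup(struct rxe_pool_entry *arg)
	{
		struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

		/* runs rxe_qp_do_cleanup() immediately when already in
		 * process context, otherwise queues it on a workqueue
		 */
		execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
	}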