Lines Matching refs:qp

95 struct siw_qp *qp;
102 qp = sk_to_qp(sk);
104 if (likely(!qp->rx_stream.rx_suspend &&
105 down_read_trylock(&qp->state_lock))) {
106 read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };
108 if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
117 up_read(&qp->state_lock);
119 siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",
120 qp->rx_stream.rx_suspend);
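
/* Context for the matches above: they sit in the socket data_ready callback.
 * sk_to_qp() recovers the QP behind the socket; inbound data is only pushed
 * into the RX path while rx_suspend is clear, the state_lock read-trylock
 * succeeds and the QP is in RTS, otherwise the event is just logged together
 * with the suspend flag. */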
126 void siw_qp_llp_close(struct siw_qp *qp)
128 siw_dbg_qp(qp, "enter llp close, state = %s\n",
129 siw_qp_state_to_string[qp->attrs.state]);
131 down_write(&qp->state_lock);
133 qp->rx_stream.rx_suspend = 1;
134 qp->tx_ctx.tx_suspend = 1;
135 qp->attrs.sk = NULL;
137 switch (qp->attrs.state) {
142 qp->attrs.state = SIW_QP_STATE_ERROR;
151 if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
152 qp->attrs.state = SIW_QP_STATE_ERROR;
154 qp->attrs.state = SIW_QP_STATE_IDLE;
158 siw_dbg_qp(qp, "llp close: no state transition needed: %s\n",
159 siw_qp_state_to_string[qp->attrs.state]);
162 siw_sq_flush(qp);
163 siw_rq_flush(qp);
168 if (qp->cep) {
169 siw_cep_put(qp->cep);
170 qp->cep = NULL;
173 up_write(&qp->state_lock);
175 siw_dbg_qp(qp, "llp close exit: state %s\n",
176 siw_qp_state_to_string[qp->attrs.state]);
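
/* siw_qp_llp_close(), matched above: under the state_lock write lock it
 * suspends RX and TX, detaches the socket, moves the QP to ERROR or IDLE
 * depending on its current state, flushes the send and receive queues and
 * drops the connection endpoint reference held in qp->cep. */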
194 (void)siw_sq_start(cep->qp);
200 static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
204 qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
205 if (!qp->irq) {
206 qp->attrs.irq_size = 0;
212 qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
213 if (!qp->orq) {
214 qp->attrs.orq_size = 0;
215 qp->attrs.irq_size = 0;
216 vfree(qp->irq);
220 qp->attrs.irq_size = irq_size;
221 qp->attrs.orq_size = orq_size;
222 siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
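
The readq init lines above allocate the inbound and outbound read queues and, judging from the matched lines, roll back the first allocation when the second one fails, leaving both recorded sizes at zero. A minimal userspace sketch of that allocate-with-rollback pattern, using hypothetical names and plain calloc()/free() in place of the kernel's vzalloc()/vfree():

	#include <stdlib.h>

	struct readq_attrs { int irq_size; int orq_size; };

	static int readq_init(struct readq_attrs *a, void **irq, void **orq,
			      int irq_size, int orq_size, size_t elem)
	{
		*irq = calloc(irq_size, elem);
		if (!*irq) {
			a->irq_size = 0;
			return -1;
		}
		*orq = calloc(orq_size, elem);
		if (!*orq) {
			/* second allocation failed: undo the first one */
			free(*irq);
			*irq = NULL;
			a->irq_size = 0;
			a->orq_size = 0;
			return -1;
		}
		a->irq_size = irq_size;
		a->orq_size = orq_size;
		return 0;
	}

Recording the sizes only after both allocations succeed means later users never see a nonzero size without a backing array.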
226 static int siw_qp_enable_crc(struct siw_qp *qp)
228 struct siw_rx_stream *c_rx = &qp->rx_stream;
229 struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
260 int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
262 struct siw_wqe *wqe = tx_wqe(qp);
266 spin_lock_irqsave(&qp->sq_lock, flags);
269 spin_unlock_irqrestore(&qp->sq_lock, flags);
295 spin_lock(&qp->orq_lock);
297 if (qp->attrs.orq_size)
298 rreq = orq_get_free(qp);
301 qp->orq_put++;
305 spin_unlock(&qp->orq_lock);
312 spin_unlock_irqrestore(&qp->sq_lock, flags);
315 rv = siw_sq_start(qp);
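
/* siw_qp_mpa_rts(), matched above: with the SQ lock held it sets up the
 * first TX WQE after MPA negotiation and, depending on the MPA v2 control
 * value, reserves a free ORQ slot (orq_get_free(), orq_put++) under the
 * ORQ lock before kicking transmission with siw_sq_start(). */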
364 void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer, u8 etype,
367 if (!qp->term_info.valid) {
368 memset(&qp->term_info, 0, sizeof(qp->term_info));
369 qp->term_info.layer = layer;
370 qp->term_info.etype = etype;
371 qp->term_info.ecode = ecode;
372 qp->term_info.in_tx = in_tx;
373 qp->term_info.valid = 1;
375 siw_dbg_qp(qp, "init TERM: layer %d, type %d, code %d, in tx %s\n",
387 void siw_send_terminate(struct siw_qp *qp)
393 struct socket *s = qp->attrs.sk;
394 struct siw_rx_stream *srx = &qp->rx_stream;
399 if (!qp->term_info.valid)
402 qp->term_info.valid = 0;
404 if (tx_wqe(qp)->wr_status == SIW_WR_INPROGRESS) {
405 siw_dbg_qp(qp, "cannot send TERMINATE: op %d in progress\n",
406 tx_type(tx_wqe(qp)));
409 if (!s && qp->cep)
411 s = qp->cep->sock;
414 siw_dbg_qp(qp, "cannot send TERMINATE: not connected\n");
429 if ((qp->term_info.layer == TERM_ERROR_LAYER_DDP) ||
430 ((qp->term_info.layer == TERM_ERROR_LAYER_RDMAP) &&
431 (qp->term_info.etype != RDMAP_ETYPE_CATASTROPHIC))) {
441 __rdmap_term_set_layer(term, qp->term_info.layer);
442 __rdmap_term_set_etype(term, qp->term_info.etype);
443 __rdmap_term_set_ecode(term, qp->term_info.ecode);
445 switch (qp->term_info.layer) {
447 if (qp->term_info.etype == RDMAP_ETYPE_CATASTROPHIC)
451 if (qp->term_info.etype == RDMAP_ETYPE_REMOTE_PROTECTION) {
460 if (qp->term_info.in_tx) {
462 struct siw_wqe *wqe = tx_wqe(qp);
512 if ((qp->term_info.ecode == RDMAP_ECODE_VERSION) ||
513 (qp->term_info.ecode == RDMAP_ECODE_OPCODE))
540 if (((qp->term_info.etype == DDP_ETYPE_TAGGED_BUF) &&
541 (qp->term_info.ecode == DDP_ECODE_T_VERSION)) ||
542 ((qp->term_info.etype == DDP_ETYPE_UNTAGGED_BUF) &&
543 (qp->term_info.ecode == DDP_ECODE_UT_VERSION)))
583 if (qp->tx_ctx.mpa_crc_hd) {
584 crypto_shash_init(qp->tx_ctx.mpa_crc_hd);
585 if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
591 if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
596 crypto_shash_final(qp->tx_ctx.mpa_crc_hd, (u8 *)&crc);
600 siw_dbg_qp(qp, "sent TERM: %s, layer %d, type %d, code %d (%d bytes)\n",
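
/* siw_send_terminate(), matched above: it consumes qp->term_info (layer,
 * etype, ecode) recorded earlier by siw_init_terminate(), bails out if a
 * TX WQE is still in progress or no socket is available, builds the
 * TERMINATE header from term_info and, when an MPA CRC context is present,
 * finalizes the trailing CRC through the tx_ctx shash handle. */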
612 static void siw_qp_modify_nonstate(struct siw_qp *qp,
618 qp->attrs.flags |= SIW_RDMA_BIND_ENABLED;
620 qp->attrs.flags &= ~SIW_RDMA_BIND_ENABLED;
623 qp->attrs.flags |= SIW_RDMA_WRITE_ENABLED;
625 qp->attrs.flags &= ~SIW_RDMA_WRITE_ENABLED;
628 qp->attrs.flags |= SIW_RDMA_READ_ENABLED;
630 qp->attrs.flags &= ~SIW_RDMA_READ_ENABLED;
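
/* siw_qp_modify_nonstate(), matched above, only sets or clears the
 * SIW_RDMA_BIND/WRITE/READ_ENABLED bits in qp->attrs.flags according to
 * the requested access flags. */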
634 static int siw_qp_nextstate_from_idle(struct siw_qp *qp,
643 rv = siw_qp_enable_crc(qp);
648 siw_dbg_qp(qp, "no socket\n");
653 siw_dbg_qp(qp, "no MPA\n");
660 qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0;
661 qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0;
662 qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0;
667 qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 1;
668 qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 1;
669 qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 1;
675 rv = siw_qp_readq_init(qp, attrs->irq_size,
680 qp->attrs.sk = attrs->sk;
681 qp->attrs.state = SIW_QP_STATE_RTS;
683 siw_dbg_qp(qp, "enter RTS: crc=%s, ord=%u, ird=%u\n",
685 qp->attrs.orq_size, qp->attrs.irq_size);
689 siw_rq_flush(qp);
690 qp->attrs.state = SIW_QP_STATE_ERROR;
691 if (qp->cep) {
692 siw_cep_put(qp->cep);
693 qp->cep = NULL;
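
/* siw_qp_nextstate_from_idle(), matched above: towards RTS it optionally
 * enables the MPA CRC, requires a socket and MPA context, starts the
 * untagged DDP MSNs at 0 for TX and 1 for RX, sizes the IRQ/ORQ via
 * siw_qp_readq_init(), attaches the socket and enters RTS; on the
 * error/close path it flushes the RQ, moves to ERROR and drops qp->cep. */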
703 static int siw_qp_nextstate_from_rts(struct siw_qp *qp,
718 if (tx_wqe(qp)->wr_status == SIW_WR_IDLE) {
719 qp->attrs.state = SIW_QP_STATE_CLOSING;
721 qp->attrs.state = SIW_QP_STATE_ERROR;
722 siw_sq_flush(qp);
724 siw_rq_flush(qp);
730 qp->attrs.state = SIW_QP_STATE_TERMINATE;
732 siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
752 siw_sq_flush(qp);
753 siw_rq_flush(qp);
754 qp->attrs.state = SIW_QP_STATE_ERROR;
764 static void siw_qp_nextstate_from_term(struct siw_qp *qp,
769 siw_rq_flush(qp);
770 qp->attrs.state = SIW_QP_STATE_ERROR;
772 if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
773 siw_sq_flush(qp);
781 static int siw_qp_nextstate_from_close(struct siw_qp *qp,
788 WARN_ON(tx_wqe(qp)->wr_status != SIW_WR_IDLE);
789 qp->attrs.state = SIW_QP_STATE_IDLE;
804 qp->attrs.state = SIW_QP_STATE_ERROR;
806 if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
807 siw_sq_flush(qp);
809 siw_rq_flush(qp);
813 siw_dbg_qp(qp, "state transition undefined: %s => %s\n",
814 siw_qp_state_to_string[qp->attrs.state],
823 * Caller must hold qp->state_lock
825 int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attrs,
833 siw_dbg_qp(qp, "state: %s => %s\n",
834 siw_qp_state_to_string[qp->attrs.state],
838 siw_qp_modify_nonstate(qp, attrs, mask);
843 switch (qp->attrs.state) {
846 rv = siw_qp_nextstate_from_idle(qp, attrs, mask);
850 drop_conn = siw_qp_nextstate_from_rts(qp, attrs);
854 siw_qp_nextstate_from_term(qp, attrs);
858 siw_qp_nextstate_from_close(qp, attrs);
864 siw_qp_cm_drop(qp, 0);
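
siw_qp_modify() above dispatches on the QP's current state, with one helper per source state (from_idle, from_rts, from_term, from_close). A compact, self-contained illustration of that dispatch-per-current-state shape in plain C; the enum values and handler names below are invented for the example, not the driver's:

	#include <stdio.h>

	enum qp_state { Q_IDLE, Q_RTS, Q_CLOSING, Q_ERROR };

	struct fake_qp { enum qp_state state; };

	/* one handler per source state, mirroring the *_nextstate_from_*() split */
	static int next_from_idle(struct fake_qp *qp, enum qp_state target)
	{
		if (target == Q_RTS)
			qp->state = Q_RTS;
		return 0;
	}

	static int next_from_rts(struct fake_qp *qp, enum qp_state target)
	{
		qp->state = (target == Q_CLOSING) ? Q_CLOSING : Q_ERROR;
		return 0;
	}

	static int modify(struct fake_qp *qp, enum qp_state target)
	{
		switch (qp->state) {
		case Q_IDLE:
			return next_from_idle(qp, target);
		case Q_RTS:
			return next_from_rts(qp, target);
		default:
			return -1; /* transition not defined from this state */
		}
	}

	int main(void)
	{
		struct fake_qp qp = { .state = Q_IDLE };

		modify(&qp, Q_RTS);
		printf("QP state is now %d\n", qp.state);
		return 0;
	}

Keeping one function per source state keeps each transition table small and the top-level switch readable, which is the structure visible in the matched lines.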
881 static int siw_activate_tx_from_sq(struct siw_qp *qp)
884 struct siw_wqe *wqe = tx_wqe(qp);
887 sqe = sq_get_next(qp);
920 siw_dbg_qp(qp, "cannot fence read\n");
924 spin_lock(&qp->orq_lock);
926 if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
927 qp->tx_ctx.orq_fence = 1;
930 spin_unlock(&qp->orq_lock);
936 if (unlikely(!qp->attrs.orq_size)) {
943 spin_lock(&qp->orq_lock);
945 rreq = orq_get_free(qp);
952 qp->orq_put++;
954 qp->tx_ctx.orq_fence = 1;
957 spin_unlock(&qp->orq_lock);
962 qp->sq_get++;
965 siw_dbg_qp(qp, "error %d\n", rv);
974 * the active IRQ will not be served after qp->irq_burst, if the
977 int siw_activate_tx(struct siw_qp *qp)
980 struct siw_wqe *wqe = tx_wqe(qp);
982 if (!qp->attrs.irq_size)
983 return siw_activate_tx_from_sq(qp);
985 irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
988 return siw_activate_tx_from_sq(qp);
994 if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
995 qp->irq_burst = 0;
996 return siw_activate_tx_from_sq(qp);
1022 qp->irq_get++;
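
/* siw_activate_tx(), matched above: without a configured IRQ (or without a
 * valid IRQ entry) the next TX WQE is taken from the send queue; otherwise
 * inbound READ responses are served from the IRQ, but once irq_burst reaches
 * SIW_IRQ_MAXBURST_SQ_ACTIVE while SQ work is pending, the counter resets
 * and the SQ gets a turn, matching the starvation note in the comment line
 * matched at the top of this block. */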
1060 int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
1063 struct siw_cq *cq = qp->scq;
1087 cqe->base_qp = &qp->base_qp;
1089 cqe->qp_id = qp_id(qp);
1118 int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
1121 struct siw_cq *cq = qp->rcq;
1145 cqe->base_qp = &qp->base_qp;
1151 cqe->qp_id = qp_id(qp);
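
/* siw_sqe_complete() and siw_rqe_complete(), matched above, post work
 * completions to the send and receive CQs (qp->scq / qp->rcq), filling each
 * CQE with the originating base QP pointer and qp_id(). */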
1188 void siw_sq_flush(struct siw_qp *qp)
1191 struct siw_wqe *wqe = tx_wqe(qp);
1197 while (qp->attrs.orq_size) {
1198 sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size];
1202 if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
1206 qp->orq_get++;
1212 siw_dbg_qp(qp, "flush current SQE, type %d, status %d\n",
1225 siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
1233 while (qp->attrs.sq_size) {
1234 sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
1239 if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
1247 qp->sq_get++;
1250 siw_qp_event(qp, IB_EVENT_SQ_DRAINED);
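
The flush loops above (and the ORQ/IRQ handling earlier) walk their queues with free-running get/put counters reduced modulo the configured queue size, e.g. qp->orq[qp->orq_get % qp->attrs.orq_size]. A small self-contained C sketch of that indexing scheme; the names are illustrative only:

	#include <stdio.h>

	#define RING_SIZE 4 /* power of two, so the modulo stays consistent if the counters wrap */

	struct ring {
		int slot[RING_SIZE];
		unsigned int get; /* consumer counter, only ever incremented */
		unsigned int put; /* producer counter, only ever incremented */
	};

	static int ring_put(struct ring *r, int v)
	{
		if (r->put - r->get == RING_SIZE)
			return -1; /* full */
		r->slot[r->put % RING_SIZE] = v;
		r->put++;
		return 0;
	}

	static int ring_get(struct ring *r, int *v)
	{
		if (r->get == r->put)
			return -1; /* empty */
		*v = r->slot[r->get % RING_SIZE];
		r->get++;
		return 0;
	}

	int main(void)
	{
		struct ring r = { { 0 }, 0, 0 };
		int v;

		ring_put(&r, 7);
		ring_put(&r, 9);
		while (ring_get(&r, &v) == 0)
			printf("%d\n", v);
		return 0;
	}

Because the counters only grow, whether a slot actually holds a valid entry still has to be tracked separately (per-entry flags in the driver, not shown among these matches).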
1264 void siw_rq_flush(struct siw_qp *qp)
1266 struct siw_wqe *wqe = &qp->rx_untagged.wqe_active;
1272 siw_dbg_qp(qp, "flush current rqe, type %d, status %d\n",
1278 siw_rqe_complete(qp, &wqe->rqe, wqe->bytes,
1283 siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR);
1287 wqe = &qp->rx_tagged.wqe_active;
1296 while (qp->attrs.rq_size) {
1298 &qp->recvq[qp->rq_get % qp->attrs.rq_size];
1303 if (siw_rqe_complete(qp, rqe, 0, 0, SIW_WC_WR_FLUSH_ERR) != 0)
1307 qp->rq_get++;
1311 int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
1313 int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
1317 kref_init(&qp->ref);
1318 qp->sdev = sdev;
1319 siw_dbg_qp(qp, "new QP\n");
1326 struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
1327 struct siw_device *sdev = qp->sdev;
1330 if (qp->cep)
1331 siw_cep_put(qp->cep);
1333 found = xa_erase(&sdev->qp_xa, qp_id(qp));
1334 WARN_ON(found != qp);
1336 list_del(&qp->devq);
1339 vfree(qp->sendq);
1340 vfree(qp->recvq);
1341 vfree(qp->irq);
1342 vfree(qp->orq);
1344 siw_put_tx_cpu(qp->tx_cpu);
1347 siw_dbg_qp(qp, "free QP\n");
1348 kfree_rcu(qp, rcu);
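
/* QP lifetime, from the matches above: siw_qp_add() registers the QP in the
 * device's qp_xa XArray under its QP number and takes the initial kref;
 * the release side drops the qp->cep reference, erases the XArray entry,
 * unlinks the QP from the device list, frees the send/recv/IRQ/ORQ arrays,
 * returns the TX CPU and finally frees the QP with kfree_rcu(). */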