Lines Matching refs:qp (drivers/infiniband/sw/rxe/rxe_resp.c)

80 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
85 skb_queue_tail(&qp->req_pkts, skb);
88 (skb_queue_len(&qp->req_pkts) > 1);
90 rxe_run_task(&qp->resp.task, must_sched);
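A note on the rxe_resp_queue_pkt() fragments above: the packet is appended to qp->req_pkts and the responder task is then run, with must_sched forcing deferred execution when the request is an RDMA read or when other requests are already queued. Below is a minimal userspace sketch of that enqueue-then-kick decision only, using hypothetical stand-ins (packet_queue, responder_run) for the kernel's skb queue and rxe task machinery; it is not the driver code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pkt {
        struct pkt *next;
        bool is_read_request;
};

struct packet_queue {
        struct pkt *head, *tail;
        size_t len;
};

/* Append a packet, playing the role of skb_queue_tail() on qp->req_pkts. */
static void queue_tail(struct packet_queue *q, struct pkt *p)
{
        p->next = NULL;
        if (q->tail)
                q->tail->next = p;
        else
                q->head = p;
        q->tail = p;
        q->len++;
}

/* Stand-in for rxe_run_task(): the real driver either runs the responder
 * inline or schedules its task; here we only report the decision. */
static void responder_run(const struct packet_queue *q, bool must_sched)
{
        printf("responder %s (queue depth %zu)\n",
               must_sched ? "deferred to task" : "run inline", q->len);
}

static void resp_queue_pkt(struct packet_queue *q, struct pkt *p)
{
        bool must_sched;

        queue_tail(q, p);

        /* Mirror of the must_sched test above: defer for read requests
         * or when a backlog already exists. */
        must_sched = p->is_read_request || q->len > 1;
        responder_run(q, must_sched);
}

int main(void)
{
        struct packet_queue q = { NULL, NULL, 0 };
        struct pkt a = { NULL, false }, b = { NULL, true };

        resp_queue_pkt(&q, &a);   /* inline: empty queue, not a read  */
        resp_queue_pkt(&q, &b);   /* deferred: read request + backlog */
        return 0;
}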
93 static inline enum resp_states get_req(struct rxe_qp *qp,
98 if (qp->resp.state == QP_STATE_ERROR) {
99 while ((skb = skb_dequeue(&qp->req_pkts))) {
100 rxe_drop_ref(qp);
108 skb = skb_peek(&qp->req_pkts);
114 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
117 static enum resp_states check_psn(struct rxe_qp *qp,
120 int diff = psn_compare(pkt->psn, qp->resp.psn);
121 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
123 switch (qp_type(qp)) {
126 if (qp->resp.sent_psn_nak)
129 qp->resp.sent_psn_nak = 1;
138 if (qp->resp.sent_psn_nak)
139 qp->resp.sent_psn_nak = 0;
144 if (qp->resp.drop_msg || diff != 0) {
146 qp->resp.drop_msg = 0;
150 qp->resp.drop_msg = 1;
161 static enum resp_states check_op_seq(struct rxe_qp *qp,
164 switch (qp_type(qp)) {
166 switch (qp->resp.opcode) {
207 switch (qp->resp.opcode) {
238 qp->resp.drop_msg = 1;
251 static enum resp_states check_op_valid(struct rxe_qp *qp,
254 switch (qp_type(qp)) {
257 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
259 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
261 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
269 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
270 qp->resp.drop_msg = 1;
289 static enum resp_states get_srq_wqe(struct rxe_qp *qp)
291 struct rxe_srq *srq = qp->srq;
308 memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));
310 qp->resp.wqe = &qp->resp.srq_wqe.wqe;
324 ev.device = qp->ibqp.device;
325 ev.element.srq = qp->ibqp.srq;
331 static enum resp_states check_resource(struct rxe_qp *qp,
334 struct rxe_srq *srq = qp->srq;
336 if (qp->resp.state == QP_STATE_ERROR) {
337 if (qp->resp.wqe) {
338 qp->resp.status = IB_WC_WR_FLUSH_ERR;
341 qp->resp.wqe = queue_head(qp->rq.queue);
342 if (qp->resp.wqe) {
343 qp->resp.status = IB_WC_WR_FLUSH_ERR;
358 if (likely(qp->attr.max_dest_rd_atomic > 0))
366 return get_srq_wqe(qp);
368 qp->resp.wqe = queue_head(qp->rq.queue);
369 return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
375 static enum resp_states check_length(struct rxe_qp *qp,
378 switch (qp_type(qp)) {
390 static enum resp_states check_rkey(struct rxe_qp *qp,
398 int mtu = qp->mtu;
404 qp->resp.va = reth_va(pkt);
405 qp->resp.rkey = reth_rkey(pkt);
406 qp->resp.resid = reth_len(pkt);
407 qp->resp.length = reth_len(pkt);
412 qp->resp.va = atmeth_va(pkt);
413 qp->resp.rkey = atmeth_rkey(pkt);
414 qp->resp.resid = sizeof(u64);
427 va = qp->resp.va;
428 rkey = qp->resp.rkey;
429 resid = qp->resp.resid;
432 mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
469 WARN_ON_ONCE(qp->resp.mr);
471 qp->resp.mr = mem;
480 static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
485 err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
494 static enum resp_states write_data_in(struct rxe_qp *qp,
501 err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
508 qp->resp.va += data_len;
509 qp->resp.resid -= data_len;
518 static enum resp_states process_atomic(struct rxe_qp *qp,
524 struct rxe_mem *mr = qp->resp.mr;
541 qp->resp.atomic_orig = *vaddr;
558 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
567 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
581 skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
585 ack->qp = qp;
595 bth_set_qpn(ack, qp->attr.dest_qp_num);
604 aeth_set_msn(ack, qp->resp.msn);
608 atmack_set_orig(ack, qp->resp.atomic_orig);
630 static enum resp_states read_reply(struct rxe_qp *qp,
635 int mtu = qp->mtu;
640 struct resp_res *res = qp->resp.res;
648 res = &qp->resp.resources[qp->resp.res_head];
650 free_rd_atomic_resource(qp, res);
651 rxe_advance_resp_resource(qp);
656 res->read.va = qp->resp.va;
657 res->read.va_org = qp->resp.va;
670 res->read.resid = qp->resp.resid;
671 res->read.length = qp->resp.resid;
672 res->read.rkey = qp->resp.rkey;
674 /* note res inherits the reference to mr from qp */
675 res->read.mr = qp->resp.mr;
676 qp->resp.mr = NULL;
678 qp->resp.res = res;
698 skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
709 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
718 err = rxe_xmit_packet(qp, &ack_pkt, skb);
731 qp->resp.res = NULL;
733 qp->resp.opcode = -1;
734 if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
735 qp->resp.psn = res->cur_psn;
757 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
762 if (qp_type(qp) == IB_QPT_UD ||
763 qp_type(qp) == IB_QPT_SMI ||
764 qp_type(qp) == IB_QPT_GSI) {
769 err = send_data_in(qp, &hdr, sizeof(hdr));
773 err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
777 err = write_data_in(qp, pkt);
782 qp->resp.msn++;
785 err = process_atomic(qp, pkt);
794 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
795 qp->resp.ack_psn = qp->resp.psn;
797 qp->resp.opcode = pkt->opcode;
798 qp->resp.status = IB_WC_SUCCESS;
802 qp->resp.msn++;
804 } else if (qp_type(qp) == IB_QPT_RC)
810 static enum resp_states do_complete(struct rxe_qp *qp,
816 struct rxe_recv_wqe *wqe = qp->resp.wqe;
817 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
824 if (qp->rcq->is_user) {
825 uwc->status = qp->resp.status;
826 uwc->qp_num = qp->ibqp.qp_num;
829 wc->status = qp->resp.status;
830 wc->qp = &qp->ibqp;
842 qp->resp.length : wqe->dma.length - wqe->dma.resid;
847 if (qp->rcq->is_user) {
860 uwc->qp_num = qp->ibqp.qp_num;
865 uwc->port_num = qp->attr.port_num;
902 wc->qp = &qp->ibqp;
907 wc->port_num = qp->attr.port_num;
912 if (!qp->srq)
913 advance_consumer(qp->rq.queue);
915 qp->resp.wqe = NULL;
917 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
920 if (qp->resp.state == QP_STATE_ERROR)
925 else if (qp_type(qp) == IB_QPT_RC)
931 static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
938 skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
945 err = rxe_xmit_packet(qp, &ack_pkt, skb);
953 static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
961 skb = prepare_ack_packet(qp, pkt, &ack_pkt,
969 res = &qp->resp.resources[qp->resp.res_head];
970 free_rd_atomic_resource(qp, res);
971 rxe_advance_resp_resource(qp);
984 rc = rxe_xmit_packet(qp, &ack_pkt, skb);
987 rxe_drop_ref(qp);
993 static enum resp_states acknowledge(struct rxe_qp *qp,
996 if (qp_type(qp) != IB_QPT_RC)
999 if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
1000 send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
1002 send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
1004 send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
1009 static enum resp_states cleanup(struct rxe_qp *qp,
1015 skb = skb_dequeue(&qp->req_pkts);
1016 rxe_drop_ref(qp);
1020 if (qp->resp.mr) {
1021 rxe_drop_ref(qp->resp.mr);
1022 qp->resp.mr = NULL;
1028 static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
1032 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
1033 struct resp_res *res = &qp->resp.resources[i];
1047 static enum resp_states duplicate_request(struct rxe_qp *qp,
1051 u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;
1057 send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
1063 res = find_resource(qp, pkt->psn);
1102 qp->resp.res = res;
1110 res = find_resource(qp, pkt->psn);
1114 rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
1131 static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
1134 qp->resp.aeth_syndrome = syndrome;
1135 qp->resp.status = status;
1138 qp->resp.goto_error = 1;
1141 static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
1144 if (qp->srq) {
1146 qp->resp.drop_msg = 1;
1147 if (qp->resp.wqe) {
1148 qp->resp.status = IB_WC_REM_INV_REQ_ERR;
1159 if (qp->resp.wqe) {
1160 qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
1161 qp->resp.wqe->dma.cur_sge = 0;
1162 qp->resp.wqe->dma.sge_offset = 0;
1163 qp->resp.opcode = -1;
1166 if (qp->resp.mr) {
1167 rxe_drop_ref(qp->resp.mr);
1168 qp->resp.mr = NULL;
1175 static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
1179 while ((skb = skb_dequeue(&qp->req_pkts))) {
1180 rxe_drop_ref(qp);
1187 while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
1188 advance_consumer(qp->rq.queue);
1193 struct rxe_qp *qp = (struct rxe_qp *)arg;
1194 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
1199 rxe_add_ref(qp);
1201 qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
1203 if (!qp->valid) {
1208 switch (qp->resp.state) {
1219 pr_debug("qp#%d state = %s\n", qp_num(qp),
1223 state = get_req(qp, &pkt);
1226 state = check_psn(qp, pkt);
1229 state = check_op_seq(qp, pkt);
1232 state = check_op_valid(qp, pkt);
1235 state = check_resource(qp, pkt);
1238 state = check_length(qp, pkt);
1241 state = check_rkey(qp, pkt);
1244 state = execute(qp, pkt);
1247 state = do_complete(qp, pkt);
1250 state = read_reply(qp, pkt);
1253 state = acknowledge(qp, pkt);
1256 state = cleanup(qp, pkt);
1259 state = duplicate_request(qp, pkt);
1263 send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
1273 do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
1279 state = do_class_d1e_error(qp);
1282 if (qp_type(qp) == IB_QPT_RC) {
1285 send_ack(qp, pkt, AETH_RNR_NAK |
1287 qp->attr.min_rnr_timer),
1291 qp->resp.drop_msg = 1;
1297 if (qp_type(qp) == IB_QPT_RC) {
1299 do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
1303 qp->resp.drop_msg = 1;
1304 if (qp->srq) {
1306 qp->resp.status = IB_WC_REM_ACCESS_ERR;
1316 if (qp_type(qp) == IB_QPT_RC) {
1318 do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
1321 } else if (qp->srq) {
1323 qp->resp.status = IB_WC_REM_INV_REQ_ERR;
1327 qp->resp.drop_msg = 1;
1334 do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
1345 if (qp->resp.goto_error) {
1353 if (qp->resp.goto_error) {
1361 rxe_drain_req_pkts(qp, false);
1362 qp->resp.wqe = NULL;
1366 qp->resp.goto_error = 0;
1367 pr_warn("qp#%d moved to error state\n", qp_num(qp));
1368 rxe_qp_error(qp);
1379 rxe_drop_ref(qp);
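Taken together, the matches trace the responder state machine in rxe_resp.c: rxe_responder() takes a reference on the QP, pulls packets from qp->req_pkts via get_req(), and steps through RESPST_* states (check_psn, check_op_seq, check_op_valid, check_resource, check_rkey, execute, do_complete, acknowledge, cleanup, plus the duplicate-request and error handlers) until the queue is drained or the QP drops to the error state. The sketch below shows only that dispatch shape; the states and handlers are simplified stand-ins, not the driver's RESPST_* set or its actual transitions.

#include <stdbool.h>
#include <stdio.h>

enum resp_state {
        ST_GET_REQ,
        ST_CHECK,
        ST_EXECUTE,
        ST_COMPLETE,
        ST_CLEANUP,
        ST_DONE,
        ST_ERROR,
};

struct fake_qp {
        bool valid;
        int pending;            /* packets waiting on the request queue */
};

/* Peek the request queue; nothing left means we are done. */
static enum resp_state get_req(struct fake_qp *qp)
{
        return qp->pending > 0 ? ST_CHECK : ST_DONE;
}

/* Collapsed stand-in for the PSN/opcode/resource/rkey checks. */
static enum resp_state check_pkt(struct fake_qp *qp)
{
        (void)qp;
        return ST_EXECUTE;
}

static enum resp_state execute_pkt(struct fake_qp *qp)
{
        (void)qp;
        return ST_COMPLETE;
}

static enum resp_state complete_pkt(struct fake_qp *qp)
{
        (void)qp;
        return ST_CLEANUP;
}

static enum resp_state cleanup_pkt(struct fake_qp *qp)
{
        qp->pending--;          /* the request has been consumed */
        return ST_GET_REQ;      /* loop back for the next packet */
}

static int responder(struct fake_qp *qp)
{
        enum resp_state state = ST_GET_REQ;

        if (!qp->valid)
                return -1;

        while (1) {
                switch (state) {
                case ST_GET_REQ:   state = get_req(qp);      break;
                case ST_CHECK:     state = check_pkt(qp);    break;
                case ST_EXECUTE:   state = execute_pkt(qp);  break;
                case ST_COMPLETE:  state = complete_pkt(qp); break;
                case ST_CLEANUP:   state = cleanup_pkt(qp);  break;
                case ST_ERROR:
                case ST_DONE:
                        return 0;
                }
        }
}

int main(void)
{
        struct fake_qp qp = { .valid = true, .pending = 2 };

        responder(&qp);
        printf("drained, %d packets pending\n", qp.pending);
        return 0;
}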