// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE]				= "NONE",
	[RESPST_GET_REQ]			= "GET_REQ",
	[RESPST_CHK_PSN]			= "CHK_PSN",
	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
	[RESPST_CHK_RKEY]			= "CHK_RKEY",
	[RESPST_EXECUTE]			= "EXECUTE",
	[RESPST_READ_REPLY]			= "READ_REPLY",
	[RESPST_COMPLETE]			= "COMPLETE",
	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
	[RESPST_CLEANUP]			= "CLEANUP",
	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR]			= "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
	[RESPST_ERROR]				= "ERROR",
	[RESPST_RESET]				= "RESET",
	[RESPST_DONE]				= "DONE",
	[RESPST_EXIT]				= "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

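/* Fetch the next request packet for the responder. In the error state all
 * queued packets are dropped and we fall through to drain the recv wr
 * queue; otherwise the packet is only peeked (not dequeued) so that
 * cleanup() can free it later.
 */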
static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		while ((skb = skb_dequeue(&qp->req_pkts))) {
			rxe_drop_ref(qp);
			kfree_skb(skb);
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

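/* Validate the PSN of the incoming request against the expected PSN.
 * For RC, a PSN ahead of the expected one triggers an out-of-sequence
 * NAK (sent only once) and a PSN behind it is handled as a duplicate.
 * For UC, any gap causes the current message to be dropped until the
 * start of a new message is seen.
 */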
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

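/* Verify that the opcode of the incoming packet is a legal successor of
 * the previously received opcode, e.g. that a SEND_MIDDLE follows a
 * SEND_FIRST. Violations map to the missing-first/missing-last error
 * states for RC and to a dropped message for UC.
 */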
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

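/* Check that the operation carried by the packet is permitted by the QP's
 * access flags. RC violations are reported as unsupported opcodes; UC
 * violations silently drop the message.
 */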
static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

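/* Take the next recv wqe from the SRQ, copy it into the QP so that it
 * remains valid after the consumer index advances, and raise the SRQ
 * limit event if the queue has dropped below the armed limit.
 */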
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note that kernel and user space recv wqes have the same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

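/* Make sure the resources needed to execute the request are available:
 * responder resources must be configured for read/atomic requests, and a
 * recv wqe must be available (from the RQ or SRQ) for operations that
 * consume one. In the error state pending recv wqes are flushed instead.
 */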
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requester's job not to send
		 * too many read/atomic ops; we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

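/* No length validation is currently performed here; every QP type falls
 * straight through to the rkey check.
 */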
static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

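/* For RDMA read/write and atomic requests, look up the memory region
 * referenced by the rkey, verify the access rights and address range,
 * and for writes check that the payload length and pad are consistent
 * with the resid and MTU. On success the MR reference is stashed in
 * qp->resp.mr for the execute step.
 */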
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
			qp->resp.length = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va	= qp->resp.va;
	rkey	= qp->resp.rkey;
	resid	= qp->resp.resid;
	pktlen	= payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK)	 {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not strictly be a length
				 * error, but no other error state fits
				 * better.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err:
	if (mem)
		rxe_drop_ref(mem);
	return state;
}

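/* Copy received payload into the memory described by the current recv
 * wqe's sge list.
 */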
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;

	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

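/* Copy the payload of an RDMA write packet into the target MR and advance
 * the responder's virtual address and residual count.
 */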
static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int	err;
	int data_len = payload_size(pkt);

	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
			   data_len, to_mem_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

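/* Perform the compare & swap or fetch & add on the target memory under
 * atomic_ops_lock, saving the original value in qp->resp.atomic_orig for
 * the atomic acknowledge.
 */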
static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mem *mr = qp->resp.mr;

	if (mr->state != RXE_MEM_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check that vaddr is 8-byte aligned. */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}

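/* Build an ack or read-response packet. The headers are copied from the
 * request packet and then the opcode, PSN and (when present) the AETH and
 * ATMACK fields are filled in. If crcp is non-NULL the partial ICRC is
 * handed back to the caller instead of being written into the packet.
 */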
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process that request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type		= RXE_READ_MASK;
		res->replay		= 0;

		res->read.va		= qp->resp.va;
		res->read.va_org	= qp->resp.va;

		res->first_psn		= req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn	= (req_pkt->psn +
					   (reth_len(req_pkt) + mtu - 1) /
					   mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn	= res->first_psn;
		}
		res->cur_psn		= req_pkt->psn;

		res->read.resid		= qp->resp.resid;
		res->read.length	= qp->resp.resid;
		res->read.rkey		= qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr		= qp->resp.mr;
		qp->resp.mr		= NULL;

		qp->resp.res		= res;
		res->state		= rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	if (bth_pad(&ack_pkt)) {
		struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
		icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
	}
	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

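/* Reconstruct the network header (IPv4 header or GRH) that UD consumers
 * expect to find ahead of the payload, using the IP header of the
 * received skb.
 */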
static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
				   struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	memset(hdr, 0, sizeof(*hdr));
	if (skb->protocol == htons(ETH_P_IP))
		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;

			build_rdma_network_hdr(&hdr, pkt);

			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK) {
		/* We successfully processed this new request. */
		qp->resp.msn++;
		return RESPST_COMPLETE;
	} else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

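/* Generate a receive completion for the current recv wqe, filling either
 * the kernel or the user space wc layout, and post it to the receive CQ.
 */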
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (unlikely(!wqe))
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status             = qp->resp.status;
		uwc->qp_num             = qp->ibqp.qp_num;
		uwc->wr_id              = wqe->wr_id;
	} else {
		wc->status              = qp->resp.status;
		wc->qp                  = &qp->ibqp;
		wc->wr_id               = wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length : wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num		= qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num		= qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (is_vlan_dev(skb->dev)) {
				wc->wc_flags |= IB_WC_WITH_VLAN;
				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
			}

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
				rxe_drop_ref(rmr);
			}

			wc->qp			= &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num		= qp->attr.port_num;
		}
	}

	/* we hold a copy of the wqe for srq and a reference into the rq
	 * for !srq
	 */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

	if (qp->resp.state == QP_STATE_ERROR)
		return RESPST_CHK_RESOURCE;

	if (!pkt)
		return RESPST_DONE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

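/* Build and send a stand-alone RC acknowledge (or NAK, depending on the
 * syndrome) for the given PSN.
 */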
static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}

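/* Send an atomic acknowledge carrying the original value and keep a
 * reference to the skb in a responder resource so that it can be resent
 * if the request is duplicated.
 */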
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
	memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
	       sizeof(skb->cb) - sizeof(ack_pkt));

	skb_get(skb);
	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn  = ack_pkt.psn;
	res->cur_psn   = ack_pkt.psn;

	rc = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
	}
out:
	return rc;
}

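/* Decide which kind of acknowledge, if any, has to be sent for the packet
 * just processed: a NAK when an error syndrome is pending, an atomic ack
 * for atomic operations, or a plain ack when the request asked for one.
 * Only RC QPs generate acknowledges.
 */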
static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

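/* Dequeue and free the request packet, dropping the QP reference held by
 * the queued packet, and release any MR reference left over from
 * check_rkey().
 */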
static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

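/* Look up the responder resource whose PSN range covers the given PSN.
 * Used when replaying duplicate read or atomic requests.
 */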
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

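/* Handle a request whose PSN is behind the expected one: re-ack duplicate
 * sends/writes, replay the read response from the saved resource when the
 * duplicate is a subset of the original request, and resend the cached
 * atomic acknowledge.
 */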
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		if (bth_ack(pkt))
			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error.  Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			skb_get(res->atomic.skb);
			/* Resend the result. */
			rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome	= syndrome;
	qp->resp.status		= status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error	= 1;
}

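/* Handle a class D1 or E error on a UC QP: drop the current message and,
 * depending on whether an SRQ is used, either complete the wqe in error
 * or reset it for reuse.
 */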
static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored; reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

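/* Free all queued request packets, dropping the QP reference held for
 * each, and, when notify is false, also consume any remaining recv wqes.
 */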
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (notify)
		return;

	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
		advance_consumer(qp->rq.queue);
}

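/* Entry point of the responder task. Runs the response state machine,
 * starting from RESET or GET_REQ depending on the QP state, until a DONE
 * or EXIT state is reached.
 */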
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_drop_ref(qp);
	return ret;
}