// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

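/* Check the requested capacities against the device limits advertised in
 * rxe->attr; returns 0 if they all fit, -EINVAL otherwise.
 */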
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

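/* The responder keeps an array of 'n' resources, indexed as a ring by
 * res_head/res_tail, which it uses when responding to RDMA read and
 * atomic requests.
 */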
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

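/* Release whatever a single resource holds: a cached response skb for an
 * atomic resource, or an MR reference for a read resource.
 */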
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

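	/* SMI and GSI QPs must appear as the well-known QP numbers 0 and 1;
	 * the allocated index is recorded per port so rxe_qp_chk_init() can
	 * reject a duplicate special QP on the same port.
	 */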
	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->req.task.state_lock);
	spin_lock_init(&qp->resp.task.state_lock);
	spin_lock_init(&qp->comp.task.state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->req_pkts);
	skb_queue_head_init(&qp->resp_pkts);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32(qp_num(qp), 14) & 0x3fff);
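	/* Illustrative note: RXE_ROCE_V2_SPORT is 0xc000 and the hash is
	 * masked to 14 bits, so the result always lands in the dynamic
	 * port range 0xc000 - 0xffff.
	 */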
	qp->sq.max_wr		= init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);
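	/* Worked example (illustrative only): with max_send_sge = 4 and
	 * max_inline_data = 128, the shared data area is max(4 * 16, 128) =
	 * 128 bytes, so max_send_sge is raised to 8 and max_inline_data
	 * stays 128; both views share the same per-WQE buffer.
	 */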

	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	rxe_init_task(&qp->req.task, qp, rxe_requester);
	rxe_init_task(&qp->comp.task, qp, rxe_completer);

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);
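		/* rcv_wqe_size() accounts for the recv WQE header plus room
		 * for max_sge scatter/gather entries.
		 */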

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	rxe_init_task(&qp->resp.task, qp, rxe_responder);

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
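/* Takes a reference on the pd, both cqs and the optional srq so that they
 * cannot be destroyed while the QP exists; the error paths below drop the
 * same references again.
 */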
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
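/* The completer task moves req.state from QP_STATE_DRAIN to
 * QP_STATE_DRAINED once all outstanding send work has completed
 * (see rxe_comp.c), so this routine only kicks the tasks.
 */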
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
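			/* e.g. attr->timeout = 14 gives 4096ns << 14,
			 * i.e. roughly 67 ms per local ACK timeout
			 */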
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	if (qp->req.task.func)
		__rxe_do_task(&qp->req.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		if (qp_type(qp) == IB_QPT_RC)
			sk_dst_reset(qp->sk->sk);

		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

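	/* execute_in_process_context() runs rxe_qp_do_cleanup() directly if
	 * we are already in process context, otherwise it schedules
	 * cleanup_work to run it from a workqueue.
	 */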
	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}