// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

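/* Return the cached port attributes. The active speed and width are
 * refreshed from the underlying Ethernet device and phys_state is
 * derived from the port state and the netdev IFF_UP flag.
 */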
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(uctx->device);
	struct rxe_ucontext *uc = to_ruc(uctx);

	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_create_ah(struct ib_ah *ibah,
			 struct rdma_ah_init_attr *init_attr,
			 struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
	if (err)
		return err;

	err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
	if (err)
		return err;

	rxe_init_av(init_attr->ah_attr, &ah->av);
	return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah);
	return 0;
}

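/* Copy one receive work request into the next free slot of a receive
 * queue. Fails with -ENOMEM if the queue is full and -EINVAL if the WR
 * carries more SGEs than the queue supports. smp_wmb() orders the WQE
 * writes against the producer index update.
 */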
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length		= length;
	recv_wqe->dma.resid		= length;
	recv_wqe->dma.num_sge		= num_sge;
	recv_wqe->dma.cur_sge		= 0;
	recv_wqe->dma.sge_offset	= 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

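/* Create a QP in the qp pool. A userspace caller must provide room for
 * the create response and may not pass any command payload; such QPs
 * are marked is_user so post_send only kicks the requester task rather
 * than building kernel WQEs.
 */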
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

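/* Basic sanity checks on a send WR: the SGE count against the SQ
 * limit, atomic payloads must be at least 8 bytes and the remote
 * address 8-byte aligned, and inline data must fit within max_inline.
 */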
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

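/* Translate an ib_send_wr into the driver's rxe_send_wr. UD-style QPs
 * (UD/SMI/GSI) take the remote QPN/Q_Key from the UD WR; connected QPs
 * copy the opcode-specific RDMA, atomic, invalidate or MR fields.
 */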
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			fallthrough;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey	= rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

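/* Fill a send WQE from the WR. For UD-style QPs the address vector is
 * copied from the AH. Inline data is copied into the WQE itself, MR
 * registration WRs carry no DMA state, and everything else copies the
 * SGE list and records the remote iova for RDMA/atomic opcodes.
 */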
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask		= mask;
	wqe->dma.length		= length;
	wqe->dma.resid		= length;
	wqe->dma.num_sge	= num_sge;
	wqe->dma.cur_sge	= 0;
	wqe->dma.sge_offset	= 0;
	wqe->state		= wqe_state_posted;
	wqe->ssn		= atomic_add_return(1, &qp->ssn);

	return 0;
}

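/* Validate a send WR and, under the SQ lock, build its WQE in the next
 * producer slot. The barrier makes the WQE visible before the producer
 * index is advanced.
 */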
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

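/* Post a chain of send WRs for a kernel QP, stopping at the first bad
 * WR, then kick the requester task. If the QP has moved to the error
 * state the completer task is scheduled as well so posted WQEs can be
 * flushed.
 */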
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	struct ib_send_wr *next;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		next = wr->next;

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

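/* Verbs post_send entry point. For user QPs the WQEs were already
 * queued from user space, so only the requester task needs to run;
 * kernel QPs go through rxe_post_send_kernel().
 */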
static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

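/* Verbs post_recv entry point. Rejects QPs that are not at least in
 * the INIT state or that use an SRQ, then posts each WR under the
 * receive queue producer lock.
 */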
static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata)
{
	int err;
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return -EINVAL;

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		return err;

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err)
		return err;

	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
}

static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

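/* Copy up to num_entries completions from the CQ ring into the
 * caller's work completion array, returning the number copied.
 */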
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

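/* Arm the CQ. A solicited-only request does not downgrade a pending
 * next-completion request. Returns 1 if IB_CQ_REPORT_MISSED_EVENTS was
 * set and completions are already queued.
 */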
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(mr);
	rxe_add_ref(pd);
	rxe_mem_init_dma(pd, access, mr);

	return &mr->ibmr;
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr_pd(mr));
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

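/* ib_sg_to_pages() callback: record one page address and size in the
 * MR's map table, failing once all preallocated buffers are used.
 */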
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

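/* Load a scatterlist into a fast-registration MR and cache the iova,
 * length and page geometry used later for address translation.
 */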
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};

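/* Fill in the ib_device fields, bind the device to the underlying
 * netdev, allocate the crc32 shash used for ICRC computation and
 * register the device with the RDMA core under the requested name.
 */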
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);

	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	err = ib_register_device(dev, ibdev_name, NULL);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}