1/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 *    notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 *    notice, this list of conditions and the following disclaimer in
21 *    the documentation and/or other materials provided with the
22 *    distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter (header)
37 */
38
39#ifndef __BNXT_RE_IB_VERBS_H__
40#define __BNXT_RE_IB_VERBS_H__
41
/* Per-GID context: HW table slot plus a reference count.
 * NOTE(review): presumably lets repeated adds of the same GID share one
 * table entry - confirm against the add_gid/del_gid implementations.
 */
struct bnxt_re_gid_ctx {
	u32			idx;	/* index into the HW GID table */
	u32			refcnt;	/* number of outstanding adds of this GID */
};
46
/* Size of the DMA-able scratch buffer backing the fence MR */
#define BNXT_RE_FENCE_BYTES	64
/* Per-PD fence state: a small buffer with an MR and MW over it, plus a
 * prebuilt bind WQE and the rkey it produces.
 * NOTE(review): looks like the fence-WQE workaround used by post_send -
 * verify against the .c file.
 */
struct bnxt_re_fence_data {
	u32 size;				/* registered length of @va */
	u8 va[BNXT_RE_FENCE_BYTES];		/* CPU-visible fence buffer */
	dma_addr_t dma_addr;			/* DMA address of @va */
	struct bnxt_re_mr *mr;			/* MR registered over the buffer */
	struct ib_mw *mw;			/* MW bound to the MR */
	struct bnxt_qplib_swqe bind_wqe;	/* prebuilt MW-bind work request */
	u32 bind_rkey;				/* rkey resulting from the bind */
};
57
/* Protection domain: embeds the uverbs object and wraps the qplib PD */
struct bnxt_re_pd {
	struct ib_pd            ib_pd;		/* must embed for container_of() */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_pd	qplib_pd;	/* low-level (qplib) PD handle */
	struct bnxt_re_fence_data fence;	/* per-PD fence resources */
};
64
/* Address handle: embeds the uverbs object and wraps the qplib AH */
struct bnxt_re_ah {
	struct ib_ah		ib_ah;		/* must embed for container_of() */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_ah	qplib_ah;	/* low-level (qplib) AH handle */
};
70
/* Shared receive queue */
struct bnxt_re_srq {
	struct ib_srq		ib_srq;		/* must embed for container_of() */
	struct bnxt_re_dev	*rdev;		/* owning device */
	u32			srq_limit;	/* armed low-watermark limit */
	struct bnxt_qplib_srq	qplib_srq;	/* low-level (qplib) SRQ */
	struct ib_umem		*umem;		/* user memory backing the queue,
						 * NULL for kernel SRQs presumably
						 */
	spinlock_t		lock;		/* protect srq */
};
79
/* Queue pair */
struct bnxt_re_qp {
	struct list_head	list;		/* linkage on a device-wide QP list -
						 * TODO confirm which list in the .c
						 */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_qp		ib_qp;		/* must embed for container_of() */
	spinlock_t		sq_lock;	/* protect sq */
	spinlock_t		rq_lock;	/* protect rq */
	struct bnxt_qplib_qp	qplib_qp;	/* low-level (qplib) QP */
	struct ib_umem		*sumem;		/* user memory for the SQ */
	struct ib_umem		*rumem;		/* user memory for the RQ */
	/* QP1 */
	u32			send_psn;	/* next PSN for QP1 sends */
	struct ib_ud_header	qp1_hdr;	/* scratch UD header for QP1 */
	struct bnxt_re_cq	*scq;		/* send completion queue */
	struct bnxt_re_cq	*rcq;		/* receive completion queue */
};
95
/* Completion queue */
struct bnxt_re_cq {
	struct ib_cq		ib_cq;		/* must embed for container_of() */
	struct bnxt_re_dev	*rdev;		/* owning device */
	spinlock_t              cq_lock;	/* protect cq */
	u16			cq_count;	/* coalescing: completions per event */
	u16			cq_period;	/* coalescing: moderation period */
	struct bnxt_qplib_cq	qplib_cq;	/* low-level (qplib) CQ */
	struct bnxt_qplib_cqe	*cql;		/* scratch CQE array for polling */
/* upper bound on CQEs fetched in one poll pass */
#define MAX_CQL_PER_POLL	1024
	u32			max_cql;	/* allocated length of @cql */
	struct ib_umem		*umem;		/* user memory backing the queue */
};
108
/* Memory region */
struct bnxt_re_mr {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_mr		ib_mr;		/* must embed for container_of() */
	struct ib_umem		*ib_umem;	/* pinned user memory (user MRs) */
	struct bnxt_qplib_mrw	qplib_mr;	/* low-level (qplib) MR/MW */
	u32			npages;		/* entries filled in @pages */
	u64			*pages;		/* page address list for map_mr_sg */
	struct bnxt_qplib_frpl	qplib_frpl;	/* fast-register page list (qplib) */
};
118
/* Fast-register page list */
struct bnxt_re_frpl {
	struct bnxt_re_dev		*rdev;		/* owning device */
	struct bnxt_qplib_frpl		qplib_frpl;	/* low-level (qplib) FRPL */
	u64				*page_list;	/* page addresses to register */
};
124
/* Memory window */
struct bnxt_re_mw {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_mw		ib_mw;		/* must embed for container_of() */
	struct bnxt_qplib_mrw	qplib_mw;	/* low-level (qplib) MR/MW */
};
130
/* Per-process (userspace) context */
struct bnxt_re_ucontext {
	struct ib_ucontext      ib_uctx;	/* must embed for container_of() */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_dpi	dpi;		/* doorbell page allocated for user */
	void			*shpg;		/* page shared with userspace -
						 * presumably mmap'ed; see bnxt_re_mmap
						 */
	spinlock_t		sh_lock;	/* protect shpg */
};
138
139static inline u16 bnxt_re_get_swqe_size(int nsge)
140{
141	return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
142}
143
144static inline u16 bnxt_re_get_rwqe_size(int nsge)
145{
146	return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
147}
148
/* Verbs entry points implemented by this driver.
 * Unless noted otherwise, each returns 0 on success or a negative errno.
 */

/* Device, port, pkey and GID queries */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable);
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey);
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num);

/* Protection domains */
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

/* Address handles */
int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);

/* Shared receive queues */
int bnxt_re_create_srq(struct ib_srq *srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
			  const struct ib_recv_wr **bad_recv_wr);

/* Queue pairs and work-request posting */
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
		      const struct ib_send_wr **bad_send_wr);
int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
		      const struct ib_recv_wr **bad_recv_wr);

/* Completion queues */
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);

/* Memory regions and windows */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata);

/* User context lifecycle and mmap */
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* Lock both of a QP's CQs (returns saved IRQ flags); unlock with the pair */
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
218#endif /* __BNXT_RE_IB_VERBS_H__ */
219