/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */
void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av);

int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);

void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		      struct rdma_ah_attr *attr);

void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);

void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);

/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_disable(struct rxe_cq *cq);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);

/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

/* rxe_mmap.c */
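/*
 * Kernel/user shared-mapping bookkeeping: queue buffers allocated in
 * the kernel are exported to userspace with mmap(). Presumably the
 * entry sits on a pending list (via pending_mmaps) until userspace
 * performs the matching mmap() call; 'obj' points back at the owning
 * queue object.
 */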
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
					   struct ib_udata *udata, void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
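/*
 * Direction argument for rxe_mem_copy() and copy_data(): to_mem_obj
 * copies bytes into registered memory (e.g. placing received payload),
 * from_mem_obj copies bytes out of it (e.g. filling a request packet).
 */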
enum copy_direction {
	to_mem_obj,
	from_mem_obj,
};

void rxe_mem_init_dma(struct rxe_pd *pd,
		      int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mr);

int rxe_mem_init_fast(struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);

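/*
 * Copy 'length' bytes between 'addr' and the memory object at 'iova'
 * in the direction given by 'dir'; when 'crcp' is non-NULL the running
 * ICRC is presumably accumulated over the copied bytes as well.
 */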
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

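/*
 * Key type for lookup_mem(): lookup_local resolves 'key' as an lkey,
 * lookup_remote as an rkey, with 'access' checked against the MR's
 * access flags.
 */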
enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

/* rxe_net.c */
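/*
 * Transmit-path helpers: rxe_init_packet() allocates an skb sized for
 * the headers plus 'paylen', rxe_prepare() presumably fills in the
 * UDP/IP encapsulation (accumulating the header ICRC in '*crc'), and
 * rxe_send()/rxe_loopback() hand the finished skb to the network
 * stack or straight back to the receive path.
 */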
void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd, struct ib_udata *udata);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

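/*
 * RC and UC QPs use the negotiated path MTU; every other QP type is
 * treated as having the maximum 4096-byte MTU.
 */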
static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return IB_MTU_4096;
}

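/*
 * A receive WQE is the fixed header followed by an inline array of
 * max_sge scatter/gather entries.
 */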
static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

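/*
 * Responder resources for RDMA read/atomic replies form a circular
 * buffer of max_dest_rd_atomic entries; bump the head index and wrap
 * it back to zero at the end.
 */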
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);

/* rxe_srq.c */
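/*
 * Attribute mask passed to rxe_srq_chk_attr() at SRQ creation: every
 * bit except IB_SRQ_LIMIT, presumably so the limit can only be set by
 * a later modify.
 */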
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init, struct ib_udata *udata,
		      struct rxe_create_srq_resp __user *uresp);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);

void rxe_dealloc(struct ib_device *ib_dev);

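/*
 * Work functions for the three per-QP tasks (requester, completer,
 * responder), run via rxe_run_task(); a nonzero return presumably
 * tells the task machinery there is no more work pending.
 */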
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

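/*
 * Validity mask for a work request opcode on this QP type, taken from
 * the rxe_wr_opcode_info table; a zero mask means the opcode is not
 * supported on this type of QP.
 */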
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

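/*
 * Transmit one packet: packets for a QP that is not ready are dropped
 * (successfully), loopback packets are fed straight back into the
 * receive path, and everything else goes out through rxe_send(). For
 * non-RC QPs the completer task is kicked on the last packet of a
 * request, since no ACK will arrive to trigger it.
 */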
static inline int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
				  struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	if (pkt->mask & RXE_LOOPBACK_MASK) {
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		rxe_loopback(skb);
		err = 0;
	} else {
		err = rxe_send(pkt, skb);
	}

	if (err) {
		rxe->xmit_errors++;
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}

#endif /* RXE_LOC_H */