// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

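/*
 * Dispatch an asynchronous hardware event to the SRQ it targets. A
 * reference is taken under the xarray lock so the SRQ cannot be freed
 * while its event handler runs; dropping the last reference completes
 * srq->free, which free_srqc() waits on before tearing the SRQ down.
 */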
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

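/*
 * Translate a hns_roce event code into the corresponding ib_event and
 * deliver it to the consumer's event handler, if one is registered.
 */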
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce: unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

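/* Post a CREATE_SRQ mailbox command to write the SRQ context to hardware. */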
static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_CREATE_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

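/*
 * Post a DESTROY_SRQ mailbox command. Passing a mailbox lets the retired
 * context be read back; with a NULL mailbox the context is simply dropped.
 */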
static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
				   struct hns_roce_cmd_mailbox *mailbox,
				   unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

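/*
 * Allocate and program the SRQ context (SRQC): look up the DMA addresses
 * of the WQE and idx queue buffers, reserve an SRQ number, install the
 * entry in the SRQC table and the xarray used for event dispatch, then
 * hand the assembled context to hardware with a CREATE_SRQ command.
 */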
static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
		      u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
	u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
	dma_addr_t dma_handle_wqe = 0;
	dma_addr_t dma_handle_idx = 0;
	int ret;

	/* Get the physical address of srq buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
				ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	/* Get physical address of idx que buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
				ARRAY_SIZE(mtts_idx), &dma_handle_idx);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ number, ret = %d.\n", ret);
		return -ENOMEM;
	}

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
		goto err_out;
	}

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
		goto err_put;
	}

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR_OR_NULL(mailbox)) {
		ret = -ENOMEM;
		ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
		goto err_xa;
	}

	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
			       mtts_wqe, mtts_idx, dma_handle_wqe,
			       dma_handle_idx);

	ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
		goto err_xa;
	}

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);
	return ret;

err_xa:
	xa_erase(&srq_table->xa, srq->srqn);

err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
	return ret;
}

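/*
 * Tear down the SRQ context: destroy it in hardware, unpublish it from the
 * xarray so event dispatch can no longer find it, then wait for in-flight
 * event handlers to drop their references before releasing the SRQC table
 * entry and the SRQ number.
 */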
static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

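/*
 * Allocate the SRQ WQE buffer. The WQE stride is max_gs SGEs rounded up
 * to a power of two so that wqe_shift can index it.
 */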
static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
						      HNS_ROCE_SGE_SIZE *
						      srq->max_gs)));

	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->wqe_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
				  hr_dev->caps.srqwqe_ba_pg_sz +
				  HNS_HW_PAGE_SHIFT, udata, addr);
	if (err)
		ibdev_err(ibdev,
			  "failed to alloc SRQ buf mtr, ret = %d.\n", err);

	return err;
}

static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}

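/*
 * Allocate the index queue through which posted WQE indexes are handed to
 * hardware. Kernel consumers also get a bitmap for tracking free index
 * slots; userspace manages that state itself, so the bitmap is skipped
 * when udata is set.
 */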
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);

	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
					srq->idx_que.entry_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, addr);
	if (err) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ idx mtr, ret = %d.\n", err);
		return err;
	}

	if (!udata) {
		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
		if (!idx_que->bitmap) {
			ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
			err = -ENOMEM;
			goto err_idx_mtr;
		}
	}

	return 0;

err_idx_mtr:
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);

	return err;
}

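/* Release the idx queue bitmap (kernel SRQs only) and its buffer. */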
static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	bitmap_free(idx_que->bitmap);
	idx_que->bitmap = NULL;
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}

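/*
 * Kernel SRQs record the caller's 64-bit work request IDs so they can be
 * returned in completions; userspace SRQs keep this state in userspace.
 */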
static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	srq->head = 0;
	srq->tail = srq->wqe_cnt - 1;
	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	return 0;
}

static void free_srq_wrid(struct hns_roce_srq *srq)
{
	kvfree(srq->wrid);
	srq->wrid = NULL;
}

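/*
 * Create an SRQ: validate the requested limits, size the queues, allocate
 * the WQE buffer, the idx queue and (for kernel consumers) the wrid array,
 * then program the SRQ context. On success the SRQ number is reported back
 * to userspace in the response structure.
 */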
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_srq ucmd = {};
	int ret;
	u32 cqn;

	/* Validate the requested WQE and SGE counts against the device caps */
	if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->max_gs = init_attr->attr.max_sge;

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata,
					 min(udata->inlen, sizeof(ucmd)));
		if (ret) {
			ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ buffer, ret = %d.\n", ret);
		return ret;
	}

	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
		goto err_buf_alloc;
	}

	if (!udata) {
		ret = alloc_srq_wrid(hr_dev, srq);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc SRQ wrid, ret = %d.\n",
				  ret);
			goto err_idx_alloc;
		}
	}

	cqn = ib_srq_has_cq(init_attr->srq_type) ?
	      to_hr_cq(init_attr->ext.cq)->cqn : 0;
	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ context, ret = %d.\n", ret);
		goto err_wrid_alloc;
	}

	srq->event = hns_roce_ib_srq_event;
	resp.srqn = srq->srqn;

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_srqc_alloc;
	}

	return 0;

err_srqc_alloc:
	free_srqc(hr_dev, srq);
err_wrid_alloc:
	free_srq_wrid(srq);
err_idx_alloc:
	free_srq_idx(hr_dev, srq);
err_buf_alloc:
	free_srq_buf(hr_dev, srq);
	return ret;
}

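/* Destroy an SRQ, releasing its resources in reverse order of creation. */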
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	free_srqc(hr_dev, srq);
	free_srq_idx(hr_dev, srq);
	free_srq_wrid(srq);
	free_srq_buf(hr_dev, srq);
	return 0;
}

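/*
 * One-time setup of the per-device SRQ table: an xarray mapping SRQ
 * numbers to SRQs for event dispatch, plus a bitmap allocator for SRQ
 * numbers with the reserved range excluded.
 */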
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

	xa_init(&srq_table->xa);

	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
				    hr_dev->caps.num_srqs - 1,
				    hr_dev->caps.reserved_srqs, 0);
}

void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}