/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

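/* Return the id of the CQ bank with the fewest CQNs currently in use. */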
static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
	u32 least_load = bank[0].inuse;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

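/*
 * Allocate a CQN: pick the least-loaded bank and get an index from its IDA.
 * The bank id is encoded in the lower CQ_BANKID_SHIFT bits of the CQN, so
 * allocations are spread evenly across the banks.
 */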
static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;
	u8 bankid;
	int id;

	mutex_lock(&cq_table->bank_mutex);
	bankid = get_least_load_bankid_for_cq(cq_table->bank);
	bank = &cq_table->bank[bankid];

	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
	if (id < 0) {
		mutex_unlock(&cq_table->bank_mutex);
		return id;
	}

	/* the lower 2 bits are the bankid */
	hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
	bank->inuse++;
	mutex_unlock(&cq_table->bank_mutex);

	return 0;
}

static inline u8 get_cq_bankid(unsigned long cqn)
{
	/* The lower 2 bits of CQN are used to hash to different banks */
	return (u8)(cqn & GENMASK(1, 0));
}

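/* Return a CQN's index to the owning bank's IDA and drop the bank's usage count. */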
static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;

	bank = &cq_table->bank[get_cq_bankid(cqn)];

	ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);

	mutex_lock(&cq_table->bank_mutex);
	bank->inuse--;
	mutex_unlock(&cq_table->bank_mutex);
}

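/*
 * Write the CQ context (CQC) into a command mailbox and post a CREATE_CQC
 * mailbox command so the hardware installs the context for this CQN.
 */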
static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cq *hr_cq,
			       u64 *mtts, dma_addr_t dma_handle)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n");
		return PTR_ERR(mailbox);
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
				     hr_cq->cqn);
	if (ret)
		ibdev_err(ibdev,
			  "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
			  hr_cq->cqn, ret);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

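/*
 * Set up the hardware context of a CQ: look up the MTT entries describing
 * the CQE buffer, reserve a CQC entry in the HEM table, publish the CQ in
 * the cq_table xarray, then ask the hardware to create the CQC. A zero
 * return from hns_roce_mtr_find() means no MTT entry was found and is
 * treated as a failure here.
 */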
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 mtts[MTT_MIN_COUNT] = {};
	dma_addr_t dma_handle;
	int ret;

	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
				&dma_handle);
	if (!ret) {
		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
		return -EINVAL;
	}

	/* Get CQC memory HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
			  hr_cq->cqn, ret);
		return ret;
	}

	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
		goto err_put;
	}

	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
	if (ret)
		goto err_xa;

	return 0;

err_xa:
	xa_erase(&cq_table->array, hr_cq->cqn);
err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

	return ret;
}

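/*
 * Destroy the hardware CQ context, unpublish the CQ from the xarray and wait
 * for the IRQ and any event-handling references before releasing the CQC HEM
 * entry.
 */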
static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
				      hr_cq->cqn);
	if (ret)
		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	xa_erase(&cq_table->array, hr_cq->cqn);

	/* Wait until any interrupt handling in progress has finished */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* wait until all references taken by event handling are dropped */
	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
}

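/*
 * Allocate the CQE buffer and its address translation (MTR); for a userspace
 * CQ the buffer is mapped from the user address passed in @addr.
 */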
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				  hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (ret)
		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);

	return ret;
}

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}

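/*
 * Set up the CQ doorbell. For a userspace CQ, map the user record doorbell
 * when the device supports it and the response buffer is large enough to
 * report the capability; for a kernel CQ, allocate a record doorbell if
 * supported and compute the doorbell register address.
 */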
static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata, unsigned long addr,
		       struct hns_roce_ib_create_cq_resp *resp)
{
	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
	struct hns_roce_ucontext *uctx;
	int err;

	if (udata) {
		if (has_db &&
		    udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
			uctx = rdma_udata_to_drv_context(udata,
					struct hns_roce_ucontext, ibucontext);
			err = hns_roce_db_map_user(uctx, addr, &hr_cq->db);
			if (err)
				return err;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
	} else {
		if (has_db) {
			err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
			if (err)
				return err;
			hr_cq->set_ci_db = hr_cq->db.db_record;
			*hr_cq->set_ci_db = 0;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
		hr_cq->db_reg = hr_dev->reg_base + hr_dev->odb_offset +
				DB_REG_OFFSET * hr_dev->priv_uar.index;
	}

	return 0;
}

static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx;

	if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
		return;

	hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
	if (udata) {
		uctx = rdma_udata_to_drv_context(udata,
						 struct hns_roce_ucontext,
						 ibucontext);
		hns_roce_db_unmap_user(uctx, &hr_cq->db);
	} else {
		hns_roce_free_db(hr_dev, &hr_cq->db);
	}
}

static int verify_cq_create_attr(struct hns_roce_dev *hr_dev,
				 const struct ib_cq_init_attr *attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;

	if (!attr->cqe || attr->cqe > hr_dev->caps.max_cqes) {
		ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
			  attr->cqe, hr_dev->caps.max_cqes);
		return -EINVAL;
	}

	if (attr->comp_vector >= hr_dev->caps.num_comp_vectors) {
		ibdev_err(ibdev, "failed to check CQ vector = %u, max = %d.\n",
			  attr->comp_vector, hr_dev->caps.num_comp_vectors);
		return -EINVAL;
	}

	return 0;
}

static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
		       struct hns_roce_ib_create_cq *ucmd)
{
	struct ib_device *ibdev = hr_cq->ib_cq.device;
	int ret;

	ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
	if (ret) {
		ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n", ret);
		return ret;
	}

	return 0;
}

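/*
 * Clamp the requested CQE count to the device minimum and round it up to a
 * power of two, then initialize the CQ's depth, vector, lock and QP lists.
 */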
static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
			 struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
	cq_entries = roundup_pow_of_two(cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
	hr_cq->cq_depth = cq_entries;
	hr_cq->vector = vector;

	spin_lock_init(&hr_cq->lock);
	INIT_LIST_HEAD(&hr_cq->sq_list);
	INIT_LIST_HEAD(&hr_cq->rq_list);
}

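/*
 * Pick the CQE size: kernel CQs use the device default, userspace may request
 * a supported size (V2 or V3) via the command buffer, and userspace that does
 * not pass cqe_size falls back to the V2 CQE size.
 */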
static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
			struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	if (!udata) {
		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
		return 0;
	}

	if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
		if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
		    ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid cqe size %u.\n", ucmd->cqe_size);
			return -EINVAL;
		}

		hr_cq->cqe_size = ucmd->cqe_size;
	} else {
		hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
	}

	return 0;
}

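/*
 * Verbs create_cq hook: verify the attributes, then allocate the CQE buffer,
 * doorbell, CQN and hardware context, and report the CQN and capability flags
 * back to userspace when a user command buffer is present.
 */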
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_cq ucmd = {};
	int ret;

	if (attr->flags)
		return -EOPNOTSUPP;

	ret = verify_cq_create_attr(hr_dev, attr);
	if (ret)
		return ret;

	if (udata) {
		ret = get_cq_ucmd(hr_cq, udata, &ucmd);
		if (ret)
			return ret;
	}

	set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);

	ret = set_cqe_size(hr_cq, udata, &ucmd);
	if (ret)
		return ret;

	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
		return ret;
	}

	ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
		goto err_cq_buf;
	}

	ret = alloc_cqn(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
		goto err_cq_db;
	}

	ret = alloc_cqc(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc CQ context, ret = %d.\n", ret);
		goto err_cqn;
	}

	if (udata) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_cqc;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;
	refcount_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_cqc:
	free_cqc(hr_dev, hr_cq);
err_cqn:
	free_cqn(hr_dev, hr_cq->cqn);
err_cq_db:
	free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
	free_cq_buf(hr_dev, hr_cq);
	return ret;
}

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	free_cqc(hr_dev, hr_cq);
	free_cqn(hr_dev, hr_cq->cqn);
	free_cq_db(hr_dev, hr_cq, udata);
	free_cq_buf(hr_dev, hr_cq);

	return 0;
}

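/*
 * Handle a completion event for @cqn: look up the CQ, bump its arm sequence
 * number and invoke the consumer's completion handler.
 */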
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct hns_roce_cq *hr_cq;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n",
			 cqn);
		return;
	}

	++hr_cq->arm_sn;
	ibcq = &hr_cq->ib_cq;
	if (ibcq->comp_handler)
		ibcq->comp_handler(ibcq, ibcq->cq_context);
}

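/*
 * Handle an asynchronous error event for @cqn: validate the event type and
 * deliver an IB_EVENT_CQ_ERR event to the consumer while holding a reference
 * on the CQ so it cannot be freed underneath the handler.
 */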
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *hr_cq;
	struct ib_event event;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
		return;
	}

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n",
			event_type, cqn);
		return;
	}

	refcount_inc(&hr_cq->refcount);

	ibcq = &hr_cq->ib_cq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.element.cq = ibcq;
		event.event = IB_EVENT_CQ_ERR;
		ibcq->event_handler(&event, ibcq->cq_context);
	}

	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}

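/*
 * Initialize the CQ table: reserved CQNs are marked in use and raise each
 * bank's minimum allocatable index, and every bank covers an equal share of
 * the total CQN space.
 */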
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	mutex_init(&cq_table->bank_mutex);
	xa_init(&cq_table->array);

	reserved_from_bot = hr_dev->caps.reserved_cqs;

	for (i = 0; i < reserved_from_bot; i++) {
		cq_table->bank[get_cq_bankid(i)].inuse++;
		cq_table->bank[get_cq_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		ida_init(&cq_table->bank[i].ida);
		cq_table->bank[i].max = hr_dev->caps.num_cqs /
					HNS_ROCE_CQ_BANK_NUM - 1;
	}
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
		ida_destroy(&hr_dev->cq_table.bank[i].ida);
}