/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"

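/*
 * Workqueue on which a CQ's completion handler (send_complete()) is run;
 * see rvt_cq_enter() and rvt_driver_cq_init().
 */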
static struct workqueue_struct *comp_vector_wq;

/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 *
 * Return: true on success, false if the cq is full.
 */
bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
	struct ib_uverbs_wc *uqueue = NULL;
	struct ib_wc *kqueue = NULL;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	unsigned long flags;
	u32 head;
	u32 next;
	u32 tail;

	spin_lock_irqsave(&cq->lock, flags);

	if (cq->ip) {
		u_wc = cq->queue;
		uqueue = &u_wc->uqueue[0];
		head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
	} else {
		k_wc = cq->kqueue;
		kqueue = &k_wc->kqueue[0];
		head = k_wc->head;
		tail = k_wc->tail;
	}

	/*
	 * Note that the head pointer might be writable by
	 * user processes. Take care to verify it is a sane value.
	 */
	if (head >= (unsigned)cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}

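	/*
	 * The ring is full when advancing head would catch up with tail,
	 * or when a previous overflow already marked the CQ as full.
	 */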
	if (unlikely(next == tail || cq->cq_full)) {
		struct rvt_dev_info *rdi = cq->rdi;

		if (!cq->cq_full)
			rvt_pr_err_ratelimited(rdi, "CQ is full!\n");
		cq->cq_full = true;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return false;
	}
	trace_rvt_cq_enter(cq, entry, head);
	if (uqueue) {
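		/*
		 * Copy field by field: the user-visible struct ib_uverbs_wc
		 * layout differs from the kernel's struct ib_wc.
		 */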
		uqueue[head].wr_id = entry->wr_id;
		uqueue[head].status = entry->status;
		uqueue[head].opcode = entry->opcode;
		uqueue[head].vendor_err = entry->vendor_err;
		uqueue[head].byte_len = entry->byte_len;
		uqueue[head].ex.imm_data = entry->ex.imm_data;
		uqueue[head].qp_num = entry->qp->qp_num;
		uqueue[head].src_qp = entry->src_qp;
		uqueue[head].wc_flags = entry->wc_flags;
		uqueue[head].pkey_index = entry->pkey_index;
		uqueue[head].slid = ib_lid_cpu16(entry->slid);
		uqueue[head].sl = entry->sl;
		uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
	} else {
		kqueue[head] = *entry;
		k_wc->head = next;
	}

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		cq->notify = RVT_CQ_NONE;
		cq->triggered++;
		queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
			      &cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);
	return true;
}
EXPORT_SYMBOL(rvt_cq_enter);

static void send_complete(struct work_struct *work)
{
	struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, queue_work()
	 * won't call us again until we return, so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ. We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * rvt_create_cq - create a completion queue
 * @ibcq: Allocated CQ
 * @attr: creation attributes
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: 0 on success
 */
int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	u32 sz;
	unsigned int entries = attr->cqe;
	int comp_vector = attr->comp_vector;
	int err;

	if (attr->flags)
		return -EINVAL;

	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
		return -EINVAL;

	if (comp_vector < 0)
		comp_vector = 0;

	comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
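	/*
	 * A udata large enough to return the mmap offset marks this as a
	 * user CQ backed by ib_uverbs_wc entries; otherwise the queue is
	 * kernel-only and holds struct ib_wc entries.
	 */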
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (entries + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
		if (IS_ERR(cq->ip)) {
			err = PTR_ERR(cq->ip);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err)
			goto bail_ip;
	}

	spin_lock_irq(&rdi->n_cqs_lock);
	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
		spin_unlock_irq(&rdi->n_cqs_lock);
		err = -ENOMEM;
		goto bail_ip;
	}

	rdi->n_cqs_allocated++;
	spin_unlock_irq(&rdi->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->rdi = rdi;
	if (rdi->driver_f.comp_vect_cpu_lookup)
		cq->comp_vector_cpu =
			rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
	else
		cq->comp_vector_cpu =
			cpumask_first(cpumask_of_node(rdi->dparms.node));

	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	if (u_wc)
		cq->queue = u_wc;
	else
		cq->kqueue = k_wc;

	trace_rvt_create_cq(cq, attr);
	return 0;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(u_wc);
	vfree(k_wc);
	return err;
}

/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 *
 * Return: always 0.
 */
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_dev_info *rdi = cq->rdi;

	flush_work(&cq->comptask);
	spin_lock_irq(&rdi->n_cqs_lock);
	rdi->n_cqs_allocated--;
	spin_unlock_irq(&rdi->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, rvt_release_mmap_info);
	else
		vfree(cq->kqueue);
	return 0;
}

/**
 * rvt_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 *
 * Return: 0 for success.
 */
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

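	/*
	 * When asked to report missed events, return 1 if the queue is
	 * non-empty so the caller knows to poll again.
	 */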
	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		if (cq->queue) {
			if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
				RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
				ret = 1;
		} else {
			if (cq->kqueue->head != cq->kqueue->tail)
				ret = 1;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}

/**
 * rvt_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the requested new number of completion queue entries
 * @udata: user data for libibverbs.so
 *
 * Return: 0 for success.
 */
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	u32 head, tail, n;
	int ret;
	u32 sz;
	struct rvt_dev_info *rdi = cq->rdi;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_cq_wc *old_u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	struct rvt_k_cq_wc *old_k_wc = NULL;

	if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large numbers of
	 * entries.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (cqe + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}
	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	if (u_wc) {
		old_u_wc = cq->queue;
		head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);
	} else {
		old_k_wc = cq->kqueue;
		head = old_k_wc->head;
		tail = old_k_wc->tail;
	}

	if (head > (u32)cq->ibcq.cqe)
		head = (u32)cq->ibcq.cqe;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
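	/* Copy the not-yet-polled entries, oldest first, to the new queue. */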
	for (n = 0; tail != head; n++) {
		if (u_wc)
			u_wc->uqueue[n] = old_u_wc->uqueue[tail];
		else
			k_wc->kqueue[n] = old_k_wc->kqueue[tail];
		if (tail == (u32)cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	if (u_wc) {
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n);
		RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0);
		cq->queue = u_wc;
	} else {
		k_wc->head = n;
		k_wc->tail = 0;
		cq->kqueue = k_wc;
	}
	spin_unlock_irq(&cq->lock);

	if (u_wc)
		vfree(old_u_wc);
	else
		vfree(old_k_wc);

	if (cq->ip) {
		struct rvt_mmap_info *ip = cq->ip;

		rvt_update_mmap_info(rdi, ip, sz, u_wc);

		/*
		 * Return the offset to mmap.
		 * See rvt_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				return ret;
		}

		spin_lock_irq(&rdi->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	return 0;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(u_wc);
	vfree(k_wc);

	return ret;
}

/**
 * rvt_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 *
 * Return: the number of completion entries polled.
 */
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_k_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip)
		return -EINVAL;

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->kqueue;
	tail = wc->tail;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
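	/* Consume entries from tail toward head, at most num_entries. */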
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

/**
 * rvt_driver_cq_init - Init cq resources on behalf of driver
 *
 * Return: 0 on success
 */
int rvt_driver_cq_init(void)
{
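	/*
	 * A single shared workqueue serves all CQs; rvt_cq_enter() queues
	 * work on it with queue_work_on() so the completion handler runs
	 * on the CQ's completion vector CPU.
	 */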
	comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					 0, "rdmavt_cq");
	if (!comp_vector_wq)
		return -ENOMEM;

	return 0;
}

/**
 * rvt_cq_exit - tear down cq resources
 */
void rvt_cq_exit(void)
{
	destroy_workqueue(comp_vector_wq);
	comp_vector_wq = NULL;
}