Lines Matching refs:cq

8 #include "cq.h"
16 * @cq: completion queue
23 * false if cq is full.
25 bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
36 spin_lock_irqsave(&cq->lock, flags);
38 if (cq->ip) {
39 u_wc = cq->queue;
44 k_wc = cq->kqueue;
54 if (head >= (unsigned)cq->ibcq.cqe) {
55 head = cq->ibcq.cqe;
61 if (unlikely(next == tail || cq->cq_full)) {
62 struct rvt_dev_info *rdi = cq->rdi;
64 if (!cq->cq_full)
66 cq->cq_full = true;
67 spin_unlock_irqrestore(&cq->lock, flags);
68 if (cq->ibcq.event_handler) {
71 ev.device = cq->ibcq.device;
72 ev.element.cq = &cq->ibcq;
74 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
78 trace_rvt_cq_enter(cq, entry, head);
101 if (cq->notify == IB_CQ_NEXT_COMP ||
102 (cq->notify == IB_CQ_SOLICITED &&
108 cq->notify = RVT_CQ_NONE;
109 cq->triggered++;
110 queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
111 &cq->comptask);
114 spin_unlock_irqrestore(&cq->lock, flags);
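
The enqueue path above is a one-slot-reserved circular buffer: the queue has cqe + 1 slots, head is clamped before use because a user-mapped head can hold a bogus value, and the queue is full when the slot after head equals tail. Below is a minimal userspace sketch of just that arithmetic; the names (demo_cq, demo_cq_enter, DEMO_CQE) are hypothetical stand-ins, not the rdmavt structures, and the locking, tracing and CQ-error event reporting are omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_CQE 4                      /* capacity requested by the consumer */

struct demo_cq {
	uint32_t head;                  /* next slot to fill */
	uint32_t tail;                  /* next slot to poll */
	int entries[DEMO_CQE + 1];      /* one spare slot distinguishes full from empty */
};

static bool demo_cq_enter(struct demo_cq *cq, int wc)
{
	uint32_t head = cq->head;
	uint32_t next;

	/* Clamp an out-of-range head; the kernel does this because user
	 * space can write the head of an mmapped queue. */
	if (head >= DEMO_CQE) {
		head = DEMO_CQE;
		next = 0;               /* wrap around */
	} else {
		next = head + 1;
	}

	if (next == cq->tail)           /* advancing head would hit tail: full */
		return false;

	cq->entries[head] = wc;
	cq->head = next;
	return true;
}

int main(void)
{
	struct demo_cq cq = { 0 };
	int i, ok = 0;

	for (i = 0; i < 10; i++)
		ok += demo_cq_enter(&cq, i);

	printf("accepted %d of 10 entries\n", ok);   /* 4, then the ring is full */
	return 0;
}
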
121 struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
131 u8 triggered = cq->triggered;
140 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
143 if (cq->triggered == triggered)
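
send_complete() snapshots cq->triggered, invokes the completion handler, and loops whenever the counter moved while the handler ran, since the work item will not be requeued while it is still executing. A rough userspace sketch of that re-check pattern, assuming hypothetical demo_* names and a pthread mutex in place of the CQ spinlock and workqueue:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the rvt_cq fields the worker touches. */
struct demo_cq {
	pthread_mutex_t lock;
	unsigned char triggered;        /* bumped each time completion work is queued */
	void (*comp_handler)(void *ctx);
	void *cq_context;
};

/* Run the handler, then re-run it if the trigger counter moved meanwhile. */
static void demo_send_complete(struct demo_cq *cq)
{
	for (;;) {
		unsigned char triggered;

		pthread_mutex_lock(&cq->lock);
		triggered = cq->triggered;      /* snapshot before the callback */
		pthread_mutex_unlock(&cq->lock);

		cq->comp_handler(cq->cq_context);

		pthread_mutex_lock(&cq->lock);
		if (cq->triggered == triggered) {
			/* no new completion arrived while the handler ran */
			pthread_mutex_unlock(&cq->lock);
			return;
		}
		pthread_mutex_unlock(&cq->lock);
	}
}

static void demo_handler(void *ctx)
{
	(void)ctx;
	printf("completion handler invoked\n");
}

int main(void)
{
	struct demo_cq cq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.triggered = 1,
		.comp_handler = demo_handler,
	};

	demo_send_complete(&cq);        /* single-threaded: runs the handler once */
	return 0;
}
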
163 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
208 cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
209 if (IS_ERR(cq->ip)) {
210 err = PTR_ERR(cq->ip);
214 err = ib_copy_to_udata(udata, &cq->ip->offset,
215 sizeof(cq->ip->offset));
230 if (cq->ip) {
232 list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
237 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
241 cq->rdi = rdi;
243 cq->comp_vector_cpu =
246 cq->comp_vector_cpu =
249 cq->ibcq.cqe = entries;
250 cq->notify = RVT_CQ_NONE;
251 spin_lock_init(&cq->lock);
252 INIT_WORK(&cq->comptask, send_complete);
254 cq->queue = u_wc;
256 cq->kqueue = k_wc;
258 trace_rvt_create_cq(cq, attr);
262 kfree(cq->ip);
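
rvt_create_cq() keeps two queue flavours: a user-mmapable queue (cq->queue, published through cq->ip and the pending_mmaps list) and a kernel-only queue (cq->kqueue), each sized for entries + 1 completions so the ring can keep one spare slot. A hedged userspace sketch of that sizing decision; demo_uwc, demo_kwc and demo_cq_bytes are made-up stand-ins for the real ib_uverbs_wc/ib_wc layouts and allocation calls:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two completion-entry layouts: a
 * user-visible (mmapable) entry and a kernel-only entry. */
struct demo_uwc { unsigned long long wr_id; unsigned int status, opcode; };
struct demo_kwc { void *qp; unsigned int status, opcode, byte_len; };

struct demo_cq_hdr { unsigned int head, tail; };

/* Size a queue of 'entries' completions plus the one spare slot the ring
 * needs, picking the layout by whether user space will mmap the queue. */
static size_t demo_cq_bytes(unsigned int entries, int user_mapped)
{
	size_t per_entry = user_mapped ? sizeof(struct demo_uwc)
				       : sizeof(struct demo_kwc);

	return sizeof(struct demo_cq_hdr) + per_entry * (entries + 1);
}

int main(void)
{
	printf("user: %zu bytes, kernel: %zu bytes\n",
	       demo_cq_bytes(128, 1), demo_cq_bytes(128, 0));
	return 0;
}
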
278 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
279 struct rvt_dev_info *rdi = cq->rdi;
281 flush_work(&cq->comptask);
285 if (cq->ip)
286 kref_put(&cq->ip->ref, rvt_release_mmap_info);
288 vfree(cq->kqueue);
304 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
308 spin_lock_irqsave(&cq->lock, flags);
313 if (cq->notify != IB_CQ_NEXT_COMP)
314 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
317 if (cq->queue) {
318 if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
319 RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
322 if (cq->kqueue->head != cq->kqueue->tail)
327 spin_unlock_irqrestore(&cq->lock, flags);
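
rvt_req_notify_cq() only ever upgrades the arming state (a pending IB_CQ_NEXT_COMP is not downgraded to solicited-only) and, when IB_CQ_REPORT_MISSED_EVENTS is requested, returns 1 if completions are already queued. A small sketch of that flag handling, with hypothetical DEMO_* flags standing in for the ib_cq_notify_flags values and the locking left out:

#include <stdio.h>

/* Hypothetical flag values mirroring the shape of the notify flags above. */
enum {
	DEMO_CQ_SOLICITED		= 1 << 0,
	DEMO_CQ_NEXT_COMP		= 1 << 1,
	DEMO_CQ_SOLICITED_MASK		= DEMO_CQ_SOLICITED | DEMO_CQ_NEXT_COMP,
	DEMO_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

struct demo_cq {
	unsigned int notify;            /* current arming state, 0 = none */
	unsigned int head, tail;        /* ring indices */
};

/* Arm the CQ; never downgrade NEXT_COMP to SOLICITED, and report a missed
 * event (return 1) when asked and completions are already queued. */
static int demo_req_notify(struct demo_cq *cq, unsigned int flags)
{
	int ret = 0;

	if (cq->notify != DEMO_CQ_NEXT_COMP)
		cq->notify = flags & DEMO_CQ_SOLICITED_MASK;

	if ((flags & DEMO_CQ_REPORT_MISSED_EVENTS) && cq->head != cq->tail)
		ret = 1;

	return ret;
}

int main(void)
{
	struct demo_cq cq = { .head = 2, .tail = 0 };

	printf("missed events pending: %d\n",
	       demo_req_notify(&cq, DEMO_CQ_SOLICITED | DEMO_CQ_REPORT_MISSED_EVENTS));
	return 0;
}
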
340 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
344 struct rvt_dev_info *rdi = cq->rdi;
378 spin_lock_irq(&cq->lock);
384 old_u_wc = cq->queue;
388 old_k_wc = cq->kqueue;
393 if (head > (u32)cq->ibcq.cqe)
394 head = (u32)cq->ibcq.cqe;
395 if (tail > (u32)cq->ibcq.cqe)
396 tail = (u32)cq->ibcq.cqe;
398 n = cq->ibcq.cqe + 1 + head - tail;
410 if (tail == (u32)cq->ibcq.cqe)
415 cq->ibcq.cqe = cqe;
419 cq->queue = u_wc;
423 cq->kqueue = k_wc;
425 spin_unlock_irq(&cq->lock);
432 if (cq->ip) {
433 struct rvt_mmap_info *ip = cq->ip;
457 spin_unlock_irq(&cq->lock);
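
The resize path counts the completions still queued in the old ring of cqe + 1 slots (wrapping when head has passed the end), refuses the resize if they will not fit, and repacks them at the start of the new ring so the new tail is 0. A compact userspace sketch of that count-and-repack step, with hypothetical fixed sizes instead of the runtime-sized queues:

#include <stdio.h>

/* Hypothetical fixed-size rings; the real code sizes them at runtime. */
#define OLD_CQE 4
#define NEW_CQE 8

/* Count the completions queued in a ring of cqe + 1 slots. */
static unsigned int demo_cq_count(unsigned int cqe, unsigned int head,
				  unsigned int tail)
{
	return head < tail ? cqe + 1 + head - tail : head - tail;
}

int main(void)
{
	int old_ring[OLD_CQE + 1] = { 0, 0, 0, 30, 40 };   /* entries at slots 3 and 4 */
	int new_ring[NEW_CQE + 1];
	unsigned int head = 0, tail = 3;                   /* head has wrapped past the end */
	unsigned int n;

	n = demo_cq_count(OLD_CQE, head, tail);
	if (n > NEW_CQE)
		return 1;       /* the new ring is too small: refuse the resize */

	/* Repack the old entries at the start of the new ring. */
	for (n = 0; tail != head; n++) {
		new_ring[n] = old_ring[tail];
		tail = (tail == OLD_CQE) ? 0 : tail + 1;
	}

	printf("copied %u entries; new head=%u, new tail=0\n", n, n);
	return 0;
}
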
478 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
485 if (cq->ip)
488 spin_lock_irqsave(&cq->lock, flags);
490 wc = cq->kqueue;
492 if (tail > (u32)cq->ibcq.cqe)
493 tail = (u32)cq->ibcq.cqe;
498 trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
500 if (tail >= cq->ibcq.cqe)
507 spin_unlock_irqrestore(&cq->lock, flags);
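
rvt_poll_cq() only drains kernel CQs (user-mapped queues are polled from user space via the mmap); it clamps a bogus tail, copies entries from tail toward head, and wraps tail with the same rule the enqueue side uses. A minimal sketch of that drain loop under those assumptions, with hypothetical demo_* names:

#include <stdio.h>

#define DEMO_CQE 4

struct demo_cq {
	unsigned int head, tail;
	int kqueue[DEMO_CQE + 1];
};

/* Drain up to num_entries completions from the ring, advancing tail with
 * the same wrap rule the enqueue side uses. */
static int demo_poll_cq(struct demo_cq *cq, int num_entries, int *out)
{
	unsigned int tail = cq->tail;
	int npolled;

	if (tail > DEMO_CQE)            /* clamp a bogus tail value */
		tail = DEMO_CQE;

	for (npolled = 0; npolled < num_entries; npolled++) {
		if (tail == cq->head)   /* ring empty */
			break;
		out[npolled] = cq->kqueue[tail];
		tail = (tail >= DEMO_CQE) ? 0 : tail + 1;
	}
	cq->tail = tail;

	return npolled;
}

int main(void)
{
	struct demo_cq cq = { .head = 2, .tail = 0, .kqueue = { 10, 20 } };
	int wc[8];
	int n = demo_poll_cq(&cq, 8, wc);

	printf("polled %d completions (first=%d)\n", n, n ? wc[0] : -1);
	return 0;
}
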
513 * rvt_driver_cq_init - Init cq resources on behalf of driver
528 * rvt_cq_exit - tear down cq resources