Lines Matching refs:cq

50 #include "cq.h"
58 * @cq: completion queue
65 * false if cq is full.
67 bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
78 spin_lock_irqsave(&cq->lock, flags);
80 if (cq->ip) {
81 u_wc = cq->queue;
86 k_wc = cq->kqueue;
96 if (head >= (unsigned)cq->ibcq.cqe) {
97 head = cq->ibcq.cqe;
103 if (unlikely(next == tail || cq->cq_full)) {
104 struct rvt_dev_info *rdi = cq->rdi;
106 if (!cq->cq_full)
108 cq->cq_full = true;
109 spin_unlock_irqrestore(&cq->lock, flags);
110 if (cq->ibcq.event_handler) {
113 ev.device = cq->ibcq.device;
114 ev.element.cq = &cq->ibcq;
116 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
120 trace_rvt_cq_enter(cq, entry, head);
143 if (cq->notify == IB_CQ_NEXT_COMP ||
144 (cq->notify == IB_CQ_SOLICITED &&
150 cq->notify = RVT_CQ_NONE;
151 cq->triggered++;
152 queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
153 &cq->comptask);
156 spin_unlock_irqrestore(&cq->lock, flags);
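
The fragments above come from rvt_cq_enter(), the completion-insertion path: under cq->lock it picks the user-mmapped or kernel queue, advances the head with a wrap at cq->ibcq.cqe, reports IB_EVENT_CQ_ERR through the event handler when the ring is full, and schedules the completion worker when the CQ is armed. Below is a minimal, self-contained C sketch of that head/wrap/full pattern; struct model_cq and its fields are illustrative stand-ins, not the driver's types, and the locking and user/kernel split are omitted.

#include <stdbool.h>
#include <stdint.h>

struct model_cq {
	uint32_t cqe;    /* highest valid index: the ring has cqe + 1 slots */
	uint32_t head;   /* next slot the producer fills */
	uint32_t tail;   /* next slot the consumer reads */
	uint64_t *slots; /* stands in for the array of struct ib_wc entries */
};

static bool model_cq_enter(struct model_cq *cq, uint64_t entry)
{
	uint32_t head = cq->head;
	uint32_t next;

	/* Clamp a bogus index and wrap to 0 at cqe, as in the listing. */
	if (head >= cq->cqe) {
		head = cq->cqe;
		next = 0;
	} else {
		next = head + 1;
	}

	/* Full ring: this is where the driver raises IB_EVENT_CQ_ERR. */
	if (next == cq->tail)
		return false;

	cq->slots[head] = entry;
	cq->head = next;
	return true;
}

Leaving one slot unused lets a single head/tail pair distinguish an empty ring (head == tail) from a full one (next head == tail) without a separate element count.
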
163 struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
173 u8 triggered = cq->triggered;
182 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
185 if (cq->triggered == triggered)
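
The send_complete() worker fragments show a snapshot-and-recheck loop: cq->triggered is sampled before the completion handler runs, and if it moved while the handler executed (because more completions were queued, and the work item cannot be re-queued until this invocation returns), the handler is called again. A rough userspace analogue, with illustrative names and a C11 atomic standing in for the lock-protected counter:

#include <stdatomic.h>

struct model_comp_task {
	atomic_uint triggered;               /* bumped for every queued completion */
	void (*comp_handler)(void *context); /* consumer's completion callback */
	void *context;
};

static void model_send_complete(struct model_comp_task *task)
{
	for (;;) {
		/* Snapshot the counter before running the handler. */
		unsigned int snapshot = atomic_load(&task->triggered);

		task->comp_handler(task->context);

		/* If nothing new arrived while the handler ran, stop;
		 * otherwise run the handler once more. */
		if (atomic_load(&task->triggered) == snapshot)
			return;
	}
}
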
205 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
250 cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
251 if (IS_ERR(cq->ip)) {
252 err = PTR_ERR(cq->ip);
256 err = ib_copy_to_udata(udata, &cq->ip->offset,
257 sizeof(cq->ip->offset));
272 if (cq->ip) {
274 list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
279 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
283 cq->rdi = rdi;
285 cq->comp_vector_cpu =
288 cq->comp_vector_cpu =
291 cq->ibcq.cqe = entries;
292 cq->notify = RVT_CQ_NONE;
293 spin_lock_init(&cq->lock);
294 INIT_WORK(&cq->comptask, send_complete);
296 cq->queue = u_wc;
298 cq->kqueue = k_wc;
300 trace_rvt_create_cq(cq, attr);
304 kfree(cq->ip);
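
The CQ-creation fragments set up either a user-mmapped queue (cq->queue plus an rvt_mmap_info whose offset is copied back through udata) or an in-kernel queue (cq->kqueue), then record cq->ibcq.cqe = entries and initialize the lock, notify state and completion work item. The allocation lines themselves are not in this listing; the sketch below assumes the usual one-spare-slot sizing (entries + 1) that the enqueue sketch relies on, reuses struct model_cq from that sketch, and uses illustrative names throughout.

#include <stdint.h>
#include <stdlib.h>

/* Reuses struct model_cq from the sketch after the rvt_cq_enter fragments. */
static struct model_cq *model_create_cq(uint32_t entries)
{
	struct model_cq *cq = calloc(1, sizeof(*cq));

	if (!cq)
		return NULL;

	/* One spare slot (entries + 1) so head == tail always means empty. */
	cq->slots = calloc((size_t)entries + 1, sizeof(*cq->slots));
	if (!cq->slots) {
		free(cq);
		return NULL;
	}

	cq->cqe = entries;  /* like cq->ibcq.cqe = entries in the listing */
	cq->head = 0;
	cq->tail = 0;
	return cq;
}
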
320 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
321 struct rvt_dev_info *rdi = cq->rdi;
323 flush_work(&cq->comptask);
327 if (cq->ip)
328 kref_put(&cq->ip->ref, rvt_release_mmap_info);
330 vfree(cq->kqueue);
346 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
350 spin_lock_irqsave(&cq->lock, flags);
355 if (cq->notify != IB_CQ_NEXT_COMP)
356 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
359 if (cq->queue) {
360 if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
361 RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
364 if (cq->kqueue->head != cq->kqueue->tail)
369 spin_unlock_irqrestore(&cq->lock, flags);
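
These fragments are the notify/arm path: the armed state is rewritten only when it is not already IB_CQ_NEXT_COMP (so arming for every completion is never downgraded to solicited-only), and a non-empty queue (head != tail, read with RDMA_READ_UAPI_ATOMIC for the user-mapped variant) is reported back so the caller can poll before sleeping. A condensed model with illustrative enum and struct names, locking omitted:

#include <stdbool.h>
#include <stdint.h>

enum model_notify { MODEL_CQ_NONE, MODEL_CQ_SOLICITED, MODEL_CQ_NEXT_COMP };

struct model_notify_cq {
	enum model_notify notify;
	uint32_t head;
	uint32_t tail;
};

static int model_req_notify(struct model_notify_cq *cq,
			    enum model_notify requested,
			    bool report_missed_events)
{
	int ret = 0;

	/* Never downgrade "notify on every completion" to "solicited only". */
	if (cq->notify != MODEL_CQ_NEXT_COMP)
		cq->notify = requested;

	/* A non-empty ring is reported so the caller polls before sleeping. */
	if (report_missed_events && cq->head != cq->tail)
		ret = 1;

	return ret;
}
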
382 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
386 struct rvt_dev_info *rdi = cq->rdi;
420 spin_lock_irq(&cq->lock);
426 old_u_wc = cq->queue;
430 old_k_wc = cq->kqueue;
435 if (head > (u32)cq->ibcq.cqe)
436 head = (u32)cq->ibcq.cqe;
437 if (tail > (u32)cq->ibcq.cqe)
438 tail = (u32)cq->ibcq.cqe;
440 n = cq->ibcq.cqe + 1 + head - tail;
452 if (tail == (u32)cq->ibcq.cqe)
457 cq->ibcq.cqe = cqe;
461 cq->queue = u_wc;
465 cq->kqueue = k_wc;
467 spin_unlock_irq(&cq->lock);
474 if (cq->ip) {
475 struct rvt_mmap_info *ip = cq->ip;
499 spin_unlock_irq(&cq->lock);
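
The resize fragments clamp the (possibly user-writable) head and tail, count the resident entries, copy them from the old ring into the new one starting at index 0, publish the new cq->ibcq.cqe, and re-register the mmap info for user queues. The sketch below models just the copy-and-swap on struct model_cq from the first sketch; the udata/user-mmap variant and locking are omitted, and the early -1 return loosely stands in for the driver's -EINVAL path.

#include <stdint.h>
#include <stdlib.h>

/* Reuses struct model_cq from the first sketch. */
static int model_resize_cq(struct model_cq *cq, uint32_t new_entries)
{
	uint32_t head = cq->head;
	uint32_t tail = cq->tail;
	uint64_t *new_slots;
	uint32_t n;

	/* Clamp possibly stale indices, as the listing does. */
	if (head > cq->cqe)
		head = cq->cqe;
	if (tail > cq->cqe)
		tail = cq->cqe;

	/* Count resident entries in a ring of cqe + 1 slots. */
	if (head < tail)
		n = cq->cqe + 1 + head - tail;   /* wrapped case, as in the listing */
	else
		n = head - tail;
	if (n > new_entries)
		return -1;                       /* new ring too small */

	new_slots = calloc((size_t)new_entries + 1, sizeof(*new_slots));
	if (!new_slots)
		return -1;

	/* Compact the old contents to the start of the new ring. */
	for (n = 0; tail != head; n++) {
		new_slots[n] = cq->slots[tail];
		if (tail == cq->cqe)
			tail = 0;
		else
			tail++;
	}

	free(cq->slots);
	cq->slots = new_slots;
	cq->cqe = new_entries;
	cq->head = n;
	cq->tail = 0;
	return 0;
}
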
520 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
527 if (cq->ip)
530 spin_lock_irqsave(&cq->lock, flags);
532 wc = cq->kqueue;
534 if (tail > (u32)cq->ibcq.cqe)
535 tail = (u32)cq->ibcq.cqe;
540 trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
542 if (tail >= cq->ibcq.cqe)
549 spin_unlock_irqrestore(&cq->lock, flags);
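
The poll fragments drain the kernel queue: a user-mmapped CQ (cq->ip set) cannot be polled from the kernel, and otherwise entries are copied out from tail toward head under cq->lock, with the same wrap-at-cqe rule as the producer side. A compact model on struct model_cq from the first sketch, without the lock or the cq->ip check:

#include <stdint.h>

/* Reuses struct model_cq from the first sketch. */
static int model_poll_cq(struct model_cq *cq, int num_entries,
			 uint64_t *entries)
{
	uint32_t tail = cq->tail;
	int npolled;

	/* Clamp a possibly out-of-range index, as the listing does. */
	if (tail > cq->cqe)
		tail = cq->cqe;

	for (npolled = 0; npolled < num_entries; npolled++) {
		if (tail == cq->head)
			break;             /* ring is empty */
		entries[npolled] = cq->slots[tail];
		if (tail >= cq->cqe)       /* wrap, matching the producer side */
			tail = 0;
		else
			tail++;
	}

	cq->tail = tail;
	return npolled;
}
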
555 * rvt_driver_cq_init - Init cq resources on behalf of driver
570 * rvt_cq_exit - tear down cq resources