Lines matching refs: chp (drivers/infiniband/hw/cxgb4/cq.c)
242 struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
243 struct t4_cq *cq = &chp->cq;
335 void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
342 pr_debug("cqid 0x%x\n", chp->cq.cqid);
343 ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
351 qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
407 flush_completed_wrs(&qhp->wq, &chp->cq);
409 swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
412 t4_swcq_produce(&chp->cq);
415 t4_hwcq_consume(&chp->cq);
416 ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
754 static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
764 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
921 static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
928 ret = t4_next_cqe(&chp->cq, &rd_cqe);
933 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
939 ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
944 ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
951 struct c4iw_cq *chp;
956 chp = to_c4iw_cq(ibcq);
958 spin_lock_irqsave(&chp->lock, flags);
961 err = c4iw_poll_cq_one(chp, wc + npolled);
966 spin_unlock_irqrestore(&chp->lock, flags);
972 struct c4iw_cq *chp;
976 chp = to_c4iw_cq(ib_cq);
978 xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
979 atomic_dec(&chp->refcnt);
980 wait_event(chp->wait, !atomic_read(&chp->refcnt));
984 destroy_cq(&chp->rhp->rdev, &chp->cq,
985 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
986 chp->destroy_skb, chp->wr_waitp);
987 c4iw_put_wr_wait(chp->wr_waitp);
998 struct c4iw_cq *chp = to_c4iw_cq(ibcq);
1022 chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
1023 if (!chp->wr_waitp) {
1027 c4iw_init_wr_wait(chp->wr_waitp);
1030 chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
1031 if (!chp->destroy_skb) {
1060 (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));
1068 chp->cq.size = hwentries;
1069 chp->cq.memsize = memsize;
1070 chp->cq.vector = vector;
1072 ret = create_cq(&rhp->rdev, &chp->cq,
1074 chp->wr_waitp);
1078 chp->rhp = rhp;
1079 chp->cq.size--; /* status page */
1080 chp->ibcq.cqe = entries - 2;
1081 spin_lock_init(&chp->lock);
1082 spin_lock_init(&chp->comp_handler_lock);
1083 atomic_set(&chp->refcnt, 1);
1084 init_waitqueue_head(&chp->wait);
1085 ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
1100 uresp.cqid = chp->cq.cqid;
1101 uresp.size = chp->cq.size;
1102 uresp.memsize = chp->cq.memsize;
1122 mm->addr = virt_to_phys(chp->cq.queue);
1123 mm->len = chp->cq.memsize;
1127 mm2->addr = chp->cq.bar2_pa;
1132 pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr %pad\n",
1133 chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
1134 &chp->cq.dma_addr);
1141 xa_erase_irq(&rhp->cqs, chp->cq.cqid);
1143 destroy_cq(&chp->rhp->rdev, &chp->cq,
1145 chp->destroy_skb, chp->wr_waitp);
1147 kfree_skb(chp->destroy_skb);
1149 c4iw_put_wr_wait(chp->wr_waitp);
1156 struct c4iw_cq *chp;
1160 chp = to_c4iw_cq(ibcq);
1161 spin_lock_irqsave(&chp->lock, flag);
1162 t4_arm_cq(&chp->cq,
1165 ret = t4_cq_notempty(&chp->cq);
1166 spin_unlock_irqrestore(&chp->lock, flag);
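
For context, the poll and arm entry points in the listing (c4iw_poll_cq and c4iw_arm_cq) are the routines a kernel consumer reaches through the standard verbs calls. The sketch below is a hypothetical consumer-side loop, not part of cq.c: drain_cq() is an invented helper, while ib_poll_cq(), ib_req_notify_cq(), IB_CQ_NEXT_COMP and IB_CQ_REPORT_MISSED_EVENTS are the existing kernel verbs API that, for a CQ on a cxgb4 device, dispatch to the chp-based routines shown above.

	/* Hypothetical consumer-side sketch (not from cq.c): drain_cq() is an
	 * invented helper showing how the listed provider routines are driven.
	 * For a cxgb4 CQ, ib_poll_cq() reaches c4iw_poll_cq(), which takes
	 * chp->lock and calls c4iw_poll_cq_one() per entry, and
	 * ib_req_notify_cq() reaches c4iw_arm_cq(), whose t4_cq_notempty()
	 * result surfaces here as a positive "missed events" return value.
	 */
	#include <linux/printk.h>
	#include <rdma/ib_verbs.h>

	static void drain_cq(struct ib_cq *cq)
	{
		struct ib_wc wc;

		do {
			/* Reap completions one at a time until the CQ is empty. */
			while (ib_poll_cq(cq, 1, &wc) > 0) {
				if (wc.status != IB_WC_SUCCESS)
					pr_warn("wr_id %llu failed with status %d\n",
						(unsigned long long)wc.wr_id,
						wc.status);
			}
		/* Re-arm; a positive return means completions may have raced
		 * in after the last poll, so go around again. */
		} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					  IB_CQ_REPORT_MISSED_EVENTS) > 0);
	}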