Lines matching refs:cq in drivers/infiniband/hw/mlx4/cq.c
34 #include <linux/mlx4/cq.h>
43 static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
45 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
49 static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
56 "on CQ %06x\n", type, cq->cqn);
60 ibcq = &to_mibcq(cq)->ibcq;
64 event.element.cq = ibcq;
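The two callbacks above (mlx4_ib_cq_comp and mlx4_ib_cq_event) forward hardware completion and async events to whatever handler the verbs consumer registered. As a hedged illustration of the consumer side, the sketch below shows the usual libibverbs arm/wait/ack/poll cycle; the helper name drain_one_event is made up for the example and error handling is abbreviated.

/*
 * Hedged sketch: how a user-space verbs consumer typically consumes the
 * completion events that mlx4_ib_cq_comp()/mlx4_ib_cq_event() ultimately
 * deliver.  Uses standard libibverbs calls only.
 */
#include <stdio.h>
#include <infiniband/verbs.h>

static int drain_one_event(struct ibv_comp_channel *channel)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	struct ibv_wc wc;

	/* Block until the driver's comp handler signals the channel. */
	if (ibv_get_cq_event(channel, &ev_cq, &ev_ctx))
		return -1;
	ibv_ack_cq_events(ev_cq, 1);

	/* Re-arm before polling so no completion is missed. */
	if (ibv_req_notify_cq(ev_cq, 0))
		return -1;

	while (ibv_poll_cq(ev_cq, 1, &wc) > 0)
		printf("wr_id %llu status %s\n",
		       (unsigned long long)wc.wr_id,
		       ibv_wc_status_str(wc.status));
	return 0;
}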
74 static void *get_cqe(struct mlx4_ib_cq *cq, int n)
76 return get_cqe_from_buf(&cq->buf, n);
79 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
82 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
85 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
88 static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
90 return get_sw_cqe(cq, cq->mcq.cons_index);
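get_sw_cqe() decides whether the entry at a given index is still owned by hardware by comparing the CQE owner bit against the lap parity of that index: the hardware toggles the bit each time it wraps the ring. Below is a minimal stand-alone model of that test; the struct and mask are illustrative stand-ins, not the driver's types.

/*
 * Hedged model of the ownership test in get_sw_cqe(): an entry belongs to
 * software only when its owner bit matches the parity of the index.
 */
#include <stdbool.h>
#include <stdint.h>

#define OWNER_MASK 0x80            /* models MLX4_CQE_OWNER_MASK */

struct model_cqe {
	uint8_t owner_sr_opcode;   /* top bit toggled by "hardware" per lap */
};

/* cqe_mask is (number of entries - 1), as in cq->ibcq.cqe */
static bool sw_owned(const struct model_cqe *e, uint32_t cons_index,
		     uint32_t cqe_mask)
{
	bool hw_bit = e->owner_sr_opcode & OWNER_MASK;
	bool lap_parity = cons_index & (cqe_mask + 1);

	return hw_bit == lap_parity;   /* mismatch => still owned by HW */
}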
93 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
95 struct mlx4_ib_cq *mcq = to_mcq(cq);
96 struct mlx4_ib_dev *dev = to_mdev(cq->device);
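mlx4_ib_modify_cq() programs CQ moderation: completion events are coalesced until either cq_count CQEs have arrived or cq_period microseconds have elapsed. A hedged sketch of the user-space call that reaches it, assuming a recent rdma-core (field names follow the ibv_modify_cq(3) man page):

#include <infiniband/verbs.h>

/* Request event moderation on an existing CQ; values are examples only. */
static int set_cq_moderation(struct ibv_cq *cq)
{
	struct ibv_modify_cq_attr attr = {
		.attr_mask = IBV_CQ_ATTR_MODERATE,
		.moderate = {
			.cq_count  = 16,  /* fire an event every 16 CQEs ... */
			.cq_period = 64,  /* ... or after 64 us, whichever first */
		},
	};

	return ibv_modify_cq(cq, &attr);
}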
181 struct mlx4_ib_cq *cq = to_mcq(ibcq);
195 cq->ibcq.cqe = entries - 1;
196 mutex_init(&cq->resize_mutex);
197 spin_lock_init(&cq->lock);
198 cq->resize_buf = NULL;
199 cq->resize_umem = NULL;
200 cq->create_flags = attr->flags;
201 INIT_LIST_HEAD(&cq->send_qp_list);
202 INIT_LIST_HEAD(&cq->recv_qp_list);
213 err = mlx4_ib_get_cq_umem(dev, &cq->buf, &cq->umem,
218 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
223 cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
225 err = mlx4_db_alloc(dev->dev, &cq->db, 1);
229 cq->mcq.set_ci_db = cq->db.db;
230 cq->mcq.arm_db = cq->db.db + 1;
231 *cq->mcq.set_ci_db = 0;
232 *cq->mcq.arm_db = 0;
234 err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
238 buf_addr = &cq->buf.buf;
241 cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
247 err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
248 &cq->mcq, vector, 0,
249 !!(cq->create_flags &
256 cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
258 cq->mcq.comp = mlx4_ib_cq_comp;
259 cq->mcq.event = mlx4_ib_cq_event;
262 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
270 mlx4_cq_free(dev->dev, &cq->mcq);
274 mlx4_ib_db_unmap_user(context, &cq->db);
277 mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
279 ib_umem_release(cq->umem);
281 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
285 mlx4_db_free(dev->dev, &cq->db);
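The create path above splits on udata: a user CQ pins the caller-supplied buffer and maps its doorbell record, while a kernel CQ allocates its own buffer and doorbell records before mlx4_cq_alloc() hands the CQ to firmware. For context, a hedged sketch of the user-space call that drives the udata branch (the wrapper name make_cq is illustrative):

#include <stddef.h>
#include <infiniband/verbs.h>

static struct ibv_cq *make_cq(struct ibv_context *ctx, int entries)
{
	struct ibv_comp_channel *chan = ibv_create_comp_channel(ctx);
	struct ibv_cq *cq;

	if (!chan)
		return NULL;

	/* The provider rounds the requested depth up (mlx4 uses a
	 * power-of-two ring), which is why ibcq.cqe is stored as
	 * entries - 1 in the kernel fragment above. */
	cq = ibv_create_cq(ctx, entries, NULL /* cq_context */,
			   chan, 0 /* comp_vector */);
	if (!cq) {
		ibv_destroy_comp_channel(chan);
		return NULL;
	}
	return cq;
}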
290 static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
295 if (cq->resize_buf)
298 cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
299 if (!cq->resize_buf)
302 err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
304 kfree(cq->resize_buf);
305 cq->resize_buf = NULL;
309 cq->resize_buf->cqe = entries - 1;
314 static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
320 if (cq->resize_umem)
326 cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
327 if (!cq->resize_buf)
330 err = mlx4_ib_get_cq_umem(dev, &cq->resize_buf->buf, &cq->resize_umem,
333 kfree(cq->resize_buf);
334 cq->resize_buf = NULL;
338 cq->resize_buf->cqe = entries - 1;
343 static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
347 i = cq->mcq.cons_index;
348 while (get_sw_cqe(cq, i))
351 return i - cq->mcq.cons_index;
354 static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
358 int cqe_size = cq->buf.entry_size;
361 i = cq->mcq.cons_index;
362 cqe = get_cqe(cq, i & cq->ibcq.cqe);
366 new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
367 (i + 1) & cq->resize_buf->cqe);
368 memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
372 (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
373 cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
376 ++cq->mcq.cons_index;
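mlx4_ib_cq_resize_copy_cqes() migrates the completions still outstanding at resize time into the new buffer, recomputing each owner bit from the entry's lap parity in the new ring. The stand-alone model below captures that copy; it uses an explicit count where the driver stops at the firmware's RESIZE marker CQE, and the types are simplified stand-ins.

/*
 * Hedged model of the resize copy: each surviving entry moves into the new
 * ring and has its owner bit rewritten for the new ring size.
 */
#include <stdint.h>
#include <string.h>

#define OWNER_MASK 0x80

struct model_cqe {
	uint8_t data[31];
	uint8_t owner_sr_opcode;
};

static void copy_resize(struct model_cqe *old_ring, uint32_t old_mask,
			struct model_cqe *new_ring, uint32_t new_mask,
			uint32_t cons_index, uint32_t outstanding)
{
	for (uint32_t n = 0; n < outstanding; n++) {
		uint32_t i = cons_index + n;
		struct model_cqe *src = &old_ring[i & old_mask];
		/* the driver places each entry one slot further on in the new ring */
		struct model_cqe *dst = &new_ring[(i + 1) & new_mask];

		memcpy(dst, src, sizeof(*dst));
		dst->owner_sr_opcode =
			(src->owner_sr_opcode & ~OWNER_MASK) |
			(((i + 1) & (new_mask + 1)) ? OWNER_MASK : 0);
	}
}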
382 struct mlx4_ib_cq *cq = to_mcq(ibcq);
387 mutex_lock(&cq->resize_mutex);
405 err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
410 outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
416 err = mlx4_alloc_resize_buf(dev, cq, entries);
421 mtt = cq->buf.mtt;
423 err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
429 cq->buf = cq->resize_buf->buf;
430 cq->ibcq.cqe = cq->resize_buf->cqe;
431 ib_umem_release(cq->umem);
432 cq->umem = cq->resize_umem;
434 kfree(cq->resize_buf);
435 cq->resize_buf = NULL;
436 cq->resize_umem = NULL;
441 spin_lock_irq(&cq->lock);
442 if (cq->resize_buf) {
443 mlx4_ib_cq_resize_copy_cqes(cq);
444 tmp_buf = cq->buf;
445 tmp_cqe = cq->ibcq.cqe;
446 cq->buf = cq->resize_buf->buf;
447 cq->ibcq.cqe = cq->resize_buf->cqe;
449 kfree(cq->resize_buf);
450 cq->resize_buf = NULL;
452 spin_unlock_irq(&cq->lock);
461 mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
463 mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
464 cq->resize_buf->cqe);
466 kfree(cq->resize_buf);
467 cq->resize_buf = NULL;
469 ib_umem_release(cq->resize_umem);
470 cq->resize_umem = NULL;
472 mutex_unlock(&cq->resize_mutex);
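From user space the whole resize path above is driven by a single verbs call; a hedged sketch (grow_cq is an illustrative wrapper):

#include <infiniband/verbs.h>

/* Request a deeper CQ; the provider migrates outstanding CQEs, as shown
 * in mlx4_ib_cq_resize_copy_cqes(), before switching buffers.  For a
 * kernel-owned CQ, mlx4 rejects a new depth smaller than the number of
 * completions still outstanding on the queue. */
static int grow_cq(struct ibv_cq *cq, int new_depth)
{
	return ibv_resize_cq(cq, new_depth);
}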
477 int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
479 struct mlx4_ib_dev *dev = to_mdev(cq->device);
480 struct mlx4_ib_cq *mcq = to_mcq(cq);
493 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
580 static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
631 static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
637 /* Find uncompleted WQEs belonging to that cq and return
640 list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
646 list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
656 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
673 cqe = next_cqe_sw(cq);
677 if (cq->buf.entry_size == 64)
680 ++cq->mcq.cons_index;
694 if (cq->resize_buf) {
695 struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
697 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
698 cq->buf = cq->resize_buf->buf;
699 cq->ibcq.cqe = cq->resize_buf->cqe;
701 kfree(cq->resize_buf);
702 cq->resize_buf = NULL;
715 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
727 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
837 if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
841 use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
879 struct mlx4_ib_cq *cq = to_mcq(ibcq);
883 struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
885 spin_lock_irqsave(&cq->lock, flags);
887 mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
892 if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
896 mlx4_cq_set_ci(&cq->mcq);
899 spin_unlock_irqrestore(&cq->lock, flags);
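mlx4_ib_poll_cq() harvests up to num_entries completions under the CQ lock, falling back to software-generated flush completions (mlx4_ib_poll_sw_comp) when the device is in a catastrophic-error state. A hedged user-space counterpart of that batching loop (drain_cq is illustrative):

/*
 * Hedged sketch: batched polling from user space, mirroring the npolled
 * loop in mlx4_ib_poll_cq() above.  IBV_WC_WR_FLUSH_ERR is the status the
 * kernel's sw-completion fallback reports once the device has died.
 */
#include <infiniband/verbs.h>

static int drain_cq(struct ibv_cq *cq, struct ibv_wc *wc, int budget)
{
	int n = ibv_poll_cq(cq, budget, wc);   /* 0..budget, <0 on failure */

	for (int i = 0; i < n; i++)
		if (wc[i].status != IBV_WC_SUCCESS &&
		    wc[i].status != IBV_WC_WR_FLUSH_ERR)
			return -1;             /* unexpected completion error */
	return n;
}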
915 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
921 int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;
930 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
931 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
938 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
939 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
947 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
958 cq->mcq.cons_index += nfreed;
964 mlx4_cq_set_ci(&cq->mcq);
968 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
970 spin_lock_irq(&cq->lock);
971 __mlx4_ib_cq_clean(cq, qpn, srq);
972 spin_unlock_irq(&cq->lock);
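__mlx4_ib_cq_clean() runs when a QP is destroyed or flushed: it walks backwards from the newest software-owned CQE to the consumer index, discards every entry belonging to that QPN, slides the survivors toward higher indices by the number freed, and finally advances cons_index before ringing the CI doorbell. A simplified stand-alone model follows (owner-bit preservation and SRQ WQE recycling omitted; types are stand-ins; the kernel uses signed arithmetic to tolerate index wrap).

/*
 * Hedged model of __mlx4_ib_cq_clean().  prod_index is the first
 * hardware-owned slot, as found by the forward scan in the kernel code.
 */
#include <stdint.h>
#include <string.h>

struct model_cqe {
	uint32_t qpn;
	uint8_t payload[28];
};

static uint32_t clean_cq(struct model_cqe *ring, uint32_t mask,
			 uint32_t cons_index, uint32_t prod_index,
			 uint32_t qpn)
{
	uint32_t nfreed = 0;

	while (prod_index-- > cons_index) {
		struct model_cqe *cqe = &ring[prod_index & mask];

		if (cqe->qpn == qpn) {
			nfreed++;			/* drop this entry */
		} else if (nfreed) {
			/* shift survivor toward the producer side */
			struct model_cqe *dest =
				&ring[(prod_index + nfreed) & mask];
			memcpy(dest, cqe, sizeof(*cqe));
		}
	}
	/* caller advances cons_index by nfreed and rings the CI doorbell */
	return nfreed;
}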