Lines Matching refs:cqe

81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
82 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
85 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
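The matches above (lines 81-85) come from get_sw_cqe(): cq->ibcq.cqe holds the ring size minus one, so it doubles as an index mask, and a CQE is ready for software only when its ownership bit matches the "lap" parity of the consumer index (line 82 additionally shows the bit being read from the second 32-byte half when the hardware uses 64-byte CQEs). A minimal userspace sketch of that check follows; the struct layout, the 0x80 owner mask, and the names are stand-ins, not the driver's definitions.

    #include <stddef.h>
    #include <stdint.h>

    #define OWNER_MASK 0x80u             /* stand-in for MLX4_CQE_OWNER_MASK */

    struct cqe {                         /* simplified stand-in for struct mlx4_cqe */
    	uint8_t owner_sr_opcode;
    };

    /*
     * 'mask' plays the role of cq->ibcq.cqe (entries - 1).  The CQE at
     * n & mask belongs to software only when its owner bit matches the
     * parity of the pass counter, i.e. bit (mask + 1) of n.
     */
    static struct cqe *get_sw_cqe(struct cqe *ring, unsigned int mask, unsigned int n)
    {
    	struct cqe *cqe = &ring[n & mask];

    	return (!!(cqe->owner_sr_opcode & OWNER_MASK) ^
    		!!(n & (mask + 1))) ? NULL : cqe;
    }
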
133 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
135 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
140 struct ib_umem **umem, u64 buf_addr, int cqe)
147 *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
178 int entries = attr->cqe;
195 cq->ibcq.cqe = entries - 1;
281 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
309 cq->resize_buf->cqe = entries - 1;
338 cq->resize_buf->cqe = entries - 1;
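Lines 133-338 share one sizing convention: ibcq.cqe and resize_buf->cqe store the usable depth as entries - 1 (lines 195, 309, 338), so buffer sizing adds the one back ((cqe + 1) * entry_size on line 135) while index arithmetic can use the field directly as a mask (line 81). The mask trick only works for power-of-two rings, so the sketch below assumes the driver rounds the requested depth up accordingly; the helper and struct names are invented for illustration.

    #include <stddef.h>
    #include <stdint.h>

    /* userspace stand-in for the kernel's roundup_pow_of_two() */
    static unsigned int roundup_pow_of_two(unsigned int x)
    {
    	unsigned int r = 1;

    	while (r < x)
    		r <<= 1;
    	return r;
    }

    struct cq_sizes {
    	unsigned int cqe;        /* stored like cq->ibcq.cqe: entries - 1 */
    	size_t       buf_bytes;  /* what the ring buffer occupies */
    };

    /*
     * Mirrors the arithmetic visible in the matches: round the requested
     * depth up to a power of two, publish entries - 1, and size the buffer
     * as (cqe + 1) * entry_size when allocating or freeing it.
     */
    static struct cq_sizes cq_sizes(unsigned int requested, size_t entry_size)
    {
    	unsigned int entries = roundup_pow_of_two(requested + 1);
    	struct cq_sizes s = {
    		.cqe       = entries - 1,
    		.buf_bytes = (size_t)entries * entry_size,
    	};

    	return s;
    }
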
356 struct mlx4_cqe *cqe, *new_cqe;
362 cqe = get_cqe(cq, i & cq->ibcq.cqe);
363 cqe += cqe_inc;
365 while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
367 (i + 1) & cq->resize_buf->cqe);
368 memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
371 new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
372 (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
373 cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
374 cqe += cqe_inc;
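Lines 356-374 are the core of mlx4_ib_cq_resize_copy_cqes(): walk the old ring from the consumer index until the hardware-written RESIZE marker, copy each entry into the slot it will occupy in the new ring, and recompute its owner bit against the new ring's size (lines 371-372). The sketch below keeps that structure but drops the 64-byte-CQE offset (the cqe += cqe_inc on lines 363 and 374) and uses invented names and assumed mask/opcode values.

    #include <string.h>
    #include <stdint.h>

    #define OWNER_MASK     0x80u     /* stand-ins, not the driver's constants */
    #define OPCODE_MASK    0x1fu
    #define OPCODE_RESIZE  0x16u     /* value assumed for illustration */

    struct cqe {
    	uint8_t owner_sr_opcode;
    	uint8_t payload[31];
    };

    /*
     * Copy everything between consumer index 'i' and the RESIZE marker from
     * 'old_ring' (old_mask = old size - 1) into 'new_ring' (new_mask = new
     * size - 1).  Each copied entry gets its owner bit recomputed from its
     * destination index, as lines 371-372 do with resize_buf->cqe.
     */
    static void resize_copy_cqes(struct cqe *old_ring, unsigned int old_mask,
    			     struct cqe *new_ring, unsigned int new_mask,
    			     uint32_t i)
    {
    	struct cqe *cqe = &old_ring[i & old_mask];

    	while ((cqe->owner_sr_opcode & OPCODE_MASK) != OPCODE_RESIZE) {
    		struct cqe *new_cqe = &new_ring[(i + 1) & new_mask];

    		memcpy(new_cqe, cqe, sizeof(*cqe));
    		new_cqe->owner_sr_opcode =
    			(cqe->owner_sr_opcode & ~OWNER_MASK) |
    			(((i + 1) & (new_mask + 1)) ? OWNER_MASK : 0);
    		cqe = &old_ring[++i & old_mask];
    	}
    }
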
394 if (entries == ibcq->cqe + 1) {
430 cq->ibcq.cqe = cq->resize_buf->cqe;
445 tmp_cqe = cq->ibcq.cqe;
447 cq->ibcq.cqe = cq->resize_buf->cqe;
464 cq->resize_buf->cqe);
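Lines 394-464 come from mlx4_ib_resize_cq(): the request is a no-op if the depth already matches (line 394); otherwise a resize buffer is prepared and ibcq.cqe later takes the resize buffer's value (lines 430 and 447). Line 445 stashes the old depth in tmp_cqe before it is overwritten, presumably so the retired buffer can still be freed with its correct size afterwards (the same shape as line 281), while line 464 frees the unused resize buffer on a failure path. A minimal sketch of that swap-then-free ordering, with invented types and a free stub in place of mlx4_ib_free_cq_buf():

    #include <stddef.h>
    #include <stdlib.h>

    struct cq_buf {
    	void        *mem;
    	unsigned int cqe;          /* entries - 1, as stored in ibcq.cqe */
    	size_t       entry_size;
    };

    /* placeholder for mlx4_ib_free_cq_buf(dev, buf, cqe) */
    static void free_cq_buf(struct cq_buf *buf, unsigned int cqe)
    {
    	(void)cqe;                 /* the driver sizes the free by (cqe + 1) * entry_size */
    	free(buf->mem);
    	buf->mem = NULL;
    }

    /*
     * Publish the resized buffer, then release the old one.  Saving the old
     * depth first is the point: after the swap, 'live' no longer remembers
     * how big the retired buffer was.
     */
    static void commit_resize(struct cq_buf *live, struct cq_buf *resized)
    {
    	struct cq_buf old = *live;     /* tmp_buf / tmp_cqe on line 445 */

    	*live = *resized;              /* line 447: adopt resize_buf->cqe */
    	free_cq_buf(&old, old.cqe);
    }
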
493 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
500 static void dump_cqe(void *cqe)
502 __be32 *buf = cqe;
510 static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
513 if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
517 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
518 cqe->vendor_err_syndrome,
519 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
520 dump_cqe(cqe);
523 switch (cqe->syndrome) {
568 wc->vendor_err = cqe->vendor_err_syndrome;
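Lines 500-568 cover the error path: dump_cqe() prints the raw CQE as eight big-endian words, and mlx4_ib_handle_error_cqe() switches on cqe->syndrome (line 523) to pick a work-completion status while passing vendor_err_syndrome through verbatim (line 568). A userspace approximation of the dump helper, assuming a 32-byte CQE and using printf in place of the kernel's logging:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>      /* ntohl(), standing in for be32_to_cpu() */

    /* Print a 32-byte CQE as eight big-endian 32-bit words, roughly what
     * dump_cqe() on line 500 does via the kernel log. */
    static void dump_cqe(const void *cqe)
    {
    	const uint32_t *buf = cqe;
    	int i;

    	for (i = 0; i < 8; i++)
    		printf("%08x%s", (unsigned)ntohl(buf[i]), (i == 7) ? "\n" : " ");
    }
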
581 unsigned tail, struct mlx4_cqe *cqe, int is_eth)
660 struct mlx4_cqe *cqe;
673 cqe = next_cqe_sw(cq);
674 if (!cqe)
678 cqe++;
688 is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
689 is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
693 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
697 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
699 cq->ibcq.cqe = cq->resize_buf->cqe;
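Lines 660-699 sketch the top of mlx4_ib_poll_one(): fetch the next software-owned CQE (line 673), step into the second half of a 64-byte CQE (line 678), then classify it via owner_sr_opcode (lines 688-693), with a special case for the RESIZE marker that retires the old buffer and adopts the resize buffer's depth (lines 697-699). Below is a stripped-down skeleton of that classification; the constants and types are assumed stand-ins, the 64-byte offset is dropped, and the ownership helper repeats the earlier get_sw_cqe() sketch.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <errno.h>

    #define OWNER_MASK     0x80u     /* stand-ins for the MLX4_CQE_* constants */
    #define IS_SEND_MASK   0x40u
    #define OPCODE_MASK    0x1fu
    #define OPCODE_ERROR   0x1eu
    #define OPCODE_RESIZE  0x16u

    struct cqe {
    	uint8_t owner_sr_opcode;
    	uint8_t payload[31];
    };

    struct cq {
    	struct cqe   *ring;
    	unsigned int  mask;        /* ibcq.cqe: entries - 1 */
    	uint32_t      cons_index;
    };

    static struct cqe *next_sw_cqe(struct cq *cq)
    {
    	struct cqe *cqe = &cq->ring[cq->cons_index & cq->mask];

    	return (!!(cqe->owner_sr_opcode & OWNER_MASK) ^
    		!!(cq->cons_index & (cq->mask + 1))) ? NULL : cqe;
    }

    static int poll_one(struct cq *cq, bool *is_send, bool *is_error)
    {
    	struct cqe *cqe = next_sw_cqe(cq);

    	if (!cqe)
    		return -EAGAIN;            /* ring is empty for now */

    	++cq->cons_index;
    	/* the driver issues a read barrier here before trusting the
    	 * rest of the CQE contents */

    	*is_send  = cqe->owner_sr_opcode & IS_SEND_MASK;
    	*is_error = (cqe->owner_sr_opcode & OPCODE_MASK) == OPCODE_ERROR;

    	if ((cqe->owner_sr_opcode & OPCODE_MASK) == OPCODE_RESIZE) {
    		/* lines 697-699: free the old buffer and switch the CQ
    		 * over to resize_buf->cqe, then poll again; omitted here */
    		return -EAGAIN;
    	}
    	return 0;
    }
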
709 (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
716 be32_to_cpu(cqe->vlan_my_qpn));
724 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
734 wqe_ctr = be16_to_cpu(cqe->wqe_index);
741 wqe_ctr = be16_to_cpu(cqe->wqe_index);
746 wqe_ctr = be16_to_cpu(cqe->wqe_index);
757 mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
765 switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
781 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
810 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
812 switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
816 wc->ex.imm_data = cqe->immed_rss_invalid;
821 wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
830 wc->ex.imm_data = cqe->immed_rss_invalid;
841 use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
847 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
851 wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
852 wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
853 cqe->badfcs_enc,
854 cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
857 wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
858 if (be32_to_cpu(cqe->vlan_my_qpn) &
860 wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
865 memcpy(wc->smac, cqe->smac, ETH_ALEN);
868 wc->slid = be16_to_cpu(cqe->rlid);
869 wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
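Lines 847-869 extract the remaining receive metadata: the g_mlpath_rqpn word (whose name suggests it packs the GRH flag, path bits, and remote QPN), the pkey index from the low 7 bits of immed_rss_invalid (line 851), and the 16-bit sl_vid field, which is read differently per link layer: the service level sits in the top 3 bits on Ethernet (line 857), with a VLAN id taken when line 858's CVLAN flag is set, versus the top 4 bits over IB (line 869). A small decoder illustrating only the sl_vid split; the 0x0fff VLAN mask and the exact layout are assumptions carried over from the shifts visible above.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Split a host-order sl_vid value the way lines 857-869 suggest:
     * Ethernet keeps the SL/PCP in bits 15:13 and (when the CQE flags a
     * VLAN) the VLAN id in the low 12 bits; IB keeps the 4-bit SL in
     * bits 15:12.  Mask values here are illustrative assumptions.
     */
    static void decode_sl_vid(uint16_t sl_vid, bool is_eth, bool vlan_present,
    			  uint8_t *sl, uint16_t *vlan_id)
    {
    	if (is_eth) {
    		*sl = sl_vid >> 13;
    		*vlan_id = vlan_present ? (sl_vid & 0x0fff) : 0xffff;
    	} else {
    		*sl = sl_vid >> 12;
    		*vlan_id = 0xffff;         /* no VLAN on an IB link */
    	}
    }
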
919 struct mlx4_cqe *cqe, *dest;
931 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
939 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
940 cqe += cqe_inc;
942 if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
943 if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
944 mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
947 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
951 memcpy(dest, cqe, sizeof *cqe);
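Lines 919-951 are the sweep in __mlx4_ib_cq_clean(): after finding how far the hardware has produced (line 931 caps the scan at one full ring), the driver walks backwards, counts every CQE belonging to the QP being cleaned (returning its SRQ WQE if needed, line 944), and slides the surviving CQEs down by that count (lines 947-951). A compact sketch of that compaction with the stand-in types from the earlier sketches; the backward iteration bounds are an assumption about the surrounding code, and the sketch preserves each destination slot's owner bit, which a copy like this has to do for the get_sw_cqe() ownership check to keep working.

    #include <string.h>
    #include <stdint.h>

    #define OWNER_MASK 0x80u             /* stand-in, as in the earlier sketches */

    struct cqe {
    	uint8_t  owner_sr_opcode;
    	uint8_t  payload[3];
    	uint32_t qpn;                    /* simplified: host-order QP number */
    };

    /*
     * Remove every CQE of 'qpn' between cons_index and prod_index by copying
     * older entries over them; returns how many entries were dropped so the
     * caller can advance the consumer index by the same amount.
     */
    static uint32_t cq_clean(struct cqe *ring, unsigned int mask,
    			 uint32_t cons_index, uint32_t prod_index, uint32_t qpn)
    {
    	uint32_t nfreed = 0;

    	while ((int32_t)(--prod_index - cons_index) >= 0) {
    		struct cqe *cqe = &ring[prod_index & mask];

    		if (cqe->qpn == qpn) {
    			++nfreed;
    		} else if (nfreed) {
    			struct cqe *dest = &ring[(prod_index + nfreed) & mask];
    			uint8_t owner = dest->owner_sr_opcode & OWNER_MASK;

    			memcpy(dest, cqe, sizeof(*cqe));
    			dest->owner_sr_opcode = owner |
    				(dest->owner_sr_opcode & ~OWNER_MASK);
    		}
    	}
    	return nfreed;
    }
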