Lines Matching refs:cqe

81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
88 return cqe;
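
The get_sw_cqe() hits at 81-88 are the classic software-ownership test: a CQE is accepted only when its owner bit matches the wrap parity of the consumer index. A minimal user-space sketch of the same test, assuming a hypothetical OWNER_MASK and a power-of-two ring (in the driver, cq->ibcq.cqe holds nent - 1, so "n & (cq->ibcq.cqe + 1)" is exactly "n & nent"):

	#include <stdint.h>
	#include <stdbool.h>

	#define OWNER_MASK 0x1	/* stands in for MLX5_CQE_OWNER_MASK */

	/* The owner bit must equal the parity of the ring wrap count,
	 * which is bit log2(nent) of the index n; nent must be a
	 * power of two. */
	static bool cqe_is_sw_owned(uint8_t op_own, uint32_t n, uint32_t nent)
	{
		return !((op_own & OWNER_MASK) ^ !!(n & nent));
	}
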
117 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
121 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
137 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
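
The handle_good_req() hits (117-137) switch on the top byte of the big-endian sop_drop_qpn word. A hedged sketch of that extraction, modeling be32_to_cpu with glibc's be32toh; the field split follows the lines above, the helper names are invented:

	#include <endian.h>
	#include <stdint.h>

	/* The requester opcode rides in the most significant byte;
	 * the low 24 bits carry the QP number. */
	static uint8_t req_opcode(uint32_t sop_drop_qpn_be)
	{
		return be32toh(sop_drop_qpn_be) >> 24;
	}

	static uint32_t req_qpn(uint32_t sop_drop_qpn_be)
	{
		return be32toh(sop_drop_qpn_be) & 0xffffff;
	}
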
166 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
182 msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
189 wqe_ctr = be16_to_cpu(cqe->wqe_counter);
200 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
202 switch (get_cqe_opcode(cqe)) {
206 wc->ex.imm_data = cqe->immediate;
211 if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
212 (cqe->hds_ip_ext & CQE_L4_OK))))
218 wc->ex.imm_data = cqe->immediate;
223 wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
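
Lines 202-223 dispatch responder completions on get_cqe_opcode() and, for plain receives, treat the packet as checksum-verified only when both the L3 and L4 bits are set in hds_ip_ext (211-212). A compact sketch of that dual-bit test; the bit positions here are assumptions, not the authoritative mlx5 encoding:

	#include <stdbool.h>
	#include <stdint.h>

	#define CQE_L3_OK (1 << 1)	/* illustrative bit positions */
	#define CQE_L4_OK (1 << 2)

	/* Both hardware checks must pass before the completion is
	 * flagged as checksum-OK. */
	static bool rx_csum_ok(uint8_t hds_ip_ext)
	{
		return (hds_ip_ext & CQE_L3_OK) && (hds_ip_ext & CQE_L4_OK);
	}
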
226 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
227 wc->dlid_path_bits = cqe->ml_path;
228 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
231 u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
240 wc->slid = be16_to_cpu(cqe->slid);
241 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
246 vlan_present = cqe->l4_l3_hdr_type & 0x1;
247 roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
249 wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
250 wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
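
Lines 226-250 unpack several fields multiplexed into flags_rqpn (remote QPN in the low 24 bits, GRH flag at bits 28-29, SL or RoCE packet type starting at bit 24) and into vlan_info (VID in bits 0-11, PCP in bits 13-15). A user-space sketch of the same unpacking; the struct and helper are invented for illustration:

	#include <endian.h>
	#include <stdint.h>

	struct rq_cqe_fields {
		uint32_t src_qp;	/* flags_rqpn bits 0..23 */
		uint8_t  grh;		/* flags_rqpn bits 28..29 */
		uint8_t  sl;		/* flags_rqpn bits 24..27 (IB) */
		uint16_t vlan_id;	/* vlan_info bits 0..11 */
		uint8_t  pcp;		/* vlan_info bits 13..15 */
	};

	static struct rq_cqe_fields unpack_rq_cqe(uint32_t flags_rqpn_be,
						   uint16_t vlan_info_be)
	{
		uint32_t fr = be32toh(flags_rqpn_be);
		uint16_t vi = be16toh(vlan_info_be);

		return (struct rq_cqe_fields){
			.src_qp  = fr & 0xffffff,
			.grh     = (fr >> 28) & 3,
			.sl      = (fr >> 24) & 0xf,
			.vlan_id = vi & 0xfff,
			.pcp     = (vi >> 13) & 0x7,
		};
	}
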
270 static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
272 mlx5_ib_warn(dev, "dump error cqe\n");
273 mlx5_dump_err_cqe(dev->mdev, cqe);
277 struct mlx5_err_cqe *cqe,
282 switch (cqe->syndrome) {
330 wc->vendor_err = cqe->vendor_err_synd;
332 dump_cqe(dev, cqe);
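
The error path (270-332) maps the CQE syndrome to a work-completion status, copies the vendor syndrome through, and dumps the raw CQE for debugging. A sketch of the switch shape only; the syndrome values and status names below are placeholders, not the driver's actual tables:

	#include <stdint.h>

	enum { SYND_LOCAL_LENGTH = 0x01, SYND_REMOTE_ACCESS = 0x13 };	/* placeholders */
	enum { WC_LOC_LEN_ERR, WC_REM_ACCESS_ERR, WC_GENERAL_ERR };

	static int err_syndrome_to_status(uint8_t syndrome)
	{
		switch (syndrome) {
		case SYND_LOCAL_LENGTH:		return WC_LOC_LEN_ERR;
		case SYND_REMOTE_ACCESS:	return WC_REM_ACCESS_ERR;
		default:			return WC_GENERAL_ERR;
		}
	}
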
356 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
359 u16 syndrome = be16_to_cpu(cqe->syndrome);
367 item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
368 item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
372 item->expected = be32_to_cpu(cqe->expected_reftag);
373 item->actual = be32_to_cpu(cqe->actual_reftag);
377 item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
378 item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
384 item->sig_err_offset = be64_to_cpu(cqe->err_offset);
385 item->key = be32_to_cpu(cqe->mkey);
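
get_sig_err_item() (356-385) selects the expected/actual pair by error type: guard tags live in the high 16 bits of the trans_sig words, reference tags use the full 32-bit reftag words, application tags the low 16 bits; the byte offset and MKey are common to all three. A sketch under the assumption that the fields arrive as separate big-endian words; all names below are invented:

	#include <endian.h>
	#include <stdint.h>

	enum sig_err { SIG_GUARD, SIG_REFTAG, SIG_APPTAG };

	struct sig_err_item {
		uint32_t expected, actual;
		uint64_t offset;
		uint32_t key;
	};

	static void decode_sig_err(enum sig_err type,
				   uint32_t exp_trans_be, uint32_t act_trans_be,
				   uint32_t exp_ref_be, uint32_t act_ref_be,
				   uint64_t off_be, uint32_t mkey_be,
				   struct sig_err_item *item)
	{
		switch (type) {
		case SIG_GUARD:		/* high halves of trans_sig */
			item->expected = be32toh(exp_trans_be) >> 16;
			item->actual   = be32toh(act_trans_be) >> 16;
			break;
		case SIG_REFTAG:	/* full 32-bit reftag words */
			item->expected = be32toh(exp_ref_be);
			item->actual   = be32toh(act_ref_be);
			break;
		case SIG_APPTAG:	/* low halves of trans_sig */
			item->expected = be32toh(exp_trans_be) & 0xffff;
			item->actual   = be32toh(act_trans_be) & 0xffff;
			break;
		}
		item->offset = be64toh(off_be);
		item->key    = be32toh(mkey_be);
	}
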
453 void *cqe;
457 cqe = next_cqe_sw(cq);
458 if (!cqe)
461 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
479 mlx5_ib_warn(dev, "unexpected resize cqe\n");
518 mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
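
Lines 453-518 come from the per-CQE polling step: fetch the next software-owned CQE, bail out when there is none, then locate the 64-byte report inside the stride. The loop shape, with hypothetical stand-ins for next_cqe_sw() and the handle_* helpers:

	/* Hypothetical stand-ins for next_cqe_sw() and the handlers. */
	void *next_sw_cqe(void *cq);
	void  handle_cqe(void *cq, void *cqe);

	static int poll_cq(void *cq, int budget)
	{
		int polled = 0;

		while (polled < budget) {
			void *cqe = next_sw_cqe(cq);
			if (!cqe)	/* ring drained, cf. line 458 */
				break;
			handle_cqe(cq, cqe);
			polled++;
		}
		return polled;
	}
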
844 void *cqe;
848 cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
849 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
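
The init loop at 844-849 walks every entry of the (fragmented) CQ buffer and locates its 64-byte CQE, again honoring the 64/128-byte stride, so each can be stamped as not-yet-valid before first use. A flat-array sketch, assuming op_own is the last byte of the 64-byte CQE and using an illustrative invalid-opcode value:

	#include <stddef.h>
	#include <stdint.h>

	#define CQE_INVALID	0xf	/* illustrative opcode value */

	static void init_cq_buf(uint8_t *buf, uint32_t nent, uint32_t cqe_sz)
	{
		for (uint32_t i = 0; i < nent; i++) {
			uint8_t *cqe = buf + (size_t)i * cqe_sz;
			uint8_t *cqe64 = (cqe_sz == 64) ? cqe : cqe + 64;
			cqe64[63] = CQE_INVALID << 4;	/* op_own: opcode in high nibble */
		}
	}
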
923 int entries = attr->cqe;
947 cq->ibcq.cqe = entries - 1;
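
Line 947 stores entries - 1 in ibcq.cqe, which only works as a wrap mask because the ring size is a power of two; "index & cq->ibcq.cqe" then replaces a modulo throughout the file. A small sketch of the sizing convention (helper name invented):

	#include <stdint.h>

	/* Round the requested depth up so that (entries - 1) is a mask. */
	static uint32_t roundup_pow2_u32(uint32_t n)
	{
		uint32_t r = 1;

		while (r < n)
			r <<= 1;
		return r;
	}

	/* e.g. attr->cqe == 100 -> ring of 128 entries, ibcq.cqe == 127 */
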
1049 void *cqe, *dest;
1064 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
1071 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
1072 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
1078 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
1081 memcpy(dest, cqe, cq->mcq.cqe_sz);
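
The __mlx5_ib_cq_clean() hits (1049-1081) scan from the consumer index to the producer index; CQEs to be purged are counted in nfreed, and each survivor is copied nfreed slots ahead so the ring stays dense. A flat-array sketch of the copy at 1078-1081 (the driver goes through get_cqe() instead because its buffer is fragmented):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	static void move_surviving_cqe(uint8_t *ring, uint32_t mask,
				       uint32_t cqe_sz, uint32_t prod_index,
				       uint32_t nfreed)
	{
		uint8_t *src  = ring + (size_t)(prod_index & mask) * cqe_sz;
		uint8_t *dest = ring + (size_t)((prod_index + nfreed) & mask) * cqe_sz;

		memcpy(dest, src, cqe_sz);
	}
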
1202 mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
1211 mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1227 mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1272 if (entries == ibcq->cqe + 1)
1333 cq->ibcq.cqe = entries - 1;
1352 cq->ibcq.cqe = entries - 1;
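
The resize path (1202-1352) refuses to change the CQE stride, insists the old CQEs are in software ownership before copying them, and short-circuits when the requested depth equals the current one; the check at 1272 works because ibcq->cqe already stores entries - 1. A one-line sketch of that no-op test:

	#include <stdint.h>

	/* cur_mask is ibcq->cqe, i.e. entries - 1. */
	static int resize_is_noop(uint32_t requested_entries, uint32_t cur_mask)
	{
		return requested_entries == cur_mask + 1;
	}
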