Lines Matching refs:cqe

81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
88 return cqe;
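
Lines 81-88 are the software-ownership test (the listing matches the CQ code of the mlx5 InfiniBand driver, drivers/infiniband/hw/mlx5/cq.c): get_cqe() masks the index with ibcq.cqe (ring size - 1), and a CQE counts as software-owned when its opcode is not MLX5_CQE_INVALID and its owner bit equals the wrap parity of the consumer index. A minimal user-space sketch of the same test, with a simplified stand-in for struct mlx5_cqe64 (layout and the MLX5_CQE_INVALID value assumed from the mlx5 headers):

    #include <stdbool.h>
    #include <stdint.h>

    #define MLX5_CQE_OWNER_MASK 1
    #define MLX5_CQE_INVALID    0xf    /* assumed opcode value */

    struct mlx5_cqe64 {                /* stand-in: only the byte we test */
        uint8_t op_own;                /* opcode in bits 7:4, owner in bit 0 */
    };

    /* nent must be a power of two; ci is the free-running consumer index. */
    static bool cqe_sw_owned(const struct mlx5_cqe64 *cqe, uint32_t ci,
                             uint32_t nent)
    {
        int sw_parity = !!(ci & nent);    /* flips on every ring wrap */

        /* Valid opcode, and owner bit agrees with the wrap parity. */
        return (cqe->op_own >> 4) != MLX5_CQE_INVALID &&
               !((cqe->op_own & MLX5_CQE_OWNER_MASK) ^ sw_parity);
    }

Here nent corresponds to cq->ibcq.cqe + 1 in the driver, which is exactly the mask used at line 87.
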
117 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
121 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
137 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
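
handle_good_req() (lines 117-137) turns a requester-side completion into an ib_wc: the top byte of sop_drop_qpn carries the send opcode, and byte_len is filled from byte_cnt for operations that return data. A hedged reconstruction of the dispatch shape, showing only a few cases:

    switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {    /* line 121 */
    case MLX5_OPCODE_RDMA_WRITE_IMM:
        wc->wc_flags |= IB_WC_WITH_IMM;
        fallthrough;
    case MLX5_OPCODE_RDMA_WRITE:
        wc->opcode = IB_WC_RDMA_WRITE;
        break;
    case MLX5_OPCODE_RDMA_READ:
        wc->opcode   = IB_WC_RDMA_READ;
        wc->byte_len = be32_to_cpu(cqe->byte_cnt);     /* line 137 */
        break;
    /* ... remaining send opcodes elided ... */
    }
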
166 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
182 msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
189 wqe_ctr = be16_to_cpu(cqe->wqe_counter);
200 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
202 switch (get_cqe_opcode(cqe)) {
206 wc->ex.imm_data = cqe->immediate;
211 if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
212 (cqe->hds_ip_ext & CQE_L4_OK))))
218 wc->ex.imm_data = cqe->immediate;
223 wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
226 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
227 wc->dlid_path_bits = cqe->ml_path;
228 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
231 u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
240 wc->slid = be16_to_cpu(cqe->slid);
241 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
246 vlan_present = cqe->l4_l3_hdr_type & 0x1;
247 roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
249 wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
250 wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
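
handle_responder() (lines 166-250) unpacks a receive completion: the SRQ, if any, is looked up by srqn and its WQE freed by wqe_counter; byte_len, immediate data, and the invalidated rkey come straight from the CQE; checksum status is derived from the CQE_L3_OK/CQE_L4_OK bits; and addressing is decoded differently for IB (slid/sl) and RoCE (vlan_info). The bit-level field extraction, as a self-contained user-space sketch (struct layout simplified to the two fields decoded here):

    #include <arpa/inet.h>   /* ntohl/ntohs stand in for be32_to_cpu/be16_to_cpu */
    #include <stdint.h>

    struct rx_cqe {              /* stand-in for the fields used below */
        uint32_t flags_rqpn;     /* BE: flag bits 31:24, remote QPN 23:0 */
        uint16_t vlan_info;      /* BE: priority 15:13, CFI 12, VID 11:0 */
    };

    static void decode_rx(const struct rx_cqe *c)
    {
        uint32_t frq = ntohl(c->flags_rqpn);
        uint16_t vl  = ntohs(c->vlan_info);

        uint32_t src_qp = frq & 0xffffff;        /* line 226 */
        uint8_t  g      = (frq >> 28) & 3;       /* line 228: GRH bits */
        uint8_t  sl_ib  = (frq >> 24) & 0xf;     /* line 241: IB SL */
        uint8_t  roce_t = (frq >> 24) & 0x3;     /* line 247: RoCE pkt type */
        uint16_t vid    = vl & 0xfff;            /* line 249: VLAN ID */
        uint8_t  sl_eth = (vl >> 13) & 0x7;      /* line 250: 802.1p PCP */

        (void)src_qp; (void)g; (void)sl_ib;      /* silence unused warnings */
        (void)roce_t; (void)vid; (void)sl_eth;
    }
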
270 static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe,
276 cqe, sizeof(*cqe), false);
280 struct mlx5_err_cqe *cqe,
285 switch (cqe->syndrome) {
336 wc->vendor_err = cqe->vendor_err_synd;
338 dump_cqe(dev, cqe, wc, dump);
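
mlx5_handle_error_cqe() (lines 280-338) maps the hardware syndrome byte to an ib_wc_status, preserves the raw vendor syndrome in wc->vendor_err, and hands the failing 64-byte entry to dump_cqe() (line 270) for a hex dump. A hedged sketch of the mapping shape; the syndrome names follow the mlx5 convention, and the real switch at line 285 covers many more cases:

    switch (cqe->syndrome) {                       /* line 285 */
    case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
        wc->status = IB_WC_LOC_LEN_ERR;
        break;
    case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
        wc->status = IB_WC_LOC_PROT_ERR;
        break;
    case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
        wc->status = IB_WC_REM_ACCESS_ERR;
        break;
    default:
        wc->status = IB_WC_GENERAL_ERR;
        break;
    }
    wc->vendor_err = cqe->vendor_err_synd;         /* line 336 */
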
362 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
365 u16 syndrome = be16_to_cpu(cqe->syndrome);
373 item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
374 item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
378 item->expected = be32_to_cpu(cqe->expected_reftag);
379 item->actual = be32_to_cpu(cqe->actual_reftag);
383 item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
384 item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
390 item->sig_err_offset = be64_to_cpu(cqe->err_offset);
391 item->key = be32_to_cpu(cqe->mkey);
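
get_sig_err_item() (lines 362-391) decodes a T10-DIF signature-error CQE into a struct ib_sig_err. The 16-bit syndrome selects which protection field mismatched, and that choice determines which slice of the expected/actual words is meaningful: guard tags live in the high 16 bits of the trans_sig words (lines 373-374), reference tags use the full 32-bit reftag words (lines 378-379), and application tags use the low 16 bits of trans_sig (lines 383-384). A hedged reconstruction, assuming per-class syndrome bits in these positions:

    #define GUARD_ERR  (1 << 13)   /* assumed bit positions */
    #define APPTAG_ERR (1 << 12)
    #define REFTAG_ERR (1 << 11)

    if (syndrome & GUARD_ERR) {              /* CRC/guard mismatch */
        item->err_type = IB_SIG_BAD_GUARD;
        item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
        item->actual   = be32_to_cpu(cqe->actual_trans_sig) >> 16;
    } else if (syndrome & REFTAG_ERR) {      /* reference tag mismatch */
        item->err_type = IB_SIG_BAD_REFTAG;
        item->expected = be32_to_cpu(cqe->expected_reftag);
        item->actual   = be32_to_cpu(cqe->actual_reftag);
    } else if (syndrome & APPTAG_ERR) {      /* application tag mismatch */
        item->err_type = IB_SIG_BAD_APPTAG;
        item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
        item->actual   = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
    }
    item->sig_err_offset = be64_to_cpu(cqe->err_offset);   /* line 390 */
    item->key = be32_to_cpu(cqe->mkey);                    /* line 391 */
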
459 void *cqe;
463 cqe = next_cqe_sw(cq);
464 if (!cqe)
467 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
485 mlx5_ib_warn(dev, "unexpected resize cqe\n");
524 mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
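
The poll path (lines 459-524) is the consumer side of the ownership protocol: peek at the next CQE, return if hardware still owns it, locate the 64-byte payload inside the stride (128-byte CQEs keep it in the second half, line 467), and only then dispatch on the opcode; a resize CQE seen outside a resize triggers the warning at line 485, and error CQEs are logged at line 524. A skeleton of one iteration, hedged as an illustration:

    cqe = next_cqe_sw(cq);                  /* line 463 */
    if (!cqe)
        return -EAGAIN;                     /* ring empty for software */

    cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;   /* line 467 */

    ++cq->mcq.cons_index;

    /* A read barrier belongs here: the CQE contents must not be read,
     * even speculatively, before the ownership bit has been checked. */
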
869 void *cqe;
873 cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
874 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
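
init_cq_frag_buf() (lines 869-874) runs once per entry at creation time: each 64-byte CQE gets its opcode nibble stamped MLX5_CQE_INVALID, so the validity test at line 87 rejects every slot until hardware writes a real completion into it. The loop body, reconstructed:

    for (i = 0; i < buf->nent; i++) {
        cqe   = mlx5_frag_buf_get_wqe(&buf->fbc, i);     /* line 873 */
        cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;    /* line 874 */
        cqe64->op_own = MLX5_CQE_INVALID << 4;           /* opcode bits 7:4 */
    }
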
948 int entries = attr->cqe;
972 cq->ibcq.cqe = entries - 1;
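
Since entries is rounded up to a power of two between lines 948 and 972, ibcq.cqe = entries - 1 doubles as an index mask; that is what lets get_cqe(cq, n & cq->ibcq.cqe) at line 81 and the wrap-parity test n & (cq->ibcq.cqe + 1) at line 87 avoid a modulo. Worked example with entries = 256 (ibcq.cqe = 255):

    n = 260:  260 & 255 = 4      ->  slot 4, one full wrap later
              260 & 256 = 256    ->  non-zero: the owner parity has flipped
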
1074 void *cqe, *dest;
1089 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
1096 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
1097 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
1103 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
1106 memcpy(dest, cqe, cq->mcq.cqe_sz);
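
__mlx5_ib_cq_clean() (lines 1074-1106) drops completions belonging to a destroyed QP or SRQ without disturbing the rest of the ring: it first scans forward to find the producer index (line 1089 stops the scan after at most one full ring), then walks back toward the consumer index, counting matching CQEs in nfreed and sliding each surviving CQE forward by nfreed slots while preserving the destination slot's owner bit. A hedged sketch of the compaction (is_equal_rsn and the exact loop bounds assumed):

    while ((int)(--prod_index) - (int)cq->mcq.cons_index >= 0) {
        cqe   = get_cqe(cq, prod_index & cq->ibcq.cqe);        /* line 1096 */
        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;       /* line 1097 */
        if (is_equal_rsn(cqe64, rsn)) {
            ++nfreed;                       /* CQE for the dying QP: drop */
        } else if (nfreed) {
            /* Slide a survivor down, keeping the dest slot's owner bit. */
            dest   = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
            dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
            owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
            memcpy(dest, cqe, cq->mcq.cqe_sz);                 /* line 1106 */
            dest64->op_own = owner_bit |
                             (dest64->op_own & ~MLX5_CQE_OWNER_MASK);
        }
    }
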
1223 mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
1232 mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1248 mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1294 if (entries == ibcq->cqe + 1)
1368 cq->ibcq.cqe = entries - 1;
1387 cq->ibcq.cqe = entries - 1;
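
The resize path (lines 1223-1387) migrates software-owned CQEs into the new ring: resizing across CQE sizes is refused (line 1223), every source CQE must be in software ownership (the warnings at 1232 and 1248), a request for the current size is a no-op (line 1294, since ibcq.cqe stores size - 1), and on success ibcq.cqe is updated to the new size - 1 (lines 1368 and 1387). A hedged sketch of the copy loop's shape; index arithmetic and variable setup are elided, and details are assumed:

    scqe = get_sw_cqe(cq, i);
    if (!scqe)
        return -EINVAL;                  /* "expected cqe in sw ownership" */
    while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
        memcpy(dcqe, scqe, dsize);       /* same cqe_sz both sides, line 1223 */
        /* Re-stamp the owner bit for the new ring's wrap parity. */
        dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
        scqe = get_sw_cqe(cq, ++i);
        if (!scqe)
            return -EINVAL;              /* line 1248 fires here */
    }
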