Lines Matching refs:wqe

9  * irdma_set_fragment - set fragment in wqe
10 * @wqe: wqe for setting fragment
13 * @valid: The wqe valid bit
15 static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
19 set_64bit_val(wqe, offset,
21 set_64bit_val(wqe, offset + 8,
26 set_64bit_val(wqe, offset, 0);
27 set_64bit_val(wqe, offset + 8,
33 * irdma_set_fragment_gen_1 - set fragment in wqe
34 * @wqe: wqe for setting fragment
37 * @valid: wqe valid flag
39 static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
43 set_64bit_val(wqe, offset,
45 set_64bit_val(wqe, offset + 8,
49 set_64bit_val(wqe, offset, 0);
50 set_64bit_val(wqe, offset + 8, 0);
55 * irdma_nop_1 - insert a NOP wqe
61 __le64 *wqe;
69 wqe = qp->sq_base[wqe_idx].elem;
73 set_64bit_val(wqe, 0, 0);
74 set_64bit_val(wqe, 8, 0);
75 set_64bit_val(wqe, 16, 0);
84 set_64bit_val(wqe, 24, hdr);
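
The NOP pattern is visible in the store ordering: the first three 64-bit words of the quantum are zeroed (lines 73-75) and the header word at offset 24, which carries the valid bit, is written last (line 84). A hedged sketch of the header build, assuming the IRDMAQPSQ_* field macros and the usual write barrier before publishing the valid bit:

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb();	/* ensure the WQE body is visible before the valid bit */

	set_64bit_val(wqe, 24, hdr);
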
149 * @wqe_idx: return wqe index
158 __le64 *wqe;
188 wqe = qp->sq_base[*wqe_idx].elem;
198 return wqe;
202 * irdma_qp_get_next_recv_wqe - get the qp's next rcv wqe
204 * @wqe_idx: return wqe index
208 __le64 *wqe;
220 /* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
221 wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;
223 return wqe;
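
A small worked example of the indexing at line 221, with assumed numbers:

	/* Example: with 64-byte RQ WQEs, rq_wqe_size_multiplier is 2 (two
	 * 32-byte quanta per WQE), so *wqe_idx = 3 resolves to
	 * qp->rq_base[3 * 2].elem, i.e. the quantum 192 bytes into the ring.
	 */
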
236 __le64 *wqe;
263 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
265 if (!wqe)
270 set_64bit_val(wqe, 16,
274 set_64bit_val(wqe, 0,
278 qp->wqe_ops.iw_set_fragment(wqe, 0,
285 qp->wqe_ops.iw_set_fragment(wqe, byte_off,
294 qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
312 set_64bit_val(wqe, 24, hdr);
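
Lines 278-294 show the fragment-fill pattern that repeats in the read, send and post-receive paths below: the first SGE (or immediate data) occupies the first quantum, the remaining SGEs are packed in 16-byte slots from byte offset 32 onward, and when the fragment count is even one extra NULL fragment is written purely so the unused trailing slot carries the current valid polarity. A hedged sketch of the loop shape (variable names assumed from the matched lines; any hardware-generation checks not visible here are omitted):

	i = 1;				/* first SGE already placed in quantum 0 */
	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* even fragment count: pad so the leftover half-quantum still
	 * shows the current valid polarity
	 */
	if (frag_cnt && !(frag_cnt & 0x01))
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
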
335 __le64 *wqe;
351 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
353 if (!wqe)
362 qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
365 qp->wqe_ops.iw_set_fragment(wqe, byte_off,
374 qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
379 set_64bit_val(wqe, 16,
393 set_64bit_val(wqe, 24, hdr);
410 __le64 *wqe;
434 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
436 if (!wqe)
444 set_64bit_val(wqe, 0,
448 qp->wqe_ops.iw_set_fragment(wqe, 0,
455 qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
463 qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
469 set_64bit_val(wqe, 16,
488 set_64bit_val(wqe, 24, hdr);
497 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
498 * @wqe: wqe for setting mw bind
499 * @op_info: info for setting bind wqe values
501 static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
504 set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
505 set_64bit_val(wqe, 8,
508 set_64bit_val(wqe, 16, op_info->bind_len);
512 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
513 * @wqe: pointer to wqe
518 static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
532 memcpy(wqe, cur_sge, bytes_copied);
533 wqe += bytes_copied;
540 wqe += 16;
559 * irdma_set_mw_bind_wqe - set mw bind in wqe
560 * @wqe: wqe for setting mw bind
561 * @op_info: info for setting wqe values
563 static void irdma_set_mw_bind_wqe(__le64 *wqe,
566 set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
567 set_64bit_val(wqe, 8,
570 set_64bit_val(wqe, 16, op_info->bind_len);
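
The matched stores outline the bind WQE body: the window VA in word 0, a packed stag word at offset 8, and the bind length at offset 16. A sketch of the middle word, assuming the parent MR stag and MW stag are the two fields packed there:

	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
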
574 * irdma_copy_inline_data - Copy inline data to wqe
575 * @wqe: pointer to wqe
578 * @polarity: polarity of wqe valid bit
580 static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
588 wqe += 8;
598 memcpy(wqe, cur_sge, bytes_copied);
599 wqe += bytes_copied;
610 wqe += 16;
612 *wqe = inline_valid;
613 wqe++;
619 *(wqe + quanta_bytes_remaining) = inline_valid;
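
The gen-2 inline copy differs from the gen_1 variant above in that every 32-byte quantum reserves a byte for the valid/polarity flag: the matched lines show the copy skipping past the header area when it crosses into a new quantum (line 610), stamping inline_valid there (lines 612-613), and finally marking the last, partially filled quantum at wqe + quanta_bytes_remaining (line 619). A simplified, self-contained illustration of that copy-around-a-reserved-byte idea (layout and names invented for the example, not the driver's exact code):

#include <stdint.h>
#include <string.h>

#define QUANTUM_SIZE		32
#define DATA_PER_QUANTUM	(QUANTUM_SIZE - 1)	/* one byte per quantum for the flag */

/* dst must span whole quanta; every quantum touched gets its flag byte set */
static void pack_with_valid_byte(uint8_t *dst, const uint8_t *src, size_t len,
				 uint8_t valid)
{
	while (len) {
		size_t chunk = len < DATA_PER_QUANTUM ? len : DATA_PER_QUANTUM;

		memcpy(dst, src, chunk);	/* payload bytes of this quantum */
		dst[DATA_PER_QUANTUM] = valid;	/* reserved flag byte */
		dst += QUANTUM_SIZE;
		src += chunk;
		len -= chunk;
	}
}
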
657 __le64 *wqe;
677 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
679 if (!wqe)
685 set_64bit_val(wqe, 16,
700 set_64bit_val(wqe, 0,
703 qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
708 set_64bit_val(wqe, 24, hdr);
725 __le64 *wqe;
745 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
747 if (!wqe)
752 set_64bit_val(wqe, 16,
773 set_64bit_val(wqe, 0,
775 qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
780 set_64bit_val(wqe, 24, hdr);
798 __le64 *wqe;
808 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
810 if (!wqe)
816 qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
818 set_64bit_val(wqe, 16, 0);
828 set_64bit_val(wqe, 24, hdr);
837 * irdma_uk_post_receive - post receive wqe
846 __le64 *wqe;
852 wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
853 if (!wqe)
858 qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
862 qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
870 qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
876 set_64bit_val(wqe, 16, 0);
882 set_64bit_val(wqe, 24, hdr);
1227 * irdma_get_wqe_shift - get shift count for maximum wqe size
1229 * @sge: Maximum Scatter Gather Elements per wqe
1233 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
1234 * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32
1235 * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe
1237 * For 4-7 SGEs and inline <= 101, shift = 2; otherwise (wqe
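
A sketch of the decision that comment describes, mapping SGE count and inline size to a shift (thresholds taken from the comment; the exact per-hardware-generation branching is not visible here, so the function shape is assumed):

	/* wqe size = 32 << shift: 0 -> 32 B, 1 -> 64 B, 2 -> 128 B, 3 -> 256 B */
	*shift = 0;
	if (sge > 1 || inline_data > 8) {
		if (sge < 4 && inline_data <= 39)
			*shift = 1;
		else if (sge < 8 && inline_data <= 101)
			*shift = 2;
		else
			*shift = 3;
	}
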
1416 * size of the wqe depends on the number of max. fragments
1417 * allowed. Then the size of wqe * the number of wqes should be the
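
A worked example of that sizing rule, with assumed numbers:

	/* Example: max 3 SGEs per WQE gives shift = 1, i.e. 64-byte SQ WQEs;
	 * a queue of 256 WQEs then needs 64 * 256 = 16384 bytes (16 KiB) of
	 * SQ memory.
	 */
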
1533 __le64 *wqe;
1539 wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
1541 if (!wqe)
1546 set_64bit_val(wqe, 0, 0);
1547 set_64bit_val(wqe, 8, 0);
1548 set_64bit_val(wqe, 16, 0);
1556 set_64bit_val(wqe, 24, hdr);
1611 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ