/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/ |
H A D | trace_tx.h |
     87  "[%s] wqe %p wr_id %llx send_flags %x qpn %x qpt %u psn %x lpsn %x ssn %x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u pid %u num_sge %u wr_num_sge %u"
     95  __field(u64, wr_id)
    116  __entry->wr_id = wqe->wr.wr_id;
    137  __entry->wr_id,
    163  __field(u64, wr_id)
    175  __entry->wr_id = wqe->wr.wr_id;
    185  "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
    191  __entry->wr_id,
    [all...]
H A D | trace_cq.h |
    108  "[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x flags %x imm %x"
    116  __field(u64, wr_id)
    127  __entry->wr_id = wc->wr_id;
    140  __entry->wr_id,
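
The rdmavt trace headers above record the 64-bit wr_id of a send WQE (trace_tx.h) or of a polled completion (trace_cq.h) into the ftrace ring buffer with __field(u64, wr_id), copy it in TP_fast_assign(), and print it as %llx. Below is a minimal sketch of that TRACE_EVENT shape; the event name and prototype are hypothetical, not the actual rdmavt definitions, and the usual trace-header boilerplate (TRACE_INCLUDE_FILE and friends) is omitted.

    /* Hypothetical trace event modelled on the trace_cq.h pattern above. */
    #include <linux/tracepoint.h>
    #include <rdma/ib_verbs.h>

    TRACE_EVENT(example_poll_cq,
            TP_PROTO(const struct ib_wc *wc),
            TP_ARGS(wc),
            TP_STRUCT__entry(
                    __field(u64, wr_id)     /* opaque cookie the ULP posted with its WR */
                    __field(int, status)
            ),
            TP_fast_assign(
                    __entry->wr_id = wc->wr_id;   /* copied verbatim from the completion */
                    __entry->status = wc->status;
            ),
            TP_printk("wr_id %llx status %d", __entry->wr_id, __entry->status)
    );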
|
/kernel/linux/linux-6.6/drivers/infiniband/sw/rdmavt/ |
H A D | trace_tx.h |
     45  "[%s] wqe %p wr_id %llx send_flags %x qpn %x qpt %u psn %x lpsn %x ssn %x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u pid %u num_sge %u wr_num_sge %u"
     53  __field(u64, wr_id)
     74  __entry->wr_id = wqe->wr.wr_id;
     95  __entry->wr_id,
    121  __field(u64, wr_id)
    133  __entry->wr_id = wqe->wr.wr_id;
    143  "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
    149  __entry->wr_id,
    [all...]
H A D | trace_cq.h |
     66  "[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x flags %x imm %x"
     74  __field(u64, wr_id)
     85  __entry->wr_id = wc->wr_id;
     98  __entry->wr_id,
|
/kernel/linux/linux-5.10/drivers/infiniband/ulp/ipoib/ |
H A D | ipoib_ib.c |
    106  priv->rx_wr.wr_id = id | IPOIB_OP_RECV;  in ipoib_ib_post_receive()
    176  unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;  in ipoib_ib_handle_rx_wc() local
    183  wr_id, wc->status);  in ipoib_ib_handle_rx_wc()
    185  if (unlikely(wr_id >= ipoib_recvq_size)) {  in ipoib_ib_handle_rx_wc()
    187  wr_id, ipoib_recvq_size);  in ipoib_ib_handle_rx_wc()
    191  skb = priv->rx_ring[wr_id].skb;  in ipoib_ib_handle_rx_wc()
    197  wc->status, wr_id, wc->vendor_err);  in ipoib_ib_handle_rx_wc()
    198  ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);  in ipoib_ib_handle_rx_wc()
    200  priv->rx_ring[wr_id]  in ipoib_ib_handle_rx_wc()
    388  unsigned int wr_id = wc->wr_id;  ipoib_ib_handle_tx_wc() local
    541  post_send(struct ipoib_dev_priv *priv, unsigned int wr_id, struct ib_ah *address, u32 dqpn, struct ipoib_tx_buf *tx_req, void *head, int hlen)  post_send() argument
    [all...]
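
ipoib_ib.c packs the receive-ring index together with the IPOIB_OP_RECV flag into wr_id when posting, then masks the flag off in the completion handler and bounds-checks the index before touching rx_ring[]. The following is a self-contained sketch of that encode/decode pattern; the flag value, ring size, and names are made up for illustration, not the real IPoIB constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values only; the real IPOIB_OP_* flags live in ipoib.h. */
    #define OP_RECV    (1ULL << 31)
    #define RECVQ_SIZE 256u

    struct rx_slot { void *skb; };
    static struct rx_slot rx_ring[RECVQ_SIZE];

    /* Posting side: fold the ring index and a direction flag into the opaque id. */
    static uint64_t make_rx_wr_id(unsigned int id)
    {
        return (uint64_t)id | OP_RECV;
    }

    /* Completion side: strip the flag, then validate before indexing the ring. */
    static struct rx_slot *rx_slot_from_wr_id(uint64_t wr_id)
    {
        unsigned int id = (unsigned int)(wr_id & ~OP_RECV);

        if (id >= RECVQ_SIZE) {
            fprintf(stderr, "recv completion with bogus wr_id %u\n", id);
            return NULL;
        }
        return &rx_ring[id];
    }

    int main(void)
    {
        uint64_t wr_id = make_rx_wr_id(42);

        printf("slot %p\n", (void *)rx_slot_from_wr_id(wr_id));
        return 0;
    }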
H A D | ipoib_cm.c |
     99  priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;  in ipoib_cm_post_receive_srq()
    124  wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;  in ipoib_cm_post_receive_nonsrq()
    226  ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;  in ipoib_cm_start_rx_drain()
    564  unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);  in ipoib_cm_handle_rx_wc() local
    574  wr_id, wc->status);  in ipoib_cm_handle_rx_wc()
    576  if (unlikely(wr_id >= ipoib_recvq_size)) {  in ipoib_cm_handle_rx_wc()
    577  if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {  in ipoib_cm_handle_rx_wc()
    585  wr_id, ipoib_recvq_size);  in ipoib_cm_handle_rx_wc()
    594  skb = rx_ring[wr_id]  in ipoib_cm_handle_rx_wc()
    696  post_send(struct ipoib_dev_priv *priv, struct ipoib_cm_tx *tx, unsigned int wr_id, struct ipoib_tx_buf *tx_req)  post_send() argument
    798  unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;  ipoib_cm_handle_tx_wc() local
    [all...]
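
The connected-mode path adds IPOIB_OP_CM to the same wr_id encoding and also reserves one sentinel value (IPOIB_CM_RX_DRAIN_WRID) whose completion means the drain work request finished rather than referring to a ring slot, which is why the out-of-range branch in ipoib_cm_handle_rx_wc() first compares against the masked sentinel. A compact sketch of that dispatch follows, with invented constants standing in for the real flags and sentinel.

    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-ins for IPOIB_OP_CM / IPOIB_OP_RECV / IPOIB_CM_RX_DRAIN_WRID. */
    #define OP_CM          (1ULL << 30)
    #define OP_RECV        (1ULL << 31)
    #define RX_DRAIN_WRID  0xffffffffULL
    #define RECVQ_SIZE     256u

    static void handle_cm_rx_completion(uint64_t raw_wr_id)
    {
        uint64_t wr_id = raw_wr_id & ~(OP_CM | OP_RECV);

        if (wr_id >= RECVQ_SIZE) {
            /* Not a ring slot: either the drain sentinel or garbage. */
            if (wr_id == (RX_DRAIN_WRID & ~(OP_CM | OP_RECV)))
                puts("rx drain complete");
            else
                fprintf(stderr, "cm recv completion with bogus wr_id %llu\n",
                        (unsigned long long)wr_id);
            return;
        }
        printf("data on ring slot %llu\n", (unsigned long long)wr_id);
    }

    int main(void)
    {
        handle_cm_rx_completion(7ULL | OP_CM | OP_RECV);  /* ordinary ring slot */
        handle_cm_rx_completion(RX_DRAIN_WRID);           /* drain sentinel */
        return 0;
    }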
/kernel/linux/linux-6.6/drivers/infiniband/ulp/ipoib/ |
H A D | ipoib_ib.c |
    106  priv->rx_wr.wr_id = id | IPOIB_OP_RECV;  in ipoib_ib_post_receive()
    176  unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;  in ipoib_ib_handle_rx_wc() local
    183  wr_id, wc->status);  in ipoib_ib_handle_rx_wc()
    185  if (unlikely(wr_id >= ipoib_recvq_size)) {  in ipoib_ib_handle_rx_wc()
    187  wr_id, ipoib_recvq_size);  in ipoib_ib_handle_rx_wc()
    191  skb = priv->rx_ring[wr_id].skb;  in ipoib_ib_handle_rx_wc()
    197  wc->status, wr_id, wc->vendor_err);  in ipoib_ib_handle_rx_wc()
    198  ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);  in ipoib_ib_handle_rx_wc()
    200  priv->rx_ring[wr_id]  in ipoib_ib_handle_rx_wc()
    388  unsigned int wr_id = wc->wr_id;  ipoib_ib_handle_tx_wc() local
    541  post_send(struct ipoib_dev_priv *priv, unsigned int wr_id, struct ib_ah *address, u32 dqpn, struct ipoib_tx_buf *tx_req, void *head, int hlen)  post_send() argument
    [all...]
H A D | ipoib_cm.c |
     99  priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;  in ipoib_cm_post_receive_srq()
    124  wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;  in ipoib_cm_post_receive_nonsrq()
    226  ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;  in ipoib_cm_start_rx_drain()
    564  unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);  in ipoib_cm_handle_rx_wc() local
    574  wr_id, wc->status);  in ipoib_cm_handle_rx_wc()
    576  if (unlikely(wr_id >= ipoib_recvq_size)) {  in ipoib_cm_handle_rx_wc()
    577  if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {  in ipoib_cm_handle_rx_wc()
    585  wr_id, ipoib_recvq_size);  in ipoib_cm_handle_rx_wc()
    594  skb = rx_ring[wr_id]  in ipoib_cm_handle_rx_wc()
    696  post_send(struct ipoib_dev_priv *priv, struct ipoib_cm_tx *tx, unsigned int wr_id, struct ipoib_tx_buf *tx_req)  post_send() argument
    798  unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;  ipoib_cm_handle_tx_wc() local
    [all...]
/kernel/linux/linux-5.10/net/rds/ |
H A D | ib_ring.c |
    156  u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest)  in rds_ib_ring_completed() argument
    160  if (oldest <= (unsigned long long)wr_id)  in rds_ib_ring_completed()
    161  ret = (unsigned long long)wr_id - oldest + 1;  in rds_ib_ring_completed()
    163  ret = ring->w_nr - oldest + (unsigned long long)wr_id + 1;  in rds_ib_ring_completed()
    165  rdsdebug("ring %p ret %u wr_id %u oldest %u\n", ring, ret,  in rds_ib_ring_completed()
    166  wr_id, oldest);  in rds_ib_ring_completed()
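
rds_ib_ring_completed() turns the wr_id of the newest completed ring entry plus the oldest outstanding position into an inclusive count of completed entries, with one branch for the wrapped case. Here is a standalone sketch of the same arithmetic with shortened names; for a ring of 256 entries, oldest = 250 and wr_id = 3 yields 10.

    #include <stdint.h>
    #include <stdio.h>

    /* Number of entries completed, inclusive of both ends, on a ring of w_nr slots. */
    static uint32_t ring_completed(uint32_t w_nr, uint32_t wr_id, uint32_t oldest)
    {
        if (oldest <= wr_id)
            return wr_id - oldest + 1;        /* no wrap: contiguous range */
        return w_nr - oldest + wr_id + 1;     /* wrapped past the end of the ring */
    }

    int main(void)
    {
        printf("%u\n", ring_completed(256, 3, 250));   /* prints 10 */
        printf("%u\n", ring_completed(256, 12, 5));    /* prints 8  */
        return 0;
    }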
|
/kernel/linux/linux-5.10/net/smc/ |
H A D | smc_wr.h |
     90  u64 wr_id, temp_wr_id;  in smc_wr_rx_post() local
     93  wr_id = ++link->wr_rx_id; /* tasklet context, thus not atomic */  in smc_wr_rx_post()
     94  temp_wr_id = wr_id;  in smc_wr_rx_post()
     96  link->wr_rx_ibs[index].wr_id = wr_id;  in smc_wr_rx_post()
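
smc_wr_rx_post() hands each posted receive a monotonically increasing 64-bit wr_id (safe to bump without atomics because it runs in tasklet context) and stamps it into the chosen wr_rx_ibs[] slot; the slot-selection line itself is elided in the excerpt above. The sketch below shows that general shape and simply assumes the slot is the id modulo the ring size, which is an assumption for illustration, not the actual SMC computation.

    #include <stdint.h>
    #include <stdio.h>

    #define RX_RING_ENTRIES 16u

    struct recv_wr { uint64_t wr_id; };

    struct link {
        uint64_t       wr_rx_id;                   /* last id handed out */
        struct recv_wr wr_rx_ibs[RX_RING_ENTRIES];
    };

    /* Post one receive: tag a ring slot with a fresh, ever-increasing id. */
    static uint64_t rx_post(struct link *lnk)
    {
        uint64_t wr_id = ++lnk->wr_rx_id;  /* single producer, no atomics needed */
        /* ASSUMPTION: the real code derives the slot differently. */
        unsigned int index = (unsigned int)(wr_id % RX_RING_ENTRIES);

        lnk->wr_rx_ibs[index].wr_id = wr_id;
        return wr_id;
    }

    int main(void)
    {
        struct link lnk = { 0 };

        printf("posted wr_id %llu\n", (unsigned long long)rx_post(&lnk));
        return 0;
    }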
|
H A D | smc_wr.c |
     41  u64 wr_id; /* work request id sent */  member
     70  static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)  in smc_wr_tx_find_pending_index() argument
     75  if (link->wr_tx_pends[i].wr_id == wr_id)  in smc_wr_tx_find_pending_index()
     98  pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);  in smc_wr_tx_process_cqe()
    190  u64 wr_id;  in smc_wr_tx_get_free_slot() local
    214  wr_id = smc_wr_tx_get_next_wr_id(link);  in smc_wr_tx_get_free_slot()
    216  wr_pend->wr_id = wr_id;  in smc_wr_tx_get_free_slot()
    221  wr_ib->wr_id  in smc_wr_tx_get_free_slot()
    [all...]
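
On the send side, smc_wr.c keeps an array of pending-send descriptors, each remembering the wr_id it was posted with; when a completion arrives, smc_wr_tx_find_pending_index() linearly scans that array for the slot whose wr_id matches wc->wr_id. A minimal standalone version of that lookup, with the structures and names simplified for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define SEND_SLOTS 16

    struct tx_pend { uint64_t wr_id; int in_use; };

    /* Return the pending slot that was posted with this wr_id, or -1 if none. */
    static int tx_find_pending_index(const struct tx_pend *pends, uint64_t wr_id)
    {
        for (int i = 0; i < SEND_SLOTS; i++)
            if (pends[i].in_use && pends[i].wr_id == wr_id)
                return i;
        return -1;
    }

    int main(void)
    {
        struct tx_pend pends[SEND_SLOTS] = { 0 };

        pends[3] = (struct tx_pend){ .wr_id = 0x1234, .in_use = 1 };
        printf("completion for wr_id 0x1234 maps to slot %d\n",
               tx_find_pending_index(pends, 0x1234));
        return 0;
    }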
/kernel/linux/linux-6.6/net/rds/ |
H A D | ib_ring.c |
    156  u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest)  in rds_ib_ring_completed() argument
    160  if (oldest <= (unsigned long long)wr_id)  in rds_ib_ring_completed()
    161  ret = (unsigned long long)wr_id - oldest + 1;  in rds_ib_ring_completed()
    163  ret = ring->w_nr - oldest + (unsigned long long)wr_id + 1;  in rds_ib_ring_completed()
    165  rdsdebug("ring %p ret %u wr_id %u oldest %u\n", ring, ret,  in rds_ib_ring_completed()
    166  wr_id, oldest);  in rds_ib_ring_completed()
|
/kernel/linux/linux-6.6/net/smc/ |
H A D | smc_wr.h |
     94  u64 wr_id, temp_wr_id;  in smc_wr_rx_post() local
     97  wr_id = ++link->wr_rx_id; /* tasklet context, thus not atomic */  in smc_wr_rx_post()
     98  temp_wr_id = wr_id;  in smc_wr_rx_post()
    100  link->wr_rx_ibs[index].wr_id = wr_id;  in smc_wr_rx_post()
|
H A D | smc_wr.c |
     41  u64 wr_id; /* work request id sent */  member
     66  static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)  in smc_wr_tx_find_pending_index() argument
     71  if (link->wr_tx_pends[i].wr_id == wr_id)  in smc_wr_tx_find_pending_index()
     94  pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);  in smc_wr_tx_process_cqe()
     97  link->wr_tx_v2_pend->wr_id != wc->wr_id)  in smc_wr_tx_process_cqe()
    205  u64 wr_id;  in smc_wr_tx_get_free_slot() local
    229  wr_id = smc_wr_tx_get_next_wr_id(link);  in smc_wr_tx_get_free_slot()
    231  wr_pend->wr_id  in smc_wr_tx_get_free_slot()
    251  u64 wr_id;  smc_wr_tx_get_v2_slot() local
    [all...]
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/ |
H A D | gsi.c |
     74  u64 wr_id;  in handle_single_completion() local
     79  wr_id = wr->wc.wr_id;  in handle_single_completion()
     81  wr->wc.wr_id = wr_id;  in handle_single_completion()
    407  gsi_wr->wc.wr_id = wr->wr.wr_id;  in mlx5_ib_add_outstanding_wr()
    423  { .wr_id = wr->wr.wr_id },  in mlx5_ib_gsi_silent_drop()
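
handle_single_completion() in gsi.c preserves the wr_id that the consumer originally attached to its outstanding GSI work request: it saves wr->wc.wr_id, updates the stored completion, and writes the saved id back so the consumer sees its own wr_id rather than the internal one the GSI layer used. The line between the two excerpted assignments is elided above; the sketch below assumes it copies the freshly polled completion over the stored one, and reduces the types to the essentials.

    #include <stdint.h>
    #include <stdio.h>

    /* Cut-down stand-in for struct ib_wc: just the fields the sketch needs. */
    struct wc { uint64_t wr_id; int status; };

    struct outstanding_wr { struct wc wc; };  /* wc.wr_id holds the consumer's id */

    static void handle_single_completion(struct outstanding_wr *wr, const struct wc *hw_wc)
    {
        uint64_t wr_id = wr->wc.wr_id;  /* remember the id the consumer posted with */

        wr->wc = *hw_wc;                /* take status etc. from the HW completion (assumed) */
        wr->wc.wr_id = wr_id;           /* ...but hand back the consumer's own wr_id */
    }

    int main(void)
    {
        struct outstanding_wr wr = { .wc = { .wr_id = 0xabcd } };
        struct wc hw = { .wr_id = 7, .status = 0 };

        handle_single_completion(&wr, &hw);
        printf("delivered wr_id %llx status %d\n",
               (unsigned long long)wr.wc.wr_id, wr.wc.status);
        return 0;
    }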
|
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/ |
H A D | gsi.c |
     74  u64 wr_id;  in handle_single_completion() local
     79  wr_id = wr->wc.wr_id;  in handle_single_completion()
     81  wr->wc.wr_id = wr_id;  in handle_single_completion()
    378  gsi_wr->wc.wr_id = wr->wr.wr_id;  in mlx5_ib_add_outstanding_wr()
    394  { .wr_id = wr->wr.wr_id },  in mlx5_ib_gsi_silent_drop()
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/ |
H A D | i40iw_uk.c |
    138  u64 wr_id  in i40iw_qp_get_next_send_wqe()
    193  qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;  in i40iw_qp_get_next_send_wqe()
    274  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);  in i40iw_rdma_write()
    331  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);  in i40iw_rdma_read()
    385  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);  in i40iw_send()
    444  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);  in i40iw_inline_rdma_write()
    520  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);  in i40iw_inline_send()
    582  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);  in i40iw_stag_local_invalidate()
    624  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);  in i40iw_mw_bind()
    673  qp->rq_wrid_array[wqe_idx] = info->wr_id;  in i40iw_post_receive()
   1127  i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)  i40iw_nop() argument
    [all...]
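
i40iw_uk.c records the caller's wr_id in a software tracking array indexed by WQE slot: i40iw_qp_get_next_send_wqe() stores it in qp->sq_wrtrk_array[wqe_idx].wrid for sends, and i40iw_post_receive() in rq_wrid_array[wqe_idx] for receives, so completion processing can map a WQE index back to the posted id. A toy model of that shadow-array bookkeeping follows, with invented sizes, names, and a deliberately simplified slot allocator.

    #include <stdint.h>
    #include <stdio.h>

    #define SQ_SIZE 32u

    struct sq_track { uint64_t wrid; uint32_t wr_len; };

    struct qp {
        uint32_t        sq_tail;                 /* next free SQ slot (simplified) */
        struct sq_track sq_wrtrk_array[SQ_SIZE]; /* shadow ring: slot -> caller's wr_id */
    };

    /* Reserve the next SQ slot and remember which wr_id it belongs to. */
    static uint32_t get_next_send_wqe(struct qp *qp, uint64_t wr_id, uint32_t total_size)
    {
        uint32_t wqe_idx = qp->sq_tail++ % SQ_SIZE;

        qp->sq_wrtrk_array[wqe_idx].wrid = wr_id;
        qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
        return wqe_idx;
    }

    /* Poll path: translate a completed WQE index back into the posted wr_id. */
    static uint64_t wr_id_for_wqe(const struct qp *qp, uint32_t wqe_idx)
    {
        return qp->sq_wrtrk_array[wqe_idx % SQ_SIZE].wrid;
    }

    int main(void)
    {
        struct qp qp = { 0 };
        uint32_t idx = get_next_send_wqe(&qp, 0xfeedULL, 128);

        printf("wqe %u completes with wr_id %llx\n",
               idx, (unsigned long long)wr_id_for_wqe(&qp, idx));
        return 0;
    }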
H A D | i40iw_user.h |
    238  u64 wr_id;  member
    258  u64 wr_id;  member
    264  u64 wr_id;  member
    410  u64 wr_id
    421  enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,
|
/kernel/linux/linux-5.10/include/uapi/rdma/ |
H A D | vmw_pvrdma-abi.h |
    230  __aligned_u64 wr_id; /* wr id */  member
    238  __aligned_u64 wr_id; /* wr id */  member
    291  __aligned_u64 wr_id;  member
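
All of the vmw_pvrdma-abi.h copies below and above declare wr_id as __aligned_u64 because these structs cross the user/kernel boundary: forcing 8-byte alignment keeps the layout identical for 32-bit and 64-bit userspace, where a plain 64-bit field after a 32-bit one could otherwise land at a different offset. A small illustration of the idea using a local typedef and compile-time checks; the struct shown is made up for the sketch, not the pvrdma ABI.

    #include <stdint.h>
    #include <stddef.h>

    /* Local stand-in for the kernel's __aligned_u64 from <linux/types.h>. */
    typedef uint64_t aligned_u64 __attribute__((aligned(8)));

    /* Hypothetical wire/ABI descriptor, not the real pvrdma layout. */
    struct example_wqe_hdr {
        uint32_t    num_sge;
        aligned_u64 wr_id;   /* forced to offset 8 and 8-byte alignment everywhere */
    };

    /* With a plain uint64_t, i386 would place wr_id at offset 4 and shrink the
     * struct; the aligned type pins the layout so user and kernel agree. */
    _Static_assert(offsetof(struct example_wqe_hdr, wr_id) == 8, "wr_id must sit at offset 8");
    _Static_assert(sizeof(struct example_wqe_hdr) == 16, "layout must match on 32- and 64-bit");

    int main(void) { return 0; }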
|
/kernel/linux/linux-6.6/include/uapi/rdma/ |
H A D | vmw_pvrdma-abi.h |
    230  __aligned_u64 wr_id; /* wr id */  member
    238  __aligned_u64 wr_id; /* wr id */  member
    291  __aligned_u64 wr_id;  member
|
/kernel/linux/patches/linux-4.19/prebuilts/usr/include/rdma/ |
H A D | vmw_pvrdma-abi.h |
    153  __aligned_u64 wr_id;  member
    158  __aligned_u64 wr_id;  member
    208  __aligned_u64 wr_id;  member
|
/kernel/linux/patches/linux-5.10/prebuilts/usr/include/rdma/ |
H A D | vmw_pvrdma-abi.h |
    170  __aligned_u64 wr_id;  member
    175  __aligned_u64 wr_id;  member
    225  __aligned_u64 wr_id;  member
|
/kernel/linux/patches/linux-6.6/prebuilts/usr/include/rdma/ |
H A D | vmw_pvrdma-abi.h |
    170  __aligned_u64 wr_id;  member
    175  __aligned_u64 wr_id;  member
    225  __aligned_u64 wr_id;  member
|
/kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/ |
H A D | rxe_comp.c |
    411  wc->wr_id = wqe->wr.wr_id;  in make_send_cqe()
    415  uwc->wr_id = wqe->wr.wr_id;  in make_send_cqe()
    574  uwc->wr_id = wqe->wr.wr_id;  in flush_send_wqe()
    578  wc->wr_id = wqe->wr.wr_id;  in flush_send_wqe()
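
rxe_comp.c copies the wr_id from the posted send WQE into whichever completion representation applies, the in-kernel wc or the userspace-visible uwc, and flush_send_wqe() does the same when errored WQEs are flushed. A tiny model of that "pick the right completion struct" step; the struct layouts and the is_user flag are invented for the sketch and do not mirror the real rxe types.

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel and userspace completion layouts. */
    struct kwc { uint64_t wr_id; int status; };
    struct uwc { uint64_t wr_id; int status; };

    struct send_wqe { uint64_t wr_id; };

    union cqe { struct kwc kwc; struct uwc uwc; };

    /* Fill the completion for a finished WQE; is_user selects the layout. */
    static void make_send_cqe(const struct send_wqe *wqe, union cqe *cqe,
                              bool is_user, int status)
    {
        if (is_user) {
            cqe->uwc.wr_id = wqe->wr_id;   /* userspace gets its original cookie back */
            cqe->uwc.status = status;
        } else {
            cqe->kwc.wr_id = wqe->wr_id;   /* so does a kernel consumer */
            cqe->kwc.status = status;
        }
    }

    int main(void)
    {
        struct send_wqe wqe = { .wr_id = 0xbeef };
        union cqe cqe;

        make_send_cqe(&wqe, &cqe, false, 0);
        printf("kernel completion wr_id %llx\n", (unsigned long long)cqe.kwc.wr_id);
        return 0;
    }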
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/ |
H A D | cq.c |
    469  srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;  in post_pending_srq_wrs()
    472  pr_debug("%s posting pending cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",  in post_pending_srq_wrs()
    476  (unsigned long long)pwr->wr_id);  in post_pending_srq_wrs()
    494  u64 wr_id;  in reap_srq_cqe() local
    497  wr_id = srq->sw_rq[rel_idx].wr_id;  in reap_srq_cqe()
    500  pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",  in reap_srq_cqe()
    503  (unsigned long long)srq->sw_rq[rel_idx].wr_id);  in reap_srq_cqe()
    506  pr_debug("%s eat ooo cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id  in reap_srq_cqe()
    [all...]
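
For shared receive queues, cq.c keeps a software ring (srq->sw_rq[]) that mirrors the hardware SRQ: each posted or deferred WR stores its wr_id at the producer index, and reap_srq_cqe() later fetches the wr_id back at the index derived from the CQE, handling the in-order case directly and accounting out-of-order entries separately. Below is a bare-bones sketch of the in-order part only, with invented sizes and field names; the out-of-order handling is left out here, as it is in the excerpt.

    #include <stdint.h>
    #include <stdio.h>

    #define SRQ_SIZE 64u

    struct srq_entry { uint64_t wr_id; };

    struct srq {
        uint32_t         cidx, pidx, in_use;   /* consumer/producer indices */
        struct srq_entry sw_rq[SRQ_SIZE];      /* software shadow of the HW SRQ */
    };

    /* Posting side: remember the wr_id at the producer slot. */
    static void srq_post(struct srq *srq, uint64_t wr_id)
    {
        srq->sw_rq[srq->pidx].wr_id = wr_id;
        srq->pidx = (srq->pidx + 1) % SRQ_SIZE;
        srq->in_use++;
    }

    /* Completion side (in-order case): hand back the wr_id at the consumer slot. */
    static uint64_t srq_reap_in_order(struct srq *srq)
    {
        uint64_t wr_id = srq->sw_rq[srq->cidx].wr_id;

        srq->cidx = (srq->cidx + 1) % SRQ_SIZE;
        srq->in_use--;
        return wr_id;
    }

    int main(void)
    {
        struct srq srq = { 0 };

        srq_post(&srq, 0x1001);
        srq_post(&srq, 0x1002);
        printf("reaped wr_id %llx\n", (unsigned long long)srq_reap_in_order(&srq));
        return 0;
    }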