/kernel/linux/linux-5.10/io_uring/
  io-wq.c
      The io_uring async worker pool; "wqe" here is the per-NUMA-node struct io_wqe
      (wq->wqes[numa_node_id()]), not an RDMA work queue element.  Matches cover the
      accounting helpers io_get_acct(), io_work_get_acct() and io_wqe_get_acct(); worker
      lifecycle (io_wq_worker_stopped(), io_worker_cancel_cb(), io_worker_exit(),
      io_wqe_create_worker(), create_worker_cb(), io_queue_worker_create(),
      io_init_new_worker(), create_worker_cont(), create_io_worker(), io_wqe_worker(),
      io_wq_for_each_worker()); queueing and cancellation (io_wait_on_hash(), io_run_cancel(),
      io_wqe_insert_work(), io_wqe_enqueue(), io_wq_enqueue(), io_wqe_remove_pending(),
      io_acct_cancel_pending_work(), io_wqe_cancel_pending_work(),
      io_wqe_cancel_running_work(), io_wq_cancel_cb(), io_wqe_hash_wake()); and setup and
      teardown (io_wq_create(), io_wq_exit_workers(), io_wq_destroy(), io_wq_cpu_affinity(),
      io_wq_max_workers()).
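
      The accounting helpers are short enough to be reassembled from the matched fragments.
      A sketch of how they read in this version follows; the struct layouts and flag
      definitions are the kernel's own and are assumed rather than reproduced here.

          /* Reassembled from the matches: bound and unbound work are accounted
           * separately, and each io_wqe keeps one acct slot per class. */
          static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
          {
                  return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
          }

          static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
                                                             struct io_wq_work *work)
          {
                  /* a work item is "bound" unless it carries IO_WQ_WORK_UNBOUND */
                  return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
          }

          static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
          {
                  return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
          }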

/kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/
  rxe_req.c
      Soft-RoCE (rxe) requester: send WQEs are pulled from the SQ and turned into request
      packets.  Matches: retry_first_write_send(), req_retry(), req_check_sq_drain_done(),
      req_next_wqe(), rxe_wqe_is_fenced(), next_opcode(), check_init_depth(),
      init_req_packet(), finish_packet(), update_wqe_state(), update_wqe_psn(), save_state(),
      rollback_state(), rxe_do_local_ops() and rxe_requester().  A reassembled sketch of
      retry_first_write_send() follows this group.
  rxe_comp.c
      rxe completer: acknowledged send WQEs are checked against incoming packets, retired and
      reported as completions.  Matches: get_wqe(), check_psn(), check_ack(), do_read(),
      do_atomic(), make_send_cqe(), do_complete(), complete_ack(), complete_wqe(),
      flush_send_wqe(), flush_send_queue() and rxe_completer().
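
      The retransmit helper from rxe_req.c above is mostly visible in the matches.  Below is
      an approximate reconstruction: the loop over npsn is inferred from the parameter rather
      than shown, so treat this as a sketch, not the literal function.

          /* For the first npsn packets of a partially-acknowledged send/write being
           * retried, recompute the opcode and advance the WQE's DMA bookkeeping so
           * retransmission resumes at the right byte offset. */
          static void retry_first_write_send(struct rxe_qp *qp,
                                             struct rxe_send_wqe *wqe, int npsn)
          {
                  int i;

                  for (i = 0; i < npsn; i++) {
                          int to_send = (wqe->dma.resid > qp->mtu) ?
                                          qp->mtu : wqe->dma.resid;

                          qp->req.opcode = next_opcode(qp, wqe, wqe->wr.opcode);

                          if (wqe->wr.send_flags & IB_SEND_INLINE) {
                                  /* inline payload: adjust the DMA state directly */
                                  wqe->dma.resid -= to_send;
                                  wqe->dma.sge_offset += to_send;
                          } else {
                                  advance_dma_data(&wqe->dma, to_send);
                          }
                  }
          }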

/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/
  rxe_req.c
      The 5.10 requester, with the older set of helpers: retry_first_write_send(),
      req_retry(), req_next_wqe(), next_opcode(), check_init_depth(), init_req_packet(),
      fill_packet(), update_wqe_state(), update_wqe_psn(), save_state(), rollback_state(),
      update_state() and rxe_requester().
  rxe_comp.c
      The 5.10 completer: get_wqe(), check_psn(), check_ack(), do_read(), do_atomic(),
      make_send_cqe(), do_complete(), complete_ack(), complete_wqe(), rxe_drain_resp_pkts()
      and rxe_completer().

/kernel/linux/linux-6.6/drivers/infiniband/hw/irdma/
  uda.c
      Intel irdma unicast/multicast address-handle commands: irdma_sc_access_ah() and
      irdma_access_mcast_grp() fetch a control-QP WQE with irdma_sc_cqp_get_next_send_wqe()
      and fill it with set_64bit_val().
  uk.c
      irdma user-kernel shared WQE builders: irdma_set_fragment() / irdma_set_fragment_gen_1(),
      irdma_nop_1() and irdma_nop(), irdma_qp_get_next_send_wqe(),
      irdma_qp_get_next_recv_wqe(), irdma_uk_rdma_write(), irdma_uk_rdma_read(),
      irdma_uk_send(), irdma_set_mw_bind_wqe() / irdma_set_mw_bind_wqe_gen_1(),
      irdma_copy_inline_data() / irdma_copy_inline_data_gen_1(), irdma_uk_inline_rdma_write(),
      irdma_uk_inline_send(), irdma_uk_stag_local_invalidate() and irdma_uk_post_receive().
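
      All of these builders lean on set_64bit_val() to drop little-endian 64-bit words at
      byte offsets inside the WQE.  Neither that helper's definition nor the IRDMAQPSQ_*
      field macros appear in the matches, so the sketch below only illustrates the pattern
      irdma_set_fragment() follows; set_64bit_val() and pack_frag_word() as written here are
      stand-ins, not the driver's literal code.

          /* Illustrative stand-in: write one little-endian 64-bit word at a byte
           * offset inside the WQE. */
          static inline void set_64bit_val(__le64 *wqe, u32 byte_off, u64 val)
          {
                  wqe[byte_off >> 3] = cpu_to_le64(val);
          }

          /* Hypothetical packer standing in for the FIELD_PREP()-based bit packing
           * the real driver applies to the second word of a fragment. */
          static u64 pack_frag_word(u32 length, u32 lkey, u8 valid)
          {
                  return ((u64)valid << 63) | ((u64)lkey << 32) | length;
          }

          /* Shape of irdma_set_fragment() as suggested by the matches: each SGE takes
           * two 64-bit words, the address first, then length/lkey/valid. */
          static void example_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
                                           u8 valid)
          {
                  if (sge) {
                          set_64bit_val(wqe, offset, sge->addr);
                          set_64bit_val(wqe, offset + 8,
                                        pack_frag_word(sge->length, sge->lkey, valid));
                  } else {
                          /* no SGE: zero the fragment but still stamp the valid bit */
                          set_64bit_val(wqe, offset, 0);
                          set_64bit_val(wqe, offset + 8, pack_frag_word(0, 0, valid));
                  }
          }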

/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/
  trace_tx.h
      rdmavt TX tracepoints: an event template records the posted send WQE
      (struct rvt_swqe *wqe) together with wr_id, send_flags, qpn, qpt, psn, lpsn, ssn,
      length, opcode, queue size/avail/head/last, pid, num_sge and wr_num_sge.
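
      For reference, the same idea fits in a much smaller tracepoint.  The sketch below uses
      a standalone TRACE_EVENT() rather than the shared event class the real header defines,
      and captures only a subset of the fields listed above.

          TRACE_EVENT(example_post_swqe,
                  TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
                  TP_ARGS(qp, wqe, wr_num_sge),
                  TP_STRUCT__entry(
                          __field(struct rvt_swqe *, wqe)
                          __field(u64, wr_id)
                          __field(u32, psn)
                          __field(u32, lpsn)
                          __field(u32, length)
                          __field(int, wr_num_sge)
                  ),
                  TP_fast_assign(
                          __entry->wqe = wqe;
                          __entry->wr_id = wqe->wr.wr_id;
                          __entry->psn = wqe->psn;
                          __entry->lpsn = wqe->lpsn;
                          __entry->length = wqe->length;
                          __entry->wr_num_sge = wr_num_sge;
                  ),
                  TP_printk("wqe %p wr_id %llx psn %x lpsn %x length %u wr_num_sge %d",
                            __entry->wqe, __entry->wr_id, __entry->psn, __entry->lpsn,
                            __entry->length, __entry->wr_num_sge)
          );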

/kernel/linux/linux-6.6/drivers/infiniband/sw/rdmavt/
  trace_tx.h
      Same TX tracepoint template as the 5.10 copy above, capturing the same rvt_swqe fields.

/kernel/linux/linux-5.10/drivers/infiniband/sw/siw/
  siw_qp_tx.c
      SoftiWARP (siw) transmit path: siw_try_1seg(), siw_qp_prepare_tx(), siw_tx_hdt(),
      siw_prepare_fpdu(), siw_check_sgl_tx(), siw_qp_sq_proc_tx(), siw_qp_sq_proc_local()
      and siw_qp_sq_process() all work on the QP's active TX WQE (c_tx->wqe_active /
      tx_wqe(qp)).  A sketch of siw_try_1seg() follows this group.
  siw_qp.c
      siw QP state handling: siw_qp_mpa_rts(), siw_send_terminate(), siw_activate_tx_from_sq(),
      siw_activate_tx(), siw_sq_flush() and siw_rq_flush() initialize, activate and flush
      TX/RX WQEs.
  siw_qp_rx.c
      siw receive path: siw_rresp_check_ntoh(), siw_send_check_ntoh(), siw_rqe_get(),
      siw_proc_send(), siw_proc_write(), siw_orqe_start_rx(), siw_proc_rresp() and
      siw_rdmap_complete() validate incoming DDP/RDMAP segments against the active RX WQE.
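
      The header-inline fast path from siw_qp_tx.c above can be reassembled approximately.
      The c_tx parameter type and the non-inline branch are not fully visible in the matches,
      and the failure return value is simplified, so this is a sketch rather than the literal
      function.

          /* Decide whether the active WQE's single SGE is small enough to be carried
           * inline with the FPDU header, and if so copy it into the header buffer. */
          static int example_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
          {
                  struct siw_wqe *wqe = &c_tx->wqe_active;
                  struct siw_sge *sge = &wqe->sqe.sge[0];
                  u32 bytes = sge->length;

                  if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
                          return -1;      /* too large or multi-SGE: not a candidate */

                  if (tx_flags(wqe) & SIW_WQE_INLINE) {
                          /* inline payload is stored right behind sge[0] in the SQE */
                          memcpy(paddr, &wqe->sqe.sge[1], bytes);
                  } else {
                          /* non-inline payload would be copied out of wqe->mem[0];
                           * that path (memory lookup and mapping) is omitted here */
                  }
                  return bytes;
          }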

/kernel/linux/linux-6.6/drivers/infiniband/sw/siw/
  siw_qp_tx.c
      Same transmit-path helpers as the 5.10 copy above.
  siw_qp.c
      Same QP state and flush helpers as the 5.10 copy above.
  siw_qp_rx.c
      Same receive-path helpers as the 5.10 copy above.

/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/
  i40iw_ctrl.c
      Intel i40iw control path: i40iw_insert_wqe_hdr() and
      i40iw_sc_cqp_get_next_send_wqe_idx(), plus several dozen i40iw_sc_*() CQP command
      builders (push pages, HMC/FPM values, RDMA feature query, ARP/APBVT/qhash/MAC entries,
      CEQ/AEQ/CCQ/CQ and QP create/modify/destroy, flush and upload-context, STAG/MR/MW and
      fast-register commands, LSMM/RTT sends, SDS fill, QP suspend/resume), each of which
      grabs a u64 *wqe and fills it with set_64bit_val().
  i40iw_uk.c
      i40iw user-kernel WQE builders: i40iw_nop_1() and i40iw_nop(),
      i40iw_qp_get_next_send_wqe(), i40iw_qp_get_next_recv_wqe(), i40iw_set_fragment(),
      i40iw_rdma_write(), i40iw_rdma_read(), i40iw_send(), i40iw_inline_rdma_write(),
      i40iw_inline_send(), i40iw_stag_local_invalidate(), i40iw_mw_bind() and
      i40iw_post_receive().
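
      A recurring detail in both i40iw files is that a WQE becomes valid only once its header
      word at byte offset 24 is written, since that word carries the valid/polarity bit.
      i40iw_nop_1() shows the minimal case; the opcode/signaled/valid packing (LS_64() with
      I40IWQPSQ_* fields in the real driver) is not visible in the matches, so the header is
      treated as an opaque value here.

          /* Minimal NOP pattern pieced together from i40iw_nop_1()/i40iw_nop(): zero
           * the first three quadwords, then publish the WQE by writing the header. */
          static void example_write_nop_wqe(u64 *wqe, u64 header)
          {
                  set_64bit_val(wqe, 0, 0);
                  set_64bit_val(wqe, 8, 0);
                  set_64bit_val(wqe, 16, 0);

                  /* The header holds the valid bit and must be the last store; the
                   * real driver orders it behind the zero writes with a barrier. */
                  set_64bit_val(wqe, 24, header);
          }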

/kernel/linux/linux-5.10/drivers/scsi/lpfc/
  lpfc_nvme.c
      Emulex lpfc NVMe-over-FC: the WQE here is the SLI-4 union lpfc_wqe128 work queue entry.
      Matches: lpfc_nvme_cmd_template(), lpfc_nvme_prep_abort_wqe(), lpfc_nvme_gen_req(),
      __lpfc_nvme_ls_abort(), lpfc_nvme_adj_fcp_sgls(), lpfc_nvme_prep_io_cmd(),
      lpfc_nvme_prep_io_dma() and lpfc_get_nvme_buf(), which fill the WQE fields with
      bf_set().
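
      The lpfc_nvme_cmd_template() matches show the driver pre-building a reusable FCP_IREAD
      WQE template at init time and patching only per-I/O fields later.  The fragment below
      reassembles just the bf_set() lines that are actually visible; the wrapper function and
      the rest of the template (iwrite/icmnd, wqe_iod and friends) are assumed or omitted.

          static void example_build_iread_template(union lpfc_wqe128 *wqe)
          {
                  /* zero the 128-byte WQE, then set the fields common to every
                   * NVMe read command */
                  memset(wqe, 0, sizeof(union lpfc_wqe128));

                  bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
                  bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
                  bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
                  bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
                  bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
                  bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
          }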

/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
  rc.c
      hfi1 RC (reliable connected) protocol: the send engine and ACK processing walk rvt_swqe
      entries.  Matches: hfi1_make_rc_req(), update_num_rd_atomic(), reset_psn(),
      hfi1_restart_rc(), reset_sending_psn(), hfi1_rc_send_complete(), do_rc_completion(),
      do_rc_ack(), rdma_seq_err() and rc_rcv_resp().

/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/
  rc.c
      Same RC helpers as the 5.10 copy above.

/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/
  qib_rc.c
      qib RC protocol, structured like the hfi1 code: restart_sge(), qib_make_rc_req(),
      reset_psn(), qib_restart_rc(), reset_sending_psn(), qib_rc_send_complete(),
      do_rc_completion(), do_rc_ack(), rdma_seq_err() and qib_rc_rcv_resp().
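
      restart_sge() is small enough that the matched lines cover essentially the whole
      function (only the trailing parameter types are inferred from how they are used).  It
      converts the PSN distance from the start of the WQE back into a byte offset so the SGE
      state can be rewound to the retransmission point.

          static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
                                 u32 psn, u32 pmtu)
          {
                  u32 len;

                  /* packets already covered before @psn, times the path MTU, gives
                   * the byte offset at which retransmission must resume */
                  len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
                  return rvt_restart_sge(ss, wqe, len);
          }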

/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/
  qib_rc.c
      Same RC helpers as the 5.10 copy above.

/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/
  qp.c
      Chelsio iw_cxgb4 QP code builds firmware work requests (union t4_wr, union t4_recv_wr,
      struct fw_ri_wr): build_immd(), build_rdma_send(), build_rdma_write(),
      build_rdma_read(), post_write_cmpl(), build_rdma_recv(), build_srq_recv(),
      build_memreg(), build_inv_stag(), c4iw_post_send(), c4iw_post_receive(),
      defer_srq_wr(), c4iw_post_srq_recv(), post_terminate(), rdma_fini(), rdma_init()
      and c4iw_copy_wr_to_srq().
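
      The visible pieces of build_rdma_send() show the opcode handling: the packed firmware
      send opcode depends on IB_SEND_SOLICITED, and stag_inv is populated only for
      IB_WR_SEND_WITH_INV.  The FW_RI_* constants are not shown in the matches, so the sketch
      below hides them behind a hypothetical fw_sendop() helper, and the default branch is an
      assumption.

          /* fw_sendop(solicited, with_inv) stands in for the driver's packing of the
           * firmware send opcode; it is hypothetical, not the cxgb4 API. */
          switch (wr->opcode) {
          case IB_WR_SEND:
                  wqe->send.sendop_pkd = cpu_to_be32(
                          fw_sendop(wr->send_flags & IB_SEND_SOLICITED, false));
                  wqe->send.stag_inv = 0;
                  break;
          case IB_WR_SEND_WITH_INV:
                  wqe->send.sendop_pkd = cpu_to_be32(
                          fw_sendop(wr->send_flags & IB_SEND_SOLICITED, true));
                  wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
                  break;
          default:
                  return -EINVAL;         /* assumed: other opcodes rejected here */
          }
          wqe->send.r3 = 0;
          wqe->send.r4 = 0;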

/kernel/linux/linux-6.6/drivers/infiniband/hw/cxgb4/
  qp.c
      Same work-request builders as the 5.10 copy above.

/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/
  umr.c
      mlx5 UMR (user memory region) WQEs: mlx5r_umr_post_send(), mlx5r_umr_post_send_wait(),
      mlx5r_umr_revoke_mr(), mlx5r_umr_rereg_pd_access(), mlx5r_umr_final_update_xlt(),
      mlx5r_umr_update_mr_pas() and mlx5r_umr_update_xlt() build struct mlx5r_umr_wqe
      requests and post them on the driver's dedicated UMR QP (umrc->qp).
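
      The mlx5r_umr_revoke_mr() matches show the general recipe: build an inline UMR WQE on
      the stack, OR the relevant bits into the control segment's mkey mask, fill the mkey
      segment with MLX5_SET(), then post it synchronously.  The sketch below keeps only the
      lines visible in the matches; the MLX5_SET() calls are elided and with_data = false is
      an assumption.

          static int example_umr_revoke(struct mlx5_ib_dev *dev, u32 mkey)
          {
                  struct mlx5r_umr_wqe wqe = {};

                  /* ask the HCA to update the PD and disable the MR in one request */
                  wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
                  wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
                  wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;

                  /* MLX5_SET(mkc, &wqe.mkey_seg, ...) lines elided ... */

                  return mlx5r_umr_post_send_wait(dev, mkey, &wqe, false);
          }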