/kernel/linux/linux-6.6/net/sunrpc/xprtrdma/frwr_ops.c
  360  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_fastreg()
  363  /* WARNING: Only wr_cqe and status are reliable at this point */  in frwr_wc_fastreg()
  398  mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;  in frwr_send()
  455  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_localinv()
  458  /* WARNING: Only wr_cqe and status are reliable at this point */  in frwr_wc_localinv()
  474  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_localinv_wake()
  477  /* WARNING: Only wr_cqe and status are reliable at this point */  in frwr_wc_localinv_wake()
  517  last->wr_cqe = &mr->mr_cqe;  in frwr_unmap_sync()
  524  last->wr_cqe->done = frwr_wc_localinv;  in frwr_unmap_sync()
  536  last->wr_cqe ...  in frwr_unmap_sync()
  [all...]

/kernel/linux/linux-5.10/net/sunrpc/xprtrdma/frwr_ops.c
  362  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_fastreg()
  366  /* WARNING: Only wr_cqe and status are reliable at this point */  in frwr_wc_fastreg()
  400  frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;  in frwr_send()
  446  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_localinv()
  451  /* WARNING: Only wr_cqe and status are reliable at this point */  in frwr_wc_localinv()
  467  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_localinv_wake()
  472  /* WARNING: Only wr_cqe and status are reliable at this point */  in frwr_wc_localinv_wake()
  515  last->wr_cqe = &frwr->fr_cqe;  in frwr_unmap_sync()
  570  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_localinv_done()
  576  /* WARNING: Only wr_cqe and status are reliable at this point */  in frwr_wc_localinv_done()
  [all...]

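Both versions of frwr_ops.c show the same idiom: every MR embeds a `struct ib_cqe`, frwr_send() points the work request's wr_cqe at it, and each handler starts from the rule that only wr_cqe and status are reliable once a WR has been flushed; frwr_unmap_sync() additionally swaps the done callback on the last chained WR so only the tail completion wakes the waiter. A minimal sketch of that idiom, with illustrative my_* names (not kernel APIs) and the registration details (page list, key, access) omitted:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_mr {
	struct ib_cqe		mr_cqe;		/* embedded completion handle */
	struct ib_reg_wr	mr_regwr;	/* FastReg work request */
};

/* Runs from CQ polling context when the REG_MR WR completes. */
static void my_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	/* WARNING: Only wr_cqe and status are reliable at this point */
	struct my_mr *mr = container_of(wc->wr_cqe, struct my_mr, mr_cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("fastreg of MR %p failed: %s\n",
		       mr, ib_wc_status_msg(wc->status));
	/* on success the MR is registered; recycle mr as appropriate */
}

static int my_post_fastreg(struct ib_qp *qp, struct my_mr *mr)
{
	mr->mr_cqe.done = my_wc_fastreg;
	mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
	mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;	/* cf. frwr_send() above */
	/* mr_regwr.mr, .key and .access must be filled in by the caller */
	return ib_post_send(qp, &mr->mr_regwr.wr, NULL);
}
```
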
/kernel/linux/linux-5.10/drivers/infiniband/ulp/rtrs/rtrs-srv.c
  281  wr->wr.wr_cqe = &io_comp_cqe;  in rdma_write_sg()
  309  inv_wr.wr_cqe = &io_comp_cqe;  in rdma_write_sg()
  320  rwr.wr.wr_cqe = &local_reg_cqe;  in rdma_write_sg()
  350  imm_wr.wr.wr_cqe = &io_comp_cqe;  in rdma_write_sg()
  397  inv_wr.wr_cqe = &io_comp_cqe;  in send_io_resp_imm()
  440  rwr.wr.wr_cqe = &local_reg_cqe;  in send_io_resp_imm()
  467  imm_wr.wr.wr_cqe = &io_comp_cqe;  in send_io_resp_imm()
  741  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_srv_info_rsp_done()
  834  rwr[mri].wr.wr_cqe = &local_reg_cqe;  in process_info_req()
  887  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_srv_info_req_done()
  [all...]

/kernel/linux/linux-5.10/drivers/infiniband/ulp/rtrs/rtrs.c
   88  .wr_cqe = &iu->cqe,  in rtrs_iu_post_recv()
  102  .wr_cqe = cqe,  in rtrs_post_recv_empty()
  140  .wr_cqe = &iu->cqe,  in rtrs_iu_post_send()
  161  .wr.wr_cqe = &iu->cqe,  in rtrs_iu_post_rdma_write_imm()
  190  .wr.wr_cqe = cqe,  in rtrs_post_rdma_write_imm_empty()

/kernel/linux/linux-5.10/drivers/infiniband/ulp/rtrs/rtrs-clt.c
  353  container_of(wc->wr_cqe, typeof(*req), inv_cqe);  in rtrs_clt_inv_rkey_done()
  374  .wr_cqe = &req->inv_cqe,  in rtrs_inv_rkey()
  506  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_clt_recv_done()
  527  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_clt_rkey_rsp_done()
  584  wr->wr_cqe = cqe;  in rtrs_post_recv_empty_x2()
  617  if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))  in rtrs_clt_rdma_done()
  664  WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);  in rtrs_clt_rdma_done()
 1110  .wr.wr_cqe = &fast_reg_cqe,  in rtrs_clt_read_req()
 2267  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_clt_info_req_done()
 2357  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_clt_info_rsp_done()
  [all...]

/kernel/linux/linux-6.6/drivers/infiniband/ulp/rtrs/rtrs.c
   88  .wr_cqe = &iu->cqe,  in rtrs_iu_post_recv()
  102  .wr_cqe = cqe,  in rtrs_post_recv_empty()
  143  .wr_cqe = &iu->cqe,  in rtrs_iu_post_send()
  165  .wr.wr_cqe = &iu->cqe,  in rtrs_iu_post_rdma_write_imm()
  201  .wr.wr_cqe = cqe,  in rtrs_post_rdma_write_imm_empty()

/kernel/linux/linux-6.6/drivers/infiniband/ulp/rtrs/rtrs-srv.c
  247  wr->wr.wr_cqe = &io_comp_cqe;  in rdma_write_sg()
  275  inv_wr.wr_cqe = &io_comp_cqe;  in rdma_write_sg()
  286  rwr.wr.wr_cqe = &local_reg_cqe;  in rdma_write_sg()
  316  imm_wr.wr.wr_cqe = &io_comp_cqe;  in rdma_write_sg()
  362  inv_wr.wr_cqe = &io_comp_cqe;  in send_io_resp_imm()
  407  rwr.wr.wr_cqe = &local_reg_cqe;  in send_io_resp_imm()
  434  imm_wr.wr.wr_cqe = &io_comp_cqe;  in send_io_resp_imm()
  704  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_srv_info_rsp_done()
  846  rwr[mri].wr.wr_cqe = &local_reg_cqe;  in process_info_req()
  909  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_srv_info_req_done()
  [all...]

/kernel/linux/linux-6.6/drivers/infiniband/ulp/rtrs/rtrs-clt.c
  350  container_of(wc->wr_cqe, typeof(*req), inv_cqe);  in rtrs_clt_inv_rkey_done()
  371  .wr_cqe = &req->inv_cqe,  in rtrs_inv_rkey()
  515  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_clt_recv_done()
  536  iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);  in rtrs_clt_rkey_rsp_done()
  594  wr->wr_cqe = cqe;  in rtrs_post_recv_empty_x2()
  627  if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))  in rtrs_clt_rdma_done()
  675  WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);  in rtrs_clt_rdma_done()
 1130  .wr_cqe = &req->inv_cqe,  in rtrs_clt_write_req()
 1137  .wr.wr_cqe = &fast_reg_cqe,  in rtrs_clt_write_req()
 1219  .wr.wr_cqe = &fast_reg_cqe,  in rtrs_clt_read_req()
  [all...]

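The rtrs hits split into two patterns: I/O-unit (IU) posts give each IU its own embedded cqe that the handler recovers with container_of(), while fire-and-forget data-path WRs all share the single io_comp_cqe because their handler needs no per-WR state. A sketch of the per-IU receive variant, again under the assumption of illustrative my_* names:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_iu {
	struct ib_cqe	cqe;	/* recovered by the completion handler */
	struct ib_sge	sge;	/* receive buffer descriptor */
};

static void my_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_iu *iu = container_of(wc->wr_cqe, struct my_iu, cqe);

	if (wc->status != IB_WC_SUCCESS)
		return;	/* only wr_cqe and status are valid on error */
	pr_debug("received %u bytes into IU %p\n", wc->byte_len, iu);
}

static int my_iu_post_recv(struct ib_qp *qp, struct my_iu *iu)
{
	struct ib_recv_wr wr = {
		.wr_cqe		= &iu->cqe,	/* cf. rtrs_iu_post_recv() */
		.sg_list	= &iu->sge,
		.num_sge	= 1,
	};

	iu->cqe.done = my_recv_done;
	return ib_post_recv(qp, &wr, NULL);
}
```
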
/kernel/linux/linux-5.10/net/9p/trans_rdma.c
  296  container_of(wc->wr_cqe, struct p9_rdma_context, cqe);  in recv_done()
  347  container_of(wc->wr_cqe, struct p9_rdma_context, cqe);  in send_done()
  404  wr.wr_cqe = &c->cqe;  in post_recv()
  501  wr.wr_cqe = &c->cqe;  in rdma_request()

/kernel/linux/linux-6.6/net/9p/trans_rdma.c
  295  container_of(wc->wr_cqe, struct p9_rdma_context, cqe);  in recv_done()
  346  container_of(wc->wr_cqe, struct p9_rdma_context, cqe);  in send_done()
  403  wr.wr_cqe = &c->cqe;  in post_recv()
  500  wr.wr_cqe = &c->cqe;  in rdma_request()

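In both 9p trees a single context type, p9_rdma_context, serves sends and receives alike; the direction is fixed by which done callback is installed just before posting (recv_done in post_recv(), send_done in rdma_request()). A sketch of the send side of that arrangement, with illustrative my_* names:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_rdma_context {
	struct ib_cqe	cqe;	/* shared by send and recv paths */
	struct ib_sge	sge;	/* buffer for this request */
};

static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_rdma_context *c =
		container_of(wc->wr_cqe, struct my_rdma_context, cqe);

	pr_debug("send done for context %p: %s\n", c,
		 ib_wc_status_msg(wc->status));
	/* unmap and free the buffer tracked by c */
}

static int my_post_send(struct ib_qp *qp, struct my_rdma_context *c)
{
	struct ib_send_wr wr = {
		.wr_cqe		= &c->cqe,	/* cf. rdma_request() above */
		.sg_list	= &c->sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};

	c->cqe.done = my_send_done;	/* recv path installs recv_done */
	return ib_post_send(qp, &wr, NULL);
}
```
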
/kernel/linux/linux-5.10/drivers/infiniband/ulp/iser/iser_memory.c
  225  inv_wr->wr_cqe = cqe;  in iser_inv_rkey()
  269  wr->wr.wr_cqe = cqe;  in iser_reg_sig_mr()
  316  wr->wr.wr_cqe = cqe;  in iser_fast_reg_mr()

/kernel/linux/linux-5.10/drivers/infiniband/ulp/iser/iser_initiator.c
  566  struct iser_login_desc *desc = iser_login(wc->wr_cqe);  in iser_login_rsp()
  661  struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);  in iser_task_rsp()
  717  struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);  in iser_ctrl_comp()
  733  struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);  in iser_dataout_comp()

/kernel/linux/linux-6.6/drivers/infiniband/ulp/iser/iser_memory.c
  240  inv_wr->wr_cqe = cqe;  in iser_inv_rkey()
  283  wr->wr.wr_cqe = cqe;  in iser_reg_sig_mr()
  330  wr->wr.wr_cqe = cqe;  in iser_fast_reg_mr()

/kernel/linux/linux-6.6/drivers/infiniband/ulp/iser/iser_initiator.c
  537  struct iser_login_desc *desc = iser_login(wc->wr_cqe);  in iser_login_rsp()
  638  struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);  in iser_task_rsp()
  682  struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);  in iser_ctrl_comp()
  698  struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);  in iser_dataout_comp()

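iser gives each descriptor type (login, rx, tx) its own embedded cqe and hides the container_of() behind tiny typed helpers such as iser_rx() and iser_tx(). A sketch of one such helper pair, with illustrative my_* names:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_rx_desc {
	struct ib_cqe	cqe;
	char		data[128];	/* inline receive buffer */
};

/* Typed wrapper in the style of iser_rx()/iser_tx(): just container_of. */
static inline struct my_rx_desc *my_rx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct my_rx_desc, cqe);
}

static void my_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_rx_desc *desc = my_rx(wc->wr_cqe);

	pr_debug("response landed in descriptor %p\n", desc);
	/* parse desc->data ... */
}
```
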
/kernel/linux/linux-5.10/drivers/nvme/target/rdma.c
  341  c->wr.wr_cqe = &c->cqe;  in nvmet_rdma_alloc_cmd()
  424  r->send_wr.wr_cqe = &r->send_cqe;  in nvmet_rdma_alloc_rsp()
  702  container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);  in nvmet_rdma_send_done()
  710  wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);  in nvmet_rdma_send_done()
  755  container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);  in nvmet_rdma_read_data_done()
  769  wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);  in nvmet_rdma_read_data_done()
  788  container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);  in nvmet_rdma_write_data_done()
 1010  container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);  in nvmet_rdma_recv_done()
 1017  wc->wr_cqe, ib_wc_status_msg(wc->status),  in nvmet_rdma_recv_done()

/kernel/linux/linux-6.6/drivers/nvme/target/rdma.c
  342  c->wr.wr_cqe = &c->cqe;  in nvmet_rdma_alloc_cmd()
  425  r->send_wr.wr_cqe = &r->send_cqe;  in nvmet_rdma_alloc_rsp()
  703  container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);  in nvmet_rdma_send_done()
  711  wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);  in nvmet_rdma_send_done()
  756  container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);  in nvmet_rdma_read_data_done()
  770  wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);  in nvmet_rdma_read_data_done()
  789  container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);  in nvmet_rdma_write_data_done()
 1011  container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);  in nvmet_rdma_recv_done()
 1018  wc->wr_cqe, ib_wc_status_msg(wc->status),  in nvmet_rdma_recv_done()

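The nvmet hits show one response object carrying three separate cqes (send_cqe, read_cqe, write_cqe), so the same request can tell a SEND completion apart from the RDMA READ or WRITE that preceded it. A sketch of one phase handler, reusing the error-log shape visible at lines 710/769; my_rsp is an illustrative stand-in for struct nvmet_rdma_rsp:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_rsp {
	struct ib_cqe	send_cqe;	/* response SEND completed */
	struct ib_cqe	read_cqe;	/* RDMA READ of host data completed */
	struct ib_cqe	write_cqe;	/* RDMA WRITE of host data completed */
};

static void my_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_rsp *rsp =
		container_of(wc->wr_cqe, struct my_rsp, read_cqe);

	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RDMA READ for CQE %p failed with status %s (%d)\n",
		       wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
	else if (wc->status == IB_WC_SUCCESS)
		pr_debug("host data arrived; executing request %p\n", rsp);
}
```
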
/kernel/linux/linux-5.10/drivers/infiniband/ulp/isert/ib_isert.c
  738  rx_wr->wr_cqe = &rx_desc->rx_cqe;  in isert_post_recvm()
  769  rx_wr.wr_cqe = &rx_desc->rx_cqe;  in isert_post_recv()
  794  send_wr.wr_cqe = &tx_desc->tx_cqe;  in isert_login_post_send()
  871  send_wr->wr_cqe = &tx_desc->tx_cqe;  in isert_init_send_wr()
  904  rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;  in isert_login_post_recv()
 1323  struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);  in isert_recv_done()
 1580  struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_rdma_write_done()
 1622  struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_rdma_read_done()
 1698  struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_login_send_done()
 1714  struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_send_done()
  [all...]

/kernel/linux/linux-6.6/drivers/infiniband/ulp/isert/ib_isert.c
  739  rx_wr->wr_cqe = &rx_desc->rx_cqe;  in isert_post_recvm()
  770  rx_wr.wr_cqe = &rx_desc->rx_cqe;  in isert_post_recv()
  795  send_wr.wr_cqe = &tx_desc->tx_cqe;  in isert_login_post_send()
  872  send_wr->wr_cqe = &tx_desc->tx_cqe;  in isert_init_send_wr()
  905  rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;  in isert_login_post_recv()
 1323  struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);  in isert_recv_done()
 1580  struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_rdma_write_done()
 1622  struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_rdma_read_done()
 1698  struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_login_send_done()
 1714  struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_send_done()
  [all...]

/kernel/linux/linux-6.6/fs/smb/server/transport_rdma.c
  540  recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);  in recv_done()
  655  wr.wr_cqe = &recvmsg->cqe;  in smb_direct_post_recv()
  860  sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);  in send_done()
  947  last->wr.wr_cqe = &last->cqe;  in smb_direct_flush_send_list()
 1141  msg->wr.wr_cqe = NULL;  in post_sendmsg()
 1156  msg->wr.wr_cqe = &msg->cqe;  in post_sendmsg()
 1329  struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe, struct smb_direct_rdma_rw_msg, cqe);  in read_write_done()

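smb_direct_flush_send_list() and post_sendmsg() show the unsignaled-send optimization: intermediate WRs in a chain get wr_cqe = NULL and generate no completion, and only the tail WR is signaled and carries a cqe. A sketch over an array of at least one message, with my_sendmsg standing in for struct smb_direct_sendmsg:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_sendmsg {
	struct ib_cqe		cqe;
	struct ib_send_wr	wr;
};

static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_sendmsg *msg =
		container_of(wc->wr_cqe, struct my_sendmsg, cqe);

	pr_debug("chain ending at %p completed: %s\n", msg,
		 ib_wc_status_msg(wc->status));
}

static int my_post_send_chain(struct ib_qp *qp, struct my_sendmsg *msgs,
			      int n)
{
	int i;

	/* link the chain; everything but the tail is unsignaled */
	for (i = 0; i < n - 1; i++) {
		msgs[i].wr.next = &msgs[i + 1].wr;
		msgs[i].wr.wr_cqe = NULL;	/* cf. post_sendmsg() line 1141 */
		msgs[i].wr.send_flags = 0;
	}
	msgs[n - 1].wr.next = NULL;
	msgs[n - 1].cqe.done = my_send_done;
	msgs[n - 1].wr.wr_cqe = &msgs[n - 1].cqe;	/* tail is signaled */
	msgs[n - 1].wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(qp, &msgs[0].wr, NULL);
}
```
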
/kernel/linux/linux-5.10/fs/cifs/smbdirect.c
  270  container_of(wc->wr_cqe, struct smbd_request, cqe);  in send_done()
  446  container_of(wc->wr_cqe, struct smbd_response, cqe);  in recv_done()
  719  send_wr.wr_cqe = &request->cqe;  in smbd_post_send_negotiate_req()
  806  send_wr.wr_cqe = &request->cqe;  in smbd_post_send()
 1057  recv_wr.wr_cqe = &response->cqe;  in smbd_post_recv()
 2157  cqe = wc->wr_cqe;  in register_mr_done()
 2417  reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;  in smbd_register_mr()
 2458  cqe = wc->wr_cqe;  in local_inv_done()
 2485  wr->wr_cqe = &smbdirect_mr->cqe;  in smbd_deregister_mr()

/kernel/linux/linux-5.10/drivers/nvme/host/rdma.c
 1259  op, wc->wr_cqe,  in nvme_rdma_wr_error()
 1273  container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);  in nvme_rdma_inv_rkey_done()
 1293  wr.wr_cqe = &req->reg_cqe;  in nvme_rdma_inv_rkey()
 1408  req->reg_wr.wr.wr_cqe = &req->reg_cqe;  in nvme_rdma_map_sg_fr()
 1513  wr->wr.wr_cqe = &req->reg_cqe;  in nvme_rdma_map_sg_pi()
 1641  container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);  in nvme_rdma_send_done()
 1663  wr.wr_cqe = &qe->cqe;  in nvme_rdma_post_send()
 1696  wr.wr_cqe = &qe->cqe;  in nvme_rdma_post_recv()
 1797  container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);  in nvme_rdma_recv_done()

/kernel/linux/linux-6.6/fs/smb/client/smbdirect.c
  276  container_of(wc->wr_cqe, struct smbd_request, cqe);  in send_done()
  452  container_of(wc->wr_cqe, struct smbd_response, cqe);  in recv_done()
  725  send_wr.wr_cqe = &request->cqe;  in smbd_post_send_negotiate_req()
  812  send_wr.wr_cqe = &request->cqe;  in smbd_post_send()
 1024  recv_wr.wr_cqe = &response->cqe;  in smbd_post_recv()
 2038  cqe = wc->wr_cqe;  in register_mr_done()
 2302  reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;  in smbd_register_mr()
 2343  cqe = wc->wr_cqe;  in local_inv_done()
 2370  wr->wr_cqe = &smbdirect_mr->cqe;  in smbd_deregister_mr()

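In both smbdirect clients, smbd_deregister_mr() posts an IB_WR_LOCAL_INV whose done callback (local_inv_done) fires a completion the caller is sleeping on, turning the asynchronous invalidate into a synchronous teardown. A hedged sketch of that flow, with my_mr standing in for struct smbd_mr:

```c
#include <linux/kernel.h>
#include <linux/completion.h>
#include <rdma/ib_verbs.h>

struct my_mr {
	struct ib_cqe		cqe;
	struct completion	invalidate_done;
	u32			rkey;
};

static void my_local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_mr *mr = container_of(wc->wr_cqe, struct my_mr, cqe);

	complete(&mr->invalidate_done);	/* wake the deregistering thread */
}

static int my_deregister_mr(struct ib_qp *qp, struct my_mr *mr)
{
	struct ib_send_wr wr = {
		.opcode			= IB_WR_LOCAL_INV,
		.wr_cqe			= &mr->cqe,
		.send_flags		= IB_SEND_SIGNALED,
		.ex.invalidate_rkey	= mr->rkey,
	};
	int rc;

	mr->cqe.done = my_local_inv_done;
	init_completion(&mr->invalidate_done);
	rc = ib_post_send(qp, &wr, NULL);
	if (rc)
		return rc;
	wait_for_completion(&mr->invalidate_done);
	return 0;
}
```
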
/kernel/linux/linux-6.6/drivers/nvme/host/rdma.c
 1172  op, wc->wr_cqe,  in nvme_rdma_wr_error()
 1186  container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);  in nvme_rdma_inv_rkey_done()
 1206  wr.wr_cqe = &req->reg_cqe;  in nvme_rdma_inv_rkey()
 1328  req->reg_wr.wr.wr_cqe = &req->reg_cqe;  in nvme_rdma_map_sg_fr()
 1433  wr->wr.wr_cqe = &req->reg_cqe;  in nvme_rdma_map_sg_pi()
 1573  container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);  in nvme_rdma_send_done()
 1595  wr.wr_cqe = &qe->cqe;  in nvme_rdma_post_send()
 1628  wr.wr_cqe = &qe->cqe;  in nvme_rdma_post_recv()
 1729  container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);  in nvme_rdma_recv_done()

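nvme-rdma funnels every failed completion through one helper, nvme_rdma_wr_error(), which names the operation and logs the opaque wr_cqe pointer with the decoded status, as the fragments at lines 1259/1172 show. A sketch of that funnel, with illustrative my_* names:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static void my_wr_error(struct ib_wc *wc, const char *op)
{
	pr_err("%s for CQE %p failed with status %s (%d)\n",
	       op, wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
	/* a real driver would also kick off error recovery on the queue */
}

static void my_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		my_wr_error(wc, "LOCAL_INV");
	/* else: the rkey is invalidated and the request can complete */
}
```
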
/kernel/linux/linux-5.10/drivers/infiniband/core/cq.c
  110  if (wc->wr_cqe)  in __ib_process_cq()
  111  wc->wr_cqe->done(cq, wc);  in __ib_process_cq()
  210  * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id

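The two lines from __ib_process_cq() are the entire dispatch mechanism behind every other hit in this listing: if a polled ib_wc carries a wr_cqe, its done callback runs; otherwise the completion is dropped. A consumer opts in by allocating its CQ with ib_alloc_cq() and, per the comment at line 210, must then use wr_cqe rather than wr_id. A minimal consumer sketch with illustrative my_* names (it assumes the QP was created over the returned CQ):

```c
#include <linux/kernel.h>
#include <linux/err.h>
#include <rdma/ib_verbs.h>

static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	pr_info("completion for CQE %p: %s\n", wc->wr_cqe,
		ib_wc_status_msg(wc->status));
}

static struct ib_cqe my_cqe = { .done = my_done };

static int my_setup_and_post(struct ib_device *dev, struct ib_qp *qp)
{
	struct ib_send_wr wr = {
		.wr_cqe		= &my_cqe,	/* never wr_id with ib_alloc_cq() */
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};
	struct ib_cq *cq;

	/* 128 CQEs on completion vector 0, polled from softirq context */
	cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);
	return ib_post_send(qp, &wr, NULL);
}
```
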
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/gsi.c
   72  container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);  in handle_single_completion()
  414  wr->wr.wr_cqe = &gsi_wr->cqe;  in mlx5_ib_add_outstanding_wr()