/kernel/linux/linux-5.10/net/rds/
page.c  (all hits in rds_page_remainder_alloc())
     51    * @scat: Scatter list for message
     68    int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,   [argument]
     84    sg_set_page(scat, page, PAGE_SIZE, 0);
    103    sg_set_page(scat, rem->r_page, bytes, rem->r_offset);
    104    get_page(sg_page(scat));
    147    ret ? NULL : sg_page(scat), ret ? 0 : scat->offset,
    148    ret ? 0 : scat->length);
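The page.c hits show the two ways rds_page_remainder_alloc() points a single scatterlist entry at memory: a freshly allocated page used whole, or a slice of a cached remainder page whose refcount is raised with get_page(). A minimal sketch of that pattern, assuming kernel context; the helper name and arguments are illustrative, not the actual RDS code:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Illustrative helper (not the RDS function): point one scatterlist
 * entry either at a whole newly allocated page, or at 'bytes' of an
 * existing remainder page starting at 'offset', mirroring the
 * sg_set_page()/get_page() calls listed above.
 */
static int fill_sg_from_page(struct scatterlist *scat, unsigned long bytes,
			     struct page *rem_page, unsigned int offset,
			     gfp_t gfp)
{
	if (!rem_page) {
		struct page *page = alloc_page(gfp);

		if (!page)
			return -ENOMEM;
		sg_set_page(scat, page, PAGE_SIZE, 0);	/* whole page, offset 0 */
		return 0;
	}

	/* Reuse part of the remainder page; take a reference for this sg entry. */
	sg_set_page(scat, rem_page, bytes, offset);
	get_page(sg_page(scat));
	return 0;
}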
ib_send.c  (hits in rds_ib_xmit() unless noted)
    494    struct scatterlist *scat;   [local]
    514    scat = &rm->data.op_sg[sg];
    515    ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
    623    scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
    651    && scat != &rm->data.op_sg[rm->data.op_count]) {
    653    sg_dma_len(scat) - rm->data.op_dmaoff);
    656    send->s_sge[1].addr = sg_dma_address(scat);
    663    if (rm->data.op_dmaoff == sg_dma_len(scat)) {
    664    scat++;
    709    && scat !   [hit truncated]
    860    struct scatterlist *scat;   [rds_ib_xmit_rdma() local]
    (remaining matches truncated)
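The ib_send.c hits walk a DMA-mapped scatterlist while building send SGEs, tracking a byte offset (op_dmaoff) into the current entry and stepping scat forward once the entry is used up. A hedged sketch of that walk; everything except the sg_dma_address()/sg_dma_len() accessors and struct ib_sge is invented for illustration:

#include <linux/minmax.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative only: consume up to 'max_bytes' from a DMA-mapped
 * scatterlist, one ib_sge per step, the way the rds_ib_xmit() hits
 * above advance 'scat' and the running offset.
 */
static unsigned int fill_sges(struct scatterlist *scat,
			      struct scatterlist *end,
			      unsigned int *dmaoff, struct ib_sge *sge,
			      unsigned int max_bytes, u32 lkey)
{
	unsigned int total = 0;

	while (total < max_bytes && scat != end) {
		unsigned int len = min(max_bytes - total,
				       sg_dma_len(scat) - *dmaoff);

		sge->addr   = sg_dma_address(scat) + *dmaoff;
		sge->length = len;
		sge->lkey   = lkey;
		sge++;

		total   += len;
		*dmaoff += len;
		if (*dmaoff == sg_dma_len(scat)) {	/* entry fully consumed */
			scat++;
			*dmaoff = 0;
		}
	}
	return total;
}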
rds.h
    886    int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
/kernel/linux/linux-6.6/net/rds/ |
page.c  (all hits in rds_page_remainder_alloc())
     51    * @scat: Scatter list for message
     68    int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,   [argument]
     84    sg_set_page(scat, page, PAGE_SIZE, 0);
    103    sg_set_page(scat, rem->r_page, bytes, rem->r_offset);
    104    get_page(sg_page(scat));
    147    ret ? NULL : sg_page(scat), ret ? 0 : scat->offset,
    148    ret ? 0 : scat->length);
ib_send.c  (hits in rds_ib_xmit() unless noted)
    493    struct scatterlist *scat;   [local]
    513    scat = &rm->data.op_sg[sg];
    514    ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
    622    scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
    650    && scat != &rm->data.op_sg[rm->data.op_count]) {
    652    sg_dma_len(scat) - rm->data.op_dmaoff);
    655    send->s_sge[1].addr = sg_dma_address(scat);
    662    if (rm->data.op_dmaoff == sg_dma_len(scat)) {
    663    scat++;
    708    && scat !   [hit truncated]
    859    struct scatterlist *scat;   [rds_ib_xmit_rdma() local]
    (remaining matches truncated)
rds.h
    885    int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/ |
srq.c  (all hits in mlx5_ib_post_srq_recv())
    428    struct mlx5_wqe_data_seg *scat;   [local]
    461    scat = (struct mlx5_wqe_data_seg *)(next + 1);
    464    scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
    465    scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
    466    scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
    470    scat[i].byte_count = 0;
    471    scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
    472    scat[i].addr = 0;
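Both mlx5 receive paths copy the caller's ib_sge array into mlx5_wqe_data_seg entries (byte_count, lkey, addr in big-endian) and, if the WQE has spare slots, close the list with a zero-length segment carrying an invalid lkey so the HCA stops scattering there. A sketch under those assumptions; only the struct fields and MLX5_INVALID_LKEY come from the listing, the helper itself is hypothetical:

#include <linux/mlx5/qp.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative: translate wr->sg_list into hardware data segments and
 * terminate the scatter list, as in the mlx5_ib_post_srq_recv() hits.
 */
static void fill_recv_segs(struct mlx5_wqe_data_seg *scat,
			   const struct ib_recv_wr *wr, int max_segs)
{
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
		scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
		scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
	}

	if (i < max_segs) {		/* spare slot: terminate the list */
		scat[i].byte_count = 0;
		scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
		scat[i].addr       = 0;
	}
}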
wr.c  (all hits in mlx5_ib_post_recv())
   1463    struct mlx5_wqe_data_seg *scat;   [local]
   1499    scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
   1501    scat++;
   1504    set_data_ptr_seg(scat + i, wr->sg_list + i);
   1507    scat[i].byte_count = 0;
   1508    scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
   1509    scat[i].addr = 0;
   1513    sig = (struct mlx5_rwqe_sig *)scat;
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/ |
srq.c  (all hits in mlx4_ib_post_srq_recv())
    308    struct mlx4_wqe_data_seg *scat;   [local]
    340    scat = (struct mlx4_wqe_data_seg *) (next + 1);
    343    scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
    344    scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
    345    scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
    349    scat[i].byte_count = 0;
    350    scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
    351    scat[i].addr = 0;
qp.c  (all hits in _mlx4_ib_post_recv())
   3814    struct mlx4_wqe_data_seg *scat;   [local]
   3849    scat = get_recv_wqe(qp, ind);
   3857    scat->byte_count =
   3860    scat->lkey = cpu_to_be32(wr->sg_list->lkey);
   3861    scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
   3862    scat++;
   3867    __set_data_seg(scat + i, wr->sg_list + i);
   3870    scat[i].byte_count = 0;
   3871    scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
   3872    scat[   [hit truncated]
    (remaining matches truncated)
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx4/ |
srq.c  (all hits in mlx4_ib_post_srq_recv())
    312    struct mlx4_wqe_data_seg *scat;   [local]
    343    scat = (struct mlx4_wqe_data_seg *) (next + 1);
    346    scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
    347    scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
    348    scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
    352    scat[i].byte_count = 0;
    353    scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
    354    scat[i].addr = 0;
qp.c  (all hits in _mlx4_ib_post_recv())
   3847    struct mlx4_wqe_data_seg *scat;   [local]
   3882    scat = get_recv_wqe(qp, ind);
   3890    scat->byte_count =
   3893    scat->lkey = cpu_to_be32(wr->sg_list->lkey);
   3894    scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
   3895    scat++;
   3900    __set_data_seg(scat + i, wr->sg_list + i);
   3903    scat[i].byte_count = 0;
   3904    scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
   3905    scat[   [hit truncated]
    (remaining matches truncated)
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/ |
srq.c  (all hits in mlx5_ib_post_srq_recv())
    407    struct mlx5_wqe_data_seg *scat;   [local]
    440    scat = (struct mlx5_wqe_data_seg *)(next + 1);
    443    scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
    444    scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
    445    scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
    449    scat[i].byte_count = 0;
    450    scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
    451    scat[i].addr = 0;
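One visible difference in the linux-6.6 mlx5 hits: the terminating segment's lkey is taken from a per-device terminate_scatter_list_mkey instead of the MLX5_INVALID_LKEY constant used in 5.10. A sketch of just that terminator; the mkey is passed in as an opaque big-endian value, which is an assumption based on the missing cpu_to_be32() in the listing:

#include <linux/mlx5/qp.h>

/*
 * Illustrative: close the scatter list with a zero-length segment whose
 * lkey is the device's terminate-scatter-list mkey (assumed to already
 * be in big-endian form, as suggested by the 6.6 hits above).
 */
static void terminate_scatter_list(struct mlx5_wqe_data_seg *seg,
				   __be32 term_mkey)
{
	seg->byte_count = 0;
	seg->lkey       = term_mkey;
	seg->addr       = 0;
}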
wr.c  (all hits in mlx5_ib_post_recv())
   1210    struct mlx5_wqe_data_seg *scat;   [local]
   1246    scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
   1248    scat++;
   1251    set_data_ptr_seg(scat + i, wr->sg_list + i);
   1254    scat[i].byte_count = 0;
   1255    scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
   1256    scat[i].addr = 0;
   1260    sig = (struct mlx5_rwqe_sig *)scat;
/kernel/linux/linux-5.10/drivers/infiniband/ulp/srp/ |
ib_srp.c
   1498    srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, struct srp_request *req, struct scatterlist *scat, int count)   [argument]
   1499    struct srp_request *req, struct scatterlist *scat,   [in srp_map_sg_fr()]
   1506    state->sg = scat;   [in srp_map_sg_fr()]
   1526    srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, struct srp_request *req, struct scatterlist *scat, int count)   [argument]
   1527    struct srp_request *req, struct scatterlist *scat,   [in srp_map_sg_dma()]
   1534    for_each_sg(scat, sg, count, i) {   [in srp_map_sg_dma()]
   1588    srp_check_mapping(struct srp_map_state *state, struct srp_rdma_ch *ch, struct srp_request *req, struct scatterlist *scat, int count)   [argument]
   1590    struct scatterlist *scat, int count)   [in srp_check_mapping()]
   1623    struct scatterlist *scat, *sg;   [srp_map_data() local]
   1649    scat = scsi_sglist(scmnd);   [in srp_map_data()]
   1655    count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
   1672    for_each_sg(scat, sg, count, i) {
   1695    buf->va = cpu_to_be64(sg_dma_address(scat));
    (remaining matches truncated)
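ib_srp.c first DMA-maps the SCSI scatterlist with ib_dma_map_sg() and then walks the mapped entries with for_each_sg(), emitting one descriptor per entry in the direct-mapping case. A stripped-down sketch of that map-then-walk step; the descriptor type and helper name are hypothetical, only the mapping and iteration calls are real kernel APIs:

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

struct demo_desc {			/* hypothetical per-entry descriptor */
	__be64 va;
	__be32 len;
	__be32 key;
};

/*
 * Illustrative: DMA-map 'nents' scatterlist entries on 'dev' and emit
 * one descriptor per mapped entry, as in the srp_map_data() hits above.
 */
static int map_and_describe(struct ib_device *dev, struct scatterlist *scat,
			    int nents, enum dma_data_direction dir,
			    struct demo_desc *desc, u32 rkey)
{
	struct scatterlist *sg;
	int i, count;

	count = ib_dma_map_sg(dev, scat, nents, dir);
	if (count <= 0)
		return -EIO;

	for_each_sg(scat, sg, count, i) {
		desc[i].va  = cpu_to_be64(sg_dma_address(sg));
		desc[i].len = cpu_to_be32(sg_dma_len(sg));
		desc[i].key = cpu_to_be32(rkey);
	}
	return count;
}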
/kernel/linux/linux-6.6/drivers/infiniband/ulp/srp/ |
ib_srp.c
   1499    srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, struct srp_request *req, struct scatterlist *scat, int count)   [argument]
   1500    struct srp_request *req, struct scatterlist *scat,   [in srp_map_sg_fr()]
   1507    state->sg = scat;   [in srp_map_sg_fr()]
   1527    srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, struct srp_request *req, struct scatterlist *scat, int count)   [argument]
   1528    struct srp_request *req, struct scatterlist *scat,   [in srp_map_sg_dma()]
   1535    for_each_sg(scat, sg, count, i) {   [in srp_map_sg_dma()]
   1589    srp_check_mapping(struct srp_map_state *state, struct srp_rdma_ch *ch, struct srp_request *req, struct scatterlist *scat, int count)   [argument]
   1591    struct scatterlist *scat, int count)   [in srp_check_mapping()]
   1624    struct scatterlist *scat, *sg;   [srp_map_data() local]
   1650    scat = scsi_sglist(scmnd);   [in srp_map_data()]
   1656    count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
   1673    for_each_sg(scat, sg, count, i) {
   1696    buf->va = cpu_to_be64(sg_dma_address(scat));
    (remaining matches truncated)
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/ |
hns_roce_hw_v1.c  (all hits in hns_roce_v1_post_recv())
    347    struct hns_roce_wqe_data_seg *scat = NULL;   [local]
    387    scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);
    390    set_data_seg(scat + i, wr->sg_list + i);
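A pattern repeated across the mlx4, mlx5, and hns receive paths above: the data segments of a receive WQE start immediately after the control/header segment, hence the (next + 1) and (ctrl + 1) casts. A tiny sketch of that pointer arithmetic with placeholder types (the real segment layouts are hardware-specific):

#include <linux/types.h>

/* Placeholder layouts; the real ones are defined per device family. */
struct demo_ctrl_seg { __be32 word[4]; };
struct demo_data_seg { __be32 byte_count; __be32 lkey; __be64 addr; };

/*
 * Illustrative: data segments are laid out directly after the control
 * segment inside one work queue entry.
 */
static struct demo_data_seg *first_data_seg(struct demo_ctrl_seg *ctrl)
{
	return (struct demo_data_seg *)(ctrl + 1);
}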