
Searched refs: op_sg (Results 1 - 14 of 14), sorted by relevance

/kernel/linux/linux-6.6/net/rds/
tcp_send.c
120 bvec_set_page(&bvec, sg_page(&rm->data.op_sg[sg]), in rds_tcp_xmit()
121 rm->data.op_sg[sg].length - off, in rds_tcp_xmit()
122 rm->data.op_sg[sg].offset + off); in rds_tcp_xmit()
124 rm->data.op_sg[sg].length - off); in rds_tcp_xmit()
126 rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]), in rds_tcp_xmit()
127 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off, in rds_tcp_xmit()
134 if (off == rm->data.op_sg[sg].length) { in rds_tcp_xmit()
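
The hits above show rds_tcp_xmit() walking the message's scatterlist with an (sg, off) cursor, wrapping what remains of each entry in a bio_vec via bvec_set_page() before handing it to the TCP socket. A minimal sketch of that cursor pattern, assuming a plain scatterlist array; everything except the sg_page()/bvec_set_page() helpers is hypothetical, not the actual rds_tcp_xmit() body:

#include <linux/scatterlist.h>
#include <linux/bvec.h>

/* Hypothetical walker: resume at entry 'sg', byte offset 'off' into it. */
static void sketch_walk_sg(struct scatterlist *sgl, unsigned int nents,
                           unsigned int sg, unsigned int off)
{
    while (sg < nents) {
        struct bio_vec bvec;

        /* Remaining fragment of the current entry. */
        bvec_set_page(&bvec, sg_page(&sgl[sg]),
                      sgl[sg].length - off,
                      sgl[sg].offset + off);

        /* ... hand bvec to the socket here; on a short write,
         * advance 'off' and retry instead of moving on ... */

        off = 0;    /* the next entry starts from its beginning */
        sg++;
    }
}
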
ib_send.c
79 op->op_sg, op->op_nents, in rds_ib_send_unmap_data()
89 op->op_sg, op->op_nents, in rds_ib_send_unmap_rdma()
129 ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1, in rds_ib_send_unmap_atomic()
513 scat = &rm->data.op_sg[sg]; in rds_ib_xmit()
552 rm->data.op_sg, in rds_ib_xmit()
622 scat = &ic->i_data_op->op_sg[rm->data.op_dmasg]; in rds_ib_xmit()
650 && scat != &rm->data.op_sg[rm->data.op_count]) { in rds_ib_xmit()
708 && scat != &rm->data.op_sg[rm->data.op_count]); in rds_ib_xmit()
716 if (scat == &rm->data.op_sg[rm->data.op_count]) { in rds_ib_xmit()
810 ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, in rds_ib_xmit_atomic()
[additional matches in this file not shown]
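
ib_send.c pairs DMA mapping and unmapping of op_sg around the InfiniBand send path: the xmit functions map the list with ib_dma_map_sg(), and the unmap helpers reverse it on completion. A hedged sketch of that pairing; the function names and the bare (sg, nents) parameters are stand-ins for the real rds_ib_* structures:

#include <rdma/ib_verbs.h>
#include <linux/scatterlist.h>

static int sketch_map_for_send(struct ib_device *dev,
                               struct scatterlist *op_sg, int op_nents)
{
    int count = ib_dma_map_sg(dev, op_sg, op_nents, DMA_TO_DEVICE);

    if (!count)
        return -ENOMEM;     /* nothing mapped */
    /* ... build and post the work request from the mapped entries ... */
    return count;
}

static void sketch_unmap_after_send(struct ib_device *dev,
                                    struct scatterlist *op_sg, int op_nents)
{
    ib_dma_unmap_sg(dev, op_sg, op_nents, DMA_TO_DEVICE);
}
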
message.c
156 __free_page(sg_page(&rm->data.op_sg[i])); in rds_message_purge()
158 put_page(sg_page(&rm->data.op_sg[i])); in rds_message_purge()
348 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); in rds_message_map_pages()
349 if (IS_ERR(rm->data.op_sg)) { in rds_message_map_pages()
350 void *err = ERR_CAST(rm->data.op_sg); in rds_message_map_pages()
356 sg_set_page(&rm->data.op_sg[i], in rds_message_map_pages()
376 sg = rm->data.op_sg; in rds_message_zcopy_from_user()
400 put_page(sg_page(&rm->data.op_sg[i])); in rds_message_zcopy_from_user()
430 sg = rm->data.op_sg; in rds_message_copy_from_user()
477 sg = rm->data.op_sg; in rds_message_inc_copy_to_user()
[additional matches in this file not shown]
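
message.c shows the allocation convention for op_sg: rds_message_alloc_sgs() returns either the scatterlist array or an errno encoded as a pointer, so callers test with IS_ERR() and decode with PTR_ERR()/ERR_CAST() before attaching pages with sg_set_page(). A sketch of that convention, with alloc_sg_array() as a hypothetical stand-in for the real allocator:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static struct scatterlist *alloc_sg_array(unsigned int num_sgs);

static int sketch_attach_pages(struct page **pages, unsigned int num_sgs,
                               struct scatterlist **out)
{
    struct scatterlist *sg = alloc_sg_array(num_sgs);
    unsigned int i;

    if (IS_ERR(sg))
        return PTR_ERR(sg); /* decode the errno hidden in the pointer */

    for (i = 0; i < num_sgs; i++)
        sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

    *out = sg;
    return 0;
}
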
rdma.c
498 struct page *page = sg_page(&ro->op_sg[i]); in rds_rdma_free_op()
516 struct page *page = sg_page(ao->op_sg); in rds_atomic_free_op()
672 op->op_sg = rds_message_alloc_sgs(rm, nr_pages); in rds_cmsg_rdma_args()
673 if (IS_ERR(op->op_sg)) { in rds_cmsg_rdma_args()
674 ret = PTR_ERR(op->op_sg); in rds_cmsg_rdma_args()
770 sg = &op->op_sg[op->op_nents + j]; in rds_cmsg_rdma_args()
915 rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); in rds_cmsg_atomic()
916 if (IS_ERR(rm->atomic.op_sg)) { in rds_cmsg_atomic()
917 ret = PTR_ERR(rm->atomic.op_sg); in rds_cmsg_atomic()
932 sg_set_page(rm->atomic.op_sg, page, in rds_cmsg_atomic()
[additional matches in this file not shown]
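
The interesting rdma.c hit is line 770: as each batch of user pages is pinned for an RDMA op, new scatterlist entries are appended at op_sg[op_nents + j]. A sketch of that append-by-index step; the per-page length/offset handling of the real rds_cmsg_rdma_args() is elided and replaced with uniform values:

#include <linux/scatterlist.h>

static void sketch_append_pages(struct scatterlist *op_sg,
                                unsigned int op_nents,
                                struct page **pages, unsigned int nr,
                                unsigned int len, unsigned int off)
{
    unsigned int j;

    for (j = 0; j < nr; j++) {
        /* Next free slot follows the entries filled so far. */
        struct scatterlist *sg = &op_sg[op_nents + j];

        sg_set_page(sg, pages[j], len, off);
    }
}
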
loop.c
79 struct scatterlist *sgp = &rm->data.op_sg[sg]; in rds_loop_xmit()
rds.h
450 struct scatterlist *op_sg; member
468 struct scatterlist *op_sg; member
483 struct scatterlist *op_sg; member
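
The three rds.h hits are the declarations behind the rm->atomic, rm->rdma and rm->data accesses seen elsewhere: each operation embedded in a message carries its own scatterlist pointer named op_sg. A sketch of that shape; the struct names here are placeholders, and the fields besides op_sg are inferred from the other hits (op_nents, op_count, op_dmasg), not the full rds.h definitions:

#include <linux/scatterlist.h>

struct sketch_atomic_op {
    struct scatterlist *op_sg;  /* single entry for the atomic target */
};

struct sketch_rdma_op {
    unsigned int       op_nents;
    struct scatterlist *op_sg;  /* pinned user pages for RDMA */
};

struct sketch_data_op {
    unsigned int       op_nents;
    unsigned int       op_count;
    unsigned int       op_dmasg;
    struct scatterlist *op_sg;  /* payload pages */
};
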
send.c
381 sg = &rm->data.op_sg[cp->cp_xmit_sg]; in rds_send_xmit()
1275 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); in rds_sendmsg()
1276 if (IS_ERR(rm->data.op_sg)) { in rds_sendmsg()
1277 ret = PTR_ERR(rm->data.op_sg); in rds_sendmsg()
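
send.c adds the per-path transmit state: rds_send_xmit() resumes a partially sent message by indexing op_sg with cp->cp_xmit_sg, while rds_sendmsg() allocates the list with the same IS_ERR() convention as message.c. A tiny sketch of the resume step; only the cp_xmit_sg field name comes from the hits, the rest is hypothetical:

#include <linux/scatterlist.h>

struct sketch_conn_path {
    unsigned int cp_xmit_sg;    /* next scatterlist entry to send */
};

static struct scatterlist *sketch_resume(struct scatterlist *op_sg,
                                         struct sketch_conn_path *cp)
{
    return &op_sg[cp->cp_xmit_sg];
}
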
/kernel/linux/linux-5.10/net/rds/
tcp_send.c
119 sg_page(&rm->data.op_sg[sg]), in rds_tcp_xmit()
120 rm->data.op_sg[sg].offset + off, in rds_tcp_xmit()
121 rm->data.op_sg[sg].length - off, in rds_tcp_xmit()
123 rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]), in rds_tcp_xmit()
124 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off, in rds_tcp_xmit()
131 if (off == rm->data.op_sg[sg].length) { in rds_tcp_xmit()
ib_send.c
79 op->op_sg, op->op_nents, in rds_ib_send_unmap_data()
89 op->op_sg, op->op_nents, in rds_ib_send_unmap_rdma()
129 ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1, in rds_ib_send_unmap_atomic()
514 scat = &rm->data.op_sg[sg]; in rds_ib_xmit()
553 rm->data.op_sg, in rds_ib_xmit()
623 scat = &ic->i_data_op->op_sg[rm->data.op_dmasg]; in rds_ib_xmit()
651 && scat != &rm->data.op_sg[rm->data.op_count]) { in rds_ib_xmit()
709 && scat != &rm->data.op_sg[rm->data.op_count]); in rds_ib_xmit()
717 if (scat == &rm->data.op_sg[rm->data.op_count]) { in rds_ib_xmit()
811 ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, in rds_ib_xmit_atomic()
[additional matches in this file not shown]
message.c
156 __free_page(sg_page(&rm->data.op_sg[i])); in rds_message_purge()
158 put_page(sg_page(&rm->data.op_sg[i])); in rds_message_purge()
348 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); in rds_message_map_pages()
349 if (IS_ERR(rm->data.op_sg)) { in rds_message_map_pages()
350 void *err = ERR_CAST(rm->data.op_sg); in rds_message_map_pages()
356 sg_set_page(&rm->data.op_sg[i], in rds_message_map_pages()
377 sg = rm->data.op_sg; in rds_message_zcopy_from_user()
401 put_page(sg_page(&rm->data.op_sg[i])); in rds_message_zcopy_from_user()
433 sg = rm->data.op_sg; in rds_message_copy_from_user()
480 sg = rm->data.op_sg; in rds_message_inc_copy_to_user()
[additional matches in this file not shown]
rdma.c
495 struct page *page = sg_page(&ro->op_sg[i]); in rds_rdma_free_op()
513 struct page *page = sg_page(ao->op_sg); in rds_atomic_free_op()
669 op->op_sg = rds_message_alloc_sgs(rm, nr_pages); in rds_cmsg_rdma_args()
670 if (IS_ERR(op->op_sg)) { in rds_cmsg_rdma_args()
671 ret = PTR_ERR(op->op_sg); in rds_cmsg_rdma_args()
767 sg = &op->op_sg[op->op_nents + j]; in rds_cmsg_rdma_args()
912 rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); in rds_cmsg_atomic()
913 if (IS_ERR(rm->atomic.op_sg)) { in rds_cmsg_atomic()
914 ret = PTR_ERR(rm->atomic.op_sg); in rds_cmsg_atomic()
929 sg_set_page(rm->atomic.op_sg, page, in rds_cmsg_atomic()
[additional matches in this file not shown]
loop.c
79 struct scatterlist *sgp = &rm->data.op_sg[sg]; in rds_loop_xmit()
rds.h
450 struct scatterlist *op_sg; member
468 struct scatterlist *op_sg; member
483 struct scatterlist *op_sg; member
send.c
382 sg = &rm->data.op_sg[cp->cp_xmit_sg]; in rds_send_xmit()
1277 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); in rds_sendmsg()
1278 if (IS_ERR(rm->data.op_sg)) { in rds_sendmsg()
1279 ret = PTR_ERR(rm->data.op_sg); in rds_sendmsg()