/kernel/linux/linux-5.10/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
     55  * Short messages are moved directly into svc_rqst::rq_arg, and
    376  struct xdr_buf *arg = &rqstp->rq_arg;    in svc_rdma_build_arg_xdr()
    613  * @rq_arg: xdr_buf containing ingress RPC/RDMA message
    627  static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,    in svc_rdma_xdr_decode_req() argument
    633  rdma_argp = rq_arg->head[0].iov_base;    in svc_rdma_xdr_decode_req()
    634  xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);    in svc_rdma_xdr_decode_req()
    664  rq_arg->head[0].iov_base = rctxt->rc_stream.p;    in svc_rdma_xdr_decode_req()
    666  rq_arg->head[0].iov_len -= hdr_len;    in svc_rdma_xdr_decode_req()
    667  rq_arg->len -= hdr_len;    in svc_rdma_xdr_decode_req()
    672  trace_svcrdma_decode_short_err(rctxt, rq_arg …    in svc_rdma_xdr_decode_req()
    [more matches not shown]

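Taken together, the svc_rdma_xdr_decode_req() hits above show the usual pattern for a transport header that arrives inline: parse it at the front of rq_arg.head[0], then advance head[0].iov_base past it and shrink head[0].iov_len and rq_arg.len by the header size, so the upper layers see only the RPC message. A minimal sketch of that bookkeeping, using hypothetical stand-in structs (kvec_model, xdr_buf_model) rather than the kernel's real kvec/xdr_buf definitions:

    #include <stddef.h>

    struct kvec_model    { char *iov_base; size_t iov_len; };
    struct xdr_buf_model { struct kvec_model head[1]; size_t len; };

    /* Strip hdr_len bytes of already-parsed transport header from the front
     * of the head kvec; fail if the header claims more than the inline bytes. */
    int consume_transport_header(struct xdr_buf_model *arg, size_t hdr_len)
    {
            if (hdr_len > arg->head[0].iov_len)
                    return -1;                  /* short message */

            arg->head[0].iov_base += hdr_len;   /* head now starts at the RPC message */
            arg->head[0].iov_len  -= hdr_len;
            arg->len              -= hdr_len;   /* total message length shrinks too   */
            return 0;
    }
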
svc_rdma_rw.c
    853  head->rc_arg.head[0] = rqstp->rq_arg.head[0];    in svc_rdma_recv_read_chunk()
    854  head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];    in svc_rdma_recv_read_chunk()
    858  head->rc_arg.len = rqstp->rq_arg.len;    in svc_rdma_recv_read_chunk()
    859  head->rc_arg.buflen = rqstp->rq_arg.buflen;    in svc_rdma_recv_read_chunk()

svc_rdma_backchannel.c
     25  struct xdr_buf *rcvbuf = &rqstp->rq_arg;    in svc_rdma_handle_bc_reply()

/kernel/linux/linux-5.10/net/sunrpc/
svcsock.c
    482  rqstp->rq_arg.len = len;    in svc_udp_recvfrom()
    494  if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb))    in svc_udp_recvfrom()
    500  rqstp->rq_arg.head[0].iov_base = skb->data;    in svc_udp_recvfrom()
    501  rqstp->rq_arg.head[0].iov_len = len;    in svc_udp_recvfrom()
    507  rqstp->rq_arg.page_base = 0;    in svc_udp_recvfrom()
    508  if (len <= rqstp->rq_arg.head[0].iov_len) {    in svc_udp_recvfrom()
    509  rqstp->rq_arg.head[0].iov_len = len;    in svc_udp_recvfrom()
    510  rqstp->rq_arg.page_len = 0;    in svc_udp_recvfrom()
    513  rqstp->rq_arg.page_len = len - rqstp->rq_arg …    in svc_udp_recvfrom()
    [more matches not shown]

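The svc_udp_recvfrom() matches above show how a received datagram of len bytes ends up described by rq_arg: whatever fits in head[0] stays inline, and the remainder is accounted to the page list through page_len. A hedged sketch of just that split, again with simplified stand-in types rather than the kernel's:

    #include <stddef.h>

    struct kvec_model    { char *iov_base; size_t iov_len; };
    struct xdr_buf_model {
            struct kvec_model head[1];
            size_t page_base;
            size_t page_len;
            size_t len;
    };

    /* Record how 'len' received bytes are split between the inline head kvec
     * and the page list; assumes head[0].iov_len currently holds the head's
     * capacity, as it does at this point in svc_udp_recvfrom(). */
    void account_datagram(struct xdr_buf_model *arg, size_t len)
    {
            arg->len = len;
            arg->page_base = 0;

            if (len <= arg->head[0].iov_len) {
                    arg->head[0].iov_len = len;   /* everything fits in the head */
                    arg->page_len = 0;            /* page list carries nothing   */
            } else {
                    /* head stays full; the remainder is carried by the pages */
                    arg->page_len = len - arg->head[0].iov_len;
            }
    }

The bc_svc_process() hits below do the same fixup after copying the backchannel reply's rq_rcv_buf into rq_arg.
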
svc.c
   1183  struct kvec *argv = &rqstp->rq_arg.head[0];    in svc_generic_dispatch()
   1499  struct kvec *argv = &rqstp->rq_arg.head[0];    in svc_process()
   1546  struct kvec *argv = &rqstp->rq_arg.head[0];    in bc_svc_process()
   1562  memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));    in bc_svc_process()
   1566  rqstp->rq_arg.len = req->rq_private_buf.len;    in bc_svc_process()
   1567  if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {    in bc_svc_process()
   1568  rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg …    in bc_svc_process()
   [more matches not shown]

svc_xprt.c
    674  arg = &rqstp->rq_arg;    in svc_alloc_arg()
    817  trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);    in svc_handle_xprt()
    867  rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);    in svc_recv()
   1175  if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))    in svc_defer()
   1184  size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;    in svc_defer()
   1194  dr->argslen = rqstp->rq_arg.len >> 2;    in svc_defer()
   1198  skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;    in svc_defer()
   1199  memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,    in svc_defer()
   1221  rqstp->rq_arg …    in svc_deferred_recv()
   [more matches not shown]

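The svc_defer() hits show how a request is parked for later replay: deferral is refused if any payload sits in the page list; otherwise rq_arg.len is rounded down to whole XDR words and the in-head request is copied into one flat allocation, backing the pointer up over bytes already consumed from head[0]. A simplified userspace sketch of the save side only (deferred_req_model is a hypothetical stand-in for struct svc_deferred_req; the replay path of svc_deferred_recv() is not modelled):

    #include <stdlib.h>
    #include <string.h>
    #include <stddef.h>

    struct kvec_model    { char *iov_base; size_t iov_len; };
    struct xdr_buf_model { struct kvec_model head[1]; size_t page_len; size_t len; };

    /* Hypothetical stand-in for struct svc_deferred_req: the argument bytes
     * are kept as a flat array of XDR words behind the bookkeeping field. */
    struct deferred_req_model {
            size_t        argslen;      /* saved length, in 4-byte XDR words */
            unsigned char args[];       /* flattened copy of the request     */
    };

    struct deferred_req_model *defer_request(const struct xdr_buf_model *arg)
    {
            struct deferred_req_model *dr;
            size_t quads = arg->len >> 2;        /* round down to whole XDR words */
            size_t skip;

            if (arg->page_len)                   /* page data is too big to stash */
                    return NULL;

            dr = malloc(sizeof(*dr) + (quads << 2));
            if (!dr)
                    return NULL;

            /* Back up over head bytes already consumed by earlier decoding so
             * the whole request is captured (cf. the 'skip' arithmetic in
             * svc_defer()); assumes the head buffer is contiguous below
             * iov_base, as it is in the kernel. */
            skip = arg->len - arg->head[0].iov_len;
            dr->argslen = quads;
            memcpy(dr->args, arg->head[0].iov_base - skip, quads << 2);
            return dr;
    }
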
svcauth.c
     69  flavor = svc_getnl(&rqstp->rq_arg.head[0]);    in svc_authenticate()

svcauth_unix.c
    748  struct kvec *argv = &rqstp->rq_arg.head[0];    in svcauth_null_accept()
    808  struct kvec *argv = &rqstp->rq_arg.head[0];    in svcauth_unix_accept()

/kernel/linux/linux-6.6/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
     55  * Short messages are moved directly into svc_rqst::rq_arg, and
    385  struct xdr_buf *arg = &rqstp->rq_arg;    in svc_rdma_build_arg_xdr()
    633  * @rq_arg: xdr_buf containing ingress RPC/RDMA message
    647  static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,    in svc_rdma_xdr_decode_req() argument
    653  rdma_argp = rq_arg->head[0].iov_base;    in svc_rdma_xdr_decode_req()
    654  xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);    in svc_rdma_xdr_decode_req()
    685  rq_arg->head[0].iov_base = rctxt->rc_stream.p;    in svc_rdma_xdr_decode_req()
    687  rq_arg->head[0].iov_len -= hdr_len;    in svc_rdma_xdr_decode_req()
    688  rq_arg->len -= hdr_len;    in svc_rdma_xdr_decode_req()
    693  trace_svcrdma_decode_short_err(rctxt, rq_arg …    in svc_rdma_xdr_decode_req()
    [more matches not shown]

svc_rdma_rw.c
    844  * The chunk data lands in rqstp->rq_arg as a series of contiguous pages,
    858  struct xdr_buf *buf = &info->ri_rqst->rq_arg;    in svc_rdma_read_multiple_chunks()
    906  * The chunk data lands in the page list of rqstp->rq_arg.pages.
    908  * Currently NFSD does not look at the rqstp->rq_arg.tail[0] kvec.
    922  struct xdr_buf *buf = &info->ri_rqst->rq_arg;    in svc_rdma_read_data_item()
   1063  * Transport header, and the rest lands in rqstp->rq_arg.pages.
   1078  struct xdr_buf *buf = &info->ri_rqst->rq_arg;    in svc_rdma_read_special()
   1107  * message in rqstp->rq_arg when there is a positive return code from

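The comments quoted here describe RDMA Read payloads being deposited straight into rq_arg's page list, so once the Reads complete only the xdr_buf accounting has to catch up. The sketch below models that bookkeeping step under simplifying assumptions (stand-in types, a single data item, XDR roundup ignored); it is not the kernel's implementation:

    #include <stddef.h>

    struct kvec_model    { char *iov_base; size_t iov_len; };
    struct xdr_buf_model {
            struct kvec_model head[1];
            size_t page_len;
            size_t len;
            size_t buflen;
    };

    /* Account 'length' payload bytes that an RDMA Read placed directly into
     * the receive buffer's page list: the pages already hold the data, so
     * only the xdr_buf bookkeeping grows. */
    void account_read_payload(struct xdr_buf_model *buf, size_t length)
    {
            buf->page_len = length;   /* the payload occupies the page list   */
            buf->len    += length;    /* the message grew by the payload size */
            buf->buflen += length;    /* and so did the buffer's usable size  */
    }
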
svc_rdma_backchannel.c
     25  struct xdr_buf *rcvbuf = &rqstp->rq_arg;    in svc_rdma_handle_bc_reply()

/kernel/linux/linux-6.6/net/sunrpc/
svcsock.c
    613  rqstp->rq_arg.len = len;    in svc_udp_recvfrom()
    625  if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb))    in svc_udp_recvfrom()
    631  rqstp->rq_arg.head[0].iov_base = skb->data;    in svc_udp_recvfrom()
    632  rqstp->rq_arg.head[0].iov_len = len;    in svc_udp_recvfrom()
    638  rqstp->rq_arg.page_base = 0;    in svc_udp_recvfrom()
    639  if (len <= rqstp->rq_arg.head[0].iov_len) {    in svc_udp_recvfrom()
    640  rqstp->rq_arg.head[0].iov_len = len;    in svc_udp_recvfrom()
    641  rqstp->rq_arg.page_len = 0;    in svc_udp_recvfrom()
    644  rqstp->rq_arg.page_len = len - rqstp->rq_arg …    in svc_udp_recvfrom()
    [more matches not shown]

svc.c
   1430  rqstp->rq_arg.len);    in svc_process_common()
   1569  memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));    in bc_svc_process()
   1573  rqstp->rq_arg.len = req->rq_private_buf.len;    in bc_svc_process()
   1574  if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {    in bc_svc_process()
   1575  rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;    in bc_svc_process()
   1576  rqstp->rq_arg.page_len = 0;    in bc_svc_process()
   1577  } else if (rqstp->rq_arg …    in bc_svc_process()
   [more matches not shown]

svc_xprt.c
    657  struct xdr_buf *arg = &rqstp->rq_arg;    in svc_alloc_arg()
    867  trace_svc_xdr_recvfrom(&rqstp->rq_arg);    in svc_recv()
   1187  if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))    in svc_defer()
   1196  size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;    in svc_defer()
   1206  dr->argslen = rqstp->rq_arg.len >> 2;    in svc_defer()
   1209  skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;    in svc_defer()
   1210  memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,    in svc_defer()
   1234  rqstp->rq_arg.head[0].iov_base = dr->args;    in svc_deferred_recv()
   1236  rqstp->rq_arg …    in svc_deferred_recv()
   [more matches not shown]

/kernel/linux/linux-5.10/fs/nfsd/ |
nfsxdr.c
    274  struct kvec *head = rqstp->rq_arg.head;    in nfssvc_decode_writeargs()
    297  dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;    in nfssvc_decode_writeargs()
    384  args->first.iov_len = rqstp->rq_arg.head[0].iov_len;    in nfssvc_decode_symlinkargs()
    392  if (rqstp->rq_arg.page_len) {    in nfssvc_decode_symlinkargs()
    393  if (args->tlen != rqstp->rq_arg.page_len)    in nfssvc_decode_symlinkargs()
    395  p = rqstp->rq_arg.tail[0].iov_base;    in nfssvc_decode_symlinkargs()

nfs3acl.c
    143  struct kvec *head = rqstp->rq_arg.head;    in nfs3svc_decode_setaclargs()
    156  n = nfsacl_decode(&rqstp->rq_arg, base, NULL,    in nfs3svc_decode_setaclargs()
    160  n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL,    in nfs3svc_decode_setaclargs()

nfs2acl.c
    209  struct kvec *head = rqstp->rq_arg.head;    in nfsaclsvc_decode_setaclargs()
    222  n = nfsacl_decode(&rqstp->rq_arg, base, NULL,    in nfsaclsvc_decode_setaclargs()
    226  n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL,    in nfsaclsvc_decode_setaclargs()

nfs3xdr.c
    404  struct kvec *head = rqstp->rq_arg.head;    in nfs3svc_decode_writeargs()
    405  struct kvec *tail = rqstp->rq_arg.tail;    in nfs3svc_decode_writeargs()
    428  dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;    in nfs3svc_decode_writeargs()
    503  args->first.iov_len = rqstp->rq_arg.head[0].iov_len;    in nfs3svc_decode_symlinkargs()
    506  dlen = args->first.iov_len + rqstp->rq_arg.page_len +    in nfs3svc_decode_symlinkargs()
    507  rqstp->rq_arg.tail[0].iov_len;    in nfs3svc_decode_symlinkargs()

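The nfs3svc_decode_writeargs() hits show the sanity check applied to WRITE payloads: the data actually received after the procedure header, spread across head, pages and tail, must cover the count the client claimed once that count is rounded up to whole XDR words (padding, for example from RPCSEC_GSS, can make the buffer larger than the claim, never smaller). A hedged sketch of that check with stand-in types:

    #include <stddef.h>

    struct kvec_model    { char *iov_base; size_t iov_len; };
    struct xdr_buf_model {
            struct kvec_model head[1];
            size_t page_len;
            struct kvec_model tail[1];
    };

    /* Round a byte count up to a whole number of 4-byte XDR units. */
    static size_t xdr_roundup(size_t len)
    {
            return ((len + 3) >> 2) << 2;
    }

    /* Does the receive buffer really contain 'count' bytes of WRITE payload?
     * 'hdr' is the number of bytes of procedure arguments that precede the
     * data in head[0]. */
    int write_payload_fits(const struct xdr_buf_model *arg, size_t hdr, size_t count)
    {
            size_t dlen;

            if (hdr > arg->head[0].iov_len)     /* malformed header claim */
                    return 0;

            dlen = arg->head[0].iov_len + arg->page_len
                 + arg->tail[0].iov_len - hdr;

            /* Padding may make dlen larger than the client's count;
             * it must never be smaller. */
            return dlen >= xdr_roundup(count);
    }
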
nfscache.c
    113  rp->c_key.k_len = rqstp->rq_arg.len;    in nfsd_reply_cache_alloc()
    297  struct xdr_buf *buf = &rqstp->rq_arg;    in nfsd_cache_csum()
    303  /* rq_arg.head first */    in nfsd_cache_csum()

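nfsd_cache_csum() walks rq_arg to fingerprint an incoming call for duplicate-reply detection: it checksums the head kvec first, then continues into the page array until a fixed prefix length has been covered. The sketch below copies only that shape; the struct names, toy_csum() and CSUM_LEN_MODEL are stand-ins (the kernel uses csum_partial() and RC_CSUMLEN):

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE_MODEL 4096u
    #define CSUM_LEN_MODEL   256u    /* stand-in for RC_CSUMLEN */

    struct kvec_model    { unsigned char *iov_base; size_t iov_len; };
    struct xdr_buf_model {
            struct kvec_model head[1];
            unsigned char **pages;      /* page-sized payload fragments */
            size_t page_base;           /* offset into pages[0]         */
            size_t page_len;
    };

    /* Toy 32-bit checksum; the kernel uses csum_partial() here. */
    static uint32_t toy_csum(const unsigned char *p, size_t len, uint32_t csum)
    {
            while (len--)
                    csum = csum * 31 + *p++;
            return csum;
    }

    /* Checksum the first CSUM_LEN_MODEL bytes of the request: the head kvec
     * first, then as much of the page list as needed. */
    uint32_t cache_csum(const struct xdr_buf_model *buf)
    {
            size_t remaining = buf->head[0].iov_len + buf->page_len;
            size_t len, idx, base;
            uint32_t csum = 0;

            if (remaining > CSUM_LEN_MODEL)
                    remaining = CSUM_LEN_MODEL;

            /* rq_arg.head first */
            len = buf->head[0].iov_len < remaining ? buf->head[0].iov_len : remaining;
            csum = toy_csum(buf->head[0].iov_base, len, csum);
            remaining -= len;

            /* then continue into the page array */
            idx  = buf->page_base / PAGE_SIZE_MODEL;
            base = buf->page_base % PAGE_SIZE_MODEL;
            while (remaining) {
                    len = PAGE_SIZE_MODEL - base;
                    if (len > remaining)
                            len = remaining;
                    csum = toy_csum(buf->pages[idx] + base, len, csum);
                    remaining -= len;
                    base = 0;
                    idx++;
            }
            return csum;
    }
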
nfssvc.c
   1023  return rqstp->rq_arg.len > PAGE_SIZE;    in nfs_request_too_big()
   1040  struct kvec *argv = &rqstp->rq_arg.head[0];    in nfsd_dispatch()

nfsproc.c
    230  nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages,    in nfsd_proc_write()
    482  page_address(rqstp->rq_arg.pages[0]),    in nfsd_proc_symlink()

/kernel/linux/linux-5.10/net/sunrpc/auth_gss/ |
svcauth_gss.c
    720  struct kvec *argv = &rqstp->rq_arg.head[0];    in gss_verify_header()
   1149  struct kvec *argv = &rqstp->rq_arg.head[0];    in gss_read_proxy_verf()
   1159  if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) {    in gss_read_proxy_verf()
   1186  from_offs = rqstp->rq_arg.page_base;    in gss_read_proxy_verf()
   1197  page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,    in gss_read_proxy_verf()
   1237  struct kvec *argv = &rqstp->rq_arg.head[0];    in svcauth_gss_legacy_init()
   1535  struct kvec *argv = &rqstp->rq_arg.head[0];    in svcauth_gss_accept()
   1638  if (unwrap_integ_data(rqstp, &rqstp->rq_arg,    in svcauth_gss_accept()
   1647  if (unwrap_priv_data(rqstp, &rqstp->rq_arg,    in svcauth_gss_accept()

/kernel/linux/linux-5.10/include/linux/sunrpc/ |
svc.h
    249  struct xdr_buf rq_arg;    member
    348  struct kvec *vec = &rqstp->rq_arg.head[0];    in xdr_argsize_check()

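xdr_argsize_check() is a bounds check on the decode cursor: after a call's arguments have been pulled out of rq_arg, the current position must still lie within head[0], otherwise the decoder ran past the data that was actually received. A small self-contained sketch of the same comparison, with stand-in types in place of the kernel's kvec and __be32 pointer:

    #include <stddef.h>
    #include <stdint.h>

    struct kvec_model    { char *iov_base; size_t iov_len; };
    struct xdr_buf_model { struct kvec_model head[1]; };

    /* Return nonzero if decode position 'p' still lies within the head kvec
     * of the argument buffer. */
    int argsize_check(const struct xdr_buf_model *arg, const uint32_t *p)
    {
            const char *base = arg->head[0].iov_base;

            return (const char *)p >= base &&
                   (const char *)p <= base + arg->head[0].iov_len;
    }
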
/kernel/linux/linux-6.6/include/linux/sunrpc/ |
svc.h
    206  struct xdr_buf rq_arg;    member
    471  struct xdr_buf *buf = &rqstp->rq_arg;    in svcxdr_init_decode()

/kernel/linux/linux-6.6/fs/nfsd/ |
nfscache.c
    107  rp->c_key.k_len = rqstp->rq_arg.len;    in nfsd_cacherep_alloc()
    402  /* rq_arg.head first */    in nfsd_cache_csum()
    487  * @start: starting byte in @rqstp->rq_arg of the NFS Call header
    519  csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);    in nfsd_cache_lookup()
