Lines Matching refs:svsk
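
Nearly every hit below comes from one of two linkage idioms in this file. The first is callback interposition: svc_setup_socket() saves the socket's original sk_state_change/sk_data_ready/sk_write_space callbacks in sk_ostate/sk_odata/sk_owspace (lines 1432-1434), points sk_user_data back at the new svc_sock (line 1441), and installs wrappers such as svc_data_ready() that chain to the saved callback and then flag the transport for servicing (lines 374-379). What follows is a minimal userspace sketch of that idiom only, using simplified stand-in types; the default_data_ready() helper and the xpt_data_pending field are illustrative inventions, not kernel definitions. A second sketch, covering the container_of() idiom, follows the listing.

/*
 * Userspace sketch of the callback-interposition idiom: save the
 * socket's original callback, point user_data back at the server-side
 * state, and install a wrapper that chains to the saved callback
 * before flagging work.  Simplified stand-in types, not kernel code.
 */
#include <stdio.h>

struct sock {
	void (*sk_data_ready)(struct sock *sk);
	void *sk_user_data;
};

struct svc_sock {
	void (*sk_odata)(struct sock *sk);	/* saved original callback */
	int xpt_data_pending;			/* stand-in for the XPT_DATA bit */
};

static void default_data_ready(struct sock *sk)
{
	(void)sk;
	printf("socket layer: data ready\n");
}

/* Wrapper installed in place of the original sk_data_ready callback. */
static void svc_data_ready(struct sock *sk)
{
	struct svc_sock *svsk = sk->sk_user_data;

	if (svsk) {
		svsk->sk_odata(sk);		/* chain to the saved callback */
		svsk->xpt_data_pending = 1;	/* mark the transport for servicing */
	}
}

int main(void)
{
	struct sock sk = { .sk_data_ready = default_data_ready };
	struct svc_sock svsk = { 0 };

	/* setup: save the original, link back, install the wrapper */
	svsk.sk_odata = sk.sk_data_ready;
	sk.sk_user_data = &svsk;
	sk.sk_data_ready = svc_data_ready;

	sk.sk_data_ready(&sk);			/* simulate incoming data */
	printf("work flagged: %d\n", svsk.xpt_data_pending);
	return 0;
}

The matching lines: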

158 struct svc_sock *svsk =
160 switch (svsk->sk_sk->sk_family) {
196 static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
198 const struct sock *sk = svsk->sk_sk;
260 svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg)
266 struct socket *sock = svsk->sk_sock;
304 struct svc_sock *svsk =
312 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
324 len = svc_tcp_sock_recv_cmsg(svsk, &msg);
332 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
340 static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
342 unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
343 struct socket *sock = svsk->sk_sock;
367 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
371 if (svsk) {
374 svsk->sk_odata(sk);
375 trace_svcsock_data_ready(&svsk->sk_xprt, 0);
376 if (test_bit(XPT_HANDSHAKE, &svsk->sk_xprt.xpt_flags))
378 if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
379 svc_xprt_enqueue(&svsk->sk_xprt);
388 struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
390 if (svsk) {
393 trace_svcsock_write_space(&svsk->sk_xprt, 0);
394 svsk->sk_owspace(sk);
395 svc_xprt_enqueue(&svsk->sk_xprt);
401 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
405 return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
410 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
412 sock_no_linger(svsk->sk_sock->sk);
429 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
437 complete_all(&svsk->sk_handshake_done);
447 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
448 struct sock *sk = svsk->sk_sock->sk;
450 .ta_sock = svsk->sk_sock,
459 init_completion(&svsk->sk_handshake_done);
467 ret = wait_for_completion_interruptible_timeout(&svsk->sk_handshake_done,
564 struct svc_sock *svsk =
566 struct svc_serv *serv = svsk->sk_xprt.xpt_server;
582 if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
591 svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
593 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
594 err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
598 skb = skb_recv_udp(svsk->sk_sk, MSG_DONTWAIT, &err);
609 sock_write_timestamp(svsk->sk_sk, skb->tstamp);
610 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
614 trace_svcsock_udp_recv(&svsk->sk_xprt, len);
660 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
662 trace_svcsock_udp_recv_err(&svsk->sk_xprt, err);
689 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
721 err = sock_sendmsg(svsk->sk_sock, &msg);
726 err = sock_sendmsg(svsk->sk_sock, &msg);
741 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
749 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
750 required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
751 if (required*2 > sock_wspace(svsk->sk_sk))
753 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
796 static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
798 svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class,
799 &svsk->sk_xprt, serv);
800 clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
801 svsk->sk_sk->sk_data_ready = svc_data_ready;
802 svsk->sk_sk->sk_write_space = svc_write_space;
808 svc_sock_setbufsize(svsk, 3);
811 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
812 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
815 switch (svsk->sk_sk->sk_family) {
817 ip_sock_set_pktinfo(svsk->sk_sock->sk);
820 ip6_sock_set_recvpktinfo(svsk->sk_sock->sk);
833 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
846 * dereference svsk.
851 if (svsk) {
854 svsk->sk_odata(sk);
855 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
856 svc_xprt_enqueue(&svsk->sk_xprt);
865 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
867 if (svsk) {
870 svsk->sk_ostate(sk);
871 trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock);
873 svc_xprt_deferred_close(&svsk->sk_xprt);
882 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
885 struct svc_serv *serv = svsk->sk_xprt.xpt_server;
886 struct socket *sock = svsk->sk_sock;
894 clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
904 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
914 newsock->sk->sk_state_change = svsk->sk_ostate;
915 newsock->sk->sk_data_ready = svsk->sk_odata;
916 newsock->sk->sk_write_space = svsk->sk_owspace;
948 static size_t svc_tcp_restore_pages(struct svc_sock *svsk,
951 size_t len = svsk->sk_datalen;
960 BUG_ON(svsk->sk_pages[i] == NULL);
961 rqstp->rq_pages[i] = svsk->sk_pages[i];
962 svsk->sk_pages[i] = NULL;
968 static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
972 if (svsk->sk_datalen == 0)
974 len = svsk->sk_datalen;
977 svsk->sk_pages[i] = rqstp->rq_pages[i];
982 static void svc_tcp_clear_pages(struct svc_sock *svsk)
986 if (svsk->sk_datalen == 0)
988 len = svsk->sk_datalen;
991 if (svsk->sk_pages[i] == NULL) {
995 put_page(svsk->sk_pages[i]);
996 svsk->sk_pages[i] = NULL;
999 svsk->sk_tcplen = 0;
1000 svsk->sk_datalen = 0;
1006 static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
1014 if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
1018 want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
1019 iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
1022 len = svc_tcp_sock_recv_cmsg(svsk, &msg);
1025 svsk->sk_tcplen += len;
1030 trace_svcsock_marker(&svsk->sk_xprt, svsk->sk_marker);
1031 if (svc_sock_reclen(svsk) + svsk->sk_datalen >
1032 svsk->sk_xprt.xpt_server->sv_max_mesg)
1035 return svc_sock_reclen(svsk);
1039 __func__, svsk->sk_xprt.xpt_server->sv_name,
1040 svc_sock_reclen(svsk));
1041 svc_xprt_deferred_close(&svsk->sk_xprt);
1046 static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
1048 struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
1091 static void svc_tcp_fragment_received(struct svc_sock *svsk)
1094 svsk->sk_tcplen = 0;
1095 svsk->sk_marker = xdr_zero;
1098 tcp_set_rcvlowat(svsk->sk_sk, 1);
1121 struct svc_sock *svsk =
1123 struct svc_serv *serv = svsk->sk_xprt.xpt_server;
1129 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1130 len = svc_tcp_read_marker(svsk, rqstp);
1134 base = svc_tcp_restore_pages(svsk, rqstp);
1135 want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
1138 trace_svcsock_tcp_recv(&svsk->sk_xprt, len);
1139 svsk->sk_tcplen += len;
1140 svsk->sk_datalen += len;
1142 if (len != want || !svc_sock_final_rec(svsk))
1144 if (svsk->sk_datalen < 8)
1147 rqstp->rq_arg.len = svsk->sk_datalen;
1157 if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
1165 len = receive_cb_reply(svsk, rqstp);
1168 svsk->sk_datalen = 0;
1169 svc_tcp_fragment_received(svsk);
1174 svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
1183 svc_tcp_save_pages(svsk, rqstp);
1187 svc_tcp_fragment_received(svsk);
1192 tcp_set_rcvlowat(svsk->sk_sk,
1193 svc_sock_reclen(svsk) - svsk->sk_tcplen);
1195 trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
1196 svc_sock_reclen(svsk),
1197 svsk->sk_tcplen - sizeof(rpc_fraghdr));
1203 trace_svcsock_tcp_recv_eagain(&svsk->sk_xprt, 0);
1206 svsk->sk_datalen = 0;
1208 trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len);
1209 svc_xprt_deferred_close(&svsk->sk_xprt);
1229 static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
1244 buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
1256 ret = sock_sendmsg(svsk->sk_sock, &msg);
1275 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1288 err = svc_tcp_sendmsg(svsk, rqstp, marker, &sent);
1350 static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
1352 struct sock *sk = svsk->sk_sk;
1354 svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class,
1355 &svsk->sk_xprt, serv);
1356 set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
1357 set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
1359 strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
1360 set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
1362 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
1368 svsk->sk_marker = xdr_zero;
1369 svsk->sk_tcplen = 0;
1370 svsk->sk_datalen = 0;
1371 memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
1375 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1381 svc_xprt_deferred_close(&svsk->sk_xprt);
1392 struct svc_sock *svsk;
1395 list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
1396 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
1408 struct svc_sock *svsk;
1412 svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
1413 if (!svsk)
1425 kfree(svsk);
1430 svsk->sk_sock = sock;
1431 svsk->sk_sk = inet;
1432 svsk->sk_ostate = inet->sk_state_change;
1433 svsk->sk_odata = inet->sk_data_ready;
1434 svsk->sk_owspace = inet->sk_write_space;
1441 inet->sk_user_data = svsk;
1445 svc_udp_init(svsk, serv);
1447 svc_tcp_init(svsk, serv);
1449 trace_svcsock_new(svsk, sock);
1450 return svsk;
1471 struct svc_sock *svsk = NULL;
1494 svsk = svc_setup_socket(serv, so, SVC_SOCK_DEFAULTS);
1495 if (IS_ERR(svsk)) {
1497 err = PTR_ERR(svsk);
1500 salen = kernel_getsockname(svsk->sk_sock, sin);
1502 svc_xprt_set_local(&svsk->sk_xprt, sin, salen);
1503 svsk->sk_xprt.xpt_cred = get_cred(cred);
1504 svc_add_new_perm_xprt(serv, &svsk->sk_xprt);
1505 return svc_one_sock_name(svsk, name_return, len);
1521 struct svc_sock *svsk;
1577 svsk = svc_setup_socket(serv, sock, flags);
1578 if (IS_ERR(svsk)) {
1579 error = PTR_ERR(svsk);
1582 svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen);
1583 return (struct svc_xprt *)svsk;
1595 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1596 struct sock *sk = svsk->sk_sk;
1600 sk->sk_state_change = svsk->sk_ostate;
1601 sk->sk_data_ready = svsk->sk_odata;
1602 sk->sk_write_space = svsk->sk_owspace;
1612 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1614 tls_handshake_close(svsk->sk_sock);
1619 svc_tcp_clear_pages(svsk);
1620 kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
1629 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1630 struct page_frag_cache *pfc = &svsk->sk_frag_cache;
1631 struct socket *sock = svsk->sk_sock;
1633 trace_svcsock_free(svsk, sock);
1643 kfree(svsk);
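
The second idiom accounts for the many container_of(xprt, struct svc_sock, sk_xprt) hits above (lines 401, 689, 882, 1275, 1595, 1629, among others): struct svc_sock embeds a struct svc_xprt, the generic transport code passes around only the embedded sk_xprt, and the socket-specific handlers recover the enclosing svc_sock from it. Below is a minimal userspace illustration of that embedded-struct pattern; the struct layouts are pared down to a few of the fields that appear in the listing and are not the full kernel definitions.

/*
 * Userspace sketch of the container_of pattern used to recover a
 * struct svc_sock from the struct svc_xprt embedded inside it.
 * Simplified stand-in structs, not the real kernel definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct svc_xprt {			/* stand-in for the generic transport */
	unsigned long xpt_flags;
};

struct svc_sock {			/* embeds the generic transport */
	struct svc_xprt sk_xprt;
	int sk_tcplen;
	int sk_datalen;
};

/* Generic code sees only the embedded svc_xprt... */
static void handle_xprt(struct svc_xprt *xprt)
{
	/* ...and socket-specific code recovers the container. */
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	printf("datalen=%d tcplen=%d\n", svsk->sk_datalen, svsk->sk_tcplen);
}

int main(void)
{
	struct svc_sock svsk = { .sk_tcplen = 4, .sk_datalen = 120 };

	handle_xprt(&svsk.sk_xprt);
	return 0;
}

Together the two idioms let the same svc_sock be reached from both directions: the transport core recovers it from the embedded sk_xprt, and the socket callbacks recover it from sk->sk_user_data.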