/kernel/linux/linux-5.10/net/dccp/

  minisocks.c
    147  bool own_req;  (in dccp_check_req(), local)
    195  req, &own_req);  (in dccp_check_req())
    197  child = inet_csk_complete_hashdance(sk, child, req, own_req);  (in dccp_check_req())

  ipv4.c
    395  dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (dccp_v4_request_recv_sock() argument)
    400  bool *own_req)  (in dccp_v4_request_recv_sock())
    432  *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);  (in dccp_v4_request_recv_sock())
    433  if (*own_req)  (in dccp_v4_request_recv_sock())

  ipv6.c
    401  dccp_v6_request_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (dccp_v6_request_recv_sock() argument)
    406  bool *own_req)  (in dccp_v6_request_recv_sock())
    421  req_unhash, own_req);  (in dccp_v6_request_recv_sock())
    541  *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);  (in dccp_v6_request_recv_sock())
    543  if (*own_req && ireq->pktopts) {  (in dccp_v6_request_recv_sock())

  dccp.h
    275  bool *own_req);

/kernel/linux/linux-6.6/net/dccp/

  minisocks.c
    148  bool own_req;  (in dccp_check_req(), local)
    196  req, &own_req);  (in dccp_check_req())
    198  child = inet_csk_complete_hashdance(sk, child, req, own_req);  (in dccp_check_req())

  ipv4.c
    407  dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (dccp_v4_request_recv_sock() argument)
    412  bool *own_req)  (in dccp_v4_request_recv_sock())
    444  *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);  (in dccp_v4_request_recv_sock())
    445  if (*own_req)  (in dccp_v4_request_recv_sock())

  ipv6.c
    412  dccp_v6_request_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (dccp_v6_request_recv_sock() argument)
    417  bool *own_req)  (in dccp_v6_request_recv_sock())
    432  req_unhash, own_req);  (in dccp_v6_request_recv_sock())
    552  *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);  (in dccp_v6_request_recv_sock())
    554  if (*own_req && ireq->pktopts) {  (in dccp_v6_request_recv_sock())

  dccp.h
    270  bool *own_req);
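Note: read together, the DCCP hits above show the producer side of the own_req convention. Each *_request_recv_sock() callback takes a bool *own_req out-parameter and sets it from the return value of inet_ehash_nolisten() (lines 432/541 in 5.10, 444/552 in 6.6), i.e. whether the newly created child socket actually made it into the established hash before any other path handled the request. The following is a minimal userspace sketch of that out-parameter shape only; the toy_* names and the single-slot "hash" are made up for illustration and are not kernel APIs.

/*
 * Hypothetical sketch of the bool *own_req out-parameter convention used by
 * the request_recv_sock()/syn_recv_sock() callbacks listed above.
 * toy_* identifiers are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_sock { int id; };

/* Stand-in for inet_ehash_nolisten(): succeeds only if the slot is still
 * empty, i.e. no other path installed a socket for this request first. */
static bool toy_ehash_insert(struct toy_sock **slot, struct toy_sock *newsk)
{
	if (*slot)
		return false;
	*slot = newsk;
	return true;
}

/* Shape of dccp_v4_request_recv_sock(): allocate the child socket and
 * report hash ownership through *own_req. */
static struct toy_sock *toy_recv_sock(struct toy_sock **slot, bool *own_req)
{
	struct toy_sock *newsk = malloc(sizeof(*newsk));

	*own_req = false;
	if (!newsk)
		return NULL;
	newsk->id = 1;
	/* mirrors: *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL); */
	*own_req = toy_ehash_insert(slot, newsk);
	return newsk;
}

int main(void)
{
	struct toy_sock *slot = NULL;	/* one "ehash bucket", initially empty */
	bool own_req;
	struct toy_sock *child = toy_recv_sock(&slot, &own_req);

	if (child && own_req)
		printf("child %d owns the request; caller may complete the hashdance\n", child->id);
	else
		printf("no child, or the request was claimed elsewhere\n");

	free(child);
	return 0;
}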
/kernel/linux/linux-5.10/net/ipv4/

  tcp_minisocks.c
    582  bool own_req;  (in tcp_check_req(), local)
    779  req, &own_req);  (in tcp_check_req())
    783  if (own_req && rsk_drop_req(req)) {  (in tcp_check_req())
    791  *req_stolen = !own_req;  (in tcp_check_req())
    792  return inet_csk_complete_hashdance(sk, child, req, own_req);  (in tcp_check_req())

  syncookies.c
    201  bool own_req;  (in tcp_get_cookie_sock(), local)
    204  NULL, &own_req);  (in tcp_get_cookie_sock())

  tcp_fastopen.c
    262  bool own_req;  (in tcp_fastopen_create_child(), local)
    265  NULL, &own_req);  (in tcp_fastopen_create_child())

  inet_connection_sock.c
    1035  inet_csk_complete_hashdance(struct sock *sk, struct sock *child, struct request_sock *req, bool own_req)  (inet_csk_complete_hashdance() argument)
    1036  struct request_sock *req, bool own_req)  (in inet_csk_complete_hashdance())
    1038  if (own_req) {  (in inet_csk_complete_hashdance())

  tcp_ipv4.c
    1530  tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (tcp_v4_syn_recv_sock() argument)
    1534  bool *own_req)  (in tcp_v4_syn_recv_sock())
    1617  *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),  (in tcp_v4_syn_recv_sock())
    1619  if (likely(*own_req)) {  (in tcp_v4_syn_recv_sock())

/kernel/linux/linux-6.6/net/ipv4/

  tcp_minisocks.c
    608  bool own_req;  (in tcp_check_req(), local)
    804  req, &own_req);  (in tcp_check_req())
    808  if (own_req && rsk_drop_req(req)) {  (in tcp_check_req())
    816  *req_stolen = !own_req;  (in tcp_check_req())
    817  return inet_csk_complete_hashdance(sk, child, req, own_req);  (in tcp_check_req())

  syncookies.c
    199  bool own_req;  (in tcp_get_cookie_sock(), local)
    202  NULL, &own_req);  (in tcp_get_cookie_sock())

  tcp_fastopen.c
    245  bool own_req;  (in tcp_fastopen_create_child(), local)
    248  NULL, &own_req);  (in tcp_fastopen_create_child())

  inet_connection_sock.c
    1088  * CPU may win the "own_req" race so that inet_ehash_insert() fails.  (in reqsk_timer_handler())
    1324  inet_csk_complete_hashdance(struct sock *sk, struct sock *child, struct request_sock *req, bool own_req)  (inet_csk_complete_hashdance() argument)
    1325  struct request_sock *req, bool own_req)  (in inet_csk_complete_hashdance())
    1327  if (own_req) {  (in inet_csk_complete_hashdance())
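Note: the tcp_check_req() and inet_csk_complete_hashdance() hits above show the consumer side: the caller passes &own_req into syn_recv_sock(), completes the hashdance only when own_req came back true, and otherwise reports the request as stolen via *req_stolen = !own_req (lines 791/816). The linux-6.6 comment at inet_connection_sock.c:1088 names the underlying race: another CPU may win the "own_req" race so that inet_ehash_insert() fails. Below is a small userspace model of that race under the assumption of a single contended slot; the toy_* helpers and thread names are hypothetical, and only the control flow mirrors the kernel.

/*
 * Hypothetical model of the own_req race: two threads race to claim one
 * request slot; exactly one observes own_req == true and "completes the
 * hashdance", the other treats the request as stolen, mirroring
 * *req_stolen = !own_req in tcp_check_req(). Not kernel code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int slot_taken = 0;	/* stand-in for the contended ehash slot */

/* Stand-in for inet_ehash_nolisten(): only the first caller succeeds. */
static bool toy_ehash_nolisten(void)
{
	int expected = 0;

	return atomic_compare_exchange_strong(&slot_taken, &expected, 1);
}

static void *cpu_path(void *arg)
{
	const char *name = arg;
	bool own_req = toy_ehash_nolisten();
	bool req_stolen = !own_req;	/* as in tcp_check_req() */

	if (own_req)
		printf("%s: own_req=true, complete the hashdance\n", name);
	else
		printf("%s: req_stolen=%d, drop our child\n", name, req_stolen);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, cpu_path, "cpu0");
	pthread_create(&b, NULL, cpu_path, "cpu1");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Either thread may win on any given run; build with something like cc -pthread to try it.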
/kernel/linux/linux-5.10/include/net/

  inet_connection_sock.h
    48   bool *own_req);
    328  bool own_req);

/kernel/linux/linux-6.6/include/net/

  inet_connection_sock.h
    45   bool *own_req);
    270  bool own_req);
/kernel/linux/linux-5.10/net/mptcp/

  subflow.c
    525  subflow_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (subflow_syn_recv_sock() argument)
    530  bool *own_req)  (in subflow_syn_recv_sock())
    583  req_unhash, own_req);  (in subflow_syn_recv_sock())
    585  if (child && *own_req) {  (in subflow_syn_recv_sock())
    648  WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&  (in subflow_syn_recv_sock())

/kernel/linux/linux-5.10/net/ipv6/

  tcp_ipv6.c
    1219  tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (tcp_v6_syn_recv_sock() argument)
    1223  bool *own_req)  (in tcp_v6_syn_recv_sock())
    1245  req_unhash, own_req);  (in tcp_v6_syn_recv_sock())
    1408  *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),  (in tcp_v6_syn_recv_sock())
    1410  if (*own_req) {  (in tcp_v6_syn_recv_sock())

/kernel/linux/linux-6.6/net/mptcp/

  subflow.c
    766  subflow_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (subflow_syn_recv_sock() argument)
    771  bool *own_req)  (in subflow_syn_recv_sock())
    821  req_unhash, own_req);  (in subflow_syn_recv_sock())
    823  if (child && *own_req) {  (in subflow_syn_recv_sock())
    892  WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&  (in subflow_syn_recv_sock())
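Note: in both MPTCP trees, subflow_syn_recv_sock() does not set own_req itself; the hits at lines 583 and 821 are the tail of a call that passes req_unhash and own_req straight through to the wrapped TCP syn_recv_sock() implementation, and the "if (child && *own_req)" checks show the subflow-specific work running only when that inner callback reported ownership. Below is a minimal userspace sketch of that pass-through wrapper shape, using hypothetical toy_* names rather than the real MPTCP/TCP APIs.

/*
 * Hypothetical sketch of a pass-through wrapper in the style of
 * subflow_syn_recv_sock(): own_req is handed to the inner callback
 * untouched, then checked before doing any wrapper-specific setup.
 * toy_* identifiers are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_sock { int id; };

/* Inner "TCP" callback: creates the child and reports hash ownership. */
static struct toy_sock *toy_tcp_syn_recv_sock(bool slot_free, bool *own_req)
{
	struct toy_sock *child = malloc(sizeof(*child));

	*own_req = false;
	if (!child)
		return NULL;
	child->id = 7;
	*own_req = slot_free;	/* stand-in for inet_ehash_nolisten() */
	return child;
}

/* Wrapper: own_req is forwarded, then checked, as in the
 * "req_unhash, own_req);" and "if (child && *own_req) {" hits above. */
static struct toy_sock *toy_subflow_syn_recv_sock(bool slot_free, bool *own_req)
{
	struct toy_sock *child = toy_tcp_syn_recv_sock(slot_free, own_req);

	if (child && *own_req)
		printf("attach subflow context to child %d\n", child->id);
	return child;
}

int main(void)
{
	bool own_req;
	struct toy_sock *child = toy_subflow_syn_recv_sock(true, &own_req);

	printf("own_req=%d\n", own_req);
	free(child);
	return 0;
}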
/kernel/linux/linux-6.6/net/ipv6/

  tcp_ipv6.c
    1194  tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (tcp_v6_syn_recv_sock() argument)
    1198  bool *own_req)  (in tcp_v6_syn_recv_sock())
    1220  req_unhash, own_req);  (in tcp_v6_syn_recv_sock())
    1382  *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),  (in tcp_v6_syn_recv_sock())
    1384  if (*own_req) {  (in tcp_v6_syn_recv_sock())

/kernel/linux/common_modules/newip/third_party/linux-5.10/net/newip/

  tcp_nip_input.c
    2209  bool own_req;  (in tcp_nip_check_req(), local)
    2236  child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, req, &own_req);  (in tcp_nip_check_req())
    2249  return inet_csk_complete_hashdance(sk, child, req, own_req);  (in tcp_nip_check_req())

  tcp_nip.c
    905  tcp_nip_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req)  (tcp_nip_syn_recv_sock() argument)
    909  bool *own_req)  (in tcp_nip_syn_recv_sock())
    964  * ehash table succeeds *own_req equals true  (in tcp_nip_syn_recv_sock())
    966  *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),  (in tcp_nip_syn_recv_sock())