Lines matching refs: newsk
662 struct sock *newsk;
688 newsk = req->sk;
708 if (newsk && mem_cgroup_sockets_enabled) {
712 * newsk->sk_memcg.
714 lock_sock(newsk);
716 mem_cgroup_sk_alloc(newsk);
717 if (newsk->sk_memcg) {
719 * to look at newsk->sk_wmem_queued.
721 amt = sk_mem_pages(newsk->sk_forward_alloc +
722 atomic_read(&newsk->sk_rmem_alloc));
726 mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
729 release_sock(newsk);
734 if (newsk)
735 inet_init_csk_locks(newsk);
737 return newsk;
739 newsk = NULL;
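
For context, the matches between lines 662 and 739 all sit in the accept path (presumably inet_csk_accept()), inside the block that charges the freshly accepted child's already-queued receive memory to its memory cgroup. A minimal sketch of that block, reconstructed from the matched lines rather than copied from a kernel tree (the gfp flags and the surrounding error handling are assumptions), looks roughly like this:

    if (newsk && mem_cgroup_sockets_enabled) {
            gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;  /* flags assumed, not in the listing */
            int amt = 0;

            /* atomically get the memory usage, set and charge the
             * newsk->sk_memcg.
             */
            lock_sock(newsk);

            mem_cgroup_sk_alloc(newsk);
            if (newsk->sk_memcg) {
                    /* The socket has not been accepted yet, so no need
                     * to look at newsk->sk_wmem_queued.
                     */
                    amt = sk_mem_pages(newsk->sk_forward_alloc +
                                       atomic_read(&newsk->sk_rmem_alloc));
            }

            if (amt)
                    mem_cgroup_charge_skmem(newsk->sk_memcg, amt, gfp);

            release_sock(newsk);
    }

The lock_sock()/release_sock() pair keeps the socket owned while mem_cgroup_sk_alloc() and the charge run, and the charge only has to cover sk_forward_alloc plus sk_rmem_alloc because, as the comment at line 719 notes, a not-yet-accepted socket cannot have transmit data queued.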
826 struct sock *newsk,
831 struct inet_sock *newinet = inet_sk(newsk);
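
Lines 826 and 831 contribute only a parameter and a local variable from a second helper. Assuming that helper is inet_csk_route_child_sock(), whose signature matches the fragment, its opening looks approximately like:

    struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                                                struct sock *newsk,
                                                const struct request_sock *req)
    {
            const struct inet_request_sock *ireq = inet_rsk(req);  /* assumed local */
            struct inet_sock *newinet = inet_sk(newsk);            /* line 831 */

            /* The flow setup and route lookup that actually build the child's
             * dst_entry follow here; they do not touch newsk directly, which is
             * presumably why the listing skips them. */
            return NULL;  /* placeholder to keep the sketch self-contained */
    }

Only the newsk parameter and the newinet initialisation are attested by the listing; everything else in the sketch is an assumption.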
1124 static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
1127 struct inet_connection_sock *icsk = inet_csk(newsk);
1132 icsk->icsk_ulp_ops->clone(req, newsk, priority);
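
The matches skip whatever sits between lines 1127 and 1132; presumably that gap is the guard for sockets with no upper-layer protocol attached. With the gap filled in (the NULL checks are assumptions, not part of the listing), the helper reads roughly:

    static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
                               const gfp_t priority)
    {
            struct inet_connection_sock *icsk = inet_csk(newsk);

            /* Assumed guard: nothing to clone unless the listener has a ULP
             * (e.g. kTLS) attached and that ULP implements clone(). */
            if (!icsk->icsk_ulp_ops || !icsk->icsk_ulp_ops->clone)
                    return;

            icsk->icsk_ulp_ops->clone(req, newsk, priority);
    }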
1141 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1147 struct sock *newsk = sk_clone_lock(sk, priority);
1149 if (newsk) {
1150 struct inet_connection_sock *newicsk = inet_csk(newsk);
1152 inet_sk_set_state(newsk, TCP_SYN_RECV);
1156 inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
1157 inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
1158 inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
1161 sock_reset_flag(newsk, SOCK_RCU_FREE);
1163 inet_sk(newsk)->mc_list = NULL;
1165 newsk->sk_mark = inet_rsk(req)->ir_mark;
1166 atomic64_set(&newsk->sk_cookie,
1177 inet_clone_ulp(req, newsk, priority);
1179 security_inet_csk_clone(newsk, req);
1181 return newsk;
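
Taken together, the matches from line 1141 onward outline the clone path itself. A condensed, hedged reconstruction follows; continuation lines that the listing cuts off (the second argument to atomic64_set()) and fields it never shows (the reset of newicsk state) are assumptions:

    struct sock *inet_csk_clone_lock(const struct sock *sk,
                                     const struct request_sock *req,
                                     const gfp_t priority)
    {
            struct sock *newsk = sk_clone_lock(sk, priority);

            if (newsk) {
                    struct inet_connection_sock *newicsk = inet_csk(newsk);

                    inet_sk_set_state(newsk, TCP_SYN_RECV);
                    newicsk->icsk_bind_hash = NULL;  /* assumed reset, not in the listing */

                    inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
                    inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
                    inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

                    /* Listeners are SOCK_RCU_FREE; their children are not. */
                    sock_reset_flag(newsk, SOCK_RCU_FREE);

                    inet_sk(newsk)->mc_list = NULL;

                    newsk->sk_mark = inet_rsk(req)->ir_mark;
                    atomic64_set(&newsk->sk_cookie,
                                 atomic64_read(&inet_rsk(req)->ir_cookie));  /* continuation assumed */

                    inet_clone_ulp(req, newsk, priority);

                    security_inet_csk_clone(newsk, req);
            }
            return newsk;
    }

As the comment at line 1141 warns, the clone comes back locked from sk_clone_lock(), so the caller is responsible for bh_unlock_sock(newsk) on both the success and the error paths.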