Lines matching refs:sk (a short usage sketch of the exported socket setter helpers follows the listing)
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
154 static void sock_def_write_space_wfree(struct sock *sk);
155 static void sock_def_write_space(struct sock *sk);
159 * @sk: Socket to use a capability on or through
167 bool sk_ns_capable(const struct sock *sk,
170 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
177 * @sk: Socket to use a capability on or through
184 bool sk_capable(const struct sock *sk, int cap)
186 return sk_ns_capable(sk, &init_user_ns, cap);
192 * @sk: Socket to use a capability on or through
199 bool sk_net_capable(const struct sock *sk, int cap)
201 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
270 * sk_callback_lock and sk queues locking rules are per-address-family,
298 * @sk: socket to set it on
304 void sk_set_memalloc(struct sock *sk)
306 sock_set_flag(sk, SOCK_MEMALLOC);
307 sk->sk_allocation |= __GFP_MEMALLOC;
312 void sk_clear_memalloc(struct sock *sk)
314 sock_reset_flag(sk, SOCK_MEMALLOC);
315 sk->sk_allocation &= ~__GFP_MEMALLOC;
325 sk_mem_reclaim(sk);
329 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
335 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
338 ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
341 sk, skb);
348 void sk_error_report(struct sock *sk)
350 sk->sk_error_report(sk);
352 switch (sk->sk_family) {
356 trace_inet_sk_error_report(sk);
461 static bool sock_needs_netstamp(const struct sock *sk)
463 switch (sk->sk_family) {
472 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
474 if (sk->sk_flags & flags) {
475 sk->sk_flags &= ~flags;
476 if (sock_needs_netstamp(sk) &&
477 !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
483 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
486 struct sk_buff_head *list = &sk->sk_receive_queue;
488 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
489 atomic_inc(&sk->sk_drops);
490 trace_sock_rcvqueue_full(sk, skb);
494 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
495 atomic_inc(&sk->sk_drops);
500 skb_set_owner_r(skb, sk);
508 sock_skb_set_dropcount(sk, skb);
512 if (!sock_flag(sk, SOCK_DEAD))
513 sk->sk_data_ready(sk);
518 int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
524 err = sk_filter(sk, skb);
529 err = __sock_queue_rcv_skb(sk, skb);
548 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
553 if (sk_filter_trim_cap(sk, skb, trim_cap))
558 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
559 atomic_inc(&sk->sk_drops);
563 bh_lock_sock_nested(sk);
565 bh_lock_sock(sk);
566 if (!sock_owned_by_user(sk)) {
570 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
572 rc = sk_backlog_rcv(sk, skb);
574 mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
575 } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
576 bh_unlock_sock(sk);
577 atomic_inc(&sk->sk_drops);
581 bh_unlock_sock(sk);
584 sock_put(sk);
596 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
598 struct dst_entry *dst = __sk_dst_get(sk);
603 sk_tx_queue_clear(sk);
604 WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
605 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
614 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
616 struct dst_entry *dst = sk_dst_get(sk);
621 sk_dst_reset(sk);
630 static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
634 struct net *net = sock_net(sk);
638 if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
646 WRITE_ONCE(sk->sk_bound_dev_if, ifindex);
648 if (sk->sk_prot->rehash)
649 sk->sk_prot->rehash(sk);
650 sk_dst_reset(sk);
660 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
665 lock_sock(sk);
666 ret = sock_bindtoindex_locked(sk, ifindex);
668 release_sock(sk);
674 static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
678 struct net *net = sock_net(sk);
713 sockopt_lock_sock(sk);
714 ret = sock_bindtoindex_locked(sk, index);
715 sockopt_release_sock(sk);
722 static int sock_getbindtodevice(struct sock *sk, sockptr_t optval,
727 int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
728 struct net *net = sock_net(sk);
763 bool sk_mc_loop(struct sock *sk)
767 if (!sk)
769 /* IPV6_ADDRFORM can change sk->sk_family under us. */
770 switch (READ_ONCE(sk->sk_family)) {
772 return inet_test_bit(MC_LOOP, sk);
775 return inet6_sk(sk)->mc_loop;
783 void sock_set_reuseaddr(struct sock *sk)
785 lock_sock(sk);
786 sk->sk_reuse = SK_CAN_REUSE;
787 release_sock(sk);
791 void sock_set_reuseport(struct sock *sk)
793 lock_sock(sk);
794 sk->sk_reuseport = true;
795 release_sock(sk);
799 void sock_no_linger(struct sock *sk)
801 lock_sock(sk);
802 WRITE_ONCE(sk->sk_lingertime, 0);
803 sock_set_flag(sk, SOCK_LINGER);
804 release_sock(sk);
808 void sock_set_priority(struct sock *sk, u32 priority)
810 lock_sock(sk);
811 WRITE_ONCE(sk->sk_priority, priority);
812 release_sock(sk);
816 void sock_set_sndtimeo(struct sock *sk, s64 secs)
818 lock_sock(sk);
820 WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
822 WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
823 release_sock(sk);
827 static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
830 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
831 sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns);
832 sock_set_flag(sk, SOCK_RCVTSTAMP);
833 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
835 sock_reset_flag(sk, SOCK_RCVTSTAMP);
836 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
840 void sock_enable_timestamps(struct sock *sk)
842 lock_sock(sk);
843 __sock_set_timestamps(sk, true, false, true);
844 release_sock(sk);
848 void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
852 __sock_set_timestamps(sk, valbool, false, false);
855 __sock_set_timestamps(sk, valbool, true, false);
858 __sock_set_timestamps(sk, valbool, false, true);
861 __sock_set_timestamps(sk, valbool, true, true);
866 static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
868 struct net *net = sock_net(sk);
874 if (sk->sk_bound_dev_if)
875 dev = dev_get_by_index(net, sk->sk_bound_dev_if);
898 WRITE_ONCE(sk->sk_bind_phc, phc_index);
903 int sock_set_timestamping(struct sock *sk, int optname,
917 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
918 if (sk_is_tcp(sk)) {
919 if ((1 << sk->sk_state) &
923 atomic_set(&sk->sk_tskey, tcp_sk(sk)->write_seq);
925 atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
927 atomic_set(&sk->sk_tskey, 0);
936 ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
941 WRITE_ONCE(sk->sk_tsflags, val);
942 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
945 sock_enable_timestamp(sk,
948 sock_disable_timestamp(sk,
953 void sock_set_keepalive(struct sock *sk)
955 lock_sock(sk);
956 if (sk->sk_prot->keepalive)
957 sk->sk_prot->keepalive(sk, true);
958 sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
959 release_sock(sk);
963 static void __sock_set_rcvbuf(struct sock *sk, int val)
969 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
981 WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
984 void sock_set_rcvbuf(struct sock *sk, int val)
986 lock_sock(sk);
987 __sock_set_rcvbuf(sk, val);
988 release_sock(sk);
992 static void __sock_set_mark(struct sock *sk, u32 val)
994 if (val != sk->sk_mark) {
995 WRITE_ONCE(sk->sk_mark, val);
996 sk_dst_reset(sk);
1000 void sock_set_mark(struct sock *sk, u32 val)
1002 lock_sock(sk);
1003 __sock_set_mark(sk, val);
1004 release_sock(sk);
1008 static void sock_release_reserved_memory(struct sock *sk, int bytes)
1013 WARN_ON(bytes > sk->sk_reserved_mem);
1014 WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes);
1015 sk_mem_reclaim(sk);
1018 static int sock_reserve_memory(struct sock *sk, int bytes)
1024 if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk))
1033 charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
1039 sk_memory_allocated_add(sk, pages);
1040 allocated = sk_memory_allocated(sk);
1044 if (allocated > sk_prot_mem_limits(sk, 1)) {
1045 sk_memory_allocated_sub(sk, pages);
1046 mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
1049 sk_forward_alloc_add(sk, pages << PAGE_SHIFT);
1051 WRITE_ONCE(sk->sk_reserved_mem,
1052 sk->sk_reserved_mem + (pages << PAGE_SHIFT));
1057 void sockopt_lock_sock(struct sock *sk)
1060 * a bpf prog. bpf has ensured the sk lock has been
1066 lock_sock(sk);
1070 void sockopt_release_sock(struct sock *sk)
1075 release_sock(sk);
1096 int sk_setsockopt(struct sock *sk, int level, int optname,
1100 struct socket *sock = sk->sk_socket;
1112 return sock_setbindtodevice(sk, optval, optlen);
1122 sockopt_lock_sock(sk);
1129 sock_valbool_flag(sk, SOCK_DBG, valbool);
1132 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
1135 sk->sk_reuseport = valbool;
1144 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
1145 sk_dst_reset(sk);
1148 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
1162 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1163 WRITE_ONCE(sk->sk_sndbuf,
1166 sk->sk_write_space(sk);
1188 __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
1200 __sock_set_rcvbuf(sk, max(val, 0));
1204 if (sk->sk_prot->keepalive)
1205 sk->sk_prot->keepalive(sk, valbool);
1206 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
1210 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
1214 sk->sk_no_check_tx = valbool;
1219 sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
1220 sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1221 WRITE_ONCE(sk->sk_priority, val);
1236 sock_reset_flag(sk, SOCK_LINGER);
1241 WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT);
1243 WRITE_ONCE(sk->sk_lingertime, t_sec * HZ);
1244 sock_set_flag(sk, SOCK_LINGER);
1263 sock_set_timestamp(sk, optname, valbool);
1278 ret = sock_set_timestamping(sk, optname, timestamping);
1283 int (*set_rcvlowat)(struct sock *sk, int val) = NULL;
1290 ret = set_rcvlowat(sk, val);
1292 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
1297 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
1303 ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
1312 ret = sk_attach_filter(&fprog, sk);
1324 ret = sk_attach_bpf(ufd, sk);
1333 ret = sk_reuseport_attach_filter(&fprog, sk);
1345 ret = sk_reuseport_attach_bpf(ufd, sk);
1350 ret = reuseport_detach_prog(sk);
1354 ret = sk_detach_filter(sk);
1358 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
1361 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
1368 if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1369 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1374 __sock_set_mark(sk, val);
1377 sock_valbool_flag(sk, SOCK_RCVMARK, valbool);
1381 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
1385 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
1390 int (*set_peek_off)(struct sock *sk, int val);
1394 ret = set_peek_off(sk, val);
1401 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
1405 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
1413 WRITE_ONCE(sk->sk_ll_usec, val);
1419 WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
1422 if (val > READ_ONCE(sk->sk_busy_poll_budget) && !sockopt_capable(CAP_NET_ADMIN)) {
1428 WRITE_ONCE(sk->sk_busy_poll_budget, val);
1444 cmpxchg(&sk->sk_pacing_status,
1448 WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
1449 sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
1453 reuseport_update_incoming_cpu(sk, val);
1458 dst_negative_advice(sk);
1462 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
1463 if (!(sk_is_tcp(sk) ||
1464 (sk->sk_type == SOCK_DGRAM &&
1465 sk->sk_protocol == IPPROTO_UDP)))
1467 } else if (sk->sk_family != PF_RDS) {
1474 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
1494 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1498 sock_valbool_flag(sk, SOCK_TXTIME, true);
1499 sk->sk_clockid = sk_txtime.clockid;
1500 sk->sk_txtime_deadline_mode =
1502 sk->sk_txtime_report_errors =
1507 ret = sock_bindtoindex_locked(sk, val);
1515 sk->sk_userlocks = val | (sk->sk_userlocks &
1528 delta = val - sk->sk_reserved_mem;
1530 sock_release_reserved_memory(sk, -delta);
1532 ret = sock_reserve_memory(sk, delta);
1542 val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
1546 WRITE_ONCE(sk->sk_txrehash, (u8)val);
1553 sockopt_release_sock(sk);
1560 return sk_setsockopt(sock->sk, level, optname,
1565 static const struct cred *sk_get_peer_cred(struct sock *sk)
1569 spin_lock(&sk->sk_peer_lock);
1570 cred = get_cred(sk->sk_peer_cred);
1571 spin_unlock(&sk->sk_peer_lock);
1604 int sk_getsockopt(struct sock *sk, int level, int optname,
1607 struct socket *sock = sk->sk_socket;
1633 v.val = sock_flag(sk, SOCK_DBG);
1637 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1641 v.val = sock_flag(sk, SOCK_BROADCAST);
1645 v.val = READ_ONCE(sk->sk_sndbuf);
1649 v.val = READ_ONCE(sk->sk_rcvbuf);
1653 v.val = sk->sk_reuse;
1657 v.val = sk->sk_reuseport;
1661 v.val = sock_flag(sk, SOCK_KEEPOPEN);
1665 v.val = sk->sk_type;
1669 v.val = sk->sk_protocol;
1673 v.val = sk->sk_family;
1677 v.val = -sock_error(sk);
1679 v.val = xchg(&sk->sk_err_soft, 0);
1683 v.val = sock_flag(sk, SOCK_URGINLINE);
1687 v.val = sk->sk_no_check_tx;
1691 v.val = READ_ONCE(sk->sk_priority);
1696 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
1697 v.ling.l_linger = READ_ONCE(sk->sk_lingertime) / HZ;
1704 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1705 !sock_flag(sk, SOCK_TSTAMP_NEW) &&
1706 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1710 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
1714 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
1718 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
1728 if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) {
1729 v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
1730 v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
1736 lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v,
1742 lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v,
1747 v.val = READ_ONCE(sk->sk_rcvlowat);
1768 spin_lock(&sk->sk_peer_lock);
1769 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1770 spin_unlock(&sk->sk_peer_lock);
1786 spin_lock(&sk->sk_peer_lock);
1787 peer_pid = get_pid(sk->sk_peer_pid);
1788 spin_unlock(&sk->sk_peer_lock);
1815 cred = sk_get_peer_cred(sk);
1852 v.val = sk->sk_state == TCP_LISTEN;
1864 v.val = READ_ONCE(sk->sk_mark);
1868 v.val = sock_flag(sk, SOCK_RCVMARK);
1872 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1876 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1883 v.val = READ_ONCE(sk->sk_peek_off);
1886 v.val = sock_flag(sk, SOCK_NOFCS);
1890 return sock_getbindtodevice(sk, optval, optlen, len);
1893 len = sk_get_filter(sk, optval, len);
1900 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1908 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1913 v.val = READ_ONCE(sk->sk_ll_usec);
1916 v.val = READ_ONCE(sk->sk_prefer_busy_poll);
1924 v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
1928 READ_ONCE(sk->sk_max_pacing_rate));
1933 v.val = READ_ONCE(sk->sk_incoming_cpu);
1940 sk_get_meminfo(sk, meminfo);
1951 v.val = READ_ONCE(sk->sk_napi_id);
1964 v.val64 = sock_gen_cookie(sk);
1968 v.val = sock_flag(sk, SOCK_ZEROCOPY);
1973 v.txtime.clockid = sk->sk_clockid;
1974 v.txtime.flags |= sk->sk_txtime_deadline_mode ?
1976 v.txtime.flags |= sk->sk_txtime_report_errors ?
1981 v.val = READ_ONCE(sk->sk_bound_dev_if);
1988 v.val64 = sock_net(sk)->net_cookie;
1992 v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
1996 v.val = READ_ONCE(sk->sk_reserved_mem);
2001 v.val = READ_ONCE(sk->sk_txrehash);
2024 return sk_getsockopt(sock->sk, level, optname,
2034 static inline void sock_lock_init(struct sock *sk)
2036 if (sk->sk_kern_sock)
2038 sk,
2039 af_family_kern_slock_key_strings[sk->sk_family],
2040 af_family_kern_slock_keys + sk->sk_family,
2041 af_family_kern_key_strings[sk->sk_family],
2042 af_family_kern_keys + sk->sk_family);
2045 sk,
2046 af_family_slock_key_strings[sk->sk_family],
2047 af_family_slock_keys + sk->sk_family,
2048 af_family_key_strings[sk->sk_family],
2049 af_family_keys + sk->sk_family);
2087 struct sock *sk;
2092 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
2093 if (!sk)
2094 return sk;
2096 sk_prot_clear_nulls(sk, prot->obj_size);
2098 sk = kmalloc(prot->obj_size, priority);
2100 if (sk != NULL) {
2101 if (security_sk_alloc(sk, family, priority))
2108 return sk;
2111 security_sk_free(sk);
2114 kmem_cache_free(slab, sk);
2116 kfree(sk);
2120 static void sk_prot_free(struct proto *prot, struct sock *sk)
2128 cgroup_sk_free(&sk->sk_cgrp_data);
2129 mem_cgroup_sk_free(sk);
2130 security_sk_free(sk);
2132 kmem_cache_free(slab, sk);
2134 kfree(sk);
2149 struct sock *sk;
2151 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
2152 if (sk) {
2153 sk->sk_family = family;
2158 sk->sk_prot = sk->sk_prot_creator = prot;
2159 sk->sk_kern_sock = kern;
2160 sock_lock_init(sk);
2161 sk->sk_net_refcnt = kern ? 0 : 1;
2162 if (likely(sk->sk_net_refcnt)) {
2163 get_net_track(net, &sk->ns_tracker, priority);
2166 __netns_tracker_alloc(net, &sk->ns_tracker,
2170 sock_net_set(sk, net);
2171 refcount_set(&sk->sk_wmem_alloc, 1);
2173 mem_cgroup_sk_alloc(sk);
2174 cgroup_sk_alloc(&sk->sk_cgrp_data);
2175 sock_update_classid(&sk->sk_cgrp_data);
2176 sock_update_netprioidx(&sk->sk_cgrp_data);
2177 sk_tx_queue_clear(sk);
2180 return sk;
2189 struct sock *sk = container_of(head, struct sock, sk_rcu);
2192 if (sk->sk_destruct)
2193 sk->sk_destruct(sk);
2195 filter = rcu_dereference_check(sk->sk_filter,
2196 refcount_read(&sk->sk_wmem_alloc) == 0);
2198 sk_filter_uncharge(sk, filter);
2199 RCU_INIT_POINTER(sk->sk_filter, NULL);
2202 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
2205 bpf_sk_storage_free(sk);
2208 if (atomic_read(&sk->sk_omem_alloc))
2210 __func__, atomic_read(&sk->sk_omem_alloc));
2212 if (sk->sk_frag.page) {
2213 put_page(sk->sk_frag.page);
2214 sk->sk_frag.page = NULL;
2217 /* We do not need to acquire sk->sk_peer_lock, we are the last user. */
2218 put_cred(sk->sk_peer_cred);
2219 put_pid(sk->sk_peer_pid);
2221 if (likely(sk->sk_net_refcnt))
2222 put_net_track(sock_net(sk), &sk->ns_tracker);
2224 __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
2226 sk_prot_free(sk->sk_prot_creator, sk);
2229 void sk_destruct(struct sock *sk)
2231 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
2233 if (rcu_access_pointer(sk->sk_reuseport_cb)) {
2234 reuseport_detach_sock(sk);
2239 call_rcu(&sk->sk_rcu, __sk_destruct);
2241 __sk_destruct(&sk->sk_rcu);
2244 static void __sk_free(struct sock *sk)
2246 if (likely(sk->sk_net_refcnt))
2247 sock_inuse_add(sock_net(sk), -1);
2249 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
2250 sock_diag_broadcast_destroy(sk);
2252 sk_destruct(sk);
2255 void sk_free(struct sock *sk)
2260 * If not null, sock_wfree() will call __sk_free(sk) later
2262 if (refcount_dec_and_test(&sk->sk_wmem_alloc))
2263 __sk_free(sk);
2267 static void sk_init_common(struct sock *sk)
2269 skb_queue_head_init(&sk->sk_receive_queue);
2270 skb_queue_head_init(&sk->sk_write_queue);
2271 skb_queue_head_init(&sk->sk_error_queue);
2273 rwlock_init(&sk->sk_callback_lock);
2274 lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
2275 af_rlock_keys + sk->sk_family,
2276 af_family_rlock_key_strings[sk->sk_family]);
2277 lockdep_set_class_and_name(&sk->sk_write_queue.lock,
2278 af_wlock_keys + sk->sk_family,
2279 af_family_wlock_key_strings[sk->sk_family]);
2280 lockdep_set_class_and_name(&sk->sk_error_queue.lock,
2281 af_elock_keys + sk->sk_family,
2282 af_family_elock_key_strings[sk->sk_family]);
2283 lockdep_set_class_and_name(&sk->sk_callback_lock,
2284 af_callback_keys + sk->sk_family,
2285 af_family_clock_key_strings[sk->sk_family]);
2290 * @sk: the socket to clone
2295 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
2297 struct proto *prot = READ_ONCE(sk->sk_prot);
2302 newsk = sk_prot_alloc(prot, priority, sk->sk_family);
2306 sock_copy(newsk, sk);
2344 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
2349 /* sk->sk_memcg will be populated at accept() time */
2355 filter = rcu_dereference(sk->sk_filter);
2365 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
2378 if (bpf_sk_storage_clone(sk, newsk)) {
2408 if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
2415 void sk_free_unlock_clone(struct sock *sk)
2419 sk->sk_destruct = NULL;
2420 bh_unlock_sock(sk);
2421 sk_free(sk);
2425 static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
2431 is_ipv6 = (sk->sk_family == AF_INET6 &&
2432 !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
2437 if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
2443 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
2447 sk->sk_route_caps = dst->dev->features;
2448 if (sk_is_tcp(sk))
2449 sk->sk_route_caps |= NETIF_F_GSO;
2450 if (sk->sk_route_caps & NETIF_F_GSO)
2451 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
2452 if (unlikely(sk->sk_gso_disabled))
2453 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2454 if (sk_can_gso(sk)) {
2456 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2458 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
2459 sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
2464 sk->sk_gso_max_segs = max_segs;
2465 sk_dst_set(sk, dst);
2479 struct sock *sk = skb->sk;
2483 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
2484 if (sock_flag(sk, SOCK_RCU_FREE) &&
2485 sk->sk_write_space == sock_def_write_space) {
2487 free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
2488 sock_def_write_space_wfree(sk);
2491 __sk_free(sk);
2499 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
2500 sk->sk_write_space(sk);
2507 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2508 __sk_free(sk);
2517 struct sock *sk = skb->sk;
2519 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
2520 __sk_free(sk);
2523 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2526 skb->sk = sk;
2528 if (unlikely(!sk_fullsock(sk))) {
2530 sock_hold(sk);
2535 skb_set_hash_from_sk(skb, sk);
2537 * We used to take a refcount on sk, but following operation
2541 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2561 * But we also want to keep skb->sk set because some packet schedulers
2569 if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
2581 struct sock *sk = skb->sk;
2584 atomic_sub(len, &sk->sk_rmem_alloc);
2585 sk_mem_uncharge(sk, len);
2595 sock_put(skb->sk);
2605 if (sk_is_refcounted(skb->sk))
2606 sock_gen_put(skb->sk);
2611 kuid_t sock_i_uid(struct sock *sk)
2615 read_lock_bh(&sk->sk_callback_lock);
2616 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2617 read_unlock_bh(&sk->sk_callback_lock);
2622 unsigned long __sock_i_ino(struct sock *sk)
2626 read_lock(&sk->sk_callback_lock);
2627 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2628 read_unlock(&sk->sk_callback_lock);
2633 unsigned long sock_i_ino(struct sock *sk)
2638 ino = __sock_i_ino(sk);
2647 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2651 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2655 skb_set_owner_w(skb, sk);
2665 struct sock *sk = skb->sk;
2667 atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2670 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2676 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2684 atomic_add(skb->truesize, &sk->sk_omem_alloc);
2685 skb->sk = sk;
2693 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2698 atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
2703 atomic_add(size, &sk->sk_omem_alloc);
2707 atomic_sub(size, &sk->sk_omem_alloc);
2717 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2726 atomic_sub(size, &sk->sk_omem_alloc);
2729 void sock_kfree_s(struct sock *sk, void *mem, int size)
2731 __sock_kfree_s(sk, mem, size, false);
2735 void sock_kzfree_s(struct sock *sk, void *mem, int size)
2737 __sock_kfree_s(sk, mem, size, true);
2744 static long sock_wait_for_wmem(struct sock *sk, long timeo)
2748 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2754 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2755 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2756 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
2758 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2760 if (READ_ONCE(sk->sk_err))
2764 finish_wait(sk_sleep(sk), &wait);
2773 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2781 timeo = sock_sndtimeo(sk, noblock);
2783 err = sock_error(sk);
2788 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2791 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
2794 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2795 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2801 timeo = sock_wait_for_wmem(sk, timeo);
2804 errcode, sk->sk_allocation);
2806 skb_set_owner_w(skb, sk);
2817 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
2824 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
2825 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2844 if (!sock_flag(sk, SOCK_TXTIME))
2861 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2872 ret = __sock_cmsg_send(sk, cmsg, sockc);
2880 static void sk_enter_memory_pressure(struct sock *sk)
2882 if (!sk->sk_prot->enter_memory_pressure)
2885 sk->sk_prot->enter_memory_pressure(sk);
2888 static void sk_leave_memory_pressure(struct sock *sk)
2890 if (sk->sk_prot->leave_memory_pressure) {
2891 INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure,
2892 tcp_leave_memory_pressure, sk);
2894 unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2947 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2949 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2952 sk_enter_memory_pressure(sk);
2953 sk_stream_moderate_sndbuf(sk);
2958 void __lock_sock(struct sock *sk)
2959 __releases(&sk->sk_lock.slock)
2960 __acquires(&sk->sk_lock.slock)
2965 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2967 spin_unlock_bh(&sk->sk_lock.slock);
2969 spin_lock_bh(&sk->sk_lock.slock);
2970 if (!sock_owned_by_user(sk))
2973 finish_wait(&sk->sk_lock.wq, &wait);
2976 void __release_sock(struct sock *sk)
2977 __releases(&sk->sk_lock.slock)
2978 __acquires(&sk->sk_lock.slock)
2982 while ((skb = sk->sk_backlog.head) != NULL) {
2983 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2985 spin_unlock_bh(&sk->sk_lock.slock);
2992 sk_backlog_rcv(sk, skb);
2999 spin_lock_bh(&sk->sk_lock.slock);
3006 sk->sk_backlog.len = 0;
3009 void __sk_flush_backlog(struct sock *sk)
3011 spin_lock_bh(&sk->sk_lock.slock);
3012 __release_sock(sk);
3013 spin_unlock_bh(&sk->sk_lock.slock);
3019 * @sk: sock to wait on
3023 * Now socket state including sk->sk_err is changed only under lock,
3028 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
3033 add_wait_queue(sk_sleep(sk), &wait);
3034 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3035 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
3036 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3037 remove_wait_queue(sk_sleep(sk), &wait);
3044 * @sk: socket
3051 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
3053 bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
3054 struct proto *prot = sk->sk_prot;
3058 sk_memory_allocated_add(sk, amt);
3059 allocated = sk_memory_allocated(sk);
3061 !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
3066 if (allocated <= sk_prot_mem_limits(sk, 0)) {
3067 sk_leave_memory_pressure(sk);
3072 if (allocated > sk_prot_mem_limits(sk, 1))
3073 sk_enter_memory_pressure(sk);
3076 if (allocated > sk_prot_mem_limits(sk, 2))
3081 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
3085 int wmem0 = sk_get_wmem0(sk, prot);
3087 if (sk->sk_type == SOCK_STREAM) {
3088 if (sk->sk_wmem_queued < wmem0)
3090 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
3095 if (sk_has_memory_pressure(sk)) {
3098 if (!sk_under_memory_pressure(sk))
3100 alloc = sk_sockets_allocated_read_positive(sk);
3101 if (sk_prot_mem_limits(sk, 2) > alloc *
3102 sk_mem_pages(sk->sk_wmem_queued +
3103 atomic_read(&sk->sk_rmem_alloc) +
3104 sk->sk_forward_alloc))
3110 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
3111 sk_stream_moderate_sndbuf(sk);
3116 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
3119 mem_cgroup_charge_skmem(sk->sk_memcg, amt,
3127 trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
3129 sk_memory_allocated_sub(sk, amt);
3132 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
3139 * @sk: socket
3147 int __sk_mem_schedule(struct sock *sk, int size, int kind)
3151 sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
3152 ret = __sk_mem_raise_allocated(sk, size, amt, kind);
3154 sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT));
3161 * @sk: socket
3166 void __sk_mem_reduce_allocated(struct sock *sk, int amount)
3168 sk_memory_allocated_sub(sk, amount);
3170 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3171 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
3173 if (sk_under_global_memory_pressure(sk) &&
3174 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
3175 sk_leave_memory_pressure(sk);
3180 * @sk: socket
3183 void __sk_mem_reclaim(struct sock *sk, int amount)
3186 sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT));
3187 __sk_mem_reduce_allocated(sk, amount);
3191 int sk_set_peek_off(struct sock *sk, int val)
3193 WRITE_ONCE(sk->sk_peek_off, val);
3262 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
3292 sock_update_netprioidx(&sock->sk->sk_cgrp_data);
3293 sock_update_classid(&sock->sk->sk_cgrp_data);
3301 static void sock_def_wakeup(struct sock *sk)
3306 wq = rcu_dereference(sk->sk_wq);
3312 static void sock_def_error_report(struct sock *sk)
3317 wq = rcu_dereference(sk->sk_wq);
3320 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
3324 void sock_def_readable(struct sock *sk)
3328 trace_sk_data_ready(sk);
3331 wq = rcu_dereference(sk->sk_wq);
3335 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
3339 static void sock_def_write_space(struct sock *sk)
3348 if (sock_writeable(sk)) {
3349 wq = rcu_dereference(sk->sk_wq);
3355 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
3365 static void sock_def_write_space_wfree(struct sock *sk)
3370 if (sock_writeable(sk)) {
3371 struct socket_wq *wq = rcu_dereference(sk->sk_wq);
3380 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
3384 static void sock_def_destruct(struct sock *sk)
3388 void sk_send_sigurg(struct sock *sk)
3390 if (sk->sk_socket && sk->sk_socket->file)
3391 if (send_sigurg(&sk->sk_socket->file->f_owner))
3392 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
3396 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
3400 sock_hold(sk);
3404 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
3407 __sock_put(sk);
3411 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
3414 __sock_put(sk);
3418 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
3420 sk_init_common(sk);
3421 sk->sk_send_head = NULL;
3423 timer_setup(&sk->sk_timer, NULL, 0);
3425 sk->sk_allocation = GFP_KERNEL;
3426 sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default);
3427 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
3428 sk->sk_state = TCP_CLOSE;
3429 sk->sk_use_task_frag = true;
3430 sk_set_socket(sk, sock);
3432 sock_set_flag(sk, SOCK_ZAPPED);
3435 sk->sk_type = sock->type;
3436 RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
3437 sock->sk = sk;
3439 RCU_INIT_POINTER(sk->sk_wq, NULL);
3441 sk->sk_uid = uid;
3443 rwlock_init(&sk->sk_callback_lock);
3444 if (sk->sk_kern_sock)
3446 &sk->sk_callback_lock,
3447 af_kern_callback_keys + sk->sk_family,
3448 af_family_kern_clock_key_strings[sk->sk_family]);
3451 &sk->sk_callback_lock,
3452 af_callback_keys + sk->sk_family,
3453 af_family_clock_key_strings[sk->sk_family]);
3455 sk->sk_state_change = sock_def_wakeup;
3456 sk->sk_data_ready = sock_def_readable;
3457 sk->sk_write_space = sock_def_write_space;
3458 sk->sk_error_report = sock_def_error_report;
3459 sk->sk_destruct = sock_def_destruct;
3461 sk->sk_frag.page = NULL;
3462 sk->sk_frag.offset = 0;
3463 sk->sk_peek_off = -1;
3465 sk->sk_peer_pid = NULL;
3466 sk->sk_peer_cred = NULL;
3467 spin_lock_init(&sk->sk_peer_lock);
3469 sk->sk_write_pending = 0;
3470 sk->sk_rcvlowat = 1;
3471 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
3472 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
3474 sk->sk_stamp = SK_DEFAULT_STAMP;
3476 seqlock_init(&sk->sk_stamp_seq);
3478 atomic_set(&sk->sk_zckey, 0);
3481 sk->sk_napi_id = 0;
3482 sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
3485 sk->sk_max_pacing_rate = ~0UL;
3486 sk->sk_pacing_rate = ~0UL;
3487 WRITE_ONCE(sk->sk_pacing_shift, 10);
3488 sk->sk_incoming_cpu = -1;
3490 sk_rx_queue_clear(sk);
3496 refcount_set(&sk->sk_refcnt, 1);
3497 atomic_set(&sk->sk_drops, 0);
3501 void sock_init_data(struct socket *sock, struct sock *sk)
3505 make_kuid(sock_net(sk)->user_ns, 0);
3507 sock_init_data_uid(sock, sk, uid);
3511 void lock_sock_nested(struct sock *sk, int subclass)
3514 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
3517 spin_lock_bh(&sk->sk_lock.slock);
3518 if (sock_owned_by_user_nocheck(sk))
3519 __lock_sock(sk);
3520 sk->sk_lock.owned = 1;
3521 spin_unlock_bh(&sk->sk_lock.slock);
3525 void release_sock(struct sock *sk)
3527 spin_lock_bh(&sk->sk_lock.slock);
3528 if (sk->sk_backlog.tail)
3529 __release_sock(sk);
3531 /* Warning : release_cb() might need to release sk ownership,
3532 * ie call sock_release_ownership(sk) before us.
3534 if (sk->sk_prot->release_cb)
3535 sk->sk_prot->release_cb(sk);
3537 sock_release_ownership(sk);
3538 if (waitqueue_active(&sk->sk_lock.wq))
3539 wake_up(&sk->sk_lock.wq);
3540 spin_unlock_bh(&sk->sk_lock.slock);
3544 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
3547 spin_lock_bh(&sk->sk_lock.slock);
3549 if (!sock_owned_by_user_nocheck(sk)) {
3568 __lock_sock(sk);
3569 sk->sk_lock.owned = 1;
3570 __acquire(&sk->sk_lock.slock);
3571 spin_unlock_bh(&sk->sk_lock.slock);
3579 struct sock *sk = sock->sk;
3582 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
3583 ts = ktime_to_timespec64(sock_read_timestamp(sk));
3588 sock_write_timestamp(sk, kt);
3615 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
3617 if (!sock_flag(sk, flag)) {
3618 unsigned long previous_flags = sk->sk_flags;
3620 sock_set_flag(sk, flag);
3626 if (sock_needs_netstamp(sk) &&
3632 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3640 skb = sock_dequeue_err_skb(sk);
3653 sock_recv_timestamp(msg, sk, skb);
3678 struct sock *sk = sock->sk;
3680 /* IPV6_ADDRFORM can change sk->sk_prot under us. */
3681 return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen);
3688 struct sock *sk = sock->sk;
3692 err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len);
3705 struct sock *sk = sock->sk;
3707 /* IPV6_ADDRFORM can change sk->sk_prot under us. */
3708 return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen);
3712 void sk_common_release(struct sock *sk)
3714 if (sk->sk_prot->destroy)
3715 sk->sk_prot->destroy(sk);
3725 sk->sk_prot->unhash(sk);
3739 sock_orphan(sk);
3741 xfrm_sk_free_policy(sk);
3743 sock_put(sk);
3747 void sk_get_meminfo(const struct sock *sk, u32 *mem)
3751 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3752 mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
3753 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3754 mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
3755 mem[SK_MEMINFO_FWD_ALLOC] = sk_forward_alloc_get(sk);
3756 mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
3757 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3758 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
3759 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
4138 struct sock *sk = p;
4140 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
4143 if (sk_is_udp(sk) &&
4144 !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
4147 return sk_busy_loop_timeout(sk, start_time);
4152 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
4154 if (!sk->sk_prot->bind_add)
4156 return sk->sk_prot->bind_add(sk, addr, addr_len);
4161 int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
4169 ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg);
4184 static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg)
4188 ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg);
4200 int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
4204 if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET)
4205 rc = ipmr_sk_ioctl(sk, cmd, arg);
4206 else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6)
4207 rc = ip6mr_sk_ioctl(sk, cmd, arg);
4208 else if (sk_is_phonet(sk))
4209 rc = phonet_sk_ioctl(sk, cmd, arg);
4216 return sock_ioctl_out(sk, cmd, arg);
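
The listing above repeatedly shows the pattern behind the exported setter helpers (sock_set_reuseaddr(), sock_set_keepalive(), sock_set_sndtimeo(), sock_no_linger()): each takes the socket lock with lock_sock(), updates the relevant sk field or flag, and drops the lock with release_sock(). The following is a minimal, hedged sketch of how an in-kernel caller might use those helpers; the function name example_open_kernel_tcp_socket, the choice of sock_create_kern() as the creation path, and the error handling are illustrative assumptions, not taken from the listing.

/*
 * Hedged sketch: configure a kernel-internal TCP socket with the helpers
 * shown in the listing.  Each helper locks the socket internally, so no
 * explicit lock_sock()/release_sock() is needed here.
 */
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <net/sock.h>

static int example_open_kernel_tcp_socket(struct net *net, struct socket **res)
{
	struct socket *sock;
	int err;

	err = sock_create_kern(net, AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err)
		return err;

	sock_set_reuseaddr(sock->sk);	/* sk->sk_reuse = SK_CAN_REUSE */
	sock_set_keepalive(sock->sk);	/* SOCK_KEEPOPEN, plus sk_prot->keepalive() if set */
	sock_set_sndtimeo(sock->sk, 5);	/* sk->sk_sndtimeo = 5 * HZ */
	sock_no_linger(sock->sk);	/* SOCK_LINGER with sk_lingertime = 0 */

	*res = sock;
	return 0;
}

When the caller is finished with such a socket it would be torn down with sock_release(sock), which in turn leads to the sk_common_release()/sk_free() paths visible in the listing.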