Lines Matching refs:sk

84 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
89 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
277 * @sk_napi_id: id of the last napi context to receive data for sk
515 void (*sk_state_change)(struct sock *sk);
516 void (*sk_data_ready)(struct sock *sk);
517 void (*sk_write_space)(struct sock *sk);
518 void (*sk_error_report)(struct sock *sk);
519 int (*sk_backlog_rcv)(struct sock *sk,
522 struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
526 void (*sk_destruct)(struct sock *sk);
549 * when sk_user_data's sk is added to the bpf's reuseport_array.
563 * @sk: socket
565 static inline bool sk_user_data_is_nocopy(const struct sock *sk)
567 return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
570 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
577 * @sk: socket
581 __rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
584 uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
593 #define rcu_dereference_sk_user_data(sk) \
594 __rcu_dereference_sk_user_data_with_flags(sk, 0)
595 #define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \
601 rcu_assign_pointer(__sk_user_data((sk)), \
604 #define rcu_assign_sk_user_data(sk, ptr) \
605 __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
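
A minimal sketch of how these accessors pair up, assuming <net/sock.h> is available; struct my_priv and both helpers are hypothetical, and readers must hold rcu_read_lock() or the socket lock:

#include <net/sock.h>

/* Hypothetical per-socket private state published through sk_user_data. */
struct my_priv {
        int counter;
};

static void attach_priv(struct sock *sk, struct my_priv *priv)
{
        rcu_assign_sk_user_data(sk, priv);              /* publish for readers */
}

static struct my_priv *get_priv(const struct sock *sk)
{
        /* caller holds rcu_read_lock() or owns the socket lock */
        return rcu_dereference_sk_user_data(sk);
}
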
618 int sk_set_peek_off(struct sock *sk, int val);
620 static inline int sk_peek_offset(struct sock *sk, int flags)
623 return READ_ONCE(sk->sk_peek_off);
629 static inline void sk_peek_offset_bwd(struct sock *sk, int val)
631 s32 off = READ_ONCE(sk->sk_peek_off);
635 WRITE_ONCE(sk->sk_peek_off, off);
639 static inline void sk_peek_offset_fwd(struct sock *sk, int val)
641 sk_peek_offset_bwd(sk, -val);
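
A sketch of how a recvmsg implementation might drive the peek-offset helpers once peeking with an offset is enabled via sk_set_peek_off(); this follows the af_unix pattern, with len being the number of bytes just handed to userspace:

#include <net/sock.h>

static void update_peek_offset_sketch(struct sock *sk, int flags, int len)
{
        if (flags & MSG_PEEK)
                sk_peek_offset_fwd(sk, len);    /* next peek starts further in */
        else
                sk_peek_offset_bwd(sk, len);    /* data consumed, pull offset back */
}
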
672 static inline struct sock *sk_next(const struct sock *sk)
674 return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
677 static inline struct sock *sk_nulls_next(const struct sock *sk)
679 return (!is_a_nulls(sk->sk_nulls_node.next)) ?
680 hlist_nulls_entry(sk->sk_nulls_node.next,
685 static inline bool sk_unhashed(const struct sock *sk)
687 return hlist_unhashed(&sk->sk_node);
690 static inline bool sk_hashed(const struct sock *sk)
692 return !sk_unhashed(sk);
705 static inline void __sk_del_node(struct sock *sk)
707 __hlist_del(&sk->sk_node);
711 static inline bool __sk_del_node_init(struct sock *sk)
713 if (sk_hashed(sk)) {
714 __sk_del_node(sk);
715 sk_node_init(&sk->sk_node);
722 when sk is ALREADY grabbed, e.g. it is found in the hash table
727 static __always_inline void sock_hold(struct sock *sk)
729 refcount_inc(&sk->sk_refcnt);
735 static __always_inline void __sock_put(struct sock *sk)
737 refcount_dec(&sk->sk_refcnt);
740 static inline bool sk_del_node_init(struct sock *sk)
742 bool rc = __sk_del_node_init(sk);
746 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
747 __sock_put(sk);
751 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
753 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
755 if (sk_hashed(sk)) {
756 hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
762 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
764 bool rc = __sk_nulls_del_node_init_rcu(sk);
768 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
769 __sock_put(sk);
774 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
776 hlist_add_head(&sk->sk_node, list);
779 static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
781 sock_hold(sk);
782 __sk_add_node(sk, list);
785 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
787 sock_hold(sk);
788 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
789 sk->sk_family == AF_INET6)
790 hlist_add_tail_rcu(&sk->sk_node, list);
792 hlist_add_head_rcu(&sk->sk_node, list);
795 static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
797 sock_hold(sk);
798 hlist_add_tail_rcu(&sk->sk_node, list);
801 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
803 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
806 static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
808 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
811 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
813 sock_hold(sk);
814 __sk_nulls_add_node_rcu(sk, list);
817 static inline void __sk_del_bind_node(struct sock *sk)
819 __hlist_del(&sk->sk_bind_node);
822 static inline void sk_add_bind_node(struct sock *sk,
825 hlist_add_head(&sk->sk_bind_node, list);
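
A sketch of how the node helpers manage both the hash linkage and the implicit reference: sk_add_node() grabs a reference via sock_hold() before linking, and sk_del_node_init() drops it with __sock_put() after unlinking. The chain and its lock below are hypothetical:

#include <net/sock.h>

static void hash_sock_sketch(struct sock *sk, struct hlist_head *chain,
                             spinlock_t *lock)
{
        spin_lock(lock);
        sk_add_node(sk, chain);         /* refcount now covers the table's pointer */
        spin_unlock(lock);
}

static void unhash_sock_sketch(struct sock *sk, spinlock_t *lock)
{
        spin_lock(lock);
        sk_del_node_init(sk);           /* unlinks and drops the table's reference */
        spin_unlock(lock);
}
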
860 static inline struct user_namespace *sk_user_ns(struct sock *sk)
866 return sk->sk_socket->file->f_cred->user_ns;
880 SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
910 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
912 __set_bit(flag, &sk->sk_flags);
915 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
917 __clear_bit(flag, &sk->sk_flags);
920 static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
924 sock_set_flag(sk, bit);
926 sock_reset_flag(sk, bit);
929 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
931 return test_bit(flag, &sk->sk_flags);
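
The flag helpers in use, roughly as a boolean socket option handler would call them (SOCK_DBG is the flag behind SO_DEBUG); a sketch, not kernel code:

#include <net/sock.h>

static void set_debug_sketch(struct sock *sk, bool valbool)
{
        sock_valbool_flag(sk, SOCK_DBG, valbool);       /* set or clear in one call */

        if (sock_flag(sk, SOCK_DBG))
                SOCK_DEBUG(sk, "debugging enabled on %p\n", sk);
}
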
953 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
955 return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
958 static inline void sk_acceptq_removed(struct sock *sk)
960 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
963 static inline void sk_acceptq_added(struct sock *sk)
965 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
968 static inline bool sk_acceptq_is_full(const struct sock *sk)
970 return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
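
A sketch of the accept-queue bookkeeping a listening socket performs when a new child connection becomes ready; the child handling itself is elided:

#include <net/sock.h>

static bool try_queue_child_sketch(struct sock *listener)
{
        if (sk_acceptq_is_full(listener))
                return false;           /* caller drops the new connection */

        sk_acceptq_added(listener);     /* accounted until accept() calls
                                         * sk_acceptq_removed() */
        return true;
}
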
976 static inline int sk_stream_min_wspace(const struct sock *sk)
978 return READ_ONCE(sk->sk_wmem_queued) >> 1;
981 static inline int sk_stream_wspace(const struct sock *sk)
983 return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
986 static inline void sk_wmem_queued_add(struct sock *sk, int val)
988 WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
991 void sk_stream_write_space(struct sock *sk);
994 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
999 if (!sk->sk_backlog.tail)
1000 WRITE_ONCE(sk->sk_backlog.head, skb);
1002 sk->sk_backlog.tail->next = skb;
1004 WRITE_ONCE(sk->sk_backlog.tail, skb);
1013 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
1015 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
1021 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
1024 if (sk_rcvqueues_full(sk, limit))
1032 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
1035 __sk_add_backlog(sk, skb);
1036 sk->sk_backlog.len += skb->truesize;
1040 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1042 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1045 return __sk_backlog_rcv(sk, skb);
1047 return sk->sk_backlog_rcv(sk, skb);
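
These backlog helpers support the classic softirq receive pattern, mirrored by __sk_receive_skb(): process the packet immediately unless a task owns the socket lock, otherwise park it on the backlog for release_sock() to flush. bh_lock_sock()/bh_unlock_sock() are sock.h macros not matched in this excerpt; the function below is a simplified sketch:

#include <net/sock.h>

static int do_rcv_sketch(struct sock *sk, struct sk_buff *skb)
{
        int rc = 0;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                rc = sk_backlog_rcv(sk, skb);   /* owner not running: handle now */
        } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
                rc = -ENOBUFS;                  /* backlog over limit: drop */
                kfree_skb(skb);
        }
        bh_unlock_sock(sk);
        return rc;
}
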
1050 static inline void sk_incoming_cpu_update(struct sock *sk)
1054 if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
1055 WRITE_ONCE(sk->sk_incoming_cpu, cpu);
1070 static inline void sock_rps_record_flow(const struct sock *sk)
1074 /* Reading sk->sk_rxhash might incur an expensive cache line
1079 * IPv4: inet_sk(sk)->inet_daddr
1080 * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
1084 if (sk->sk_state == TCP_ESTABLISHED) {
1088 sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
1094 static inline void sock_rps_save_rxhash(struct sock *sk,
1101 if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
1102 WRITE_ONCE(sk->sk_rxhash, skb->hash);
1106 static inline void sock_rps_reset_rxhash(struct sock *sk)
1110 WRITE_ONCE(sk->sk_rxhash, 0);
1131 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
1132 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
1133 void sk_stream_wait_close(struct sock *sk, long timeo_p);
1134 int sk_stream_error(struct sock *sk, int flags, int err);
1135 void sk_stream_kill_queues(struct sock *sk);
1136 void sk_set_memalloc(struct sock *sk);
1137 void sk_clear_memalloc(struct sock *sk);
1139 void __sk_flush_backlog(struct sock *sk);
1141 static inline bool sk_flush_backlog(struct sock *sk)
1143 if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
1144 __sk_flush_backlog(sk);
1150 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
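
A sketch of how a stream sendmsg path blocks on these wait helpers until send-buffer space frees up, in the spirit of tcp_sendmsg(); error handling is trimmed, and noblock would come from MSG_DONTWAIT:

#include <net/sock.h>

static int wait_for_wspace_sketch(struct sock *sk, bool noblock)
{
        long timeo = sock_sndtimeo(sk, noblock);

        if (sk_stream_memory_free(sk))
                return 0;                          /* room already available */

        /* sleeps until writable, timeout, signal or socket error */
        return sk_stream_wait_memory(sk, &timeo);
}
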
1163 static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1166 memset(sk, 0, offsetof(struct sock, sk_node.next));
1167 memset(&sk->sk_node.pprev, 0,
1175 void (*close)(struct sock *sk,
1177 int (*pre_connect)(struct sock *sk,
1180 int (*connect)(struct sock *sk,
1183 int (*disconnect)(struct sock *sk, int flags);
1185 struct sock * (*accept)(struct sock *sk, int flags, int *err,
1188 int (*ioctl)(struct sock *sk, int cmd,
1190 int (*init)(struct sock *sk);
1191 void (*destroy)(struct sock *sk);
1192 void (*shutdown)(struct sock *sk, int how);
1193 int (*setsockopt)(struct sock *sk, int level,
1196 int (*getsockopt)(struct sock *sk, int level,
1199 void (*keepalive)(struct sock *sk, int valbool);
1201 int (*compat_ioctl)(struct sock *sk,
1204 int (*sendmsg)(struct sock *sk, struct msghdr *msg,
1206 int (*recvmsg)(struct sock *sk, struct msghdr *msg,
1209 int (*sendpage)(struct sock *sk, struct page *page,
1211 int (*bind)(struct sock *sk,
1213 int (*bind_add)(struct sock *sk,
1216 int (*backlog_rcv) (struct sock *sk,
1221 void (*release_cb)(struct sock *sk);
1223 /* Keeping track of sk's, looking them up, and port selection methods. */
1224 int (*hash)(struct sock *sk);
1225 void (*unhash)(struct sock *sk);
1226 void (*rehash)(struct sock *sk);
1227 int (*get_port)(struct sock *sk, unsigned short snum);
1234 bool (*stream_memory_free)(const struct sock *sk, int wake);
1235 bool (*stream_memory_read)(const struct sock *sk);
1237 void (*enter_memory_pressure)(struct sock *sk);
1238 void (*leave_memory_pressure)(struct sock *sk);
1285 int (*diag_destroy)(struct sock *sk, int err);
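
A sketch of wiring a handful of the struct proto callbacks listed above; the protocol, its stub handlers and the obj_size choice are hypothetical, and a real protocol fills in many more fields (hash/unhash, get_port, sendmsg/recvmsg, ...). The table would then be registered with proto_register():

#include <linux/module.h>
#include <net/sock.h>

static int myproto_init(struct sock *sk)
{
        return 0;
}

static void myproto_close(struct sock *sk, long timeout)
{
        sk_common_release(sk);          /* generic unhash + final sock_put() */
}

static struct proto myproto = {
        .name     = "MYPROTO",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct sock),
        .init     = myproto_init,
        .close    = myproto_close,
};
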
1293 static inline void sk_refcnt_debug_inc(struct sock *sk)
1295 atomic_inc(&sk->sk_prot->socks);
1298 static inline void sk_refcnt_debug_dec(struct sock *sk)
1300 atomic_dec(&sk->sk_prot->socks);
1302 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
1305 static inline void sk_refcnt_debug_release(const struct sock *sk)
1307 if (refcount_read(&sk->sk_refcnt) != 1)
1309 sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
1312 #define sk_refcnt_debug_inc(sk) do { } while (0)
1313 #define sk_refcnt_debug_dec(sk) do { } while (0)
1314 #define sk_refcnt_debug_release(sk) do { } while (0)
1317 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
1319 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
1322 return sk->sk_prot->stream_memory_free ?
1323 sk->sk_prot->stream_memory_free(sk, wake) : true;
1326 static inline bool sk_stream_memory_free(const struct sock *sk)
1328 return __sk_stream_memory_free(sk, 0);
1331 static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
1333 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
1334 __sk_stream_memory_free(sk, wake);
1337 static inline bool sk_stream_is_writeable(const struct sock *sk)
1339 return __sk_stream_is_writeable(sk, 0);
1342 static inline int sk_under_cgroup_hierarchy(struct sock *sk,
1346 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
1353 static inline bool sk_has_memory_pressure(const struct sock *sk)
1355 return sk->sk_prot->memory_pressure != NULL;
1358 static inline bool sk_under_global_memory_pressure(const struct sock *sk)
1360 return sk->sk_prot->memory_pressure &&
1361 !!READ_ONCE(*sk->sk_prot->memory_pressure);
1364 static inline bool sk_under_memory_pressure(const struct sock *sk)
1366 if (!sk->sk_prot->memory_pressure)
1369 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
1370 mem_cgroup_under_socket_pressure(sk->sk_memcg))
1373 return !!READ_ONCE(*sk->sk_prot->memory_pressure);
1377 sk_memory_allocated(const struct sock *sk)
1379 return atomic_long_read(sk->sk_prot->memory_allocated);
1383 sk_memory_allocated_add(struct sock *sk, int amt)
1385 return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
1389 sk_memory_allocated_sub(struct sock *sk, int amt)
1391 atomic_long_sub(amt, sk->sk_prot->memory_allocated);
1394 static inline void sk_sockets_allocated_dec(struct sock *sk)
1396 percpu_counter_dec(sk->sk_prot->sockets_allocated);
1399 static inline void sk_sockets_allocated_inc(struct sock *sk)
1401 percpu_counter_inc(sk->sk_prot->sockets_allocated);
1405 sk_sockets_allocated_read_positive(struct sock *sk)
1407 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
1447 static inline int __sk_prot_rehash(struct sock *sk)
1449 sk->sk_prot->unhash(sk);
1450 return sk->sk_prot->hash(sk);
1486 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
1487 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1488 void __sk_mem_reduce_allocated(struct sock *sk, int amount);
1489 void __sk_mem_reclaim(struct sock *sk, int amount);
1500 static inline long sk_prot_mem_limits(const struct sock *sk, int index)
1502 long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]);
1517 static inline bool sk_has_account(struct sock *sk)
1520 return !!sk->sk_prot->memory_allocated;
1523 static inline bool sk_wmem_schedule(struct sock *sk, int size)
1527 if (!sk_has_account(sk))
1529 delta = size - sk->sk_forward_alloc;
1530 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND);
1534 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
1538 if (!sk_has_account(sk))
1540 delta = size - sk->sk_forward_alloc;
1541 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
1545 static inline void sk_mem_reclaim(struct sock *sk)
1547 if (!sk_has_account(sk))
1549 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
1550 __sk_mem_reclaim(sk, sk->sk_forward_alloc);
1553 static inline void sk_mem_reclaim_partial(struct sock *sk)
1555 if (!sk_has_account(sk))
1557 if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
1558 __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
1561 static inline void sk_mem_charge(struct sock *sk, int size)
1563 if (!sk_has_account(sk))
1565 sk->sk_forward_alloc -= size;
1568 static inline void sk_mem_uncharge(struct sock *sk, int size)
1570 if (!sk_has_account(sk))
1572 sk->sk_forward_alloc += size;
1581 if (unlikely(sk->sk_forward_alloc >= 1 << 21))
1582 __sk_mem_reclaim(sk, 1 << 20);
1586 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
1588 sk_wmem_queued_add(sk, -skb->truesize);
1589 sk_mem_uncharge(sk, skb->truesize);
1591 !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
1594 sk->sk_tx_skb_cache = skb;
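
The charge/uncharge helpers are meant to bracket queue operations. Below is a sketch of the send-side accounting for appending copy bytes to the write queue (the same trio appears in skb_copy_to_page_nocache() further down); the socket lock is assumed held:

#include <net/sock.h>

static int account_tx_bytes_sketch(struct sock *sk, int copy)
{
        if (!sk_wmem_schedule(sk, copy))   /* reserve forward-alloc quota */
                return -ENOMEM;

        sk_wmem_queued_add(sk, copy);      /* bytes now sitting on the queue */
        sk_mem_charge(sk, copy);           /* consume the reservation */
        return 0;
}
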
1600 static inline void sock_release_ownership(struct sock *sk)
1602 if (sk->sk_lock.owned) {
1603 sk->sk_lock.owned = 0;
1606 mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
1617 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
1619 sk->sk_lock.owned = 0; \
1620 init_waitqueue_head(&sk->sk_lock.wq); \
1621 spin_lock_init(&(sk)->sk_lock.slock); \
1622 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1623 sizeof((sk)->sk_lock)); \
1624 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1626 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1630 static inline bool lockdep_sock_is_held(const struct sock *sk)
1632 return lockdep_is_held(&sk->sk_lock) ||
1633 lockdep_is_held(&sk->sk_lock.slock);
1637 void lock_sock_nested(struct sock *sk, int subclass);
1639 static inline void lock_sock(struct sock *sk)
1641 lock_sock_nested(sk, 0);
1644 void __release_sock(struct sock *sk);
1645 void release_sock(struct sock *sk);
1654 bool lock_sock_fast(struct sock *sk);
1657 * @sk: socket
1663 static inline void unlock_sock_fast(struct sock *sk, bool slow)
1666 release_sock(sk);
1668 spin_unlock_bh(&sk->sk_lock.slock);
1685 static inline void sock_owned_by_me(const struct sock *sk)
1688 WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
1692 static inline void sock_not_owned_by_me(const struct sock *sk)
1695 WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks);
1699 static inline bool sock_owned_by_user(const struct sock *sk)
1701 sock_owned_by_me(sk);
1702 return sk->sk_lock.owned;
1705 static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
1707 return sk->sk_lock.owned;
1713 struct sock *sk = (struct sock *)csk;
1715 return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
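
Typical use of the socket lock from process context, plus the lighter lock_sock_fast() variant for short sections; both functions are illustrative sketches, not kernel code:

#include <net/sock.h>

static void toggle_dbg_locked_sketch(struct sock *sk, bool on)
{
        lock_sock(sk);                          /* may sleep; task now owns sk */
        sock_valbool_flag(sk, SOCK_DBG, on);
        release_sock(sk);                       /* also flushes the backlog */
}

static int read_err_fast_sketch(struct sock *sk)
{
        bool slow = lock_sock_fast(sk);         /* spinlock unless contended */
        int err = sock_error(sk);

        unlock_sock_fast(sk, slow);
        return err;
}
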
1720 void sk_free(struct sock *sk);
1721 void sk_destruct(struct sock *sk);
1722 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1723 void sk_free_unlock_clone(struct sock *sk);
1725 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1729 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1748 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1750 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1753 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1754 void sock_kfree_s(struct sock *sk, void *mem, int size);
1755 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1756 void sk_send_sigurg(struct sock *sk);
1765 const struct sock *sk)
1767 *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
1770 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1772 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1788 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
1794 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
1808 void sk_common_release(struct sock *sk);
1815 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
1820 void sock_init_data(struct socket *sock, struct sock *sk);
1848 static inline void sock_put(struct sock *sk)
1850 if (refcount_dec_and_test(&sk->sk_refcnt))
1851 sk_free(sk);
1856 void sock_gen_put(struct sock *sk);
1858 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1860 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1863 return __sk_receive_skb(sk, skb, nested, 1, true);
1866 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1874 WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
1879 static inline void sk_tx_queue_clear(struct sock *sk)
1884 WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
1887 static inline int sk_tx_queue_get(const struct sock *sk)
1889 if (sk) {
1893 int val = READ_ONCE(sk->sk_tx_queue_mapping);
1901 static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
1910 sk->sk_rx_queue_mapping = rx_queue;
1915 static inline void sk_rx_queue_clear(struct sock *sk)
1918 sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
1923 static inline int sk_rx_queue_get(const struct sock *sk)
1925 if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
1926 return sk->sk_rx_queue_mapping;
1932 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1934 sk->sk_socket = sock;
1937 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
1940 return &rcu_dereference_raw(sk->sk_wq)->wait;
1949 static inline void sock_orphan(struct sock *sk)
1951 write_lock_bh(&sk->sk_callback_lock);
1952 sock_set_flag(sk, SOCK_DEAD);
1953 sk_set_socket(sk, NULL);
1954 sk->sk_wq = NULL;
1955 write_unlock_bh(&sk->sk_callback_lock);
1958 static inline void sock_graft(struct sock *sk, struct socket *parent)
1960 WARN_ON(parent->sk);
1961 write_lock_bh(&sk->sk_callback_lock);
1962 rcu_assign_pointer(sk->sk_wq, &parent->wq);
1963 parent->sk = sk;
1964 sk_set_socket(sk, parent);
1965 sk->sk_uid = SOCK_INODE(parent)->i_uid;
1966 security_sock_graft(sk, parent);
1967 write_unlock_bh(&sk->sk_callback_lock);
1970 kuid_t sock_i_uid(struct sock *sk);
1971 unsigned long __sock_i_ino(struct sock *sk);
1972 unsigned long sock_i_ino(struct sock *sk);
1974 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
1976 return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
1986 static inline void sk_set_txhash(struct sock *sk)
1989 WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
1992 static inline bool sk_rethink_txhash(struct sock *sk)
1994 if (sk->sk_txhash) {
1995 sk_set_txhash(sk);
2002 __sk_dst_get(struct sock *sk)
2004 return rcu_dereference_check(sk->sk_dst_cache,
2005 lockdep_sock_is_held(sk));
2009 sk_dst_get(struct sock *sk)
2014 dst = rcu_dereference(sk->sk_dst_cache);
2021 static inline void __dst_negative_advice(struct sock *sk)
2023 struct dst_entry *dst = __sk_dst_get(sk);
2026 dst->ops->negative_advice(sk, dst);
2029 static inline void dst_negative_advice(struct sock *sk)
2031 sk_rethink_txhash(sk);
2032 __dst_negative_advice(sk);
2036 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
2040 sk_tx_queue_clear(sk);
2041 WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
2042 old_dst = rcu_dereference_protected(sk->sk_dst_cache,
2043 lockdep_sock_is_held(sk));
2044 rcu_assign_pointer(sk->sk_dst_cache, dst);
2049 sk_dst_set(struct sock *sk, struct dst_entry *dst)
2053 sk_tx_queue_clear(sk);
2054 WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
2055 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
2060 __sk_dst_reset(struct sock *sk)
2062 __sk_dst_set(sk, NULL);
2066 sk_dst_reset(struct sock *sk)
2068 sk_dst_set(sk, NULL);
2071 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
2073 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
2075 static inline void sk_dst_confirm(struct sock *sk)
2077 if (!READ_ONCE(sk->sk_dst_pending_confirm))
2078 WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
2084 struct sock *sk = skb->sk;
2090 if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
2091 WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
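
A sketch of how an output path consults the cached route: sk_dst_check() returns NULL once the entry is stale (cookie mismatch or obsolete dst), after which protocol code would perform a fresh lookup and cache it with sk_dst_set():

#include <net/sock.h>

static struct dst_entry *tx_dst_sketch(struct sock *sk, u32 cookie)
{
        struct dst_entry *dst = sk_dst_check(sk, cookie);

        if (dst)
                return dst;             /* cached route still valid */

        /* A protocol-specific route lookup would go here, followed by
         * sk_dst_set(sk, new_dst); this sketch only reports the miss. */
        return NULL;
}
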
2095 bool sk_mc_loop(struct sock *sk);
2097 static inline bool sk_can_gso(const struct sock *sk)
2099 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
2102 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
2104 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
2106 sk->sk_route_nocaps |= flags;
2107 sk->sk_route_caps &= ~flags;
2110 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
2119 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
2128 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
2133 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
2141 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
2148 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
2156 sk_wmem_queued_add(sk, copy);
2157 sk_mem_charge(sk, copy);
2163 * @sk: socket
2167 static inline int sk_wmem_alloc_get(const struct sock *sk)
2169 return refcount_read(&sk->sk_wmem_alloc) - 1;
2174 * @sk: socket
2178 static inline int sk_rmem_alloc_get(const struct sock *sk)
2180 return atomic_read(&sk->sk_rmem_alloc);
2185 * @sk: socket
2189 static inline bool sk_has_allocations(const struct sock *sk)
2191 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
2213 * wq = rcu_dereference(sk->sk_wq);
2252 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
2255 u32 txhash = READ_ONCE(sk->sk_txhash);
2263 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
2273 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
2276 skb->sk = sk;
2278 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2279 sk_mem_charge(sk, skb->truesize);
2282 static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
2284 if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
2287 skb->sk = sk;
2293 static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
2295 skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
2297 if (sk_rmem_schedule(sk, skb, skb->truesize)) {
2298 skb_set_owner_r(skb, sk);
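
A sketch of charging a received skb to the socket before queueing it, which is the core of what sock_queue_rcv_skb() does once filtering and the rcvbuf check are stripped away:

#include <net/sock.h>

static int queue_rcv_sketch(struct sock *sk, struct sk_buff *skb)
{
        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return -ENOBUFS;                 /* no receive quota left */

        skb_set_owner_r(skb, sk);                /* charge sk_rmem_alloc */
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk);                   /* wake readers */
        return 0;
}
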
2306 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2309 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2311 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
2313 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
2315 void (*destructor)(struct sock *sk,
2317 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2318 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2320 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2321 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2327 static inline int sock_error(struct sock *sk)
2334 if (likely(data_race(!sk->sk_err)))
2337 err = xchg(&sk->sk_err, 0);
2341 static inline unsigned long sock_wspace(struct sock *sk)
2345 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
2346 amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
2354 * We use sk->sk_wq_raw, from contexts knowing this
2357 static inline void sk_set_bit(int nr, struct sock *sk)
2360 !sock_flag(sk, SOCK_FASYNC))
2363 set_bit(nr, &sk->sk_wq_raw->flags);
2366 static inline void sk_clear_bit(int nr, struct sock *sk)
2369 !sock_flag(sk, SOCK_FASYNC))
2372 clear_bit(nr, &sk->sk_wq_raw->flags);
2375 static inline void sk_wake_async(const struct sock *sk, int how, int band)
2377 if (sock_flag(sk, SOCK_FASYNC)) {
2379 sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
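
A sketch of a custom sk_write_space callback in the spirit of the default sock_def_write_space(): once the send buffer is at most half full (sock_writeable()), wake pollers and deliver SIGIO via sk_wake_async(). It would be installed by assigning sk->sk_write_space:

#include <net/sock.h>

static void my_write_space_sketch(struct sock *sk)
{
        struct socket_wq *wq;

        if (!sock_writeable(sk))
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}
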
2394 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
2398 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
2401 val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
2403 WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
2406 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
2411 * @sk: socket
2426 static inline struct page_frag *sk_page_frag(struct sock *sk)
2428 if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
2432 return &sk->sk_frag;
2435 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
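
A sketch of the copy path these page-fragment helpers support: refill the per-socket (or per-task) fragment, then copy up to the remaining space from the caller's iterator:

#include <linux/uio.h>
#include <net/sock.h>

static int copy_to_frag_sketch(struct sock *sk, struct iov_iter *from, int copy)
{
        struct page_frag *pfrag = sk_page_frag(sk);

        if (!sk_page_frag_refill(sk, pfrag))
                return -ENOMEM;                 /* no page available */

        copy = min_t(int, copy, pfrag->size - pfrag->offset);
        if (copy_page_from_iter(pfrag->page, pfrag->offset, copy, from) != copy)
                return -EFAULT;

        pfrag->offset += copy;
        return copy;                            /* bytes actually copied */
}
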
2440 static inline bool sock_writeable(const struct sock *sk)
2442 return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
2450 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
2452 return noblock ? 0 : sk->sk_rcvtimeo;
2455 static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
2457 return noblock ? 0 : sk->sk_sndtimeo;
2460 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
2462 int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
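
These timeout helpers feed wait loops like the sketch below, roughly what a recvmsg path does before giving up with no data (the socket lock is assumed held and queue locking is simplified):

#include <net/sock.h>

static struct sk_buff *wait_for_skb_sketch(struct sock *sk, int flags)
{
        long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
        struct sk_buff *skb;

        while (!(skb = skb_peek(&sk->sk_receive_queue))) {
                if (READ_ONCE(sk->sk_err) || !timeo)
                        return NULL;            /* pending error or would block */
                sk_wait_data(sk, &timeo, NULL); /* sleeps until data or timeout */
        }
        return skb;
}
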
2493 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
2495 SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
2496 atomic_read(&sk->sk_drops) : 0;
2499 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
2503 atomic_add(segs, &sk->sk_drops);
2506 static inline ktime_t sock_read_timestamp(struct sock *sk)
2513 seq = read_seqbegin(&sk->sk_stamp_seq);
2514 kt = sk->sk_stamp;
2515 } while (read_seqretry(&sk->sk_stamp_seq, seq));
2519 return READ_ONCE(sk->sk_stamp);
2523 static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
2526 write_seqlock(&sk->sk_stamp_seq);
2527 sk->sk_stamp = kt;
2528 write_sequnlock(&sk->sk_stamp_seq);
2530 WRITE_ONCE(sk->sk_stamp, kt);
2534 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2536 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2540 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2551 if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2552 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2553 (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
2555 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
2556 __sock_recv_timestamp(msg, sk, skb);
2558 sock_write_timestamp(sk, kt);
2560 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
2561 __sock_recv_wifi_status(msg, sk, skb);
2564 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2568 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2576 if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
2577 __sock_recv_ts_and_drops(msg, sk, skb);
2578 else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
2579 sock_write_timestamp(sk, skb->tstamp);
2580 else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
2581 sock_write_timestamp(sk, 0);
2588 * @sk: socket sending this packet
2595 static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2602 *tskey = sk->sk_tskey++;
2604 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
2608 static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2611 _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
2616 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
2623 * @sk: socket to eat this skb from
2629 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
2631 __skb_unlink(skb, &sk->sk_receive_queue);
2633 !sk->sk_rx_skb_cache) {
2634 sk->sk_rx_skb_cache = skb;
2642 struct net *sock_net(const struct sock *sk)
2644 return read_pnet(&sk->sk_net);
2648 void sock_net_set(struct sock *sk, struct net *net)
2650 write_pnet(&sk->sk_net, net);
2666 static inline bool sk_fullsock(const struct sock *sk)
2668 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
2672 sk_is_refcounted(struct sock *sk)
2674 /* Only full sockets have sk->sk_flags. */
2675 return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
2686 if (skb->sk) {
2687 struct sock *sk = skb->sk;
2691 *refcounted = sk_is_refcounted(sk);
2693 skb->sk = NULL;
2694 return sk;
2708 struct sock *sk = skb->sk;
2710 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
2711 skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2727 static inline bool sk_listener(const struct sock *sk)
2729 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2732 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
2733 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2736 bool sk_ns_capable(const struct sock *sk,
2738 bool sk_capable(const struct sock *sk, int cap);
2739 bool sk_net_capable(const struct sock *sk, int cap);
2741 void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2765 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
2769 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
2774 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
2778 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
2787 static inline void sk_pacing_shift_update(struct sock *sk, int val)
2789 if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
2791 WRITE_ONCE(sk->sk_pacing_shift, val);
2799 static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
2803 if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
2806 mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
2807 if (mdif && mdif == sk->sk_bound_dev_if)
2813 void sock_def_readable(struct sock *sk);
2815 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
2816 void sock_enable_timestamps(struct sock *sk);
2817 void sock_no_linger(struct sock *sk);
2818 void sock_set_keepalive(struct sock *sk);
2819 void sock_set_priority(struct sock *sk, u32 priority);
2820 void sock_set_rcvbuf(struct sock *sk, int val);
2821 void sock_set_mark(struct sock *sk, u32 val);
2822 void sock_set_reuseaddr(struct sock *sk);
2823 void sock_set_reuseport(struct sock *sk);
2824 void sock_set_sndtimeo(struct sock *sk, s64 secs);
2826 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
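
A sketch of configuring an in-kernel socket with the setter helpers above (they replace the old kernel_setsockopt() calls); srv is a hypothetical struct socket created elsewhere:

#include <linux/net.h>
#include <net/sock.h>

static void tune_kernel_sock_sketch(struct socket *srv)
{
        struct sock *sk = srv->sk;

        sock_set_reuseaddr(sk);          /* SO_REUSEADDR */
        sock_set_keepalive(sk);          /* SO_KEEPALIVE */
        sock_no_linger(sk);              /* close without lingering */
        sock_set_sndtimeo(sk, 5);        /* send timeout, in seconds */
        sock_set_rcvbuf(sk, 1 << 20);    /* receive buffer size (cf. SO_RCVBUF) */
}
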