/kernel/linux/linux-6.6/net/core/sock_reuseport.c
     22: struct sock_reuseport *reuse, bool bind_inany);
     26: struct sock_reuseport *reuse;   [reuseport_has_conns_set(): local]
     32: reuse = rcu_dereference_protected(sk->sk_reuseport_cb, ...   [reuseport_has_conns_set()]
     34: if (likely(reuse))   [reuseport_has_conns_set()]
     35: reuse->has_conns = 1;   [reuseport_has_conns_set()]
     40: static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse)   [argument]
     43: WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1);   [__reuseport_get_incoming_cpu()]
     46: static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse)   [argument]
     49: WRITE_ONCE(reuse ...   [__reuseport_put_incoming_cpu()]
     52: reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)   [argument]
     58: reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)   [argument]
     66: struct sock_reuseport *reuse;   [reuseport_update_incoming_cpu(): local]
    104: reuseport_sock_index(struct sock *sk, const struct sock_reuseport *reuse, bool closed)   [argument]
    124: __reuseport_add_sock(struct sock *sk, struct sock_reuseport *reuse)   [argument]
    134: __reuseport_detach_sock(struct sock *sk, struct sock_reuseport *reuse)   [argument]
    149: __reuseport_add_closed_sock(struct sock *sk, struct sock_reuseport *reuse)   [argument]
    158: __reuseport_detach_closed_sock(struct sock *sk, struct sock_reuseport *reuse)   [argument]
    178: struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);   [__reuseport_alloc(): local]
    191: struct sock_reuseport *reuse;   [reuseport_alloc(): local]
    247: reuseport_grow(struct sock_reuseport *reuse)   [argument]
    305: struct sock_reuseport *reuse;   [reuseport_free_rcu(): local]
    323: struct sock_reuseport *old_reuse, *reuse;   [reuseport_add_sock(): local]
    369: reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse, struct sock_reuseport *reuse, bool bind_inany)   [argument]
    430: struct sock_reuseport *reuse;   [reuseport_detach_sock(): local]
    466: struct sock_reuseport *reuse;   [reuseport_stop_listen_sock(): local]
    498: run_bpf_filter(struct sock_reuseport *reuse, u16 socks, struct bpf_prog *prog, struct sk_buff *skb, int hdr_len)   [argument]
    528: reuseport_select_sock_by_hash(struct sock_reuseport *reuse, u32 hash, u16 num_socks)   [argument]
    574: struct sock_reuseport *reuse;   [reuseport_select_sock(): local]
    625: struct sock_reuseport *reuse;   [reuseport_migrate_sock(): local]
    686: struct sock_reuseport *reuse;   [reuseport_attach_prog(): local]
    718: struct sock_reuseport *reuse;   [reuseport_detach_prog(): local]
    (remaining matches truncated)

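The functions indexed above manage the kernel side of a SO_REUSEPORT group: reuseport_alloc()/reuseport_add_sock() build the shared struct sock_reuseport behind sk->sk_reuseport_cb when several sockets bind the same address and port, and the incoming_cpu helpers (6.6 only) back CPU-aware member selection. A minimal userspace sketch that creates such a group; the port, protocol and group size are illustrative, not taken from the tree:

/* Build a SO_REUSEPORT group: several UDP sockets bound to the same port.
 * Illustrative sketch; port 7777 and the 4-socket group size are arbitrary. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define GROUP_SIZE 4

int main(void)
{
	int fds[GROUP_SIZE];
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(7777),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	for (int i = 0; i < GROUP_SIZE; i++) {
		int one = 1;

		fds[i] = socket(AF_INET, SOCK_DGRAM, 0);
		if (fds[i] < 0) {
			perror("socket");
			return 1;
		}
		/* Must be set on every member before bind(); the first bind
		 * allocates the kernel's struct sock_reuseport, later binds
		 * join (and may grow) that group. */
		if (setsockopt(fds[i], SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0) {
			perror("setsockopt(SO_REUSEPORT)");
			return 1;
		}
		if (bind(fds[i], (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			perror("bind");
			return 1;
		}
	}

	printf("%d sockets share udp/7777; incoming packets are spread by hash\n",
	       GROUP_SIZE);
	/* ... receive on the fds, typically one per worker thread ... */
	for (int i = 0; i < GROUP_SIZE; i++)
		close(fds[i]);
	return 0;
}

On kernels with the incoming_cpu support seen above, additionally setting SO_INCOMING_CPU on a member makes reuseport_select_sock_by_hash() prefer that socket for packets arriving on the matching CPU.
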
/kernel/linux/linux-5.10/net/core/sock_reuseport.c
     23: struct sock_reuseport *reuse;   [reuseport_has_conns_set(): local]
     29: reuse = rcu_dereference_protected(sk->sk_reuseport_cb, ...   [reuseport_has_conns_set()]
     31: if (likely(reuse))   [reuseport_has_conns_set()]
     32: reuse->has_conns = 1;   [reuseport_has_conns_set()]
     41: struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);   [__reuseport_alloc(): local]
     43: if (!reuse)   [__reuseport_alloc()]
     46: reuse->max_socks = max_socks;   [__reuseport_alloc()]
     48: RCU_INIT_POINTER(reuse->prog, NULL);   [__reuseport_alloc()]
     49: return reuse;   [__reuseport_alloc()]
     54: struct sock_reuseport *reuse;   [reuseport_alloc(): local]
    103: reuseport_grow(struct sock_reuseport *reuse)   [argument]
    140: struct sock_reuseport *reuse;   [reuseport_free_rcu(): local]
    158: struct sock_reuseport *old_reuse, *reuse;   [reuseport_add_sock(): local]
    201: struct sock_reuseport *reuse;   [reuseport_detach_sock(): local]
    233: run_bpf_filter(struct sock_reuseport *reuse, u16 socks, struct bpf_prog *prog, struct sk_buff *skb, int hdr_len)   [argument]
    278: struct sock_reuseport *reuse;   [reuseport_select_sock(): local]
    329: struct sock_reuseport *reuse;   [reuseport_attach_prog(): local]
    357: struct sock_reuseport *reuse;   [reuseport_detach_prog(): local]
    (remaining matches truncated)

/kernel/linux/linux-5.10/include/net/sock_reuseport.h
     19: * reuse->socks[] group.
     43: struct sock_reuseport *reuse;   [reuseport_has_conns(): local]
     47: reuse = rcu_dereference(sk->sk_reuseport_cb);   [reuseport_has_conns()]
     48: if (reuse && reuse->has_conns)   [reuseport_has_conns()]

/kernel/linux/linux-5.10/include/net/tcp.h
    496: struct sock_reuseport *reuse;   [tcp_synq_overflow(): local]
    498: reuse = rcu_dereference(sk->sk_reuseport_cb);   [tcp_synq_overflow()]
    499: if (likely(reuse)) {   [tcp_synq_overflow()]
    500: last_overflow = READ_ONCE(reuse->synq_overflow_ts);   [tcp_synq_overflow()]
    503: WRITE_ONCE(reuse->synq_overflow_ts, now);   [tcp_synq_overflow()]
    520: struct sock_reuseport *reuse;   [tcp_synq_no_recent_overflow(): local]
    522: reuse = rcu_dereference(sk->sk_reuseport_cb);   [tcp_synq_no_recent_overflow()]
    523: if (likely(reuse)) {   [tcp_synq_no_recent_overflow()]
    524: last_overflow = READ_ONCE(reuse->synq_overflow_ts);   [tcp_synq_no_recent_overflow()]

/kernel/linux/linux-6.6/include/net/sock_reuseport.h
     21: * reuse->socks[] group.
     49: struct sock_reuseport *reuse;   [reuseport_has_conns(): local]
     53: reuse = rcu_dereference(sk->sk_reuseport_cb);   [reuseport_has_conns()]
     54: if (reuse && reuse->has_conns)   [reuseport_has_conns()]

/kernel/linux/linux-6.6/include/net/tcp.h
    515: struct sock_reuseport *reuse;   [tcp_synq_overflow(): local]
    517: reuse = rcu_dereference(sk->sk_reuseport_cb);   [tcp_synq_overflow()]
    518: if (likely(reuse)) {   [tcp_synq_overflow()]
    519: last_overflow = READ_ONCE(reuse->synq_overflow_ts);   [tcp_synq_overflow()]
    522: WRITE_ONCE(reuse->synq_overflow_ts, now);   [tcp_synq_overflow()]
    539: struct sock_reuseport *reuse;   [tcp_synq_no_recent_overflow(): local]
    541: reuse = rcu_dereference(sk->sk_reuseport_cb);   [tcp_synq_no_recent_overflow()]
    542: if (likely(reuse)) {   [tcp_synq_no_recent_overflow()]
    543: last_overflow = READ_ONCE(reuse->synq_overflow_ts);   [tcp_synq_no_recent_overflow()]

/kernel/linux/linux-6.6/mm/hugetlb_vmemmap.c
    104: * Because the reuse address is part of the range that we are   [vmemmap_pte_range()]
    105: * walking, skip the reuse address range.   [vmemmap_pte_range()]
    258: * How many struct page structs need to be reset. When we reuse the head
    301: * to the page which @reuse is mapped to, then free vmemmap
    307: * @reuse: reuse address.
    312: unsigned long reuse)   [vmemmap_remap_free()]
    318: .reuse_addr = reuse,   [vmemmap_remap_free()]
    345: * - The range [@start, @end) and the range [@reuse, @reuse ...   [vmemmap_remap_free()]
    311: vmemmap_remap_free(unsigned long start, unsigned long end, unsigned long reuse)   [argument]
    414: vmemmap_remap_alloc(unsigned long start, unsigned long end, unsigned long reuse)   [argument]
    (remaining matches truncated)

/kernel/linux/linux-6.6/mm/sparse-vmemmap.c
    146: struct page *reuse)   [vmemmap_pte_populate()]
    153: if (!reuse) {   [vmemmap_pte_populate()]
    167: get_page(reuse);   [vmemmap_pte_populate()]
    168: p = page_to_virt(reuse);   [vmemmap_pte_populate()]
    247: struct page *reuse)   [vmemmap_populate_address()]
    267: pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);   [vmemmap_populate_address()]
    278: struct page *reuse)   [vmemmap_populate_range()]
    284: pte = vmemmap_populate_address(addr, node, altmap, reuse);   [vmemmap_populate_range()]
    144: vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, struct vmem_altmap *altmap, struct page *reuse)   [argument]
    245: vmemmap_populate_address(unsigned long addr, int node, struct vmem_altmap *altmap, struct page *reuse)   [argument]
    275: vmemmap_populate_range(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap, struct page *reuse)   [argument]

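Both vmemmap files above implement the HugeTLB Vmemmap Optimization: the struct pages describing a huge page's tail pages are identical, so their vmemmap PTEs are remapped read-only onto a single "reuse" page and the now-redundant vmemmap pages are freed. A back-of-the-envelope worked example of the savings, assuming 4 KiB base pages, a 64-byte struct page and that one vmemmap page per huge page is kept as the reuse target (typical x86-64 figures; exact numbers depend on configuration):

/* Back-of-the-envelope HVO savings. Assumes 4 KiB base pages, a 64-byte
 * struct page, and that one vmemmap page per huge page is kept (and reused)
 * while the rest are freed; exact numbers depend on kernel config. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define STRUCT_PAGE_SZ	64UL

static void show(const char *name, unsigned long hugepage_size)
{
	unsigned long base_pages = hugepage_size / PAGE_SIZE;
	unsigned long vmemmap_bytes = base_pages * STRUCT_PAGE_SZ;
	unsigned long vmemmap_pages = vmemmap_bytes / PAGE_SIZE;
	unsigned long freed = vmemmap_pages - 1;	/* one page kept + reused */

	printf("%-6s %4lu vmemmap pages, %4lu freed (%lu KiB saved)\n",
	       name, vmemmap_pages, freed, freed * PAGE_SIZE / 1024);
}

int main(void)
{
	show("2 MiB", 2UL << 20);	/* 8 vmemmap pages, 7 freed */
	show("1 GiB", 1UL << 30);	/* 4096 vmemmap pages, 4095 freed */
	return 0;
}
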
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
     78: int prog_reuseport(struct sk_reuseport_md *reuse)   [argument]
     85: err = bpf_sk_select_reuseport(reuse, &sock_map, &zero, 0);   [prog_reuseport()]
     87: err = bpf_sk_select_reuseport(reuse, &sock_hash, &zero, 0);   [prog_reuseport()]

/kernel/linux/linux-5.10/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
     57: struct bpf_sock_ops *reuse = skops;   [bpf_testcb(): local]
     68: "%[reuse] = *(u32 *)(%[reuse] +96)"   [bpf_testcb()]
     69: : [reuse] "+r"(reuse)   [bpf_testcb()]

/kernel/linux/linux-6.6/tools/testing/selftests/net/bind_bhash.c
     33: int sock_fd, reuse = 1, err;   [bind_socket(): local]
     52: err = setsockopt(sock_fd, SOL_SOCKET, opt, &reuse, sizeof(reuse));   [bind_socket()]

/kernel/linux/linux-6.6/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
    114: int prog_reuseport(struct sk_reuseport_md *reuse)   [argument]
    121: err = bpf_sk_select_reuseport(reuse, &sock_map, &zero, 0);   [prog_reuseport()]
    123: err = bpf_sk_select_reuseport(reuse, &sock_hash, &zero, 0);   [prog_reuseport()]

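prog_reuseport() in both selftest versions calls bpf_sk_select_reuseport() to pick the delivery socket out of a sockmap or sockhash. A minimal standalone sk_reuseport program in the same style; the map definition, names and the fixed key 0 are illustrative, not copied from the selftest:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical SK_REUSEPORT selector: steer every packet to the socket
 * stored at index 0 of a REUSEPORT_SOCKARRAY. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
} target_sock SEC(".maps");

SEC("sk_reuseport")
int select_slot_zero(struct sk_reuseport_md *reuse_md)
{
	__u32 zero = 0;

	/* On success the kernel delivers to the socket stored at key 0;
	 * returning SK_DROP refuses the packet/connection instead. */
	if (bpf_sk_select_reuseport(reuse_md, &target_sock, &zero, 0))
		return SK_DROP;

	return SK_PASS;
}

char _license[] SEC("license") = "GPL";

Userspace attaches such a program to one member of the group with setsockopt(SO_ATTACH_REUSEPORT_EBPF), which lands in reuseport_attach_prog() in sock_reuseport.c above.
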
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
     46: struct bpf_sock_ops *reuse = skops;   [bpf_testcb(): local]
     56: "%[reuse] = *(u32 *)(%[reuse] +96)"   [bpf_testcb()]
     57: : [reuse] "+r"(reuse)   [bpf_testcb()]

/kernel/linux/linux-5.10/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
    362: precvbuf->reuse = true;   [usb_read_port_complete()]
    374: precvbuf->reuse = true;   [usb_read_port_complete()]
    385: precvbuf->reuse = false;   [usb_read_port_complete()]
    412: precvbuf->reuse = true;   [usb_read_port_complete()]
    448: if (!precvbuf->reuse || !precvbuf->pskb) {   [usb_read_port()]
    451: precvbuf->reuse = true;   [usb_read_port()]
    455: if (!precvbuf->reuse || !precvbuf->pskb) {   [usb_read_port()]
    462: } else { /* reuse skb */   [usb_read_port()]
    463: precvbuf->reuse = false;   [usb_read_port()]
    502: precvbuf->reuse ...   [rtw_hal_inirp_deinit()]
    (remaining matches truncated)

/kernel/linux/linux-5.10/drivers/staging/rtl8188eu/os_dep/recv_linux.c
     21: precvbuf->reuse = false;   [rtw_os_recvbuf_resource_alloc()]

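In both rtl8188eu files, reuse appears to act as a "keep this receive buffer for the next URB" flag: the completion handler sets it when the skb was not handed up the stack, and usb_read_port() only allocates a fresh skb when the flag is clear. A generic sketch of that pattern; all names and types below are invented for illustration, not taken from the driver:

/* Generic recv-buffer reuse pattern, loosely modelled on the driver above.
 * Every type and function name here is invented for illustration. */
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct recv_buf {
	void *data;	/* stands in for precvbuf->pskb */
	size_t size;
	bool reuse;	/* buffer survived the last I/O and can be resubmitted */
};

/* Submit a read: allocate only if there is nothing left to reuse. */
int submit_read(struct recv_buf *buf, size_t size)
{
	if (!buf->reuse || !buf->data) {
		free(buf->data);
		buf->data = malloc(size);
		if (!buf->data)
			return -1;
		buf->size = size;
	}
	buf->reuse = false;	/* buffer is now owned by the in-flight I/O */
	/* ... hand buf->data to the transport ... */
	return 0;
}

/* Completion: on error keep the buffer for the next submit_read();
 * on success the payload is consumed and a fresh buffer is needed. */
void read_complete(struct recv_buf *buf, bool ok)
{
	if (!ok) {
		buf->reuse = true;
		return;
	}
	/* ... deliver buf->data to the consumer, which now owns it ... */
	buf->data = NULL;
}
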
/kernel/linux/linux-5.10/kernel/bpf/reuseport_array.c
    229: * it must also be a SO_REUSEPORT sk (i.e. reuse cannot be NULL).   [reuseport_array_update_check()]
    247: * The "osk" and "reuse" are protected by reuseport_lock.
    254: struct sock_reuseport *reuse;   [bpf_fd_reuseport_array_update_elem(): local]
    304: reuse = rcu_dereference_protected(nsk->sk_reuseport_cb, ...   [bpf_fd_reuseport_array_update_elem()]
    306: err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);   [bpf_fd_reuseport_array_update_elem()]

/kernel/linux/linux-6.6/kernel/bpf/reuseport_array.c
    212: * it must also be a SO_REUSEPORT sk (i.e. reuse cannot be NULL).   [reuseport_array_update_check()]
    230: * The "osk" and "reuse" are protected by reuseport_lock.
    237: struct sock_reuseport *reuse;   [bpf_fd_reuseport_array_update_elem(): local]
    287: reuse = rcu_dereference_protected(nsk->sk_reuseport_cb, ...   [bpf_fd_reuseport_array_update_elem()]
    289: err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);   [bpf_fd_reuseport_array_update_elem()]

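bpf_fd_reuseport_array_update_elem() is the kernel path behind storing a SO_REUSEPORT socket fd into a BPF_MAP_TYPE_REUSEPORT_SOCKARRAY from userspace. A hedged libbpf sketch of that userspace side, assuming a libbpf recent enough to provide bpf_map_create(); the port, map name and key are illustrative:

/* Populate a REUSEPORT_SOCKARRAY with one SO_REUSEPORT listener.
 * Sketch only: error handling trimmed, port 8080 and key 0 are arbitrary. */
#include <bpf/bpf.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1, map_fd, sock_fd;
	__u32 key = 0;
	__u64 value;

	map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, "reuse_arr",
				sizeof(__u32), sizeof(__u64), 1, NULL);
	if (map_fd < 0)
		return 1;

	sock_fd = socket(AF_INET, SOCK_STREAM, 0);
	setsockopt(sock_fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	bind(sock_fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(sock_fd, 128);

	/* The kernel-side check (reuseport_array_update_check) rejects
	 * sockets without a reuseport group, hence SO_REUSEPORT before
	 * bind() and listen() before the map update. */
	value = sock_fd;
	if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY)) {
		perror("bpf_map_update_elem");
		return 1;
	}
	printf("listener stored in REUSEPORT_SOCKARRAY slot 0\n");
	return 0;
}
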
/kernel/linux/linux-6.6/drivers/rpmsg/qcom_glink_native.c
     62: * @reuse: To mark if the intent can be reused after first use
     71: bool reuse;   [member]
    262: /* Free all non-reuse intents pending rx_done work */   [qcom_glink_channel_release()]
    264: if (!intent->reuse) {   [qcom_glink_channel_release()]
    537: bool reuse;   [qcom_glink_rx_done_work(): local]
    545: reuse = intent->reuse;   [qcom_glink_rx_done_work()]
    547: cmd.id = reuse ? GLINK_CMD_RX_DONE_W_REUSE : GLINK_CMD_RX_DONE;   [qcom_glink_rx_done_work()]
    552: if (!reuse) {   [qcom_glink_rx_done_work()]
    573: if (!intent->reuse) {   [qcom_glink_rx_done()]
    740: qcom_glink_handle_rx_done(struct qcom_glink *glink, u32 cid, uint32_t iid, bool reuse)   [argument]
    (remaining matches truncated)

/kernel/linux/linux-5.10/drivers/rpmsg/qcom_glink_native.c
     62: * @reuse: To mark if the intent can be reused after first use
     71: bool reuse;   [member]
    257: /* Free all non-reuse intents pending rx_done work */   [qcom_glink_channel_release()]
    259: if (!intent->reuse) {   [qcom_glink_channel_release()]
    491: bool reuse;   [qcom_glink_rx_done_work(): local]
    499: reuse = intent->reuse;   [qcom_glink_rx_done_work()]
    501: cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE;   [qcom_glink_rx_done_work()]
    506: if (!reuse) {   [qcom_glink_rx_done_work()]
    527: if (!intent->reuse) {   [qcom_glink_rx_done()]
    694: qcom_glink_handle_rx_done(struct qcom_glink *glink, u32 cid, uint32_t iid, bool reuse)   [argument]
    (remaining matches truncated)

/kernel/linux/linux-5.10/net/ipv4/inet_connection_sock.c
    139: bool reuse = sk->sk_reuse;   [inet_csk_bind_conflict(): local]
    159: if (reuse && sk2->sk_reuse &&   [inet_csk_bind_conflict()]
    308: bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;   [inet_csk_update_fastreuse(): local]
    311: tb->fastreuse = reuse;   [inet_csk_update_fastreuse()]
    325: if (!reuse)   [inet_csk_update_fastreuse()]
    361: bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;   [inet_csk_get_port(): local]
    396: if ((tb->fastreuse > 0 && reuse) ||   [inet_csk_get_port()]

/kernel/linux/linux-5.10/include/xen/interface/io/pvcalls.h
     57: uint8_t reuse;   [member]

/kernel/linux/linux-6.6/include/xen/interface/io/pvcalls.h
     59: uint8_t reuse;   [member]

/kernel/linux/linux-6.6/drivers/net/ethernet/engleder/tsnep_main.c
   1109: static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)   [argument]
   1121: /* reuse only if no other allocation was successful */   [tsnep_rx_alloc()]
   1122: if (i == 0 && reuse)   [tsnep_rx_alloc()]
   1137: static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)   [argument]
   1141: desc_refilled = tsnep_rx_alloc(rx, count, reuse);   [tsnep_rx_refill()]
   1166: static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)   [argument]
   1182: if (reuse) {   [tsnep_rx_alloc_zc()]
   1207: static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)   [argument]
   1211: desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);   [tsnep_rx_refill_zc()]
   1401: bool reuse ...   [tsnep_rx_poll(): local]
   1502: bool reuse = desc_available >= TSNEP_RING_RX_REUSE;   [tsnep_rx_poll_zc(): local]
    (remaining matches truncated)

/kernel/linux/linux-6.6/arch/powerpc/mm/book3s64/radix_pgtable.c
    952: struct page *reuse)   [radix__vmemmap_pte_populate()]
    960: if (!reuse) {   [radix__vmemmap_pte_populate()]
    984: get_page(reuse);   [radix__vmemmap_pte_populate()]
    985: p = page_to_virt(reuse);   [radix__vmemmap_pte_populate()]
    986: pr_debug("Tail page reuse vmemmap mapping\n");   [radix__vmemmap_pte_populate()]
   1136: struct page *reuse)   [radix__vmemmap_populate_address()]
    949: radix__vmemmap_pte_populate(pmd_t *pmdp, unsigned long addr, int node, struct vmem_altmap *altmap, struct page *reuse)   [argument]
   1134: radix__vmemmap_populate_address(unsigned long addr, int node, struct vmem_altmap *altmap, struct page *reuse)   [argument]

/kernel/linux/linux-6.6/net/ipv4/inet_connection_sock.c
    456: bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;   [inet_csk_update_fastreuse(): local]
    459: tb->fastreuse = reuse;   [inet_csk_update_fastreuse()]
    473: if (!reuse)   [inet_csk_update_fastreuse()]
    510: bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;   [inet_csk_get_port(): local]
    552: (tb->fastreuse > 0 && reuse) ||   [inet_csk_get_port()]

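In inet_connection_sock.c, reuse refers to SO_REUSEADDR rather than SO_REUSEPORT: tb->fastreuse caches whether every owner of a bind bucket was bound with SO_REUSEADDR while not listening, so inet_csk_get_port() can admit a compatible binder without walking all owners. A userspace sketch of the classic case this serves, rebinding a server to a port that may still hold TIME_WAIT sockets from a previous run; the port number is arbitrary:

/* Rebind a listener to a port that may still have sockets in TIME_WAIT
 * from a previous run. Without SO_REUSEADDR the bind() would fail with
 * EADDRINUSE; with it, the bind-conflict check lets us in. */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(9090),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	/* Must be set before bind(). Note the kernel check above: a socket
	 * that is already listening never counts as reusable, so this does
	 * not let two live servers grab the same port. */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0) {
		perror("setsockopt(SO_REUSEADDR)");
		return 1;
	}
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}
	if (listen(fd, 128) < 0) {
		perror("listen");
		return 1;
	}
	printf("listening on tcp/9090\n");
	return 0;
}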