/kernel/linux/linux-5.10/net/ipv4/

tcp_offload.c
    15  unsigned int seq, unsigned int mss)    in tcp_gso_tstamp()
    18  if (before(ts_seq, seq + mss)) {    in tcp_gso_tstamp()
    25  seq += mss;    in tcp_gso_tstamp()
    64  unsigned int mss;    in tcp_gso_segment() local
    80  mss = skb_shinfo(skb)->gso_size;    in tcp_gso_segment()
    81  if (unlikely(skb->len <= mss))    in tcp_gso_segment()
    87  skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);    in tcp_gso_segment()
    107 * cases return a GSO skb. So update the mss now.    in tcp_gso_segment()
    110 mss *= skb_shinfo(segs)->gso_segs;    in tcp_gso_segment()
    112 delta = htonl(oldlen + (thlen + mss));    in tcp_gso_segment()
    14  tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, unsigned int seq, unsigned int mss)    tcp_gso_tstamp() argument
    189 unsigned int mss = 1;    tcp_gro_receive() local
    [all...]
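The tcp_gso_segment() hits show the arithmetic at the core of TCP GSO: gso_size holds the per-segment MSS, gso_segs is the payload length divided by that MSS rounded up, and the later checksum fixup rescales the MSS by the segment count. A minimal standalone sketch of the segment-count arithmetic (plain user-space C; the payload and MSS values are made up and this is not kernel code):

    #include <stdio.h>

    /* Same rounding as the kernel's DIV_ROUND_UP() macro. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int payload = 14600;   /* hypothetical payload of one GSO skb */
        unsigned int mss = 1448;        /* hypothetical gso_size (per-segment MSS) */
        unsigned int gso_segs = DIV_ROUND_UP(payload, mss);
        unsigned int last_seg = payload - (gso_segs - 1) * mss;

        printf("payload=%u mss=%u -> %u segments, last segment %u bytes\n",
               payload, mss, gso_segs, last_seg);
        return 0;
    }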
tcp_recovery.c
    228 u32 mss;    in tcp_newreno_mark_lost() local
    233 mss = tcp_skb_mss(skb);    in tcp_newreno_mark_lost()
    234 if (tcp_skb_pcount(skb) > 1 && skb->len > mss)    in tcp_newreno_mark_lost()
    236 mss, mss, GFP_ATOMIC);    in tcp_newreno_mark_lost()

udp_offload.c
    249 unsigned int mss = skb_shinfo(skb)->gso_size;    in __udp_gso_segment_list() local
    255 udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);    in __udp_gso_segment_list()
    267 unsigned int mss;    in __udp_gso_segment() local
    275 mss = skb_shinfo(gso_skb)->gso_size;    in __udp_gso_segment()
    276 if (gso_skb->len <= sizeof(*uh) + mss)    in __udp_gso_segment()
    295 * cases return a GSO skb. So update the mss now.    in __udp_gso_segment()
    298 mss *= skb_shinfo(segs)->gso_segs;    in __udp_gso_segment()
    309 newlen = htons(sizeof(*uh) + mss);    in __udp_gso_segment()
    368 unsigned int mss;    in udp4_ufo_fragment() local
    389 mss    in udp4_ufo_fragment()
    [all...]
syncookies.c
    138 * .. in the 1300-1349 range account for about 15 to 20% of observed mss values
    141 * 1460 is the single most frequently announced mss value (30 to 46% depending
    152 * Generate a syncookie. mssp points to the mss, which is returned
    159 const __u16 mss = *mssp;    in __cookie_v4_init_sequence() local
    162 if (mss >= msstab[mssind])    in __cookie_v4_init_sequence()
    182 * Return the decoded mss if it is, or 0 if not.
    332 int full_space, mss;    in cookie_v4_check() local
    345 mss = __cookie_v4_check(ip_hdr(skb), th, cookie);    in cookie_v4_check()
    346 if (mss == 0) {    in cookie_v4_check()
    379 req->mss    in cookie_v4_check()
    [all...]
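A SYN cookie has almost no spare bits, so __cookie_v4_init_sequence() does not store the peer's MSS verbatim: it walks a small msstab[] for the largest entry not exceeding the advertised value and encodes only that index, and __cookie_v4_check() later turns the index back into a conservative MSS (returning 0 when the cookie is invalid). A standalone sketch of that encode/decode step; the four table values mirror the IPv4 table in these kernels but treat them as illustrative:

    #include <stdio.h>

    /* Illustrative copy of the IPv4 syncookie MSS table. */
    static const unsigned short msstab[] = { 536, 1300, 1440, 1460 };
    #define MSSTAB_LEN (sizeof(msstab) / sizeof(msstab[0]))

    /* Largest table entry <= the advertised MSS; index 0 is the fallback. */
    static unsigned int mss_to_index(unsigned short mss)
    {
        unsigned int i;

        for (i = MSSTAB_LEN - 1; i > 0; i--)
            if (mss >= msstab[i])
                break;
        return i;
    }

    int main(void)
    {
        unsigned short advertised = 1400;   /* hypothetical MSS from the SYN */
        unsigned int idx = mss_to_index(advertised);

        printf("advertised %u -> index %u -> decoded %u\n",
               advertised, idx, msstab[idx]);   /* 1400 -> index 1 -> 1300 */
        return 0;
    }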
/kernel/linux/linux-5.10/fs/proc/

task_mmu.c
    428 static void smaps_page_accumulate(struct mem_size_stats *mss,    in smaps_page_accumulate() argument
    432 mss->pss += pss;    in smaps_page_accumulate()
    435 mss->pss_anon += pss;    in smaps_page_accumulate()
    437 mss->pss_shmem += pss;    in smaps_page_accumulate()
    439 mss->pss_file += pss;    in smaps_page_accumulate()
    442 mss->pss_locked += pss;    in smaps_page_accumulate()
    446 mss->private_dirty += size;    in smaps_page_accumulate()
    448 mss->shared_dirty += size;    in smaps_page_accumulate()
    451 mss->private_clean += size;    in smaps_page_accumulate()
    453 mss    in smaps_page_accumulate()
    457 smaps_account(struct mem_size_stats *mss, struct page *page, bool compound, bool young, bool dirty, bool locked, bool migration)    smaps_account() argument
    513 struct mem_size_stats *mss = walk->private;    smaps_pte_hole() local
    527 struct mem_size_stats *mss = walk->private;    smaps_pte_entry() local
    577 struct mem_size_stats *mss = walk->private;    smaps_pmd_entry() local
    727 struct mem_size_stats *mss = walk->private;    smaps_hugetlb_range() local
    770 smap_gather_stats(struct vm_area_struct *vma, struct mem_size_stats *mss, unsigned long start)    smap_gather_stats() argument
    815 __show_smap(struct seq_file *m, const struct mem_size_stats *mss, bool rollup_mode)    __show_smap() argument
    856 struct mem_size_stats mss;    show_smap() local
    884 struct mem_size_stats mss;    show_smaps_rollup() local
    [all...]
/kernel/linux/linux-6.6/fs/proc/

task_mmu.c
    424 static void smaps_page_accumulate(struct mem_size_stats *mss,    in smaps_page_accumulate() argument
    428 mss->pss += pss;    in smaps_page_accumulate()
    431 mss->pss_anon += pss;    in smaps_page_accumulate()
    433 mss->pss_shmem += pss;    in smaps_page_accumulate()
    435 mss->pss_file += pss;    in smaps_page_accumulate()
    438 mss->pss_locked += pss;    in smaps_page_accumulate()
    441 mss->pss_dirty += pss;    in smaps_page_accumulate()
    443 mss->private_dirty += size;    in smaps_page_accumulate()
    445 mss->shared_dirty += size;    in smaps_page_accumulate()
    448 mss    in smaps_page_accumulate()
    454 smaps_account(struct mem_size_stats *mss, struct page *page, bool compound, bool young, bool dirty, bool locked, bool migration)    smaps_account() argument
    513 struct mem_size_stats *mss = walk->private;    smaps_pte_hole() local
    539 struct mem_size_stats *mss = walk->private;    smaps_pte_entry() local
    586 struct mem_size_stats *mss = walk->private;    smaps_pmd_entry() local
    738 struct mem_size_stats *mss = walk->private;    smaps_hugetlb_range() local
    782 smap_gather_stats(struct vm_area_struct *vma, struct mem_size_stats *mss, unsigned long start)    smap_gather_stats() argument
    823 __show_smap(struct seq_file *m, const struct mem_size_stats *mss, bool rollup_mode)    __show_smap() argument
    866 struct mem_size_stats mss;    show_smap() local
    894 struct mem_size_stats mss;    show_smaps_rollup() local
    [all...]
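In task_mmu.c the identifier mss is not a segment size at all; it is the struct mem_size_stats accumulator behind /proc/<pid>/smaps. Each mapped page adds its full size to RSS but only size divided by its map count to PSS, kept in fixed point (shifted left by PSS_SHIFT) so the per-page division does not lose precision until the final report. A standalone sketch of that accumulation rule; the shift value follows the kernel, the two sample pages are made up:

    #include <stdio.h>

    #define PSS_SHIFT 12   /* fixed-point shift used by fs/proc/task_mmu.c */

    struct mem_size_stats {
        unsigned long long resident;
        unsigned long long pss;        /* fixed point, << PSS_SHIFT */
    };

    /* Account one page of `size` bytes mapped by `mapcount` processes. */
    static void account_page(struct mem_size_stats *mss,
                             unsigned long size, int mapcount)
    {
        unsigned long long pss = (unsigned long long)size << PSS_SHIFT;

        mss->resident += size;
        if (mapcount > 1)
            pss /= mapcount;           /* proportional share of a shared page */
        mss->pss += pss;
    }

    int main(void)
    {
        struct mem_size_stats mss = { 0, 0 };

        account_page(&mss, 4096, 1);   /* private page: full 4 KiB of PSS */
        account_page(&mss, 4096, 4);   /* shared by four: only 1 KiB of PSS */

        printf("Rss: %llu kB, Pss: %llu kB\n",
               mss.resident >> 10, (mss.pss >> PSS_SHIFT) >> 10);
        return 0;
    }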
/kernel/linux/linux-6.6/net/ipv4/

tcp_offload.c
    17  unsigned int seq, unsigned int mss)    in tcp_gso_tstamp()
    20  if (before(ts_seq, seq + mss)) {    in tcp_gso_tstamp()
    27  seq += mss;    in tcp_gso_tstamp()
    65  unsigned int mss;    in tcp_gso_segment() local
    82  mss = skb_shinfo(skb)->gso_size;    in tcp_gso_segment()
    83  if (unlikely(skb->len <= mss))    in tcp_gso_segment()
    89  skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);    in tcp_gso_segment()
    109 * cases return a GSO skb. So update the mss now.    in tcp_gso_segment()
    112 mss *= skb_shinfo(segs)->gso_segs;    in tcp_gso_segment()
    114 delta = (__force __wsum)htonl(oldlen + thlen + mss);    in tcp_gso_segment()
    16  tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, unsigned int seq, unsigned int mss)    tcp_gso_tstamp() argument
    190 unsigned int mss = 1;    tcp_gro_receive() local
    [all...]

tcp_recovery.c
    225 u32 mss;    in tcp_newreno_mark_lost() local
    230 mss = tcp_skb_mss(skb);    in tcp_newreno_mark_lost()
    231 if (tcp_skb_pcount(skb) > 1 && skb->len > mss)    in tcp_newreno_mark_lost()
    233 mss, mss, GFP_ATOMIC);    in tcp_newreno_mark_lost()

syncookies.c
    136 * .. in the 1300-1349 range account for about 15 to 20% of observed mss values
    139 * 1460 is the single most frequently announced mss value (30 to 46% depending
    150 * Generate a syncookie. mssp points to the mss, which is returned
    157 const __u16 mss = *mssp;    in __cookie_v4_init_sequence() local
    160 if (mss >= msstab[mssind])    in __cookie_v4_init_sequence()
    180 * Return the decoded mss if it is, or 0 if not.
    330 int full_space, mss;    in cookie_v4_check() local
    343 mss = __cookie_v4_check(ip_hdr(skb), th, cookie);    in cookie_v4_check()
    344 if (mss == 0) {    in cookie_v4_check()
    377 req->mss    in cookie_v4_check()
    [all...]
/kernel/linux/common_modules/newip/third_party/linux-5.10/net/newip/

tcp_nip_output.c
    72   u16 mss; /* If it is zero, the MSS option is disabled */    member
    89   /* Calculate base mss without TCP options: It is MMS_S - sizeof(tcphdr) of rfc1122 */    in __tcp_nip_mtu_to_mss()
    174  int mss = tcp_nip_current_mss(sk); /* TCP_BASE_MSS */    in __nip_tcp_select_window() local
    180  if (unlikely(mss > full_space)) {    in __nip_tcp_select_window()
    181  mss = full_space;    in __nip_tcp_select_window()
    182  if (mss <= 0)    in __nip_tcp_select_window()
    191  if (free_space < (allowed_space >> TCP_NUM_4) || free_space < mss) {    in __nip_tcp_select_window()
    192  nip_dbg("rcv_wnd is 0, [allowed|full|free]space=[%u, %u, %u], mss=%u",    in __nip_tcp_select_window()
    193  allowed_space, full_space, free_space, mss);    in __nip_tcp_select_window()
    216  * 1<<rcv_wscale > mss    in __nip_tcp_select_window()
    382  int mss = tp->advmss;    tcp_nip_advertise_mss() local
    723  tcp_nip_synack_options(struct request_sock *req, unsigned int mss, struct sk_buff *skb, struct tcp_nip_out_options *opts, const struct tcp_md5sig_key *md5, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type)    tcp_nip_synack_options() argument
    751  int mss;    get_nip_mss() local
    795  int mss;    tcp_nip_make_synack() local
    1301 unsigned int mss = tcp_nip_current_mss(sk);    tcp_nip_write_wakeup() local
    [all...]
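The __tcp_nip_mtu_to_mss() hit restates the RFC 1122 relationship its comment cites: the base MSS is the link MTU minus the network-layer header and the fixed TCP header, before TCP options shrink it further. A standalone sketch with a conventional IPv4 Ethernet example (the 20-byte header sizes are hardcoded for illustration; NewIP's own header length differs):

    #include <stdio.h>

    /* Base MSS = MTU - network header - fixed TCP header (no TCP options). */
    static int mtu_to_mss(int mtu, int net_hdr_len, int tcp_hdr_len)
    {
        return mtu - net_hdr_len - tcp_hdr_len;
    }

    int main(void)
    {
        int mtu = 1500;                        /* typical Ethernet MTU */
        int mss = mtu_to_mss(mtu, 20, 20);     /* IPv4 + TCP headers */

        printf("MTU %d -> base MSS %d\n", mtu, mss);   /* prints 1460 */
        return 0;
    }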
/kernel/linux/linux-6.6/drivers/pmdomain/qcom/

rpmhpd.c
    152 static struct rpmhpd mss = {    variable
    153 .pd = { .name = "mss", },
    154 .res_name = "mss.lvl",
    254 [SDM670_MSS] = &mss,
    272 [SDM845_MSS] = &mss,
    285 [SDX55_MSS] = &mss,
    298 [SDX65_MSS] = &mss,
    313 [RPMHPD_MSS] = &mss,
    330 [SM6350_MSS] = &mss,
    349 [SM8150_MSS] = &mss,
    [all...]
/kernel/linux/linux-5.10/net/ipv6/

syncookies.c
    96  const __u16 mss = *mssp;    in __cookie_v6_init_sequence() local
    99  if (mss >= msstab[mssind])    in __cookie_v6_init_sequence()
    139 int full_space, mss;    in cookie_v6_check() local
    151 mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);    in cookie_v6_check()
    152 if (mss == 0) {    in cookie_v6_check()
    183 req->mss = mss;    in cookie_v6_check()
    253 tcp_select_initial_window(sk, full_space, req->mss,    in cookie_v6_check()

/kernel/linux/linux-6.6/net/ipv6/

syncookies.c
    96  const __u16 mss = *mssp;    in __cookie_v6_init_sequence() local
    99  if (mss >= msstab[mssind])    in __cookie_v6_init_sequence()
    139 int full_space, mss;    in cookie_v6_check() local
    151 mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);    in cookie_v6_check()
    152 if (mss == 0) {    in cookie_v6_check()
    183 req->mss = mss;    in cookie_v6_check()
    253 tcp_select_initial_window(sk, full_space, req->mss,    in cookie_v6_check()
/kernel/linux/linux-5.10/drivers/net/ethernet/cisco/enic/

wq_enet_desc.h
    54  u64 address, u16 length, u16 mss, u16 header_length,    in wq_enet_desc_enc()
    60  desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<    in wq_enet_desc_enc()
    73  u64 *address, u16 *length, u16 *mss, u16 *header_length,    in wq_enet_desc_dec()
    79  *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &    in wq_enet_desc_dec()
    53  wq_enet_desc_enc(struct wq_enet_desc *desc, u64 address, u16 length, u16 mss, u16 header_length, u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)    wq_enet_desc_enc() argument
    72  wq_enet_desc_dec(struct wq_enet_desc *desc, u64 *address, u16 *length, u16 *mss, u16 *header_length, u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)    wq_enet_desc_dec() argument

/kernel/linux/linux-5.10/drivers/scsi/snic/

wq_enet_desc.h
    52  u64 address, u16 length, u16 mss, u16 header_length,    in wq_enet_desc_enc()
    58  desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<    in wq_enet_desc_enc()
    71  u64 *address, u16 *length, u16 *mss, u16 *header_length,    in wq_enet_desc_dec()
    77  *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &    in wq_enet_desc_dec()
    51  wq_enet_desc_enc(struct wq_enet_desc *desc, u64 address, u16 length, u16 mss, u16 header_length, u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)    wq_enet_desc_enc() argument
    70  wq_enet_desc_dec(struct wq_enet_desc *desc, u64 *address, u16 *length, u16 *mss, u16 *header_length, u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)    wq_enet_desc_dec() argument

/kernel/linux/linux-5.10/drivers/scsi/fnic/

wq_enet_desc.h
    52  u64 address, u16 length, u16 mss, u16 header_length,    in wq_enet_desc_enc()
    58  desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<    in wq_enet_desc_enc()
    71  u64 *address, u16 *length, u16 *mss, u16 *header_length,    in wq_enet_desc_dec()
    77  *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &    in wq_enet_desc_dec()
    51  wq_enet_desc_enc(struct wq_enet_desc *desc, u64 address, u16 length, u16 mss, u16 header_length, u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)    wq_enet_desc_enc() argument
    70  wq_enet_desc_dec(struct wq_enet_desc *desc, u64 *address, u16 *length, u16 *mss, u16 *header_length, u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)    wq_enet_desc_dec() argument

/kernel/linux/linux-6.6/drivers/net/ethernet/cisco/enic/

wq_enet_desc.h
    41  u64 address, u16 length, u16 mss, u16 header_length,    in wq_enet_desc_enc()
    47  desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<    in wq_enet_desc_enc()
    60  u64 *address, u16 *length, u16 *mss, u16 *header_length,    in wq_enet_desc_dec()
    66  *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &    in wq_enet_desc_dec()
    40  wq_enet_desc_enc(struct wq_enet_desc *desc, u64 address, u16 length, u16 mss, u16 header_length, u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)    wq_enet_desc_enc() argument
    59  wq_enet_desc_dec(struct wq_enet_desc *desc, u64 *address, u16 *length, u16 *mss, u16 *header_length, u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)    wq_enet_desc_dec() argument

/kernel/linux/linux-6.6/drivers/scsi/fnic/

wq_enet_desc.h
    40  u64 address, u16 length, u16 mss, u16 header_length,    in wq_enet_desc_enc()
    46  desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<    in wq_enet_desc_enc()
    59  u64 *address, u16 *length, u16 *mss, u16 *header_length,    in wq_enet_desc_dec()
    65  *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &    in wq_enet_desc_dec()
    39  wq_enet_desc_enc(struct wq_enet_desc *desc, u64 address, u16 length, u16 mss, u16 header_length, u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)    wq_enet_desc_enc() argument
    58  wq_enet_desc_dec(struct wq_enet_desc *desc, u64 *address, u16 *length, u16 *mss, u16 *header_length, u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)    wq_enet_desc_dec() argument

/kernel/linux/linux-6.6/drivers/scsi/snic/

wq_enet_desc.h
    38  u64 address, u16 length, u16 mss, u16 header_length,    in wq_enet_desc_enc()
    44  desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<    in wq_enet_desc_enc()
    57  u64 *address, u16 *length, u16 *mss, u16 *header_length,    in wq_enet_desc_dec()
    63  *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &    in wq_enet_desc_dec()
    37  wq_enet_desc_enc(struct wq_enet_desc *desc, u64 address, u16 length, u16 mss, u16 header_length, u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)    wq_enet_desc_enc() argument
    56  wq_enet_desc_dec(struct wq_enet_desc *desc, u64 *address, u16 *length, u16 *mss, u16 *header_length, u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)    wq_enet_desc_dec() argument
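In these Cisco VIC descriptor headers the MSS is not a variable but a bit-field: wq_enet_desc_enc() masks and shifts it into the little-endian mss_loopback word of the work-queue descriptor alongside the loopback flag, and wq_enet_desc_dec() reverses the operation. A standalone sketch of that round trip; the 14-bit mask and shift of 2 are assumed to match the WQ_ENET_MSS_* defines in these headers, and the loopback bit and byte-swapping are omitted:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed stand-ins for the WQ_ENET_MSS_* defines. */
    #define MSS_BITS   14
    #define MSS_MASK   ((1u << MSS_BITS) - 1)
    #define MSS_SHIFT  2

    /* Pack the MSS into the 16-bit mss_loopback field. */
    static uint16_t mss_enc(uint16_t mss)
    {
        return (uint16_t)((mss & MSS_MASK) << MSS_SHIFT);
    }

    /* Unpack it again, as wq_enet_desc_dec() does. */
    static uint16_t mss_dec(uint16_t mss_loopback)
    {
        return (uint16_t)((mss_loopback >> MSS_SHIFT) & MSS_MASK);
    }

    int main(void)
    {
        uint16_t mss = 1460;
        uint16_t field = mss_enc(mss);

        printf("mss=%u -> field=0x%04x -> decoded=%u\n",
               mss, field, mss_dec(field));
        return 0;
    }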
/kernel/linux/linux-5.10/drivers/net/ethernet/sfc/

ef100_tx.c
    60  u32 mss;    in ef100_tx_can_tso() local
    69  mss = skb_shinfo(skb)->gso_size;    in ef100_tx_can_tso()
    70  if (unlikely(mss < 4)) {    in ef100_tx_can_tso()
    71  WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss);    in ef100_tx_can_tso()
    85  if (skb->data_len / mss > nic_data->tso_max_frames)    in ef100_tx_can_tso()
    214 u32 mss = skb_shinfo(skb)->gso_size;    in ef100_make_tso_desc() local
    224 ESF_GZ_TX_TSO_MSS, mss,    in ef100_make_tso_desc()
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/progs/

xdp_synproxy_kern.c
    308 static __always_inline void values_get_tcpipopts(__u16 *mss, __u8 *wscale,    in values_get_tcpipopts() argument
    317 *mss = (*value >> 32) & 0xffff;    in values_get_tcpipopts()
    319 *mss = *value & 0xffff;    in values_get_tcpipopts()
    325 *mss = ipv6 ? DEFAULT_MSS6 : DEFAULT_MSS4;    in values_get_tcpipopts()
    480 static __always_inline __u8 tcp_mkoptions(__be32 *buf, __be32 *tsopt, __u16 mss,    in tcp_mkoptions() argument
    485 *buf++ = bpf_htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);    in tcp_mkoptions()
    514 __u16 mss, __u8 wscale)    in tcp_gen_synack()
    530 tcp_header->doff += tcp_mkoptions(tcp_options, tsopt, mss, wscale);    in tcp_gen_synack()
    537 __u16 mss;    in tcpv4_gen_synack() local
    540 values_get_tcpipopts(&mss,    in tcpv4_gen_synack()
    512 tcp_gen_synack(struct tcphdr *tcp_header, __u32 cookie, __be32 *tsopt, __u16 mss, __u8 wscale)    tcp_gen_synack() argument
    560 __u16 mss;    tcpv6_gen_synack() local
    [all...]
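tcp_mkoptions() above emits the MSS option as a single 32-bit word: kind 2, length 4, then the 16-bit MSS, all in network byte order. A standalone sketch of that encoding (the kind and length values are the standard TCP ones; only the MSS value is made up):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_MSS   2   /* option kind */
    #define TCPOLEN_MSS  4   /* option length in bytes */

    int main(void)
    {
        uint16_t mss = 1460;

        /* kind | length | MSS packed into one option word, network order */
        uint32_t opt = htonl(((uint32_t)TCPOPT_MSS << 24) |
                             ((uint32_t)TCPOLEN_MSS << 16) | mss);
        const uint8_t *b = (const uint8_t *)&opt;

        printf("on the wire: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        /* prints: 02 04 05 b4 (0x05b4 == 1460) */
        return 0;
    }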
/kernel/linux/linux-5.10/tools/testing/selftests/net/

tcp_mmap.c
    380 int mss = 0;    in main() local
    411 mss = atoi(optarg);    in main()
    452 if (mss &&    in main()
    454 &mss, sizeof(mss)) == -1) {    in main()
    484 if (mss &&    in main()
    485 setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) == -1) {    in main()
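tcp_mmap.c clamps the segment size given on the command line with the TCP_MAXSEG socket option, which has to be set before the connection is established to influence the MSS that is negotiated. A minimal standalone sketch of the same calls (error handling trimmed; the 1400-byte value is arbitrary):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int mss = 1400;                /* arbitrary example value */
        socklen_t len = sizeof(mss);

        /* Cap the MSS before connect()/listen(), as tcp_mmap.c does. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) == -1)
            perror("setsockopt(TCP_MAXSEG)");

        if (getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, &len) == 0)
            printf("effective MSS cap: %d\n", mss);

        close(fd);
        return 0;
    }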
/kernel/linux/linux-5.10/net/netfilter/

nfnetlink_osf.c
    73  u16 mss = 0;    in nf_osf_match_one() local
    106 mss = ctx->optp[3];    in nf_osf_match_one()
    107 mss <<= 8;    in nf_osf_match_one()
    108 mss |= ctx->optp[2];    in nf_osf_match_one()
    110 mss = ntohs((__force __be16)mss);    in nf_osf_match_one()
    141 if (ctx->window == f->wss.val * mss ||    in nf_osf_match_one()
    147 if (ctx->window == f->wss.val * (mss + 40) ||    in nf_osf_match_one()

/kernel/linux/linux-6.6/net/netfilter/

nfnetlink_osf.c
    73  u16 mss = 0;    in nf_osf_match_one() local
    106 mss = ctx->optp[3];    in nf_osf_match_one()
    107 mss <<= 8;    in nf_osf_match_one()
    108 mss |= ctx->optp[2];    in nf_osf_match_one()
    110 mss = ntohs((__force __be16)mss);    in nf_osf_match_one()
    141 if (ctx->window == f->wss.val * mss ||    in nf_osf_match_one()
    147 if (ctx->window == f->wss.val * (mss + 40) ||    in nf_osf_match_one()
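nf_osf_match_one() recovers the peer's MSS from the raw bytes of the TCP MSS option (kind 2, length 4, two value bytes in network order) and then tests the advertised window against fingerprint rules expressed as a multiple of the MSS, or of MSS + 40 to approximate the MTU (IP plus TCP headers). A standalone sketch of the parse and the two checks; the fingerprint multiplier is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* opt[0] = kind (2), opt[1] = len (4), opt[2..3] = MSS in network order. */
    static uint16_t parse_mss_option(const uint8_t *opt)
    {
        return (uint16_t)((opt[2] << 8) | opt[3]);
    }

    int main(void)
    {
        const uint8_t opt[4] = { 2, 4, 0x05, 0xb4 };   /* MSS = 1460 */
        uint16_t mss = parse_mss_option(opt);
        uint32_t window = 64240;         /* advertised window from the SYN */
        uint16_t wss = 44;               /* hypothetical fingerprint value */

        /* OSF-style checks: window as a multiple of MSS, or of MSS + 40. */
        printf("mss=%u, window==%u*mss: %s, window==%u*(mss+40): %s\n",
               mss, wss, window == (uint32_t)wss * mss ? "yes" : "no",
               wss, window == (uint32_t)wss * (mss + 40) ? "yes" : "no");
        return 0;
    }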
/kernel/linux/linux-6.6/tools/testing/selftests/net/

tcp_mmap.c
    455 int mss = 0;    in main() local
    486 mss = atoi(optarg);    in main()
    530 if (mss &&    in main()
    532 &mss, sizeof(mss)) == -1) {    in main()
    562 if (mss &&    in main()
    563 setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) == -1) {    in main()