Lines matching refs: mss_now (net/ipv4/tcp_output.c)

63 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1443 static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1445 if (skb->len <= mss_now) {
1452 tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1453 TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
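
The cluster at 1443-1453 is tcp_set_skb_tso_segs(): it derives the segment count (pcount) and GSO size from the payload length. A minimal userspace model of that arithmetic, assuming only that tcp_skb_pcount_set() stores the count and tcp_gso_size the per-segment size; the skb plumbing is replaced by plain out-parameters:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* A short buffer needs no TSO: one segment, gso_size 0.  A longer
     * one is cut into mss_now-sized segments, rounding the final
     * partial segment up. */
    static void set_tso_segs(unsigned int len, unsigned int mss_now,
                             unsigned int *pcount, unsigned int *gso_size)
    {
        if (len <= mss_now) {
            *pcount = 1;
            *gso_size = 0;
        } else {
            *pcount = DIV_ROUND_UP(len, mss_now);
            *gso_size = mss_now;
        }
    }

    int main(void)
    {
        unsigned int pcount, gso;

        set_tso_segs(4000, 1448, &pcount, &gso);
        printf("pcount=%u gso_size=%u\n", pcount, gso); /* pcount=3 gso_size=1448 */
        return 0;
    }
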
1533 unsigned int mss_now, gfp_t gfp)
1600 tcp_set_skb_tso_segs(skb, mss_now);
1601 tcp_set_skb_tso_segs(buff, mss_now);
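
The 1533-1601 hits line up with tcp_fragment(): after one skb is split in two, both halves get fresh segment accounting at 1600-1601 (skb and buff). A self-contained sketch of that post-split fixup, reusing the same rounding rule; split_fixup() and pcount_for() are illustrative names, not kernel API:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int pcount_for(unsigned int len, unsigned int mss_now)
    {
        return len <= mss_now ? 1 : DIV_ROUND_UP(len, mss_now);
    }

    /* Once a buffer of total_len bytes is cut at split_len, both
     * pieces must have pcount recomputed against the MSS now in
     * force, mirroring lines 1600-1601. */
    static void split_fixup(unsigned int total_len, unsigned int split_len,
                            unsigned int mss_now,
                            unsigned int *head_pcount, unsigned int *tail_pcount)
    {
        *head_pcount = pcount_for(split_len, mss_now);             /* skb */
        *tail_pcount = pcount_for(total_len - split_len, mss_now); /* buff */
    }
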
1698 int mss_now;
1703 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1710 mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1714 if (mss_now > tp->rx_opt.mss_clamp)
1715 mss_now = tp->rx_opt.mss_clamp;
1718 mss_now -= icsk->icsk_ext_hdr_len;
1721 mss_now = max(mss_now,
1723 return mss_now;
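
The 1698-1723 cluster is tcp_mtu_to_mss(): strip the network and TCP headers from the path MTU, account for an IPv6 fragment header when one applies, clamp to the peer's advertised mss_clamp, subtract extension-header overhead, and never go below the floor. A standalone model of that arithmetic, assuming the usual option-free header sizes and the kernel's default 48-byte floor (TCP_MIN_SND_MSS):

    #define TCPHDR_LEN  20  /* sizeof(struct tcphdr) without options */
    #define MIN_SND_MSS 48  /* assumed floor: the default TCP_MIN_SND_MSS */

    /* net_hdr_len is 20 for IPv4 or 40 for IPv6; frag_hdr_len is
     * nonzero only when an IPv6 fragment header must be budgeted;
     * ext_hdr_len covers IP options/extension headers. */
    static int mtu_to_mss(int pmtu, int net_hdr_len, int frag_hdr_len,
                          int ext_hdr_len, int mss_clamp)
    {
        int mss_now = pmtu - net_hdr_len - TCPHDR_LEN;

        if (frag_hdr_len)
            mss_now -= frag_hdr_len;       /* line 1710 */
        if (mss_now > mss_clamp)
            mss_now = mss_clamp;           /* lines 1714-1715 */
        mss_now -= ext_hdr_len;            /* line 1718 */
        if (mss_now < MIN_SND_MSS)
            mss_now = MIN_SND_MSS;         /* line 1721 */
        return mss_now;
    }

For a plain 1500-byte IPv4 path with no options, mtu_to_mss(1500, 20, 0, 0, 65535) yields the familiar 1460.
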
1801 int mss_now;
1806 mss_now = tcp_mtu_to_mss(sk, pmtu);
1807 mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1812 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1813 tp->mss_cache = mss_now;
1815 return mss_now;
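
The 1801-1815 cluster is tcp_sync_mss(): whenever the path MTU changes, convert it to an MSS, bound that to half the largest window seen, and, while MTU probing is active, never exceed what the current search_low would allow; the result lands in tp->mss_cache. A compact model with the helper calls reduced to precomputed inputs (mss_from_pmtu stands in for tcp_mtu_to_mss(sk, pmtu), half_wnd_limit for tcp_bound_to_half_wnd(), mss_at_search_low for tcp_mtu_to_mss() at icsk_mtup.search_low):

    static unsigned int sync_mss(unsigned int mss_from_pmtu,
                                 unsigned int half_wnd_limit,
                                 int mtu_probing,
                                 unsigned int mss_at_search_low,
                                 unsigned int *mss_cache)
    {
        unsigned int mss_now = mss_from_pmtu;   /* line 1806 */

        if (mss_now > half_wnd_limit)           /* line 1807 */
            mss_now = half_wnd_limit;
        if (mtu_probing && mss_now > mss_at_search_low)
            mss_now = mss_at_search_low;        /* line 1812 */

        *mss_cache = mss_now;                   /* tp->mss_cache, line 1813 */
        return mss_now;
    }
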
1826 u32 mss_now;
1831 mss_now = tp->mss_cache;
1836 mss_now = tcp_sync_mss(sk, mtu);
1844 * we have to adjust mss_now correspondingly */
1847 mss_now -= delta;
1850 return mss_now;
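
The 1826-1850 cluster is tcp_current_mss(): start from the cached MSS (re-syncing first if the destination reports a different PMTU, line 1836, which is elided below), then charge the difference between this packet's actual option header length and the common-case length the cache was sized for; SACK blocks and similar options make the header grow, per the comment at 1844. A sketch of that adjustment:

    static unsigned int current_mss(unsigned int mss_cache,
                                    unsigned int cached_header_len,
                                    unsigned int actual_header_len)
    {
        unsigned int mss_now = mss_cache;   /* line 1831 */

        if (actual_header_len != cached_header_len) {
            int delta = (int)actual_header_len - (int)cached_header_len;

            mss_now -= delta;               /* line 1847 */
        }
        return mss_now;
    }
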
1937 * skb_pcount = skb->len / mss_now
1939 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1942 if (skb->len < tcp_skb_pcount(skb) * mss_now)
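
The comment at 1937 states the invariant (skb_pcount is skb->len / mss_now up to rounding), and the test at 1942 catches the case where the last segment is not full-sized. A sketch of tcp_minshall_update()'s bookkeeping:

    /* A buffer strictly shorter than pcount * mss_now ends in a
     * partial segment: remember its end sequence (tp->snd_sml in the
     * kernel) so the Minshall variant of Nagle can refuse to emit
     * another small segment while this one is still unacked. */
    static void minshall_update(unsigned int *snd_sml, unsigned int mss_now,
                                unsigned int skb_len, unsigned int skb_pcount,
                                unsigned int skb_end_seq)
    {
        if (skb_len < skb_pcount * mss_now)
            *snd_sml = skb_end_seq;
    }
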
1964 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
1978 segs = max_t(u32, bytes / mss_now, min_tso_segs);
1986 static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1995 tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
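
tcp_tso_autosize() (1964) sizes TSO bursts from the pacing rate: a per-interval byte budget is converted to a whole-segment count, never below min_tso_segs (line 1978), and tcp_tso_segs() (1986) applies that at 1995. A simplified model in which the byte budget's derivation from sk_pacing_rate is elided and passed in precomputed:

    static unsigned int tso_autosize(unsigned int pacing_bytes,
                                     unsigned int gso_max_bytes,
                                     unsigned int mss_now,
                                     unsigned int min_tso_segs)
    {
        unsigned int bytes = pacing_bytes < gso_max_bytes ? pacing_bytes
                                                          : gso_max_bytes;
        unsigned int segs = bytes / mss_now;    /* line 1978 */

        return segs > min_tso_segs ? segs : min_tso_segs;
    }
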
2002 unsigned int mss_now,
2010 max_len = mss_now * max_segs;
2020 partial = needed % mss_now;
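
The 2002-2020 hits are tcp_mss_split_point(): decide how much of an skb to send now, capped at max_segs full segments (line 2010), with any partial trailing segment trimmed back to an MSS boundary via the modulo at 2020 so no sub-MSS fragment is emitted mid-stream. A simplified model (the window cap and the last-skb special case are elided):

    static unsigned int mss_split_point(unsigned int needed,
                                        unsigned int mss_now,
                                        unsigned int max_segs)
    {
        unsigned int max_len = mss_now * max_segs;  /* line 2010 */
        unsigned int partial;

        if (needed > max_len)
            return max_len;

        partial = needed % mss_now;                 /* line 2020 */
        return needed - partial;
    }
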
2060 static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2064 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
2065 tcp_set_skb_tso_segs(skb, mss_now);
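
tcp_init_tso_segs() (2060-2065) repairs stale accounting just before transmission: if an skb has no pcount yet, or was sized against a different MSS than the one now in force (the test at 2064), the segment count is recomputed. A sketch:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Recompute the segment count when it is missing or was derived
     * from a different MSS, mirroring lines 2064-2065. */
    static unsigned int init_tso_segs(unsigned int skb_len, unsigned int *tso_segs,
                                      unsigned int *gso_size, unsigned int mss_now)
    {
        if (!*tso_segs || (*tso_segs > 1 && *gso_size != mss_now)) {
            if (skb_len <= mss_now) {
                *tso_segs = 1;
                *gso_size = 0;
            } else {
                *tso_segs = DIV_ROUND_UP(skb_len, mss_now);
                *gso_size = mss_now;
            }
        }
        return *tso_segs;
    }
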
2118 unsigned int mss_now, gfp_t gfp)
2127 skb, len, mss_now, gfp);
2159 tcp_set_skb_tso_segs(skb, mss_now);
2160 tcp_set_skb_tso_segs(buff, mss_now);
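
The 2118-2160 hits line up with tso_fragment(), the fast-path splitter used from tcp_write_xmit(): the call at 2127 falls back to tcp_fragment() (the 1533 cluster above) when the data is not purely paged, and the re-segmentation of both halves at 2159-2160 repeats the 1600-1601 pattern, so the split_fixup() sketch above applies here unchanged.
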
2337 int mss_now;
2356 mss_now = tcp_current_mss(sk);
2437 tcp_set_skb_tso_segs(skb, mss_now);
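
The three hits at 2337, 2356, and 2437 most likely fall inside the MTU-probing path (tcp_mtu_probe()): the current MSS is fetched once via tcp_current_mss() at 2356, and after the probe skb is assembled from queued data, its segment accounting is reset at 2437 with the same tcp_set_skb_tso_segs() shown at 1443.
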
2612 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2636 max_segs = tcp_tso_segs(sk, mss_now);
2644 tcp_init_tso_segs(skb, mss_now);
2651 tso_segs = tcp_init_tso_segs(skb, mss_now);
2663 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
2669 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2680 limit = mss_now;
2682 limit = tcp_mss_split_point(sk, skb, mss_now,
2689 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2712 tcp_minshall_update(tp, mss_now, skb);
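
The 2612-2712 hits trace the main transmit loop, tcp_write_xmit() (declared at 63, defined here): compute the burst ceiling (2636), initialize per-skb segment counts (2644/2651), gate on the send window (2663) and Nagle (2669), pick a split point (2680-2682), fragment if needed (2689), and update Minshall state after each send (2712). A skeletal, runnable model in which the queue is an array of payload lengths, the window a byte budget, and the Nagle test, the requeued remainder from tso_fragment(), and the Minshall update are elided; all names are illustrative, not kernel API:

    #include <stdio.h>

    static unsigned int div_round_up(unsigned int n, unsigned int d)
    {
        return (n + d - 1) / d;
    }

    static void write_xmit(const unsigned int *queue, int nskb,
                           unsigned int mss_now, unsigned int snd_wnd,
                           unsigned int max_segs)
    {
        unsigned int wnd_left = snd_wnd;

        for (int i = 0; i < nskb; i++) {
            unsigned int len = queue[i];
            /* tcp_init_tso_segs(), line 2651 */
            unsigned int tso_segs = div_round_up(len, mss_now);
            unsigned int limit = mss_now;               /* line 2680 */

            if (len > wnd_left)     /* tcp_snd_wnd_test(), line 2663 */
                break;
            if (tso_segs > 1)       /* tcp_mss_split_point(), line 2682 */
                limit = mss_now * (tso_segs < max_segs ? tso_segs : max_segs);
            if (len > limit)        /* tso_fragment(), line 2689 */
                len = limit;

            printf("send %u bytes (%u segs)\n",
                   len, div_round_up(len, mss_now));
            wnd_left -= len;
        }
    }

    int main(void)
    {
        unsigned int queue[] = { 1000, 6000, 500 };

        /* mss 1448, 8000-byte window, bursts capped at 4 segments */
        write_xmit(queue, 3, 1448, 8000, 4);
        return 0;
    }
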
2892 void tcp_push_one(struct sock *sk, unsigned int mss_now)
2896 BUG_ON(!skb || skb->len < mss_now);
2898 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
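
tcp_push_one() (2892-2898) is the one-shot entry point: the BUG_ON at 2896 insists the head skb exists and carries at least a full MSS, and the call at 2898 drives tcp_write_xmit() with TCP_NAGLE_PUSH and push_one set to 1, so exactly one buffer is pushed out regardless of Nagle state.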