Lines matching refs: mss_now

61 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1455 static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1457 if (skb->len <= mss_now) {
1464 tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1465 TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
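
The two lines above are the core of the segment accounting in tcp_set_skb_tso_segs(): a payload no larger than one MSS is a single packet, anything bigger is rounded up to whole segments. A minimal standalone sketch of that arithmetic (plain integers instead of sk_buff state; all names here are illustrative):

    #include <stdio.h>

    /* How many mss_now-sized segments does a len-byte payload occupy?
     * Mirrors the DIV_ROUND_UP() on line 1464. */
    static unsigned int tso_segment_count(unsigned int len, unsigned int mss_now)
    {
        if (len <= mss_now)
            return 1;                         /* single packet, no GSO */
        return (len + mss_now - 1) / mss_now; /* DIV_ROUND_UP(len, mss_now) */
    }

    int main(void)
    {
        printf("%u\n", tso_segment_count(4000, 1448)); /* prints 3 */
        return 0;
    }
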
1545 unsigned int mss_now, gfp_t gfp)
1609 tcp_set_skb_tso_segs(skb, mss_now);
1610 tcp_set_skb_tso_segs(buff, mss_now);
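
tso_fragment() splits one oversized skb into skb (the first bytes up to the split) and buff (the remainder), then recomputes the segment count for each half, which is why tcp_set_skb_tso_segs() is called twice back to back above. A hedged standalone model of that split:

    #include <stdio.h>

    static unsigned int segs(unsigned int len, unsigned int mss)
    {
        return len <= mss ? 1 : (len + mss - 1) / mss;
    }

    /* Split a total-byte payload at 'limit' and re-count both halves,
     * as the paired tcp_set_skb_tso_segs() calls above do. Illustrative. */
    static void split(unsigned int total, unsigned int limit, unsigned int mss)
    {
        unsigned int head = limit, tail = total - limit;
        printf("head %u bytes / %u segs, tail %u bytes / %u segs\n",
               head, segs(head, mss), tail, segs(tail, mss));
    }

    int main(void)
    {
        split(10000, 5792, 1448); /* 4 full segs go now, the rest waits */
        return 0;
    }
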
1699 int mss_now;
1704 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1711 mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1715 if (mss_now > tp->rx_opt.mss_clamp)
1716 mss_now = tp->rx_opt.mss_clamp;
1719 mss_now -= icsk->icsk_ext_hdr_len;
1722 mss_now = max(mss_now,
1724 return mss_now;
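
Lines 1704-1724 are the MTU-to-MSS conversion: start from the path MTU, subtract the network and TCP headers, optionally the IPv6 fragmentation header, clamp to the MSS the peer advertised, subtract extension-header space, and apply a floor (the truncated max() on line 1722). A standalone sketch of the same pipeline; header lengths, clamp, and floor are illustrative parameters, not values read from a real socket:

    #include <stdio.h>

    static int mtu_to_mss(int pmtu, int net_header_len, int frag_header_len,
                          int ext_hdr_len, int mss_clamp, int min_snd_mss)
    {
        int mss_now = pmtu - net_header_len - 20; /* 20 = sizeof(struct tcphdr) */

        mss_now -= frag_header_len;     /* only if the path always fragments */

        if (mss_now > mss_clamp)        /* never exceed the peer's SYN MSS */
            mss_now = mss_clamp;

        mss_now -= ext_hdr_len;         /* IP options / extension headers */

        if (mss_now < min_snd_mss)      /* floor, cf. the max() on line 1722 */
            mss_now = min_snd_mss;
        return mss_now;
    }

    int main(void)
    {
        /* IPv4, no options: 1500 - 20 (IP) - 20 (TCP) = 1460 */
        printf("%d\n", mtu_to_mss(1500, 20, 0, 0, 1460, 48));
        return 0;
    }
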
1802 int mss_now;
1807 mss_now = tcp_mtu_to_mss(sk, pmtu);
1808 mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1813 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1814 tp->mss_cache = mss_now;
1816 return mss_now;
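
tcp_sync_mss() chains the helpers at lines 1807-1814: convert the new path MTU to an MSS, bound it against the peer's window so we do not packetize into tiny sub-window pieces, factor in the MTU-probing search floor, and cache the result in tp->mss_cache. A simplified sketch reusing the mtu_to_mss() sketch above; the plain half-window cutoff is an assumption of this sketch (the real tcp_bound_to_half_wnd() has more cases):

    /* Simplified model of the pipeline at lines 1807-1814. */
    static unsigned int sync_mss(unsigned int pmtu, unsigned int max_window,
                                 unsigned int *mss_cache)
    {
        unsigned int mss_now = mtu_to_mss(pmtu, 20, 0, 0, 1460, 48);

        if (max_window && mss_now > max_window / 2)
            mss_now = max_window / 2;   /* avoid silly-window packetizing */

        *mss_cache = mss_now;           /* tp->mss_cache = mss_now, line 1814 */
        return mss_now;
    }
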
1827 u32 mss_now;
1832 mss_now = tp->mss_cache;
1837 mss_now = tcp_sync_mss(sk, mtu);
1845 * we have to adjust mss_now correspondingly */
1848 mss_now -= delta;
1851 return mss_now;
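
tcp_current_mss() starts from the cached value and, per the comment fragment at line 1845, shifts it when the TCP options built for this packet differ in size from what the cache assumed (for example, a SACK block appearing or disappearing). Standalone model of the delta adjustment at lines 1845-1848, with illustrative header lengths:

    /* If this packet's header (with current options) is larger than the
     * header size the cached MSS assumed, the payload must shrink by the
     * difference, and vice versa. */
    static unsigned int current_mss(unsigned int mss_cache,
                                    int cached_hdrlen, int hdrlen_now)
    {
        unsigned int mss_now = mss_cache;

        if (hdrlen_now != cached_hdrlen)
            mss_now -= hdrlen_now - cached_hdrlen;  /* delta may be negative */
        return mss_now;
    }
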
1938 * skb_pcount = skb->len / mss_now
1940 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1943 if (skb->len < tcp_skb_pcount(skb) * mss_now)
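
The surrounding comment (fragment at line 1938) gives the invariant: an skb carries pcount = DIV_ROUND_UP(len, mss) segments, so skb->len < pcount * mss_now holds exactly when the last segment is not full-sized; Minshall's variant of Nagle remembers such a tail. Standalone form of the test on line 1943:

    #include <stdbool.h>

    /* True when the buffer ends in a sub-MSS segment. Illustrative
     * standalone helper; the kernel reuses the cached pcount instead
     * of redoing the divide. */
    static bool has_partial_tail(unsigned int len, unsigned int mss_now)
    {
        unsigned int pcount = (len + mss_now - 1) / mss_now;
        return len < pcount * mss_now;
    }
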
1976 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
1990 return max_t(u32, bytes / mss_now, min_tso_segs);
1996 static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
2005 tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
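
tcp_tso_autosize() sizes a TSO burst from the pacing rate: take roughly the bytes one pacing interval can carry, divide by the MSS, and never drop below a minimum segment count (line 1990). A sketch with illustrative inputs; the real function derives the byte budget from sk->sk_pacing_rate:

    /* max(bytes / mss_now, min_tso_segs), as on line 1990. The byte
     * budget here is a plain parameter, not a real pacing-rate read. */
    static unsigned int tso_autosize(unsigned long byte_budget,
                                     unsigned int gso_max_size,
                                     unsigned int mss_now,
                                     unsigned int min_tso_segs)
    {
        unsigned long bytes = byte_budget;
        unsigned int segs;

        if (bytes > gso_max_size)
            bytes = gso_max_size;       /* hardware GSO limit */

        segs = bytes / mss_now;
        return segs > min_tso_segs ? segs : min_tso_segs;
    }
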
2012 unsigned int mss_now,
2020 max_len = mss_now * max_segs;
2030 partial = needed % mss_now;
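
tcp_mss_split_point() picks where to cut an oversized skb: at most max_segs full segments (line 2020), and if less than that fits the window, it trims the sub-MSS remainder (line 2030) so the chunk that goes out stays TSO-aligned. Simplified sketch that ignores the Nagle/Minshall exception the real code makes for keeping a partial tail:

    /* Cut point for a skb_len-byte buffer given the send window.
     * Simplified from lines 2012-2030. */
    static unsigned int mss_split_point(unsigned int skb_len, unsigned int window,
                                        unsigned int mss_now, unsigned int max_segs)
    {
        unsigned int max_len = mss_now * max_segs;  /* line 2020 */
        unsigned int needed = skb_len < window ? skb_len : window;
        unsigned int partial;

        if (max_len <= needed)
            return max_len;

        partial = needed % mss_now;                 /* line 2030 */
        return partial ? needed - partial : needed; /* stay MSS-aligned */
    }
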
2070 static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2074 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
2075 tcp_set_skb_tso_segs(skb, mss_now);
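
tcp_init_tso_segs() is a lazy wrapper: the cached segment count is reused unless it is missing or was computed against a different MSS (the test on line 2074), in which case the skb is re-segmented. Standalone model with a toy struct in place of sk_buff:

    struct toy_skb {
        unsigned int len;
        unsigned int tso_segs;  /* cached segment count (0 = unset) */
        unsigned int gso_size;  /* the MSS that count was computed with */
    };

    /* Recompute only when stale, mirroring the test on line 2074. */
    static unsigned int init_tso_segs(struct toy_skb *skb, unsigned int mss_now)
    {
        if (!skb->tso_segs || (skb->tso_segs > 1 && skb->gso_size != mss_now)) {
            skb->tso_segs = skb->len <= mss_now
                          ? 1 : (skb->len + mss_now - 1) / mss_now;
            skb->gso_size = skb->len <= mss_now ? 0 : mss_now;
        }
        return skb->tso_segs;
    }
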
2128 unsigned int mss_now, gfp_t gfp)
2164 tcp_set_skb_tso_segs(skb, mss_now);
2165 tcp_set_skb_tso_segs(buff, mss_now);
2395 int mss_now;
2414 mss_now = tcp_current_mss(sk);
2496 tcp_set_skb_tso_segs(skb, mss_now);
2670 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2694 max_segs = tcp_tso_segs(sk, mss_now);
2703 tcp_init_tso_segs(skb, mss_now);
2710 tso_segs = tcp_init_tso_segs(skb, mss_now);
2722 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
2728 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2739 limit = mss_now;
2741 limit = tcp_mss_split_point(sk, skb, mss_now,
2748 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2771 tcp_minshall_update(tp, mss_now, skb);
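
tcp_write_xmit() ties everything together: per queued skb it refreshes the segment count (line 2710), checks the send window (2722) and Nagle (2728), picks a split point capped at max_segs (2739-2741), fragments if needed (2748), and updates the Minshall state after sending (2771). A highly simplified, runnable skeleton of that loop over plain byte counts; every name and value here is illustrative, not kernel code:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mss_now = 1448, max_segs = 4;
        unsigned int window = 8000;                  /* illustrative budget */
        unsigned int queue[] = { 6000, 3000, 500 };  /* queued skb lengths */

        for (unsigned int i = 0; i < 3 && window; i++) {
            unsigned int limit = mss_now * max_segs; /* cf. lines 2739-2741 */
            unsigned int len = queue[i];

            if (len > window)                        /* cf. snd_wnd test, 2722 */
                len = window - window % mss_now;     /* trim to whole segments */
            if (len > limit)
                len = limit;                         /* would tso_fragment(), 2748 */
            if (!len)
                break;                               /* window closed: stop */

            printf("xmit %u bytes (%u segs)\n",
                   len, (len + mss_now - 1) / mss_now);
            window -= len;                           /* then minshall update, 2771 */
        }
        return 0;
    }
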
2955 void tcp_push_one(struct sock *sk, unsigned int mss_now)
2959 BUG_ON(!skb || skb->len < mss_now);
2961 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);