author     David S. Miller <davem@sunset.davemloft.net>  2005-04-24 19:12:33 -0700
committer  David S. Miller <davem@davemloft.net>         2005-04-24 19:12:33 -0700
commit     d5ac99a648b8c61d0c7f1c32a8ab7f1dca0123d2 (patch)
tree       3811d84e83dab918c57aeca1081ae343cb97df8b /net/ipv4
parent     158a0e45b69254a9ee4d7795e3b98d8c959fb799 (diff)
[TCP]: skb pcount with MTU discovery
The problem is that when doing MTU discovery, the too-large segments in
the write queue will be calculated as having a pcount of >1. When
tcp_write_xmit() is trying to send, tcp_snd_test() fails the cwnd test
when pcount > cwnd.
The segments are eventually transmitted one at a time by keepalive, but
this can take a long time.
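To make the failure concrete: pcount is the number of MSS-sized segments an skb is accounted as, derived with a ceiling divide (the same arithmetic is visible in tcp_set_skb_tso_segs() in the diff below). A minimal userspace sketch of the pre-patch accounting, with the skb, socket, and tcp_snd_test() paraphrased as plain values rather than the real kernel APIs, and the example numbers chosen purely for illustration:

#include <stdio.h>

/* Ceiling divide, as in tcp_set_skb_tso_segs():
 * pcount = ceil(skb_len / mss). */
static unsigned int skb_pcount(unsigned int skb_len, unsigned int mss)
{
	return (skb_len + mss - 1) / mss;
}

int main(void)
{
	/* Example: a 4380-byte skb queued when the MSS was 1460.
	 * Path MTU discovery then shrinks the effective MSS to 536. */
	unsigned int skb_len = 4380, mss = 536, cwnd = 4;
	unsigned int pcount = skb_pcount(skb_len, mss);

	printf("pcount = %u\n", pcount);	/* 9 */

	/* Paraphrase of the cwnd part of tcp_snd_test(): a send is
	 * refused when in-flight + pcount would exceed cwnd, so a
	 * single oversized skb with pcount > cwnd can never pass. */
	if (pcount > cwnd)
		printf("cwnd test fails: %u > %u\n", pcount, cwnd);
	return 0;
}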
This patch checks if TSO is enabled when setting pcount.
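The fix is visible in the second hunk below: tcp_set_skb_tso_segs() now takes the socket so it can consult sk->sk_route_caps, and a segment on a non-TSO route is always accounted as a pcount of 1. A hedged sketch of the post-patch decision (pcount_after_patch is an invented name; the real function writes skb_shinfo(skb)->tso_segs, and its "avoid the costly divide" branch, elided in the diff, sets that count to 1):

#include <stdio.h>

/* Sketch of the post-patch tcp_set_skb_tso_segs() decision.
 * "tso_enabled" stands in for (sk->sk_route_caps & NETIF_F_TSO). */
static unsigned int pcount_after_patch(unsigned int skb_len,
				       unsigned int mss_cache_std,
				       int tso_enabled)
{
	if (skb_len <= mss_cache_std || !tso_enabled)
		return 1;	/* one skb, one segment: cwnd test can pass */
	return (skb_len + mss_cache_std - 1) / mss_cache_std;
}

int main(void)
{
	/* Same oversized skb as above: without TSO it now counts as a
	 * single segment instead of nine. */
	printf("TSO on:  %u\n", pcount_after_patch(4380, 536, 1));	/* 9 */
	printf("TSO off: %u\n", pcount_after_patch(4380, 536, 0));	/* 1 */
	return 0;
}

With pcount back to 1, the oversized skb passes the cwnd test in tcp_snd_test(), and tcp_write_xmit() then fragments it against the new MSS (the skb->len > mss_now check in the sixth hunk below).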
Signed-off-by: John Heffner <jheffner@psc.edu>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_output.c	25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 13c14cb6dee..a12df6979ff 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -427,7 +427,7 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = sk->sk_send_head;
 
-	if (tcp_snd_test(tp, skb, cur_mss, TCP_NAGLE_PUSH)) {
+	if (tcp_snd_test(sk, skb, cur_mss, TCP_NAGLE_PUSH)) {
 		/* Send it out now. */
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		tcp_tso_set_push(skb);
@@ -440,9 +440,12 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
 	}
 }
 
-void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_std)
+void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
 {
-	if (skb->len <= mss_std) {
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (skb->len <= tp->mss_cache_std ||
+	    !(sk->sk_route_caps & NETIF_F_TSO)) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
@@ -451,10 +454,10 @@ void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_std)
 	} else {
 		unsigned int factor;
 
-		factor = skb->len + (mss_std - 1);
-		factor /= mss_std;
+		factor = skb->len + (tp->mss_cache_std - 1);
+		factor /= tp->mss_cache_std;
 		skb_shinfo(skb)->tso_segs = factor;
-		skb_shinfo(skb)->tso_size = mss_std;
+		skb_shinfo(skb)->tso_size = tp->mss_cache_std;
 	}
 }
 
@@ -525,8 +528,8 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
 	}
 
 	/* Fix up tso_factor for both original and new SKB. */
-	tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
-	tcp_set_skb_tso_segs(buff, tp->mss_cache_std);
+	tcp_set_skb_tso_segs(sk, skb);
+	tcp_set_skb_tso_segs(sk, buff);
 
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
 		tp->lost_out += tcp_skb_pcount(skb);
@@ -601,7 +604,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	 * factor and mss.
 	 */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
+		tcp_set_skb_tso_segs(sk, skb);
 
 	return 0;
 }
@@ -752,7 +755,7 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
 		mss_now = tcp_current_mss(sk, 1);
 
 		while ((skb = sk->sk_send_head) &&
-		       tcp_snd_test(tp, skb, mss_now,
+		       tcp_snd_test(sk, skb, mss_now,
 				    tcp_skb_is_last(sk, skb) ? nonagle :
 							       TCP_NAGLE_PUSH)) {
 			if (skb->len > mss_now) {
@@ -1676,7 +1679,7 @@ int tcp_write_wakeup(struct sock *sk)
 				tp->mss_cache = tp->mss_cache_std;
 			}
 		} else if (!tcp_skb_pcount(skb))
-			tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
+			tcp_set_skb_tso_segs(sk, skb);
 
 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;