Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/arp.c         |  2
-rw-r--r--  net/ipv4/cipso_ipv4.c  |  4
-rw-r--r--  net/ipv4/igmp.c        |  4
-rw-r--r--  net/ipv4/ipconfig.c    |  6
-rw-r--r--  net/ipv4/raw.c         | 10
-rw-r--r--  net/ipv4/tcp_input.c   | 17
6 files changed, 24 insertions, 19 deletions
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 68b72a7a180..418862f1bf2 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -570,7 +570,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
* Allocate a buffer
*/
- skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+ skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL)
return NULL;
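
The arp.c hunk above, like the igmp.c, ipconfig.c and raw.c hunks below, switches the allocation size from LL_RESERVED_SPACE() to LL_ALLOCATED_SPACE(). Assuming the netdevice.h macros of this kernel series, LL_ALLOCATED_SPACE() covers both the headroom and the tailroom a device declares through needed_headroom and needed_tailroom, while LL_RESERVED_SPACE() covers only the headroom that gets reserved at the front of the buffer. A simplified sketch of the relationship (the EXAMPLE_ names are illustrative, not the real macros, which also round the result up to HH_DATA_MOD alignment):

/*
 * Simplified stand-ins for the two macros this series distinguishes; the
 * real definitions live in include/linux/netdevice.h and additionally align
 * the result to HH_DATA_MOD, which is omitted here.
 */
#define EXAMPLE_LL_RESERVED_SPACE(dev) \
	((dev)->hard_header_len + (dev)->needed_headroom)
#define EXAMPLE_LL_ALLOCATED_SPACE(dev) \
	(EXAMPLE_LL_RESERVED_SPACE(dev) + (dev)->needed_tailroom)
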
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 05afb576d93..2c0e4572cc9 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -338,7 +338,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
return -ENOENT;
hash = cipso_v4_map_cache_hash(key, key_len);
- bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+ bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
spin_lock_bh(&cipso_v4_cache[bkt].lock);
list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
if (entry->hash == hash &&
@@ -417,7 +417,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
atomic_inc(&secattr->cache->refcount);
entry->lsm_data = secattr->cache;
- bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+ bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
spin_lock_bh(&cipso_v4_cache[bkt].lock);
if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
list_add(&entry->list, &cipso_v4_cache[bkt].list);
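
The cipso_ipv4.c hunks fix the hash-to-bucket mask: masking with (CIPSO_V4_CACHE_BUCKETBITS - 1) used the number of bucket bits instead of the number of buckets, so only a handful of buckets could ever be selected. Assuming the usual definition CIPSO_V4_CACHE_BUCKETS = (1 << CIPSO_V4_CACHE_BUCKETBITS) with 7 bucket bits (taken here as an assumption, not from the patch text), a small standalone illustration:

#include <stdio.h>

/* Assumed to mirror the constants in net/ipv4/cipso_ipv4.c of this era. */
#define CIPSO_V4_CACHE_BUCKETBITS 7
#define CIPSO_V4_CACHE_BUCKETS    (1 << CIPSO_V4_CACHE_BUCKETBITS)

int main(void)
{
	unsigned int hash = 0x9e3779b9;	/* arbitrary example hash value */

	/* Old mask: 7 - 1 = 0x6, so only buckets 0, 2, 4 and 6 are reachable. */
	printf("old bucket: %u\n", hash & (CIPSO_V4_CACHE_BUCKETBITS - 1));

	/* New mask: 128 - 1 = 0x7f, so all 128 buckets are reachable. */
	printf("new bucket: %u\n", hash & (CIPSO_V4_CACHE_BUCKETS - 1));
	return 0;
}
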
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6250f4239b6..2769dc4a4c8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -292,7 +292,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
struct iphdr *pip;
struct igmpv3_report *pig;
- skb = alloc_skb(size + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+ skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL)
return NULL;
@@ -653,7 +653,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
return -1;
}
- skb=alloc_skb(IGMP_SIZE+LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+ skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL) {
ip_rt_put(rt);
return -1;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 89dee4346f6..ed45037ce9b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -710,14 +710,14 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
struct net_device *dev = d->dev;
struct sk_buff *skb;
struct bootp_pkt *b;
- int hh_len = LL_RESERVED_SPACE(dev);
struct iphdr *h;
/* Allocate packet */
- skb = alloc_skb(sizeof(struct bootp_pkt) + hh_len + 15, GFP_KERNEL);
+ skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+ GFP_KERNEL);
if (!skb)
return;
- skb_reserve(skb, hh_len);
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
memset(b, 0, sizeof(struct bootp_pkt));
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 11d7f753a82..fead049daf4 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -322,7 +322,6 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
- int hh_len;
struct iphdr *iph;
struct sk_buff *skb;
unsigned int iphlen;
@@ -336,13 +335,12 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
if (flags&MSG_PROBE)
goto out;
- hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
-
- skb = sock_alloc_send_skb(sk, length+hh_len+15,
- flags&MSG_DONTWAIT, &err);
+ skb = sock_alloc_send_skb(sk,
+ length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+ flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
- skb_reserve(skb, hh_len);
+ skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
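
The ipconfig.c and raw.c hunks show the two halves of the pattern together: the buffer is sized with LL_ALLOCATED_SPACE() so the device's tailroom exists at the end of the allocation, while skb_reserve() keeps using LL_RESERVED_SPACE() so only headroom is set aside at the front. A minimal kernel-style sketch of that allocate-then-reserve sequence, with a hypothetical helper name and a GFP flag that are not part of the patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: size with LL_ALLOCATED_SPACE(), reserve only
 * LL_RESERVED_SPACE(), leaving the device's needed tailroom at the end. */
static struct sk_buff *example_ll_alloc(struct net_device *dev,
					unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(payload_len + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* headroom only */
	skb->dev = dev;
	return skb;
}
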
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 26c936930e9..b54d9d37b63 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1842,9 +1842,16 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
}
- /* Don't lost mark skbs that were fwd transmitted after RTO */
- if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) &&
- !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
+ /* Marking forward transmissions that were made after RTO lost
+ * can cause unnecessary retransmissions in some scenarios,
+ * SACK blocks will mitigate that in some but not in all cases.
+ * We used to not mark them but it was causing break-ups with
+ * receivers that do only in-order receival.
+ *
+ * TODO: we could detect presence of such receiver and select
+ * different behavior per flow.
+ */
+ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
}
@@ -1860,7 +1867,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
tp->reordering = min_t(unsigned int, tp->reordering,
sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
- tp->high_seq = tp->frto_highmark;
+ tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
tcp_clear_retrans_hints_partial(tp);
@@ -2482,7 +2489,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
tcp_verify_left_out(tp);
- if (tp->retrans_out == 0)
+ if (!tp->frto_counter && tp->retrans_out == 0)
tp->retrans_stamp = 0;
if (flag & FLAG_ECE)
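
The tcp_input.c changes adjust FRTO loss handling in three places: tcp_enter_frto_loss() now marks every un-SACKed segment lost, including forward transmissions made after the RTO; high_seq is taken from snd_nxt instead of frto_highmark; and tcp_try_to_open() no longer clears retrans_stamp while frto_counter is still set. A toy userspace model of just the new marking rule, with made-up segments and flags that only stand in for the real skb state (none of this is kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Toy retransmit-queue entry with only the state the new rule looks at. */
struct toy_seg {
	unsigned int end_seq;
	bool sacked_acked;	/* stands in for TCPCB_SACKED_ACKED */
};

int main(void)
{
	/* The last entry models a forward transmission sent after the RTO;
	 * under the old rule it was exempt, under the new rule it is not. */
	struct toy_seg queue[] = {
		{ .end_seq = 1000, .sacked_acked = false },
		{ .end_seq = 2000, .sacked_acked = true  },
		{ .end_seq = 3000, .sacked_acked = false },
	};
	unsigned int lost_out = 0;

	for (unsigned int i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
		/* New rule: anything not covered by a SACK is marked lost. */
		if (!queue[i].sacked_acked) {
			lost_out++;
			printf("segment ending at %u marked lost\n",
			       queue[i].end_seq);
		}
	}
	printf("lost_out = %u\n", lost_out);
	return 0;
}
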