Diffstat (limited to 'net')
-rw-r--r--  net/atm/clip.c              |  13
-rw-r--r--  net/atm/ipcommon.c          |  17
-rw-r--r--  net/ax25/af_ax25.c          |   3
-rw-r--r--  net/ax25/ax25_dev.c         |   4
-rw-r--r--  net/bridge/br_forward.c     |   2
-rw-r--r--  net/bridge/br_netfilter.c   |   2
-rw-r--r--  net/core/dev.c              |  42
-rw-r--r--  net/decnet/dn_rules.c       |   3
-rw-r--r--  net/ipv4/af_inet.c          |  36
-rw-r--r--  net/ipv4/fib_rules.c        |   4
-rw-r--r--  net/ipv4/ip_output.c        |   4
-rw-r--r--  net/ipv4/tcp_ipv4.c         |  18
-rw-r--r--  net/ipv4/xfrm4_output.c     |   2
-rw-r--r--  net/ipv6/ip6_output.c       |   4
-rw-r--r--  net/ipv6/ipv6_sockglue.c    |  89
-rw-r--r--  net/ipv6/tcp_ipv6.c         |  19
-rw-r--r--  net/ipv6/xfrm6_output.c     |   2
-rw-r--r--  net/netrom/af_netrom.c      |   4
-rw-r--r--  net/rose/af_rose.c          |   3
-rw-r--r--  net/sched/act_api.c         |   2
20 files changed, 206 insertions, 67 deletions
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 121bf6f4914..2e62105d91b 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -962,7 +962,6 @@ static struct file_operations arp_seq_fops = {
static int __init atm_clip_init(void)
{
- struct proc_dir_entry *p;
neigh_table_init_no_netlink(&clip_tbl);
clip_tbl_hook = &clip_tbl;
@@ -972,9 +971,15 @@ static int __init atm_clip_init(void)
setup_timer(&idle_timer, idle_timer_check, 0);
- p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
- if (p)
- p->proc_fops = &arp_seq_fops;
+#ifdef CONFIG_PROC_FS
+ {
+ struct proc_dir_entry *p;
+
+ p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
+ if (p)
+ p->proc_fops = &arp_seq_fops;
+ }
+#endif
return 0;
}
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 4b1faca5013..1d3de42fada 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -25,22 +25,27 @@
/*
* skb_migrate appends the list at "from" to "to", emptying "from" in the
* process. skb_migrate is atomic with respect to all other skb operations on
- * "from" and "to". Note that it locks both lists at the same time, so beware
- * of potential deadlocks.
+ * "from" and "to". Note that it locks both lists at the same time, so to deal
+ * with the lock ordering, the locks are taken in address order.
*
* This function should live in skbuff.c or skbuff.h.
*/
-void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
+void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
{
unsigned long flags;
struct sk_buff *skb_from = (struct sk_buff *) from;
struct sk_buff *skb_to = (struct sk_buff *) to;
struct sk_buff *prev;
- spin_lock_irqsave(&from->lock,flags);
- spin_lock(&to->lock);
+ if ((unsigned long) from < (unsigned long) to) {
+ spin_lock_irqsave(&from->lock, flags);
+ spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irqsave(&to->lock, flags);
+ spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
+ }
prev = from->prev;
from->next->prev = to->prev;
prev->next = skb_to;
@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
from->prev = skb_from;
from->next = skb_from;
from->qlen = 0;
- spin_unlock_irqrestore(&from->lock,flags);
+ spin_unlock_irqrestore(&from->lock, flags);
}
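
A note on the skb_migrate() change: taking the two queue locks in address order is the standard way to avoid an AB-BA deadlock between two locks of the same class, and spin_lock_nested() with SINGLE_DEPTH_NESTING keeps lockdep from complaining about the intentional nesting. A minimal sketch of the general pattern, not part of this patch:

#include <linux/spinlock.h>

/*
 * Sketch only: lock two spinlocks of the same class without risking an
 * AB-BA deadlock.  Every caller takes the lock at the lower address
 * first, so two CPUs locking the same pair in opposite argument order
 * still agree on the locking order; the second acquisition is
 * annotated for lockdep with spin_lock_nested().
 */
static void double_lock(spinlock_t *a, spinlock_t *b)
{
	if ((unsigned long) b < (unsigned long) a) {
		spinlock_t *tmp = a;

		a = b;
		b = tmp;
	}
	spin_lock(a);
	spin_lock_nested(b, SINGLE_DEPTH_NESTING);
}

skb_migrate() does the same thing, except that the outer acquisition uses spin_lock_irqsave() so the function stays usable from any context.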
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 10a3c0aa839..f12be2acf9b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void)
{
ax25_cb *ax25;
- if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
+ if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
return NULL;
- memset(ax25, 0x00, sizeof(*ax25));
atomic_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 47e6e790bd6..b787678220f 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -55,15 +55,13 @@ void ax25_dev_device_up(struct net_device *dev)
{
ax25_dev *ax25_dev;
- if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
+ if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
return;
}
ax25_unregister_sysctl();
- memset(ax25_dev, 0x00, sizeof(*ax25_dev));
-
dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
dev_hold(dev);
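
The ax25 hunks (and the netrom/rose hunks further down) are a mechanical conversion: kzalloc() behaves like the kmalloc()+memset() pair it replaces, roughly:

#include <linux/slab.h>
#include <linux/string.h>

/* Rough equivalent of kzalloc(size, flags), shown for reference only. */
static inline void *kzalloc_like(size_t size, gfp_t flags)
{
	void *p = kmalloc(size, flags);

	if (p)
		memset(p, 0, size);
	return p;
}

The call sites get one line shorter and can no longer forget the clear.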
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 8be9f2123e5..6ccd32b3080 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -35,7 +35,7 @@ static inline unsigned packet_length(const struct sk_buff *skb)
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
/* drop mtu oversized packets except gso */
- if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
+ if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
kfree_skb(skb);
else {
#ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 8298a5179ae..cbc8a389a0a 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP) &&
skb->len > skb->dev->mtu &&
- !skb_shinfo(skb)->gso_size)
+ !skb_is_gso(skb))
return ip_fragment(skb, br_dev_queue_push_xmit);
else
return br_dev_queue_push_xmit(skb);
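
The two bridge hunks (and the matching ip_output.c, xfrm4_output.c, ip6_output.c and xfrm6_output.c hunks below) replace open-coded gso_size tests with skb_is_gso(). The helper is essentially the following, so behaviour is unchanged; callers just stop poking at skb_shared_info directly:

/* Essentially the definition in include/linux/skbuff.h. */
static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}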
diff --git a/net/core/dev.c b/net/core/dev.c
index 066a60a7528..4d2b5167d7f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1162,9 +1162,17 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
unsigned int csum;
int ret = 0, offset = skb->h.raw - skb->data;
- if (inward) {
- skb->ip_summed = CHECKSUM_NONE;
- goto out;
+ if (inward)
+ goto out_set_summed;
+
+ if (unlikely(skb_shinfo(skb)->gso_size)) {
+ static int warned;
+
+ WARN_ON(!warned);
+ warned = 1;
+
+ /* Let GSO fix up the checksum. */
+ goto out_set_summed;
}
if (skb_cloned(skb)) {
@@ -1181,6 +1189,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
BUG_ON(skb->csum + 2 > offset);
*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+
+out_set_summed:
skb->ip_summed = CHECKSUM_NONE;
out:
return ret;
@@ -1201,17 +1211,35 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
int type = skb->protocol;
+ int err;
BUG_ON(skb_shinfo(skb)->frag_list);
- BUG_ON(skb->ip_summed != CHECKSUM_HW);
skb->mac.raw = skb->data;
skb->mac_len = skb->nh.raw - skb->data;
__skb_pull(skb, skb->mac_len);
+ if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+ static int warned;
+
+ WARN_ON(!warned);
+ warned = 1;
+
+ if (skb_header_cloned(skb) &&
+ (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ return ERR_PTR(err);
+ }
+
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+ if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+ err = ptype->gso_send_check(skb);
+ segs = ERR_PTR(err);
+ if (err || skb_gso_ok(skb, features))
+ break;
+ __skb_push(skb, skb->data - skb->nh.raw);
+ }
segs = ptype->gso_segment(skb, features);
break;
}
@@ -1727,7 +1755,7 @@ static int ing_filter(struct sk_buff *skb)
if (dev->qdisc_ingress) {
__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
if (MAX_RED_LOOP < ttl++) {
- printk("Redir loop detected Dropping packet (%s->%s)\n",
+ printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n",
skb->input_dev->name, skb->dev->name);
return TC_ACT_SHOT;
}
@@ -2922,7 +2950,7 @@ int register_netdevice(struct net_device *dev)
/* Fix illegal SG+CSUM combinations. */
if ((dev->features & NETIF_F_SG) &&
!(dev->features & NETIF_F_ALL_CSUM)) {
- printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
dev->name);
dev->features &= ~NETIF_F_SG;
}
@@ -2930,7 +2958,7 @@ int register_netdevice(struct net_device *dev)
/* TSO requires that SG is present as well. */
if ((dev->features & NETIF_F_TSO) &&
!(dev->features & NETIF_F_SG)) {
- printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
dev->name);
dev->features &= ~NETIF_F_TSO;
}
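
The skb_checksum_help() hunks rely on the CHECKSUM_HW convention: skb->csum is the offset, relative to skb->h.raw, of the 16-bit checksum field that still has to be filled in. A sketch of the software completion step (the part of the function not visible in the hunk; the helper name is made up):

/*
 * Sketch, not from this patch: finish a CHECKSUM_HW skb in software by
 * summing everything from the transport header onwards and folding the
 * result into the field skb->csum bytes past skb->h.raw.
 */
static void finish_hw_checksum(struct sk_buff *skb)
{
	int offset = skb->h.raw - skb->data;
	unsigned int csum = skb_checksum(skb, offset, skb->len - offset, 0);

	*(u16 *)(skb->h.raw + skb->csum) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;
}

For a GSO skb that work is now deferred (the warn-once branch above), since the segmentation path recomputes per-segment checksums anyway.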
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 06e785fe575..22f321d9bf9 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -399,9 +399,10 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
if (idx < s_idx)
- continue;
+ goto next;
if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
break;
+next:
idx++;
}
rcu_read_unlock();
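
The dn_rules.c fix (and the identical fib_rules.c fix below) restores the netlink dump/resume convention: the index has to advance for every rule walked, including the ones skipped because an earlier dump call already sent them, otherwise cb->args[] drifts out of sync with the list and rules get duplicated or lost across calls. A sketch of the pattern, with hypothetical names (my_rule, my_rules, my_fill_rule):

static int my_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_rule *r;
	int idx = 0;
	int s_idx = cb->args[0];

	list_for_each_entry(r, &my_rules, list) {
		if (idx < s_idx)
			goto next;		/* already sent in an earlier call */
		if (my_fill_rule(skb, r, cb) < 0)
			break;			/* skb full; resume at this idx */
next:
		idx++;				/* must run for skipped entries too */
	}
	cb->args[0] = idx;
	return skb->len;
}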
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 318d4674faa..c84a32070f8 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1097,6 +1097,40 @@ int inet_sk_rebuild_header(struct sock *sk)
EXPORT_SYMBOL(inet_sk_rebuild_header);
+static int inet_gso_send_check(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ struct net_protocol *ops;
+ int proto;
+ int ihl;
+ int err = -EINVAL;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+ goto out;
+
+ iph = skb->nh.iph;
+ ihl = iph->ihl * 4;
+ if (ihl < sizeof(*iph))
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, ihl)))
+ goto out;
+
+ skb->h.raw = __skb_pull(skb, ihl);
+ iph = skb->nh.iph;
+ proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ err = -EPROTONOSUPPORT;
+
+ rcu_read_lock();
+ ops = rcu_dereference(inet_protos[proto]);
+ if (likely(ops && ops->gso_send_check))
+ err = ops->gso_send_check(skb);
+ rcu_read_unlock();
+
+out:
+ return err;
+}
+
static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
@@ -1162,6 +1196,7 @@ static struct net_protocol igmp_protocol = {
static struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
+ .gso_send_check = tcp_v4_gso_send_check,
.gso_segment = tcp_tso_segment,
.no_policy = 1,
};
@@ -1208,6 +1243,7 @@ static int ipv4_proc_init(void);
static struct packet_type ip_packet_type = {
.type = __constant_htons(ETH_P_IP),
.func = ip_rcv,
+ .gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment,
};
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6c642d11d4c..773b12ba4e3 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -457,13 +457,13 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
hlist_for_each_entry(r, node, &fib_rules, hlist) {
-
if (idx < s_idx)
- continue;
+ goto next;
if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWRULE, NLM_F_MULTI) < 0)
break;
+next:
idx++;
}
rcu_read_unlock();
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ca0e714613f..7c9f9a6421b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
return dst_output(skb);
}
#endif
- if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
+ if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
return ip_fragment(skb, ip_finish_output2);
else
return ip_finish_output2(skb);
@@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
while (size > 0) {
int i;
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
len = size;
else {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a886e6efbb..a891133f00e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -496,6 +496,24 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
}
}
+int tcp_v4_gso_send_check(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ struct tcphdr *th;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ return -EINVAL;
+
+ iph = skb->nh.iph;
+ th = skb->h.th;
+
+ th->check = 0;
+ th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
+ skb->csum = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_HW;
+ return 0;
+}
+
/*
* This routine will send an RST to the other tcp.
*
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 193363e2293..d16f863cf68 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb)
}
#endif
- if (!skb_shinfo(skb)->gso_size)
+ if (!skb_is_gso(skb))
return xfrm4_output_finish2(skb);
skb->protocol = htons(ETH_P_IP);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2c5b44575af..3bc74ce7880 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb)
int ip6_output(struct sk_buff *skb)
{
- if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
+ if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
dst_allfrag(skb->dst))
return ip6_fragment(skb, ip6_output2);
else
@@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
skb->priority = sk->sk_priority;
mtu = dst_mtu(dst);
- if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) {
+ if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
dst_output);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0c17dec11c8..43327264e69 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -57,29 +57,11 @@
DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
+ int proto)
{
- struct sk_buff *segs = ERR_PTR(-EINVAL);
- struct ipv6hdr *ipv6h;
- struct inet6_protocol *ops;
- int proto;
+ struct inet6_protocol *ops = NULL;
- if (unlikely(skb_shinfo(skb)->gso_type &
- ~(SKB_GSO_UDP |
- SKB_GSO_DODGY |
- SKB_GSO_TCP_ECN |
- SKB_GSO_TCPV6 |
- 0)))
- goto out;
-
- if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
- goto out;
-
- ipv6h = skb->nh.ipv6h;
- proto = ipv6h->nexthdr;
- __skb_pull(skb, sizeof(*ipv6h));
-
- rcu_read_lock();
for (;;) {
struct ipv6_opt_hdr *opth;
int len;
@@ -88,30 +70,80 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
ops = rcu_dereference(inet6_protos[proto]);
if (unlikely(!ops))
- goto unlock;
+ break;
if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
break;
}
if (unlikely(!pskb_may_pull(skb, 8)))
- goto unlock;
+ break;
opth = (void *)skb->data;
len = opth->hdrlen * 8 + 8;
if (unlikely(!pskb_may_pull(skb, len)))
- goto unlock;
+ break;
proto = opth->nexthdr;
__skb_pull(skb, len);
}
- skb->h.raw = skb->data;
- if (likely(ops->gso_segment))
- segs = ops->gso_segment(skb, features);
+ return ops;
+}
+
+static int ipv6_gso_send_check(struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct inet6_protocol *ops;
+ int err = -EINVAL;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ goto out;
-unlock:
+ ipv6h = skb->nh.ipv6h;
+ __skb_pull(skb, sizeof(*ipv6h));
+ err = -EPROTONOSUPPORT;
+
+ rcu_read_lock();
+ ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ if (likely(ops && ops->gso_send_check)) {
+ skb->h.raw = skb->data;
+ err = ops->gso_send_check(skb);
+ }
+ rcu_read_unlock();
+
+out:
+ return err;
+}
+
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct ipv6hdr *ipv6h;
+ struct inet6_protocol *ops;
+
+ if (unlikely(skb_shinfo(skb)->gso_type &
+ ~(SKB_GSO_UDP |
+ SKB_GSO_DODGY |
+ SKB_GSO_TCP_ECN |
+ SKB_GSO_TCPV6 |
+ 0)))
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ goto out;
+
+ ipv6h = skb->nh.ipv6h;
+ __skb_pull(skb, sizeof(*ipv6h));
+ segs = ERR_PTR(-EPROTONOSUPPORT);
+
+ rcu_read_lock();
+ ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ if (likely(ops && ops->gso_segment)) {
+ skb->h.raw = skb->data;
+ segs = ops->gso_segment(skb, features);
+ }
rcu_read_unlock();
if (unlikely(IS_ERR(segs)))
@@ -130,6 +162,7 @@ out:
static struct packet_type ipv6_packet_type = {
.type = __constant_htons(ETH_P_IPV6),
.func = ipv6_rcv,
+ .gso_send_check = ipv6_gso_send_check,
.gso_segment = ipv6_gso_segment,
};
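
For the extension-header walk factored out into ipv6_gso_pull_exthdrs(): the generic header layout it parses is roughly the one below. hdrlen counts 8-octet units beyond the mandatory first 8 octets, which is where "opth->hdrlen * 8 + 8" comes from; hdrlen == 2, for example, means a 24-byte extension header.

/* Roughly as declared in include/linux/ipv6.h. */
struct ipv6_opt_hdr {
	__u8	nexthdr;	/* protocol number of the following header */
	__u8	hdrlen;		/* length in 8-octet units, excluding the first 8 octets */
	/* TLV-encoded options follow */
};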
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5bdcb9002cf..923989d0520 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -552,6 +552,24 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
}
}
+static int tcp_v6_gso_send_check(struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ return -EINVAL;
+
+ ipv6h = skb->nh.ipv6h;
+ th = skb->h.th;
+
+ th->check = 0;
+ th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
+ IPPROTO_TCP, 0);
+ skb->csum = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_HW;
+ return 0;
+}
static void tcp_v6_send_reset(struct sk_buff *skb)
{
@@ -1603,6 +1621,7 @@ struct proto tcpv6_prot = {
static struct inet6_protocol tcpv6_protocol = {
.handler = tcp_v6_rcv,
.err_handler = tcp_v6_err,
+ .gso_send_check = tcp_v6_gso_send_check,
.gso_segment = tcp_tso_segment,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 48fccb1eca0..0eea60ea9eb 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -122,7 +122,7 @@ static int xfrm6_output_finish(struct sk_buff *skb)
{
struct sk_buff *segs;
- if (!skb_shinfo(skb)->gso_size)
+ if (!skb_is_gso(skb))
return xfrm6_output_finish2(skb);
skb->protocol = htons(ETH_P_IP);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 389a4119e1b..ecc796878f3 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1382,14 +1382,12 @@ static int __init nr_proto_init(void)
return -1;
}
- dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
+ dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_nr == NULL) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
return -1;
}
- memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *));
-
for (i = 0; i < nr_ndevs; i++) {
char name[IFNAMSIZ];
struct net_device *dev;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d0a67bb3136..c115295ab43 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1490,14 +1490,13 @@ static int __init rose_proto_init(void)
rose_callsign = null_ax25_address;
- dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
+ dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_rose == NULL) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
rc = -ENOMEM;
goto out_proto_unregister;
}
- memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev;
char name[IFNAMSIZ];
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 599423cc9d0..0972247a839 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -602,8 +602,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
return err;
rtattr_failure:
- module_put(a->ops->owner);
nlmsg_failure:
+ module_put(a->ops->owner);
err_out:
kfree_skb(skb);
kfree(a);
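
The act_api.c change only moves module_put() from under the rtattr_failure label to under nlmsg_failure. Because labels fall through, the rtattr_failure path still reaches it, and the nlmsg_failure path (which previously leaked the module reference) now does too, while err_out, reached before the reference was taken, keeps skipping it. The resulting error tail, roughly:

rtattr_failure:				/* attribute did not fit */
nlmsg_failure:				/* netlink header did not fit */
	module_put(a->ops->owner);	/* dropped on both paths now */
err_out:
	kfree_skb(skb);
	kfree(a);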