author     Paul Mackerras <paulus@samba.org>  2007-09-14 01:24:25 +1000
committer  Paul Mackerras <paulus@samba.org>  2007-09-14 01:24:25 +1000
commit     b2315372eac9cd9f622c32a93e323cf6f0f03462 (patch)
tree       9e1faa7cdcddf5d90bec4fb9523742d4cce699a1 /net
parent     5326152fa182b0a16e4abf913ce403e3c7ab53b7 (diff)
parent     c87ce65868bbf9bbea9c3f112ff8315302daf8f2 (diff)
Merge branch 'linux-2.6' into for-2.6.24
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_core.c                          8
-rw-r--r--  net/bluetooth/hci_sock.c                         28
-rw-r--r--  net/bridge/br_fdb.c                               5
-rw-r--r--  net/bridge/br_if.c                               16
-rw-r--r--  net/bridge/br_input.c                             3
-rw-r--r--  net/bridge/netfilter/ebtables.c                   1
-rw-r--r--  net/core/datagram.c                               3
-rw-r--r--  net/core/pktgen.c                                 8
-rw-r--r--  net/decnet/dn_dev.c                               2
-rw-r--r--  net/ipv4/devinet.c                                2
-rw-r--r--  net/ipv4/inet_diag.c                              4
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c                         1
-rw-r--r--  net/ipv4/netfilter/arp_tables.c                   1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c                    1
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c   11
-rw-r--r--  net/ipv4/tcp_input.c                             14
-rw-r--r--  net/ipv6/ip6_output.c                             5
-rw-r--r--  net/ipv6/ndisc.c                                  2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c                   1
-rw-r--r--  net/netfilter/nf_sockopt.c                       36
-rw-r--r--  net/netfilter/xt_tcpudp.c                         2
-rw-r--r--  net/sched/sch_prio.c                              2
-rw-r--r--  net/sctp/associola.c                              7
-rw-r--r--  net/sctp/outqueue.c                               7
-rw-r--r--  net/sctp/sm_make_chunk.c                        112
-rw-r--r--  net/sctp/sm_sideeffect.c                          8
-rw-r--r--  net/sctp/sm_statefuns.c                          51
-rw-r--r--  net/sctp/socket.c                                 3
-rw-r--r--  net/sctp/ulpqueue.c                              75
29 files changed, 276 insertions, 143 deletions
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 63caa414945..18e3afc964d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -183,6 +183,7 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
struct sk_buff *skb;
__le16 param;
+ __u8 flt_type;
BT_DBG("%s %ld", hdev->name, opt);
@@ -233,11 +234,8 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
/* Optional initialization */
/* Clear Event Filters */
- {
- struct hci_cp_set_event_flt cp;
- cp.flt_type = HCI_FLT_CLEAR_ALL;
- hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
- }
+ flt_type = HCI_FLT_CLEAR_ALL;
+ hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type);
/* Page timeout ~20 secs */
param = cpu_to_le16(0x8000);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 1dae3dfc66a..5ccea5fbd23 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -37,6 +37,7 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>
@@ -70,15 +71,15 @@ static struct hci_sec_filter hci_sec_filter = {
{
{ 0x0 },
/* OGF_LINK_CTL */
- { 0xbe000006, 0x00000001, 0x000000, 0x00 },
+ { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
/* OGF_LINK_POLICY */
- { 0x00005200, 0x00000000, 0x000000, 0x00 },
+ { 0x00005200, 0x00000000, 0x00000000, 0x00 },
/* OGF_HOST_CTL */
- { 0xaab00200, 0x2b402aaa, 0x020154, 0x00 },
+ { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
/* OGF_INFO_PARAM */
- { 0x000002be, 0x00000000, 0x000000, 0x00 },
+ { 0x000002be, 0x00000000, 0x00000000, 0x00 },
/* OGF_STATUS_PARAM */
- { 0x000000ea, 0x00000000, 0x000000, 0x00 }
+ { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
}
};
@@ -342,9 +343,24 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
if (mask & HCI_CMSG_TSTAMP) {
struct timeval tv;
+ void *data;
+ int len;
skb_get_timestamp(skb, &tv);
- put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(tv), &tv);
+
+ data = &tv;
+ len = sizeof(tv);
+#ifdef CONFIG_COMPAT
+ if (msg->msg_flags & MSG_CMSG_COMPAT) {
+ struct compat_timeval ctv;
+ ctv.tv_sec = tv.tv_sec;
+ ctv.tv_usec = tv.tv_usec;
+ data = &ctv;
+ len = sizeof(ctv);
+ }
+#endif
+
+ put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
}
}
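
(Aside: a minimal sketch of how userspace might consume the HCI_CMSG_TSTAMP ancillary data this hunk produces. The CMSG_* macros are standard POSIX; the SOL_HCI and HCI_CMSG_TSTAMP values below are assumptions to be checked against <bluetooth/hci.h>.)

/* Hypothetical reader for the timestamp cmsg emitted above; the two
 * constants are assumed, verify them against <bluetooth/hci.h>. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>

#define SOL_HCI         0
#define HCI_CMSG_TSTAMP 0x02

static void print_rx_tstamp(struct msghdr *msg)
{
	struct cmsghdr *cmsg;
	struct timeval tv;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level != SOL_HCI ||
		    cmsg->cmsg_type != HCI_CMSG_TSTAMP)
			continue;
		/* copy out: CMSG_DATA() is not guaranteed to be aligned */
		memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
		printf("rx at %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	}
}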
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 69b70977f00..eb57502bb26 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -384,6 +384,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
if (hold_time(br) == 0)
return;
+ /* ignore packets unless we are using this port */
+ if (!(source->state == BR_STATE_LEARNING ||
+ source->state == BR_STATE_FORWARDING))
+ return;
+
fdb = fdb_find(head, addr);
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 749f0e8f541..9272f12f664 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -33,17 +33,17 @@
*/
static int port_cost(struct net_device *dev)
{
- if (dev->ethtool_ops->get_settings) {
- struct ethtool_cmd ecmd = { ETHTOOL_GSET };
- int err = dev->ethtool_ops->get_settings(dev, &ecmd);
- if (!err) {
+ if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
+
+ if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
switch(ecmd.speed) {
- case SPEED_100:
- return 19;
- case SPEED_1000:
- return 4;
case SPEED_10000:
return 2;
+ case SPEED_1000:
+ return 4;
+ case SPEED_100:
+ return 19;
case SPEED_10:
return 100;
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5c18595b761..6f468fc3357 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -101,9 +101,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
- if (p && p->state != BR_STATE_DISABLED)
+ if (p)
br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
-
return 0; /* process further */
}
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 4169a2a89a3..6018d0e5193 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1513,6 +1513,7 @@ static struct nf_sockopt_ops ebt_sockopts =
.get_optmin = EBT_BASE_CTL,
.get_optmax = EBT_SO_GET_MAX + 1,
.get = do_ebt_get_ctl,
+ .owner = THIS_MODULE,
};
static int __init ebtables_init(void)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cb056f47612..029b93e246b 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -450,6 +450,9 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
__wsum csum;
int chunk = skb->len - hlen;
+ if (!chunk)
+ return 0;
+
/* Skip filled elements.
* Pretty silly, look at memcpy_toiovec, though 8)
*/
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7bae576ac11..36fdea71d74 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -380,7 +380,6 @@ struct pktgen_thread {
/* Field for thread to receive "posted" events terminate, stop ifs etc. */
u32 control;
- int pid;
int cpu;
wait_queue_head_t queue;
@@ -3331,8 +3330,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
}
if ((netif_queue_stopped(odev) ||
- netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
- need_resched()) {
+ (pkt_dev->skb &&
+ netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+ need_resched()) {
idle_start = getCurUs();
if (!netif_running(odev)) {
@@ -3462,8 +3462,6 @@ static int pktgen_thread_worker(void *arg)
init_waitqueue_head(&t->queue);
- t->pid = current->pid;
-
pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid);
max_before_softirq = t->max_before_softirq;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index fa6604fcf0e..8def68209ed 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -814,7 +814,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
ifa = ifa->ifa_next, dn_idx++) {
if (dn_idx < skip_naddr)
- goto cont;
+ continue;
if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWADDR,
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5b77bdaa57d..5dbe5803b7d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1193,7 +1193,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
ifa = ifa->ifa_next, ip_idx++) {
if (ip_idx < s_ip_idx)
- goto cont;
+ continue;
if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWADDR, NLM_F_MULTI) <= 0)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index dbeacd8b0f9..def007ec1d6 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -836,12 +836,16 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return inet_diag_get_exact(skb, nlh);
}
+static DEFINE_MUTEX(inet_diag_mutex);
+
static void inet_diag_rcv(struct sock *sk, int len)
{
unsigned int qlen = 0;
do {
+ mutex_lock(&inet_diag_mutex);
netlink_run_queue(sk, &qlen, &inet_diag_rcv_msg);
+ mutex_unlock(&inet_diag_mutex);
} while (qlen);
}
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 902fd578aa3..f656d41d8d4 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -2339,6 +2339,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
.get_optmin = IP_VS_BASE_CTL,
.get_optmax = IP_VS_SO_GET_MAX+1,
.get = do_ip_vs_get_ctl,
+ .owner = THIS_MODULE,
};
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index d1149aba935..29114a9ccd1 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1161,6 +1161,7 @@ static struct nf_sockopt_ops arpt_sockopts = {
.get_optmin = ARPT_BASE_CTL,
.get_optmax = ARPT_SO_GET_MAX+1,
.get = do_arpt_get_ctl,
+ .owner = THIS_MODULE,
};
static int __init arp_tables_init(void)
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e1b402c6b85..6486894f450 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -2296,6 +2296,7 @@ static struct nf_sockopt_ops ipt_sockopts = {
#ifdef CONFIG_COMPAT
.compat_get = compat_do_ipt_get_ctl,
#endif
+ .owner = THIS_MODULE,
};
static struct xt_match icmp_matchstruct __read_mostly = {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d9b5177989c..f813e02aab3 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -87,14 +87,10 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
if (iph == NULL)
return -NF_DROP;
- /* Never happen */
- if (iph->frag_off & htons(IP_OFFSET)) {
- if (net_ratelimit()) {
- printk(KERN_ERR "ipv4_get_l4proto: Frag of proto %u\n",
- iph->protocol);
- }
+ /* Conntrack defragments packets; we might still see fragments
+ * inside ICMP packets though. */
+ if (iph->frag_off & htons(IP_OFFSET))
return -NF_DROP;
- }
*dataoff = nhoff + (iph->ihl << 2);
*protonum = iph->protocol;
@@ -403,6 +399,7 @@ static struct nf_sockopt_ops so_getorigdst = {
.get_optmin = SO_ORIGINAL_DST,
.get_optmax = SO_ORIGINAL_DST+1,
.get = &getorigdst,
+ .owner = THIS_MODULE,
};
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9785df37a65..bbad2cdb74b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -555,6 +555,16 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
tcp_grow_window(sk, skb);
}
+static u32 tcp_rto_min(struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_get(sk);
+ u32 rto_min = TCP_RTO_MIN;
+
+ if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
+ rto_min = dst->metrics[RTAX_RTO_MIN-1];
+ return rto_min;
+}
+
/* Called to compute a smoothed rtt estimate. The data fed to this
* routine either comes from timestamps, or from segments that were
* known _not_ to have been retransmitted [see Karn/Partridge
@@ -616,13 +626,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
if (tp->mdev_max < tp->rttvar)
tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
tp->rtt_seq = tp->snd_nxt;
- tp->mdev_max = TCP_RTO_MIN;
+ tp->mdev_max = tcp_rto_min(sk);
}
} else {
/* no previous measure. */
tp->srtt = m<<3; /* take the measured time to be rtt */
tp->mdev = m<<1; /* make sure rto = 3*rtt */
- tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+ tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
tp->rtt_seq = tp->snd_nxt;
}
}
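
(Aside: the "RTAX_RTO_MIN-1" indexing above exists because the RTAX_* enum is 1-based — RTAX_UNSPEC == 0 has no slot — while dst->metrics[] is 0-based. A sketch of the equivalence; the kernel's own dst_metric() helper in <net/dst.h> encodes the same rule.)

/* Illustrative only: metric N of a route lives at metrics[N - 1]. */
static inline u32 metric_of(const struct dst_entry *dst, int rtax)
{
	return dst->metrics[rtax - 1];
}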
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5dead399fe6..26de3c0ea31 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1427,8 +1427,9 @@ void ip6_flush_pending_frames(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
- IP6_INC_STATS(ip6_dst_idev(skb->dst),
- IPSTATS_MIB_OUTDISCARDS);
+ if (skb->dst)
+ IP6_INC_STATS(ip6_dst_idev(skb->dst),
+ IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0358e6066a4..73a894a2152 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -736,7 +736,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
* so fail our DAD process
*/
addrconf_dad_failure(ifp);
- goto out;
+ return;
} else {
/*
* This is not a dad solicitation.
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index aeda617246b..cd9df02bb85 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1462,6 +1462,7 @@ static struct nf_sockopt_ops ip6t_sockopts = {
.get_optmin = IP6T_BASE_CTL,
.get_optmax = IP6T_SO_GET_MAX+1,
.get = do_ip6t_get_ctl,
+ .owner = THIS_MODULE,
};
static struct xt_match icmp6_matchstruct __read_mostly = {
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 8b8ece75031..e32761ce260 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -55,18 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
- /* No point being interruptible: we're probably in cleanup_module() */
- restart:
mutex_lock(&nf_sockopt_mutex);
- if (reg->use != 0) {
- /* To be woken by nf_sockopt call... */
- /* FIXME: Stuart Young's name appears gratuitously. */
- set_current_state(TASK_UNINTERRUPTIBLE);
- reg->cleanup_task = current;
- mutex_unlock(&nf_sockopt_mutex);
- schedule();
- goto restart;
- }
list_del(&reg->list);
mutex_unlock(&nf_sockopt_mutex);
}
@@ -86,10 +75,11 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
list_for_each(i, &nf_sockopts) {
ops = (struct nf_sockopt_ops *)i;
if (ops->pf == pf) {
+ if (!try_module_get(ops->owner))
+ goto out_nosup;
if (get) {
if (val >= ops->get_optmin
&& val < ops->get_optmax) {
- ops->use++;
mutex_unlock(&nf_sockopt_mutex);
ret = ops->get(sk, val, opt, len);
goto out;
@@ -97,23 +87,20 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
} else {
if (val >= ops->set_optmin
&& val < ops->set_optmax) {
- ops->use++;
mutex_unlock(&nf_sockopt_mutex);
ret = ops->set(sk, val, opt, *len);
goto out;
}
}
+ module_put(ops->owner);
}
}
+ out_nosup:
mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT;
out:
- mutex_lock(&nf_sockopt_mutex);
- ops->use--;
- if (ops->cleanup_task)
- wake_up_process(ops->cleanup_task);
- mutex_unlock(&nf_sockopt_mutex);
+ module_put(ops->owner);
return ret;
}
@@ -144,10 +131,12 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
list_for_each(i, &nf_sockopts) {
ops = (struct nf_sockopt_ops *)i;
if (ops->pf == pf) {
+ if (!try_module_get(ops->owner))
+ goto out_nosup;
+
if (get) {
if (val >= ops->get_optmin
&& val < ops->get_optmax) {
- ops->use++;
mutex_unlock(&nf_sockopt_mutex);
if (ops->compat_get)
ret = ops->compat_get(sk,
@@ -160,7 +149,6 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
} else {
if (val >= ops->set_optmin
&& val < ops->set_optmax) {
- ops->use++;
mutex_unlock(&nf_sockopt_mutex);
if (ops->compat_set)
ret = ops->compat_set(sk,
@@ -171,17 +159,15 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
goto out;
}
}
+ module_put(ops->owner);
}
}
+ out_nosup:
mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT;
out:
- mutex_lock(&nf_sockopt_mutex);
- ops->use--;
- if (ops->cleanup_task)
- wake_up_process(ops->cleanup_task);
- mutex_unlock(&nf_sockopt_mutex);
+ module_put(ops->owner);
return ret;
}
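
(Aside: the hunks above retire the hand-rolled ops->use counter and cleanup_task wakeup in favor of the standard module refcount. A minimal sketch of that ownership pattern, with a hypothetical helper name:)

/* Pin the module providing an ops table before dropping the lock that
 * protects the registration list; while the reference is held the
 * module cannot be unloaded, so calling through ops stays safe. */
static int call_get_handler(struct nf_sockopt_ops *ops, struct sock *sk,
			    int val, void __user *opt, int *len)
{
	int ret;

	if (!try_module_get(ops->owner))	/* owner is mid-unload */
		return -ENOPROTOOPT;

	mutex_unlock(&nf_sockopt_mutex);
	ret = ops->get(sk, val, opt, len);
	module_put(ops->owner);
	return ret;
}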
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index ab7d845224f..223f9bded67 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -188,7 +188,7 @@ udp_checkentry(const char *tablename,
void *matchinfo,
unsigned int hook_mask)
{
- const struct xt_tcp *udpinfo = matchinfo;
+ const struct xt_udp *udpinfo = matchinfo;
/* Must specify no unknown invflags */
return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 4a49db65772..abd82fc3ec6 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -44,7 +44,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (TC_H_MAJ(skb->priority) != sch->handle) {
err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
- switch (tc_classify(skb, q->filter_list, &res)) {
+ switch (err) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
*qerr = NET_XMIT_SUCCESS;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 498edb0cd4e..2ad1caf1ea4 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -727,7 +727,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
break;
case SCTP_TRANSPORT_DOWN:
- transport->state = SCTP_INACTIVE;
+ /* if the transport was never confirmed, do not transition it
+ * to inactive state.
+ */
+ if (transport->state != SCTP_UNCONFIRMED)
+ transport->state = SCTP_INACTIVE;
+
spc_state = SCTP_ADDR_UNREACHABLE;
break;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 992f361084b..28f4fe77cee 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
*/
if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
(!fast_retransmit && !chunk->tsn_gap_acked)) {
+ /* If this chunk was sent less than 1 rto ago, do not
+ * retransmit this chunk, but give the peer time
+ * to acknowledge it.
+ */
+ if ((jiffies - chunk->sent_at) < transport->rto)
+ continue;
+
/* RFC 2960 6.2.1 Processing a Received SACK
*
* C) Any time a DATA chunk is marked for
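
(Aside: the "(jiffies - chunk->sent_at) < transport->rto" age test above relies on unsigned wrap-around arithmetic. The generic kernel idiom for the same comparison is time_before() from <linux/jiffies.h>; a sketch, not what the patch literally uses:)

#include <linux/jiffies.h>

/* True if the chunk went out less than one RTO ago; equivalent to the
 * open-coded subtraction in the hunk above for reasonable RTO values. */
static inline int sent_within_rto(unsigned long sent_at, unsigned long rto)
{
	return time_before(jiffies, sent_at + rto);
}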
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 51c4d7fef1d..79856c92452 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
* abort chunk.
*/
void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
- const void *payload, size_t paylen)
+ size_t paylen)
{
sctp_errhdr_t err;
__u16 len;
@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
len = sizeof(sctp_errhdr_t) + paylen;
err.length = htons(len);
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
- sctp_addto_chunk(chunk, paylen, payload);
}
/* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
/* Put the tsn back into network byte order. */
payload = htonl(tsn);
- sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
- sizeof(payload));
+ sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+ sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
goto err_copy;
}
- sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+ sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+ sctp_addto_chunk(retval, paylen, payload);
if (paylen)
kfree(payload);
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
struct sctp_paramhdr phdr;
retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
- + sizeof(sctp_chunkhdr_t));
+ + sizeof(sctp_paramhdr_t));
if (!retval)
goto end;
- sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+ sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+ + sizeof(sctp_paramhdr_t));
phdr.type = htons(chunk->chunk_hdr->type);
phdr.length = chunk->chunk_hdr->length;
- sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+ sctp_addto_chunk(retval, paylen, payload);
+ sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
end:
return retval;
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
if (!retval)
goto nodata;
- sctp_init_cause(retval, cause_code, payload, paylen);
+ sctp_init_cause(retval, cause_code, paylen);
+ sctp_addto_chunk(retval, paylen, payload);
nodata:
return retval;
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
void *target;
void *padding;
int chunklen = ntohs(chunk->chunk_hdr->length);
- int padlen = chunklen % 4;
+ int padlen = WORD_ROUND(chunklen) - chunklen;
padding = skb_put(chunk->skb, padlen);
target = skb_put(chunk->skb, len);
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
return target;
}
+/* Append bytes to the end of a parameter. Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+ void *target;
+ int chunklen = ntohs(chunk->chunk_hdr->length);
+
+ target = skb_put(chunk->skb, len);
+
+ memcpy(target, data, len);
+
+ /* Adjust the chunk length field. */
+ chunk->chunk_hdr->length = htons(chunklen + len);
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+ return target;
+}
+
/* Append bytes from user space to the end of a chunk. Will panic if
* chunk is not big enough.
* Returns a kernel err value.
@@ -1174,25 +1196,36 @@ out:
*/
void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
{
+ struct sctp_datamsg *msg;
+ struct sctp_chunk *lchunk;
+ struct sctp_stream *stream;
__u16 ssn;
__u16 sid;
if (chunk->has_ssn)
return;
- /* This is the last possible instant to assign a SSN. */
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
- ssn = 0;
- } else {
- sid = ntohs(chunk->subh.data_hdr->stream);
- if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
- ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
- else
- ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
- }
+ /* All fragments will be on the same stream */
+ sid = ntohs(chunk->subh.data_hdr->stream);
+ stream = &chunk->asoc->ssnmap->out;
- chunk->subh.data_hdr->ssn = htons(ssn);
- chunk->has_ssn = 1;
+ /* Now assign the sequence number to the entire message.
+ * All fragments must have the same stream sequence number.
+ */
+ msg = chunk->msg;
+ list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+ ssn = 0;
+ } else {
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+ ssn = sctp_ssn_next(stream, sid);
+ else
+ ssn = sctp_ssn_peek(stream, sid);
+ }
+
+ lchunk->subh.data_hdr->ssn = htons(ssn);
+ lchunk->has_ssn = 1;
+ }
}
/* Helper function to assign a TSN if needed. This assumes that both
@@ -1466,7 +1499,8 @@ no_hmac:
__be32 n = htonl(usecs);
sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
- &n, sizeof(n));
+ sizeof(n));
+ sctp_addto_chunk(*errp, sizeof(n), &n);
*error = -SCTP_IERROR_STALE_COOKIE;
} else
*error = -SCTP_IERROR_NOMEM;
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
report.num_missing = htonl(1);
report.type = paramtype;
sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
- &report, sizeof(report));
+ sizeof(report));
+ sctp_addto_chunk(*errp, sizeof(report), &report);
}
/* Stop processing this chunk. */
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
*errp = sctp_make_op_error_space(asoc, chunk, 0);
if (*errp)
- sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+ sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
/* Stop processing this chunk. */
return 0;
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
- sizeof(error));
- sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+ sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+ sizeof(error) + sizeof(sctp_paramhdr_t));
+ sctp_addto_chunk(*errp, sizeof(error), error);
+ sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
}
return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
if (!*errp)
*errp = sctp_make_op_error_space(asoc, chunk, len);
- if (*errp)
- sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
- param.v, len);
+ if (*errp) {
+ sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+ sctp_addto_chunk(*errp, len, param.v);
+ }
/* Stop processing this chunk. */
return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
*errp = sctp_make_op_error_space(asoc, chunk,
ntohs(chunk->chunk_hdr->length));
- if (*errp)
+ if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- param.v,
WORD_ROUND(ntohs(param.p->length)));
+ sctp_addto_chunk(*errp,
+ WORD_ROUND(ntohs(param.p->length)),
+ param.v);
+ }
break;
case SCTP_PARAM_ACTION_SKIP:
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- param.v,
WORD_ROUND(ntohs(param.p->length)));
+ sctp_addto_chunk(*errp,
+ WORD_ROUND(ntohs(param.p->length)),
+ param.v);
} else {
/* If there is no memory for generating the ERROR
* report as specified, an ABORT will be triggered
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
* VIOLATION error. We build the ERROR chunk here and let the normal
* error handling code build and send the packet.
*/
- if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+ if (param.v != (void*)chunk->chunk_end) {
sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
return 0;
}
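
(Aside: the padlen change above is a genuine fix, not a cleanup: "chunklen % 4" pads 1 byte where 3 are needed and 3 where 1 is needed. WORD_ROUND() rounds up to the next 4-byte boundary, so the difference is exactly the missing pad. A sketch of the arithmetic, assuming the usual definition of WORD_ROUND:)

/* Assumed to match the kernel's WORD_ROUND(): round up to a multiple
 * of four, per SCTP's 32-bit chunk alignment rule. */
#define WORD_ROUND(len) (((len) + 3) & ~3)

/* Padding needed after a chunk of 'chunklen' bytes:
 *   chunklen % 4 == 1 -> old code padded 1, correct pad is 3
 *   chunklen % 4 == 3 -> old code padded 3, correct pad is 1
 *   chunklen % 4 == 0 or 2 -> both computations agree */
static inline int pad_for(int chunklen)
{
	return WORD_ROUND(chunklen) - chunklen;
}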
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f6ffc..8d789008349 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_DISPOSITION_VIOLATION:
- printk(KERN_ERR "sctp protocol violation state %d "
- "chunkid %d\n", state, subtype.chunk);
+ if (net_ratelimit())
+ printk(KERN_ERR "sctp protocol violation state %d "
+ "chunkid %d\n", state, subtype.chunk);
break;
case SCTP_DISPOSITION_NOT_IMPL:
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
/* Move the Cumulative TSN Ack ahead. */
sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
+ /* purge the fragmentation queue */
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
/* Abort any in progress partial delivery. */
sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 71cad56dd73..177528ed3e1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -264,7 +264,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
- struct sock *sk;
int len;
/* 6.10 Bundling
@@ -285,16 +284,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
- sk = ep->base.sk;
- /* If the endpoint is not listening or if the number of associations
- * on the TCP-style socket exceed the max backlog, respond with an
- * ABORT.
- */
- if (!sctp_sstate(sk, LISTENING) ||
- (sctp_style(sk, TCP) &&
- sk_acceptq_is_full(sk)))
- return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
@@ -590,6 +579,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
struct sctp_ulpevent *ev, *ai_ev = NULL;
int error = 0;
struct sctp_chunk *err_chk_p;
+ struct sock *sk;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
@@ -605,6 +595,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ /* If the endpoint is not listening or if the number of associations
+ * on the TCP-style socket exceed the max backlog, respond with an
+ * ABORT.
+ */
+ sk = ep->base.sk;
+ if (!sctp_sstate(sk, LISTENING) ||
+ (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+ return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
@@ -1032,19 +1031,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
/* This should never happen, but lets log it if so. */
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
- printk(KERN_WARNING
- "%s association %p could not find address "
- NIP6_FMT "\n",
- __FUNCTION__,
- asoc,
- NIP6(from_addr.v6.sin6_addr));
+ if (net_ratelimit())
+ printk(KERN_WARNING
+ "%s association %p could not find address "
+ NIP6_FMT "\n",
+ __FUNCTION__,
+ asoc,
+ NIP6(from_addr.v6.sin6_addr));
} else {
- printk(KERN_WARNING
- "%s association %p could not find address "
- NIPQUAD_FMT "\n",
- __FUNCTION__,
- asoc,
- NIPQUAD(from_addr.v4.sin_addr.s_addr));
+ if (net_ratelimit())
+ printk(KERN_WARNING
+ "%s association %p could not find address "
+ NIPQUAD_FMT "\n",
+ __FUNCTION__,
+ asoc,
+ NIPQUAD(from_addr.v4.sin_addr.s_addr));
}
return SCTP_DISPOSITION_DISCARD;
}
@@ -3362,7 +3363,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
- sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+ sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
@@ -3392,7 +3393,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
- sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+ sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 01c6364245b..33354602ae8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
* The function sctp_get_port_local() does duplicate address
* detection.
*/
+ addr->v4.sin_port = htons(snum);
if ((ret = sctp_get_port_local(sk, addr))) {
if (ret == (long) sk) {
/* This endpoint has a conflicting address. */
@@ -5202,6 +5203,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
+ return 0;
}
/* Return if we are already listening. */
@@ -5249,6 +5251,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
+ return 0;
}
if (sctp_sstate(sk, LISTENING))
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 34eb977a204..fa0ba2a5564 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ done:
return retval;
}
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue. The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *event;
+ __u32 tsn;
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return;
+
+ skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+ event = sctp_skb2event(pos);
+ tsn = event->tsn;
+
+ /* Since the entire message must be abandoned by the
+ * sender (item A3 in Section 3.5, RFC 3758), we can
+ * free all fragments on the list that are less than
+ * or equal to ctsn_point
+ */
+ if (TSN_lte(tsn, fwd_tsn)) {
+ __skb_unlink(pos, &ulpq->reasm);
+ sctp_ulpevent_free(event);
+ } else
+ break;
+ }
+}
+
/* Helper function to gather skbs that have possibly become
* ordered by an incoming chunk.
*/
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
/* Helper function to gather skbs that have possibly become
* ordered by forward tsn skipping their dependencies.
*/
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
csid = cevent->stream;
cssn = cevent->ssn;
- if (cssn != sctp_ssn_peek(in, csid))
+ /* Have we gone too far? */
+ if (csid > sid)
break;
- /* Found it, so mark in the ssnmap. */
- sctp_ssn_next(in, csid);
+ /* Have we not gone far enough? */
+ if (csid < sid)
+ continue;
+
+ /* see if this ssn has been marked by skipping */
+ if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+ break;
__skb_unlink(pos, &ulpq->lobby);
- if (!event) {
+ if (!event)
/* Create a temporary list to collect chunks on. */
event = sctp_skb2event(pos);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- } else {
- /* Attach all gathered skbs to the event. */
- __skb_queue_tail(&temp, pos);
- }
+
+ /* Attach all gathered skbs to the event. */
+ __skb_queue_tail(&temp, pos);
}
/* Send event to the ULP. 'event' is the sctp_ulpevent for
* very first SKB on the 'temp' list.
*/
- if (event)
+ if (event) {
+ /* see if we have more ordered chunks that we can deliver */
+ sctp_ulpq_retrieve_ordered(ulpq, event);
sctp_ulpq_tail_event(ulpq, event);
+ }
}
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * a Forward TSN chunk to skip over the abandoned ordered data
+ */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
/* Go find any other chunks that were waiting for
* ordering and deliver them if needed.
*/
- sctp_ulpq_reap_ordered(ulpq);
+ sctp_ulpq_reap_ordered(ulpq, sid);
return;
}