author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-13 09:04:48 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-13 09:04:48 -0800
commit     31083eba370fbc5d544ac2fe67ca549c0aa2bdf7 (patch)
tree       9531e720d8d3cd8d82b7a3e4d2a1c6c306e769b4 /net
parent     a7fe77161da48a74c60dc19fc4ca3a73ab761d37 (diff)
parent     53756524e42a71011f5ae6410d6ac386bf3a9e7b (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (45 commits)
  [NETFILTER]: xt_time should not assume CONFIG_KTIME_SCALAR
  [NET]: Move unneeded data to initdata section.
  [NET]: Cleanup pernet operation without CONFIG_NET_NS
  [TEHUTI]: Fix incorrect usage of strncat in bdx_get_drvinfo()
  [MYRI_SBUS]: Prevent that myri_do_handshake lies about ticks.
  [NETFILTER]: bridge: fix double POSTROUTING hook invocation
  [NETFILTER]: Consolidate nf_sockopt and compat_nf_sockopt
  [NETFILTER]: nf_nat: fix memset error
  [INET]: Use list_head-s in inetpeer.c
  [IPVS]: Remove unused exports.
  [NET]: Unexport sysctl_{r,w}mem_max.
  [TG3]: Update version to 3.86
  [TG3]: MII => TP
  [TG3]: Add A1 revs
  [TG3]: Increase the PCI MRRS
  [TG3]: Prescaler fix
  [TG3]: Limit 5784 / 5764 to MAC LED mode
  [TG3]: Disable GPHY autopowerdown
  [TG3]: CPMU adjustments for loopback tests
  [TG3]: Fix nvram selftest failures
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netfilter.c  3
-rw-r--r--  net/core/dev.c  6
-rw-r--r--  net/core/dev_mcast.c  2
-rw-r--r--  net/core/net_namespace.c  18
-rw-r--r--  net/core/sock.c  4
-rw-r--r--  net/ipv4/inetpeer.c  42
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c  1
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c  1
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c  2
-rw-r--r--  net/ipv6/ndisc.c  1
-rw-r--r--  net/netfilter/nf_sockopt.c  106
-rw-r--r--  net/netfilter/xt_time.c  2
-rw-r--r--  net/netlink/af_netlink.c  2
-rw-r--r--  net/packet/af_packet.c  16
-rw-r--r--  net/rxrpc/ar-local.c  4
-rw-r--r--  net/sctp/associola.c  10
-rw-r--r--  net/sctp/bind_addr.c  13
-rw-r--r--  net/sctp/endpointola.c  35
-rw-r--r--  net/sctp/input.c  43
-rw-r--r--  net/sctp/inqueue.c  4
-rw-r--r--  net/sctp/outqueue.c  41
-rw-r--r--  net/sctp/proc.c  6
-rw-r--r--  net/sctp/protocol.c  7
-rw-r--r--  net/sctp/sm_make_chunk.c  170
-rw-r--r--  net/sctp/sm_sideeffect.c  10
-rw-r--r--  net/sctp/sm_statefuns.c  12
-rw-r--r--  net/sctp/socket.c  16
-rw-r--r--  net/sctp/sysctl.c  9
-rw-r--r--  net/sctp/transport.c  5
-rw-r--r--  net/sctp/ulpqueue.c  2
-rw-r--r--  net/socket.c  6
31 files changed, 303 insertions(+), 296 deletions(-)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index da22f900e89..c1757c79dfb 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -766,6 +766,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
if (!nf_bridge)
return NF_ACCEPT;
+ if (!(nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)))
+ return NF_ACCEPT;
+
if (!realoutdev)
return NF_DROP;
diff --git a/net/core/dev.c b/net/core/dev.c
index dd7e30754cb..dd40b35bb00 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2688,7 +2688,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
proc_net_remove(net, "dev");
}
-static struct pernet_operations dev_proc_ops = {
+static struct pernet_operations __net_initdata dev_proc_ops = {
.init = dev_proc_net_init,
.exit = dev_proc_net_exit,
};
@@ -4353,7 +4353,7 @@ static void __net_exit netdev_exit(struct net *net)
kfree(net->dev_index_head);
}
-static struct pernet_operations netdev_net_ops = {
+static struct pernet_operations __net_initdata netdev_net_ops = {
.init = netdev_init,
.exit = netdev_exit,
};
@@ -4384,7 +4384,7 @@ static void __net_exit default_device_exit(struct net *net)
rtnl_unlock();
}
-static struct pernet_operations default_device_ops = {
+static struct pernet_operations __net_initdata default_device_ops = {
.exit = default_device_exit,
};
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 647973daca2..69fff16ece1 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -285,7 +285,7 @@ static void __net_exit dev_mc_net_exit(struct net *net)
proc_net_remove(net, "dev_mcast");
}
-static struct pernet_operations dev_mc_net_ops = {
+static struct pernet_operations __net_initdata dev_mc_net_ops = {
.init = dev_mc_net_init,
.exit = dev_mc_net_exit,
};
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 3f6d37deac4..383252b5041 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -188,6 +188,7 @@ static int __init net_ns_init(void)
pure_initcall(net_ns_init);
+#ifdef CONFIG_NET_NS
static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
@@ -228,6 +229,23 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
ops->exit(net);
}
+#else
+
+static int register_pernet_operations(struct list_head *list,
+ struct pernet_operations *ops)
+{
+ if (ops->init == NULL)
+ return 0;
+ return ops->init(&init_net);
+}
+
+static void unregister_pernet_operations(struct pernet_operations *ops)
+{
+ if (ops->exit)
+ ops->exit(&init_net);
+}
+#endif
+
/**
* register_pernet_subsys - register a network namespace subsystem
* @ops: pernet operations structure for the subsystem
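
The net_namespace.c change above gives register_pernet_operations() and unregister_pernet_operations() a trivial !CONFIG_NET_NS variant that simply runs the init/exit hooks once against init_net; that in turn is what lets the pernet_operations structures elsewhere in this merge be tagged __net_initdata. A minimal sketch of a subsystem registering such hooks, assuming the demo_* names (illustrative only, not part of the kernel):

#include <linux/init.h>
#include <net/net_namespace.h>

static int __net_init demo_net_init(struct net *net)
{
	/* allocate per-namespace state hanging off 'net' here */
	return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
	/* tear that per-namespace state down again */
}

/* With namespaces compiled out, __net_initdata lets this structure be
 * discarded along with the rest of the init data after boot. */
static struct pernet_operations __net_initdata demo_net_ops = {
	.init = demo_net_init,
	.exit = demo_net_exit,
};

static int __init demo_init(void)
{
	return register_pernet_subsys(&demo_net_ops);
}
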
diff --git a/net/core/sock.c b/net/core/sock.c
index 8fc2f84209e..c519b439b8b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2097,7 +2097,3 @@ EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);
-#ifdef CONFIG_SYSCTL
-EXPORT_SYMBOL(sysctl_rmem_max);
-EXPORT_SYMBOL(sysctl_wmem_max);
-#endif
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 771031dfbd0..af995198f64 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -61,7 +61,7 @@
* 4. Global variable peer_total is modified under the pool lock.
* 5. struct inet_peer fields modification:
* avl_left, avl_right, avl_parent, avl_height: pool lock
- * unused_next, unused_prevp: unused node list lock
+ * unused: unused node list lock
* refcnt: atomically against modifications on other CPU;
* usually under some other lock to prevent node disappearing
* dtime: unused node list lock
@@ -94,8 +94,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
-static struct inet_peer *inet_peer_unused_head;
-static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static LIST_HEAD(unused_peers);
static DEFINE_SPINLOCK(inet_peer_unused_lock);
static void peer_check_expire(unsigned long dummy);
@@ -138,15 +137,7 @@ void __init inet_initpeers(void)
static void unlink_from_unused(struct inet_peer *p)
{
spin_lock_bh(&inet_peer_unused_lock);
- if (p->unused_prevp != NULL) {
- /* On unused list. */
- *p->unused_prevp = p->unused_next;
- if (p->unused_next != NULL)
- p->unused_next->unused_prevp = p->unused_prevp;
- else
- inet_peer_unused_tailp = p->unused_prevp;
- p->unused_prevp = NULL; /* mark it as removed */
- }
+ list_del_init(&p->unused);
spin_unlock_bh(&inet_peer_unused_lock);
}
@@ -337,24 +328,24 @@ static void unlink_from_pool(struct inet_peer *p)
/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
- struct inet_peer *p;
+ struct inet_peer *p = NULL;
/* Remove the first entry from the list of unused nodes. */
spin_lock_bh(&inet_peer_unused_lock);
- p = inet_peer_unused_head;
- if (p != NULL) {
- __u32 delta = (__u32)jiffies - p->dtime;
+ if (!list_empty(&unused_peers)) {
+ __u32 delta;
+
+ p = list_first_entry(&unused_peers, struct inet_peer, unused);
+ delta = (__u32)jiffies - p->dtime;
+
if (delta < ttl) {
/* Do not prune fresh entries. */
spin_unlock_bh(&inet_peer_unused_lock);
return -1;
}
- inet_peer_unused_head = p->unused_next;
- if (p->unused_next != NULL)
- p->unused_next->unused_prevp = p->unused_prevp;
- else
- inet_peer_unused_tailp = p->unused_prevp;
- p->unused_prevp = NULL; /* mark as not on the list */
+
+ list_del_init(&p->unused);
+
/* Grab an extra reference to prevent node disappearing
* before unlink_from_pool() call. */
atomic_inc(&p->refcnt);
@@ -412,7 +403,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
/* Link the node. */
link_to_pool(n);
- n->unused_prevp = NULL; /* not on the list */
+ INIT_LIST_HEAD(&n->unused);
peer_total++;
write_unlock_bh(&peer_pool_lock);
@@ -467,10 +458,7 @@ void inet_putpeer(struct inet_peer *p)
{
spin_lock_bh(&inet_peer_unused_lock);
if (atomic_dec_and_test(&p->refcnt)) {
- p->unused_prevp = inet_peer_unused_tailp;
- p->unused_next = NULL;
- *inet_peer_unused_tailp = p;
- inet_peer_unused_tailp = &p->unused_next;
+ list_add_tail(&p->unused, &unused_peers);
p->dtime = (__u32)jiffies;
}
spin_unlock_bh(&inet_peer_unused_lock);
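
The inetpeer.c conversion above drops the hand-rolled singly linked unused list (a head pointer plus a tail pointer-to-pointer) in favour of the generic list_head API, so tail insertion, removal and the empty check each become a one-liner. A minimal, self-contained sketch of the same pattern, with hypothetical demo_* names:

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Stand-in for struct inet_peer and its 'unused' linkage. */
struct demo_peer {
	struct list_head unused;	/* linkage on the unused list */
	unsigned long dtime;		/* when the last reference went away */
};

static LIST_HEAD(demo_unused);			/* replaces head + tailp */
static DEFINE_SPINLOCK(demo_unused_lock);

static void demo_put(struct demo_peer *p)
{
	spin_lock_bh(&demo_unused_lock);
	list_add_tail(&p->unused, &demo_unused);	/* queue at the tail */
	p->dtime = jiffies;
	spin_unlock_bh(&demo_unused_lock);
}

static struct demo_peer *demo_pop_oldest(void)
{
	struct demo_peer *p = NULL;

	spin_lock_bh(&demo_unused_lock);
	if (!list_empty(&demo_unused)) {
		p = list_first_entry(&demo_unused, struct demo_peer, unused);
		list_del_init(&p->unused);	/* safe to delete again later */
	}
	spin_unlock_bh(&demo_unused_lock);
	return p;
}
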
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index b7eeae622d9..0a9f3c37e18 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -441,7 +441,6 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
} else
return NULL;
}
-EXPORT_SYMBOL(ip_vs_try_bind_dest);
/*
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 3c4d22a468e..b64cf45a9ea 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -604,7 +604,6 @@ struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
ip_vs_service_put(svc);
return dest;
}
-EXPORT_SYMBOL(ip_vs_find_dest);
/*
* Lookup dest by {svc,addr,port} in the destination trash.
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 56e93f692e8..70e7997ea28 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -681,7 +681,7 @@ static int clean_nat(struct nf_conn *i, void *data)
if (!nat)
return 0;
- memset(nat, 0, sizeof(nat));
+ memset(nat, 0, sizeof(*nat));
i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
return 0;
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 36f7dbfb6db..67997a74ddc 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1037,6 +1037,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
ndmsg = nlmsg_data(nlh);
ndmsg->nduseropt_family = AF_INET6;
+ ndmsg->nduseropt_ifindex = ra->dev->ifindex;
ndmsg->nduseropt_icmp_type = icmp6h->icmp6_type;
ndmsg->nduseropt_icmp_code = icmp6h->icmp6_code;
ndmsg->nduseropt_opts_len = opt->nd_opt_len << 3;
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 2dfac325356..87bc1443c52 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -60,46 +60,57 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
}
EXPORT_SYMBOL(nf_unregister_sockopt);
-/* Call get/setsockopt() */
-static int nf_sockopt(struct sock *sk, int pf, int val,
- char __user *opt, int *len, int get)
+static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, int pf,
+ int val, int get)
{
struct nf_sockopt_ops *ops;
- int ret;
if (sk->sk_net != &init_net)
- return -ENOPROTOOPT;
+ return ERR_PTR(-ENOPROTOOPT);
if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
- return -EINTR;
+ return ERR_PTR(-EINTR);
list_for_each_entry(ops, &nf_sockopts, list) {
if (ops->pf == pf) {
if (!try_module_get(ops->owner))
goto out_nosup;
+
if (get) {
- if (val >= ops->get_optmin
- && val < ops->get_optmax) {
- mutex_unlock(&nf_sockopt_mutex);
- ret = ops->get(sk, val, opt, len);
+ if (val >= ops->get_optmin &&
+ val < ops->get_optmax)
goto out;
- }
} else {
- if (val >= ops->set_optmin
- && val < ops->set_optmax) {
- mutex_unlock(&nf_sockopt_mutex);
- ret = ops->set(sk, val, opt, *len);
+ if (val >= ops->set_optmin &&
+ val < ops->set_optmax)
goto out;
- }
}
module_put(ops->owner);
}
}
- out_nosup:
+out_nosup:
+ ops = ERR_PTR(-ENOPROTOOPT);
+out:
mutex_unlock(&nf_sockopt_mutex);
- return -ENOPROTOOPT;
+ return ops;
+}
+
+/* Call get/setsockopt() */
+static int nf_sockopt(struct sock *sk, int pf, int val,
+ char __user *opt, int *len, int get)
+{
+ struct nf_sockopt_ops *ops;
+ int ret;
+
+ ops = nf_sockopt_find(sk, pf, val, get);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ if (get)
+ ret = ops->get(sk, val, opt, len);
+ else
+ ret = ops->set(sk, val, opt, *len);
- out:
module_put(ops->owner);
return ret;
}
@@ -124,51 +135,22 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
struct nf_sockopt_ops *ops;
int ret;
- if (sk->sk_net != &init_net)
- return -ENOPROTOOPT;
-
-
- if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
- return -EINTR;
-
- list_for_each_entry(ops, &nf_sockopts, list) {
- if (ops->pf == pf) {
- if (!try_module_get(ops->owner))
- goto out_nosup;
-
- if (get) {
- if (val >= ops->get_optmin
- && val < ops->get_optmax) {
- mutex_unlock(&nf_sockopt_mutex);
- if (ops->compat_get)
- ret = ops->compat_get(sk,
- val, opt, len);
- else
- ret = ops->get(sk,
- val, opt, len);
- goto out;
- }
- } else {
- if (val >= ops->set_optmin
- && val < ops->set_optmax) {
- mutex_unlock(&nf_sockopt_mutex);
- if (ops->compat_set)
- ret = ops->compat_set(sk,
- val, opt, *len);
- else
- ret = ops->set(sk,
- val, opt, *len);
- goto out;
- }
- }
- module_put(ops->owner);
- }
+ ops = nf_sockopt_find(sk, pf, val, get);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ if (get) {
+ if (ops->compat_get)
+ ret = ops->compat_get(sk, val, opt, len);
+ else
+ ret = ops->get(sk, val, opt, len);
+ } else {
+ if (ops->compat_set)
+ ret = ops->compat_set(sk, val, opt, *len);
+ else
+ ret = ops->set(sk, val, opt, *len);
}
- out_nosup:
- mutex_unlock(&nf_sockopt_mutex);
- return -ENOPROTOOPT;
- out:
module_put(ops->owner);
return ret;
}
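
The nf_sockopt.c rework above folds the duplicated search loops into one helper, nf_sockopt_find(), which returns either a usable ops pointer or an errno encoded with ERR_PTR(); both nf_sockopt() and compat_nf_sockopt() then just decode it with IS_ERR()/PTR_ERR(). A rough sketch of that pointer-or-error idiom under made-up demo_* names:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>

/* Hypothetical stand-in for struct nf_sockopt_ops. */
struct demo_sockopt_ops {
	struct list_head list;
	int pf;
	int (*handler)(int val);
};

static LIST_HEAD(demo_sockopts);

static struct demo_sockopt_ops *demo_sockopt_find(int pf)
{
	struct demo_sockopt_ops *ops;

	list_for_each_entry(ops, &demo_sockopts, list)
		if (ops->pf == pf)
			return ops;		/* found a matching handler */

	return ERR_PTR(-ENOPROTOOPT);		/* errno encoded in the pointer */
}

static int demo_sockopt(int pf, int val)
{
	struct demo_sockopt_ops *ops = demo_sockopt_find(pf);

	if (IS_ERR(ops))
		return PTR_ERR(ops);		/* decode it back into an int */
	return ops->handler(val);
}
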
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index ff44f86c24c..f9c55dcd894 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -170,7 +170,7 @@ static bool xt_time_match(const struct sk_buff *skb,
if (skb->tstamp.tv64 == 0)
__net_timestamp((struct sk_buff *)skb);
- stamp = skb->tstamp.tv64;
+ stamp = ktime_to_ns(skb->tstamp);
do_div(stamp, NSEC_PER_SEC);
if (info->flags & XT_TIME_LOCAL_TZ)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 415c97236f6..de3988ba1f4 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1888,7 +1888,7 @@ static void __net_exit netlink_net_exit(struct net *net)
#endif
}
-static struct pernet_operations netlink_net_ops = {
+static struct pernet_operations __net_initdata netlink_net_ops = {
.init = netlink_net_init,
.exit = netlink_net_exit,
};
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index eb6be5030c7..8a7807dbba0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -881,20 +881,14 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
if (protocol == 0)
goto out_unlock;
- if (dev) {
- if (dev->flags&IFF_UP) {
- dev_add_pack(&po->prot_hook);
- sock_hold(sk);
- po->running = 1;
- } else {
- sk->sk_err = ENETDOWN;
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
- }
- } else {
+ if (!dev || (dev->flags & IFF_UP)) {
dev_add_pack(&po->prot_hook);
sock_hold(sk);
po->running = 1;
+ } else {
+ sk->sk_err = ENETDOWN;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
}
out_unlock:
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index fe03f71f17d..f3a2bd747a8 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -114,7 +114,7 @@ static int rxrpc_create_local(struct rxrpc_local *local)
return 0;
error:
- local->socket->ops->shutdown(local->socket, 2);
+ kernel_sock_shutdown(local->socket, SHUT_RDWR);
local->socket->sk->sk_user_data = NULL;
sock_release(local->socket);
local->socket = NULL;
@@ -267,7 +267,7 @@ static void rxrpc_destroy_local(struct work_struct *work)
/* finish cleaning up the local descriptor */
rxrpc_purge_queue(&local->accept_queue);
rxrpc_purge_queue(&local->reject_queue);
- local->socket->ops->shutdown(local->socket, 2);
+ kernel_sock_shutdown(local->socket, SHUT_RDWR);
sock_release(local->socket);
up_read(&rxrpc_local_sem);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 03158e3665d..013e3d3ab0f 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -262,10 +262,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
*/
asoc->peer.sack_needed = 1;
- /* Assume that the peer recongizes ASCONF until reported otherwise
- * via an ERROR chunk.
+ /* Assume that the peer will tell us if he recognizes ASCONF
+ * as part of INIT exchange.
+ * The sctp_addip_noauth option is there for backward compatibility
+ * and will revert old behavior.
*/
- asoc->peer.asconf_capable = 1;
+ asoc->peer.asconf_capable = 0;
+ if (sctp_addip_noauth)
+ asoc->peer.asconf_capable = 1;
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index dfffa94fb9f..cae95af9a8c 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -180,9 +180,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
/* Delete an address from the bind address list in the SCTP_bind_addr
* structure.
*/
-int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
- void fastcall (*rcu_call)(struct rcu_head *head,
- void (*func)(struct rcu_head *head)))
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
{
struct sctp_sockaddr_entry *addr, *temp;
@@ -198,15 +196,10 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
}
}
- /* Call the rcu callback provided in the args. This function is
- * called by both BH packet processing and user side socket option
- * processing, but it works on different lists in those 2 contexts.
- * Each context provides it's own callback, whether call_rcu_bh()
- * or call_rcu(), to make sure that we wait for an appropriate time.
- */
if (addr && !addr->valid) {
- rcu_call(&addr->rcu, sctp_local_addr_free);
+ call_rcu(&addr->rcu, sctp_local_addr_free);
SCTP_DBG_OBJCNT_DEC(addr);
+ return 0;
}
return -EINVAL;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2d2d81ef4a6..de6f505d6ff 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -328,24 +328,35 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
const union sctp_addr *paddr,
struct sctp_transport **transport)
{
+ struct sctp_association *asoc = NULL;
+ struct sctp_transport *t = NULL;
+ struct sctp_hashbucket *head;
+ struct sctp_ep_common *epb;
+ struct hlist_node *node;
+ int hash;
int rport;
- struct sctp_association *asoc;
- struct list_head *pos;
+ *transport = NULL;
rport = ntohs(paddr->v4.sin_port);
- list_for_each(pos, &ep->asocs) {
- asoc = list_entry(pos, struct sctp_association, asocs);
- if (rport == asoc->peer.port) {
- *transport = sctp_assoc_lookup_paddr(asoc, paddr);
-
- if (*transport)
- return asoc;
+ hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
+ head = &sctp_assoc_hashtable[hash];
+ read_lock(&head->lock);
+ sctp_for_each_hentry(epb, node, &head->chain) {
+ asoc = sctp_assoc(epb);
+ if (asoc->ep != ep || rport != asoc->peer.port)
+ goto next;
+
+ t = sctp_assoc_lookup_paddr(asoc, paddr);
+ if (t) {
+ *transport = t;
+ break;
}
+next:
+ asoc = NULL;
}
-
- *transport = NULL;
- return NULL;
+ read_unlock(&head->lock);
+ return asoc;
}
/* Lookup association on an endpoint based on a peer address. BH-safe. */
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 86503e7fa21..91ae463b079 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -656,7 +656,6 @@ discard:
/* Insert endpoint into the hash table. */
static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
{
- struct sctp_ep_common **epp;
struct sctp_ep_common *epb;
struct sctp_hashbucket *head;
@@ -666,12 +665,7 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
- epp = &head->chain;
- epb->next = *epp;
- if (epb->next)
- (*epp)->pprev = &epb->next;
- *epp = epb;
- epb->pprev = epp;
+ hlist_add_head(&epb->node, &head->chain);
sctp_write_unlock(&head->lock);
}
@@ -691,19 +685,15 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
epb = &ep->base;
+ if (hlist_unhashed(&epb->node))
+ return;
+
epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
-
- if (epb->pprev) {
- if (epb->next)
- epb->next->pprev = epb->pprev;
- *epb->pprev = epb->next;
- epb->pprev = NULL;
- }
-
+ __hlist_del(&epb->node);
sctp_write_unlock(&head->lock);
}
@@ -721,12 +711,13 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
struct sctp_hashbucket *head;
struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
+ struct hlist_node *node;
int hash;
hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
head = &sctp_ep_hashtable[hash];
read_lock(&head->lock);
- for (epb = head->chain; epb; epb = epb->next) {
+ sctp_for_each_hentry(epb, node, &head->chain) {
ep = sctp_ep(epb);
if (sctp_endpoint_is_match(ep, laddr))
goto hit;
@@ -744,7 +735,6 @@ hit:
/* Insert association into the hash table. */
static void __sctp_hash_established(struct sctp_association *asoc)
{
- struct sctp_ep_common **epp;
struct sctp_ep_common *epb;
struct sctp_hashbucket *head;
@@ -756,12 +746,7 @@ static void __sctp_hash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
- epp = &head->chain;
- epb->next = *epp;
- if (epb->next)
- (*epp)->pprev = &epb->next;
- *epp = epb;
- epb->pprev = epp;
+ hlist_add_head(&epb->node, &head->chain);
sctp_write_unlock(&head->lock);
}
@@ -790,14 +775,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
-
- if (epb->pprev) {
- if (epb->next)
- epb->next->pprev = epb->pprev;
- *epb->pprev = epb->next;
- epb->pprev = NULL;
- }
-
+ __hlist_del(&epb->node);
sctp_write_unlock(&head->lock);
}
@@ -822,6 +800,7 @@ static struct sctp_association *__sctp_lookup_association(
struct sctp_ep_common *epb;
struct sctp_association *asoc;
struct sctp_transport *transport;
+ struct hlist_node *node;
int hash;
/* Optimize here for direct hit, only listening connections can
@@ -830,7 +809,7 @@ static struct sctp_association *__sctp_lookup_association(
hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
head = &sctp_assoc_hashtable[hash];
read_lock(&head->lock);
- for (epb = head->chain; epb; epb = epb->next) {
+ sctp_for_each_hentry(epb, node, &head->chain) {
asoc = sctp_assoc(epb);
transport = sctp_assoc_is_match(asoc, local, peer);
if (transport)
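
The sctp/input.c hunks above (and the later proc.c, protocol.c and socket.c ones) replace SCTP's open-coded next/pprev hash chains with the generic hlist primitives: a bucket holds an hlist_head, an entry carries an hlist_node, and add/remove/walk become hlist_add_head(), __hlist_del() and the hlist_for_each_entry() iterator that sctp_for_each_hentry wraps. A small sketch of that bucket pattern with illustrative demo_* names (locks assumed initialized elsewhere with rwlock_init()):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_bucket {
	rwlock_t lock;
	struct hlist_head chain;	/* was: struct demo_entry *chain */
};

struct demo_entry {
	struct hlist_node node;		/* was: hand-rolled next + pprev */
	int port;
};

static void demo_hash(struct demo_bucket *head, struct demo_entry *e)
{
	write_lock(&head->lock);
	hlist_add_head(&e->node, &head->chain);
	write_unlock(&head->lock);
}

static struct demo_entry *demo_lookup(struct demo_bucket *head, int port)
{
	struct demo_entry *e;
	struct hlist_node *pos;	/* the 2.6.24-era iterator needs a cursor */

	read_lock(&head->lock);
	hlist_for_each_entry(e, pos, &head->chain, node)
		if (e->port == port)
			goto out;
	e = NULL;
out:
	read_unlock(&head->lock);
	return e;
}
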
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index f10fe7fbf24..cf4b7eb023b 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -90,6 +90,10 @@ void sctp_inq_free(struct sctp_inq *queue)
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
/* Directly call the packet handling routine. */
+ if (chunk->rcvr->dead) {
+ sctp_chunk_free(chunk);
+ return;
+ }
/* We are now calling this either from the soft interrupt
* or from the backlog processing.
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 28f4fe77cee..fa76f235169 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,7 +382,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new)
/* Mark all the eligible packets on a transport for retransmission. */
void sctp_retransmit_mark(struct sctp_outq *q,
struct sctp_transport *transport,
- __u8 fast_retransmit)
+ __u8 reason)
{
struct list_head *lchunk, *ltemp;
struct sctp_chunk *chunk;
@@ -412,20 +412,20 @@ void sctp_retransmit_mark(struct sctp_outq *q,
continue;
}
- /* If we are doing retransmission due to a fast retransmit,
- * only the chunk's that are marked for fast retransmit
- * should be added to the retransmit queue. If we are doing
- * retransmission due to a timeout or pmtu discovery, only the
- * chunks that are not yet acked should be added to the
- * retransmit queue.
+ /* If we are doing retransmission due to a timeout or pmtu
+ * discovery, only the chunks that are not yet acked should
+ * be added to the retransmit queue.
*/
- if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
- (!fast_retransmit && !chunk->tsn_gap_acked)) {
+ if ((reason == SCTP_RTXR_FAST_RTX &&
+ (chunk->fast_retransmit > 0)) ||
+ (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
/* If this chunk was sent less then 1 rto ago, do not
* retransmit this chunk, but give the peer time
- * to acknowlege it.
+ * to acknowledge it. Do this only when
+ * retransmitting due to T3 timeout.
*/
- if ((jiffies - chunk->sent_at) < transport->rto)
+ if (reason == SCTP_RTXR_T3_RTX &&
+ (jiffies - chunk->sent_at) < transport->last_rto)
continue;
/* RFC 2960 6.2.1 Processing a Received SACK
@@ -467,10 +467,10 @@ void sctp_retransmit_mark(struct sctp_outq *q,
}
}
- SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, "
+ SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
"cwnd: %d, ssthresh: %d, flight_size: %d, "
"pba: %d\n", __FUNCTION__,
- transport, fast_retransmit,
+ transport, reason,
transport->cwnd, transport->ssthresh,
transport->flight_size,
transport->partial_bytes_acked);
@@ -484,7 +484,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
sctp_retransmit_reason_t reason)
{
int error = 0;
- __u8 fast_retransmit = 0;
switch(reason) {
case SCTP_RTXR_T3_RTX:
@@ -499,16 +498,18 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
case SCTP_RTXR_FAST_RTX:
SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
- fast_retransmit = 1;
break;
case SCTP_RTXR_PMTUD:
SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
break;
+ case SCTP_RTXR_T1_RTX:
+ SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
+ break;
default:
BUG();
}
- sctp_retransmit_mark(q, transport, fast_retransmit);
+ sctp_retransmit_mark(q, transport, reason);
/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
* the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
@@ -641,7 +642,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
/* If we are here due to a retransmit timeout or a fast
* retransmit and if there are any chunks left in the retransmit
- * queue that could not fit in the PMTU sized packet, they need * to be marked as ineligible for a subsequent fast retransmit.
+ * queue that could not fit in the PMTU sized packet, they need
+ * to be marked as ineligible for a subsequent fast retransmit.
*/
if (rtx_timeout && !lchunk) {
list_for_each(lchunk1, lqueue) {
@@ -660,10 +662,9 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
int sctp_outq_uncork(struct sctp_outq *q)
{
int error = 0;
- if (q->cork) {
+ if (q->cork)
q->cork = 0;
- error = sctp_outq_flush(q, 0);
- }
+ error = sctp_outq_flush(q, 0);
return error;
}
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index e4cd841a22e..24997320407 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -225,6 +225,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
struct sock *sk;
+ struct hlist_node *node;
int hash = *(loff_t *)v;
if (hash >= sctp_ep_hashsize)
@@ -233,7 +234,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
head = &sctp_ep_hashtable[hash];
sctp_local_bh_disable();
read_lock(&head->lock);
- for (epb = head->chain; epb; epb = epb->next) {
+ sctp_for_each_hentry(epb, node, &head->chain) {
ep = sctp_ep(epb);
sk = epb->sk;
seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
@@ -328,6 +329,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
struct sctp_ep_common *epb;
struct sctp_association *assoc;
struct sock *sk;
+ struct hlist_node *node;
int hash = *(loff_t *)v;
if (hash >= sctp_assoc_hashsize)
@@ -336,7 +338,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
head = &sctp_assoc_hashtable[hash];
sctp_local_bh_disable();
read_lock(&head->lock);
- for (epb = head->chain; epb; epb = epb->next) {
+ sctp_for_each_hentry(epb, node, &head->chain) {
assoc = sctp_assoc(epb);
sk = epb->sk;
seq_printf(seq,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 40c1a47d1b8..d50f610d1b0 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1137,7 +1137,7 @@ SCTP_STATIC __init int sctp_init(void)
}
for (i = 0; i < sctp_assoc_hashsize; i++) {
rwlock_init(&sctp_assoc_hashtable[i].lock);
- sctp_assoc_hashtable[i].chain = NULL;
+ INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain);
}
/* Allocate and initialize the endpoint hash table. */
@@ -1151,7 +1151,7 @@ SCTP_STATIC __init int sctp_init(void)
}
for (i = 0; i < sctp_ep_hashsize; i++) {
rwlock_init(&sctp_ep_hashtable[i].lock);
- sctp_ep_hashtable[i].chain = NULL;
+ INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
}
/* Allocate and initialize the SCTP port hash table. */
@@ -1170,7 +1170,7 @@ SCTP_STATIC __init int sctp_init(void)
}
for (i = 0; i < sctp_port_hashsize; i++) {
spin_lock_init(&sctp_port_hashtable[i].lock);
- sctp_port_hashtable[i].chain = NULL;
+ INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
}
printk(KERN_INFO "SCTP: Hash tables configured "
@@ -1179,6 +1179,7 @@ SCTP_STATIC __init int sctp_init(void)
/* Disable ADDIP by default. */
sctp_addip_enable = 0;
+ sctp_addip_noauth = 0;
/* Enable PR-SCTP by default. */
sctp_prsctp_enable = 1;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index c377e4e8f65..5a9783c38de 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1788,9 +1788,14 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
sizeof(sctp_paramhdr_t);
+ /* This is a fatal error. Any accumulated non-fatal errors are
+ * not reported.
+ */
+ if (*errp)
+ sctp_chunk_free(*errp);
+
/* Create an error chunk and fill it in with our payload. */
- if (!*errp)
- *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
+ *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
@@ -1813,9 +1818,15 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
{
__u16 len = ntohs(param.p->length);
- /* Make an ERROR chunk. */
- if (!*errp)
- *errp = sctp_make_op_error_space(asoc, chunk, len);
+ /* Processing of the HOST_NAME parameter will generate an
+ * ABORT. If we've accumulated any non-fatal errors, they
+ * would be unrecognized parameters and we should not include
+ * them in the ABORT.
+ */
+ if (*errp)
+ sctp_chunk_free(*errp);
+
+ *errp = sctp_make_op_error_space(asoc, chunk, len);
if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
@@ -1847,7 +1858,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
break;
case SCTP_CID_ASCONF:
case SCTP_CID_ASCONF_ACK:
- asoc->peer.addip_capable = 1;
+ asoc->peer.asconf_capable = 1;
break;
default:
break;
@@ -1862,56 +1873,40 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
* taken if the processing endpoint does not recognize the
* Parameter Type.
*
- * 00 - Stop processing this SCTP chunk and discard it,
- * do not process any further chunks within it.
+ * 00 - Stop processing this parameter; do not process any further
+ * parameters within this chunk
*
- * 01 - Stop processing this SCTP chunk and discard it,
- * do not process any further chunks within it, and report
- * the unrecognized parameter in an 'Unrecognized
- * Parameter Type' (in either an ERROR or in the INIT ACK).
+ * 01 - Stop processing this parameter, do not process any further
+ * parameters within this chunk, and report the unrecognized
+ * parameter in an 'Unrecognized Parameter' ERROR chunk.
*
* 10 - Skip this parameter and continue processing.
*
* 11 - Skip this parameter and continue processing but
* report the unrecognized parameter in an
- * 'Unrecognized Parameter Type' (in either an ERROR or in
- * the INIT ACK).
+ * 'Unrecognized Parameter' ERROR chunk.
*
* Return value:
- * 0 - discard the chunk
- * 1 - continue with the chunk
+ * SCTP_IERROR_NO_ERROR - continue with the chunk
+ * SCTP_IERROR_ERROR - stop and report an error.
+ * SCTP_IERROR_NOMEM - out of memory.
*/
-static int sctp_process_unk_param(const struct sctp_association *asoc,
- union sctp_params param,
- struct sctp_chunk *chunk,
- struct sctp_chunk **errp)
+static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
{
- int retval = 1;
+ int retval = SCTP_IERROR_NO_ERROR;
switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
case SCTP_PARAM_ACTION_DISCARD:
- retval = 0;
- break;
- case SCTP_PARAM_ACTION_DISCARD_ERR:
- retval = 0;
- /* Make an ERROR chunk, preparing enough room for
- * returning multiple unknown parameters.
- */
- if (NULL == *errp)
- *errp = sctp_make_op_error_space(asoc, chunk,
- ntohs(chunk->chunk_hdr->length));
-
- if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- WORD_ROUND(ntohs(param.p->length)));
- sctp_addto_chunk(*errp,
- WORD_ROUND(ntohs(param.p->length)),
- param.v);
- }
-
+ retval = SCTP_IERROR_ERROR;
break;
case SCTP_PARAM_ACTION_SKIP:
break;
+ case SCTP_PARAM_ACTION_DISCARD_ERR:
+ retval = SCTP_IERROR_ERROR;
+ /* Fall through */
case SCTP_PARAM_ACTION_SKIP_ERR:
/* Make an ERROR chunk, preparing enough room for
* returning multiple unknown parameters.
@@ -1932,9 +1927,8 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
* to the peer and the association won't be
* established.
*/
- retval = 0;
+ retval = SCTP_IERROR_NOMEM;
}
-
break;
default:
break;
@@ -1943,18 +1937,20 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
return retval;
}
-/* Find unrecognized parameters in the chunk.
+/* Verify variable length parameters
* Return values:
- * 0 - discard the chunk
- * 1 - continue with the chunk
+ * SCTP_IERROR_ABORT - trigger an ABORT
+ * SCTP_IERROR_NOMEM - out of memory (abort)
+ * SCTP_IERROR_ERROR - stop processing, trigger an ERROR
+ * SCTP_IERROR_NO_ERROR - continue with the chunk
*/
-static int sctp_verify_param(const struct sctp_association *asoc,
- union sctp_params param,
- sctp_cid_t cid,
- struct sctp_chunk *chunk,
- struct sctp_chunk **err_chunk)
+static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
+ union sctp_params param,
+ sctp_cid_t cid,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **err_chunk)
{
- int retval = 1;
+ int retval = SCTP_IERROR_NO_ERROR;
/* FIXME - This routine is not looking at each parameter per the
* chunk type, i.e., unrecognized parameters should be further
@@ -1976,7 +1972,9 @@ static int sctp_verify_param(const struct sctp_association *asoc,
case SCTP_PARAM_HOST_NAME_ADDRESS:
/* Tell the peer, we won't support this param. */
- return sctp_process_hn_param(asoc, param, chunk, err_chunk);
+ sctp_process_hn_param(asoc, param, chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ break;
case SCTP_PARAM_FWD_TSN_SUPPORT:
if (sctp_prsctp_enable)
@@ -1993,9 +1991,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
* cause 'Protocol Violation'.
*/
if (SCTP_AUTH_RANDOM_LENGTH !=
- ntohs(param.p->length) - sizeof(sctp_paramhdr_t))
- return sctp_process_inv_paramlength(asoc, param.p,
+ ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) {
+ sctp_process_inv_paramlength(asoc, param.p,
chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
break;
case SCTP_PARAM_CHUNKS:
@@ -2007,9 +2007,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
* INIT-ACK chunk if the sender wants to receive authenticated
* chunks. Its maximum length is 260 bytes.
*/
- if (260 < ntohs(param.p->length))
- return sctp_process_inv_paramlength(asoc, param.p,
- chunk, err_chunk);
+ if (260 < ntohs(param.p->length)) {
+ sctp_process_inv_paramlength(asoc, param.p,
+ chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
break;
case SCTP_PARAM_HMAC_ALGO:
@@ -2020,8 +2022,7 @@ fallthrough:
default:
SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
ntohs(param.p->type), cid);
- return sctp_process_unk_param(asoc, param, chunk, err_chunk);
-
+ retval = sctp_process_unk_param(asoc, param, chunk, err_chunk);
break;
}
return retval;
@@ -2036,6 +2037,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
{
union sctp_params param;
int has_cookie = 0;
+ int result;
/* Verify stream values are non-zero. */
if ((0 == peer_init->init_hdr.num_outbound_streams) ||
@@ -2043,8 +2045,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
(0 == peer_init->init_hdr.init_tag) ||
(SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) {
- sctp_process_inv_mandatory(asoc, chunk, errp);
- return 0;
+ return sctp_process_inv_mandatory(asoc, chunk, errp);
}
/* Check for missing mandatory parameters. */
@@ -2062,29 +2063,29 @@ int sctp_verify_init(const struct sctp_association *asoc,
* VIOLATION error. We build the ERROR chunk here and let the normal
* error handling code build and send the packet.
*/
- if (param.v != (void*)chunk->chunk_end) {
- sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
- return 0;
- }
+ if (param.v != (void*)chunk->chunk_end)
+ return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
/* The only missing mandatory param possible today is
* the state cookie for an INIT-ACK chunk.
*/
- if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) {
- sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
- chunk, errp);
- return 0;
- }
-
- /* Find unrecognized parameters. */
+ if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
+ return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
+ chunk, errp);
+ /* Verify all the variable length parameters */
sctp_walk_params(param, peer_init, init_hdr.params) {
- if (!sctp_verify_param(asoc, param, cid, chunk, errp)) {
- if (SCTP_PARAM_HOST_NAME_ADDRESS == param.p->type)
+ result = sctp_verify_param(asoc, param, cid, chunk, errp);
+ switch (result) {
+ case SCTP_IERROR_ABORT:
+ case SCTP_IERROR_NOMEM:
return 0;
- else
+ case SCTP_IERROR_ERROR:
return 1;
+ case SCTP_IERROR_NO_ERROR:
+ default:
+ break;
}
} /* for (loop through all parameters) */
@@ -2137,11 +2138,14 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
/* If the peer claims support for ADD-IP without support
* for AUTH, disable support for ADD-IP.
+ * Do this only if backward compatible mode is turned off.
*/
- if (asoc->peer.addip_capable && !asoc->peer.auth_capable) {
+ if (!sctp_addip_noauth &&
+ (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
SCTP_PARAM_DEL_IP |
SCTP_PARAM_SET_PRIMARY);
+ asoc->peer.asconf_capable = 0;
}
/* Walk list of transports, removing transports in the UNKNOWN state. */
@@ -2848,10 +2852,11 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
__be16 err_code;
int length = 0;
- int chunk_len = asconf->skb->len;
+ int chunk_len;
__u32 serial;
int all_param_pass = 1;
+ chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
hdr = (sctp_addiphdr_t *)asconf->skb->data;
serial = ntohl(hdr->serial);
@@ -2952,13 +2957,17 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
/* This is always done in BH context with a socket lock
* held, so the list can not change.
*/
+ local_bh_disable();
list_for_each_entry(saddr, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&saddr->a, &addr))
saddr->use_as_src = 1;
}
+ local_bh_enable();
break;
case SCTP_PARAM_DEL_IP:
- retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh);
+ local_bh_disable();
+ retval = sctp_del_bind_addr(bp, &addr);
+ local_bh_enable();
list_for_each(pos, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport,
transports);
@@ -2990,7 +2999,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
sctp_addip_param_t *asconf_ack_param;
sctp_errhdr_t *err_param;
int length;
- int asconf_ack_len = asconf_ack->skb->len;
+ int asconf_ack_len;
__be16 err_code;
if (no_err)
@@ -2998,6 +3007,9 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
else
err_code = SCTP_ERROR_REQ_REFUSED;
+ asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) -
+ sizeof(sctp_chunkhdr_t);
+
/* Skip the addiphdr from the asconf_ack chunk and store a pointer to
* the first asconf_ack parameter.
*/
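
The rewritten comment in sctp_process_unk_param() above follows the SCTP rule that the two highest-order bits of a parameter type encode how an unrecognized parameter is handled (stop; stop and report; skip; skip and report), which is why the function switches on param.p->type & SCTP_PARAM_ACTION_MASK. A worked illustration of that masking, using a hypothetical helper on a host-order type value (the real code masks the big-endian value directly):

#include <linux/types.h>

enum demo_action { DEMO_STOP, DEMO_STOP_REPORT, DEMO_SKIP, DEMO_SKIP_REPORT };

/* Classify an unrecognized parameter by the top two bits of its type. */
static enum demo_action demo_unknown_param_action(__u16 type)
{
	switch (type >> 14) {			/* keep only bits 15..14 */
	case 0:  return DEMO_STOP;		/* 00: stop processing */
	case 1:  return DEMO_STOP_REPORT;	/* 01: stop and report */
	case 2:  return DEMO_SKIP;		/* 10: skip */
	default: return DEMO_SKIP_REPORT;	/* 11: skip and report */
	}
}
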
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bbdc938da86..78d1a8a49bd 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -453,6 +453,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
* maximum value discussed in rule C7 above (RTO.max) may be
* used to provide an upper bound to this doubling operation.
*/
+ transport->last_rto = transport->rto;
transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
}
@@ -1267,6 +1268,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_ootb_pkt_free(packet);
break;
+ case SCTP_CMD_T1_RETRAN:
+ /* Mark a transport for retransmission. */
+ sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
+ SCTP_RTXR_T1_RTX);
+ break;
+
case SCTP_CMD_RETRAN:
/* Mark a transport for retransmission. */
sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
@@ -1393,7 +1400,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
list_for_each(pos, &asoc->peer.transport_addr_list) {
t = list_entry(pos, struct sctp_transport,
transports);
- sctp_retransmit_mark(&asoc->outqueue, t, 0);
+ sctp_retransmit_mark(&asoc->outqueue, t,
+ SCTP_RTXR_T1_RTX);
}
sctp_add_cmd_sf(commands,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f01b408508f..5ebbe808d80 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2305,7 +2305,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
/* If we've sent any data bundled with COOKIE-ECHO we will need to
* resend
*/
- sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
+ sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
SCTP_TRANSPORT(asoc->peer.primary_path));
/* Cast away the const modifier, as we want to just
@@ -4064,11 +4064,6 @@ static sctp_disposition_t sctp_sf_abort_violation(
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort = NULL;
- /* Make the abort chunk. */
- abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
- if (!abort)
- goto nomem;
-
/* SCTP-AUTH, Section 6.3:
* It should be noted that if the receiver wants to tear
* down an association in an authenticated way only, the
@@ -4083,6 +4078,11 @@ static sctp_disposition_t sctp_sf_abort_violation(
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
goto discard;
+ /* Make the abort chunk. */
+ abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
+ if (!abort)
+ goto nomem;
+
if (asoc) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a7ecf3159e5..ff8bc95670e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -660,7 +660,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
* socket routing and failover schemes. Refer to comments in
* sctp_do_bind(). -daisy
*/
- retval = sctp_del_bind_addr(bp, sa_addr, call_rcu);
+ retval = sctp_del_bind_addr(bp, sa_addr);
addr_buf += af->sockaddr_len;
err_bindx_rem:
@@ -5307,6 +5307,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
struct sctp_bind_hashbucket *head; /* hash list */
struct sctp_bind_bucket *pp; /* hash list port iterator */
+ struct hlist_node *node;
unsigned short snum;
int ret;
@@ -5331,7 +5332,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
index = sctp_phashfn(rover);
head = &sctp_port_hashtable[index];
sctp_spin_lock(&head->lock);
- for (pp = head->chain; pp; pp = pp->next)
+ sctp_for_each_hentry(pp, node, &head->chain)
if (pp->port == rover)
goto next;
break;
@@ -5358,7 +5359,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
*/
head = &sctp_port_hashtable[sctp_phashfn(snum)];
sctp_spin_lock(&head->lock);
- for (pp = head->chain; pp; pp = pp->next) {
+ sctp_for_each_hentry(pp, node, &head->chain) {
if (pp->port == snum)
goto pp_found;
}
@@ -5702,10 +5703,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
pp->port = snum;
pp->fastreuse = 0;
INIT_HLIST_HEAD(&pp->owner);
- if ((pp->next = head->chain) != NULL)
- pp->next->pprev = &pp->next;
- head->chain = pp;
- pp->pprev = &head->chain;
+ hlist_add_head(&pp->node, &head->chain);
}
return pp;
}
@@ -5714,9 +5712,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
if (pp && hlist_empty(&pp->owner)) {
- if (pp->next)
- pp->next->pprev = pp->pprev;
- *(pp->pprev) = pp->next;
+ __hlist_del(&pp->node);
kmem_cache_free(sctp_bucket_cachep, pp);
SCTP_DBG_OBJCNT_DEC(bind_bucket);
}
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 0669778e433..da4f15734fb 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -263,6 +263,15 @@ static ctl_table sctp_table[] = {
.proc_handler = &proc_dointvec,
.strategy = &sysctl_intvec
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "addip_noauth_enable",
+ .data = &sctp_addip_noauth,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec
+ },
{ .ctl_name = 0 }
};
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 5f467c914f8..d55ce83a020 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -74,8 +74,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
* given destination transport address, set RTO to the protocol
* parameter 'RTO.Initial'.
*/
+ peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
peer->rtt = 0;
- peer->rto = msecs_to_jiffies(sctp_rto_initial);
peer->rttvar = 0;
peer->srtt = 0;
peer->rto_pending = 0;
@@ -385,6 +385,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
tp->rto = tp->asoc->rto_max;
tp->rtt = rtt;
+ tp->last_rto = tp->rto;
/* Reset rto_pending so that a new RTT measurement is started when a
* new data chunk is sent.
@@ -578,7 +579,7 @@ void sctp_transport_reset(struct sctp_transport *t)
*/
t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
t->ssthresh = asoc->peer.i.a_rwnd;
- t->rto = asoc->rto_initial;
+ t->last_rto = t->rto = asoc->rto_initial;
t->rtt = 0;
t->srtt = 0;
t->rttvar = 0;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 4be92d0a2ca..4908041ffb3 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -862,7 +862,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
continue;
/* see if this ssn has been marked by skipping */
- if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+ if (!SSN_lte(cssn, sctp_ssn_peek(in, csid)))
break;
__skb_unlink(pos, &ulpq->lobby);
diff --git a/net/socket.c b/net/socket.c
index 5d879fd3d01..74784dfe8e5 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2319,6 +2319,11 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
return err;
}
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
+{
+ return sock->ops->shutdown(sock, how);
+}
+
/* ABI emulation layers need these two */
EXPORT_SYMBOL(move_addr_to_kernel);
EXPORT_SYMBOL(move_addr_to_user);
@@ -2345,3 +2350,4 @@ EXPORT_SYMBOL(kernel_getsockopt);
EXPORT_SYMBOL(kernel_setsockopt);
EXPORT_SYMBOL(kernel_sendpage);
EXPORT_SYMBOL(kernel_sock_ioctl);
+EXPORT_SYMBOL(kernel_sock_shutdown);
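
The kernel_sock_shutdown() helper added above just forwards to the socket's shutdown op; the rxrpc hunks earlier in this merge use it with the symbolic SHUT_RDWR instead of the bare constant 2. A minimal caller, assuming a kernel-owned socket that was created elsewhere (the demo_* name is illustrative):

#include <linux/net.h>

static void demo_close_kernel_socket(struct socket *sock)
{
	kernel_sock_shutdown(sock, SHUT_RDWR);	/* shut both directions */
	sock_release(sock);
}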