path: root/net/sctp/input.c
author		Zhu Yi <yi.zhu@intel.com>	2010-03-04 18:01:44 +0000
committer	David S. Miller <davem@davemloft.net>	2010-03-05 13:34:01 -0800
commit		50b1a782f845140f4138f14a1ce8a4a6dd0cc82f (patch)
tree		3cb1df5d01fb46ca4fef4146dc50eb455643e252 /net/sctp/input.c
parent		79545b681961d7001c1f4c3eb9ffb87bed4485db (diff)
sctp: use limited socket backlog
Make sctp adapt to the limited socket backlog change.

Cc: Vlad Yasevich <vladislav.yasevich@hp.com>
Cc: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
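The sk_add_backlog_limited() helper this patch switches to is introduced earlier in the same series ("net: add limit for socket backlog") and is not part of this diff. Below is a minimal sketch of the contract sctp relies on; the sk_backlog.len/sk_backlog.limit fields and the exact limit formula are assumptions about that companion change, not a quote of include/net/sock.h:

/* Sketch (assumption, for illustration only): queue the skb on the socket
 * backlog unless the backlog has already grown past its limit, which the
 * companion change derives from the socket receive buffer.  A non-zero
 * return means the skb was NOT queued, so the caller must drop the packet
 * itself -- which is exactly what sctp_rcv()/sctp_add_backlog() below are
 * taught to do.
 */
static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_backlog.len >= sk->sk_backlog.limit)
		return -ENOBUFS;

	sk_add_backlog(sk, skb);		/* the old unlimited enqueue */
	sk->sk_backlog.len += skb->truesize;	/* account queued bytes */
	return 0;
}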
Diffstat (limited to 'net/sctp/input.c')
-rw-r--r--	net/sctp/input.c	42
1 file changed, 27 insertions, 15 deletions
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c0c973e67ad..cbc063665e6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
 					const union sctp_addr *peer,
 					struct sctp_transport **pt);
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 /* Calculate the SCTP checksum of an SCTP packet. */
@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
 	}
 	if (sock_owned_by_user(sk)) {
+		if (sctp_add_backlog(sk, skb)) {
+			sctp_bh_unlock_sock(sk);
+			sctp_chunk_free(chunk);
+			skb = NULL; /* sctp_chunk_free already freed the skb */
+			goto discard_release;
+		}
 		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
-		sctp_add_backlog(sk, skb);
 	} else {
 		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	sctp_bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
-		sk_add_backlog(sk, skb);
-		backloged = 1;
+		if (sk_add_backlog_limited(sk, skb))
+			sctp_chunk_free(chunk);
+		else
+			backloged = 1;
 	} else
 		sctp_inq_push(inqueue, chunk);
@@ -362,22 +369,27 @@ done:
 	return 0;
 }
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 	struct sctp_ep_common *rcvr = chunk->rcvr;
+	int ret;
-	/* Hold the assoc/ep while hanging on the backlog queue.
-	 * This way, we know structures we need will not disappear from us
-	 */
-	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-		sctp_association_hold(sctp_assoc(rcvr));
-	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
-		sctp_endpoint_hold(sctp_ep(rcvr));
-	else
-		BUG();
+	ret = sk_add_backlog_limited(sk, skb);
+	if (!ret) {
+		/* Hold the assoc/ep while hanging on the backlog queue.
+		 * This way, we know structures we need will not disappear
+		 * from us
+		 */
+		if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+			sctp_association_hold(sctp_assoc(rcvr));
+		else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+			sctp_endpoint_hold(sctp_ep(rcvr));
+		else
+			BUG();
+	}
+	return ret;
-	sk_add_backlog(sk, skb);
 }
 /* Handle icmp frag needed error. */
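For context on why the hold is now taken only when the skb was actually queued (this is not part of the diff): the reference is dropped again on the done: path of sctp_backlog_rcv(), shown in the hunk header above, after the chunk has been pushed into the inqueue. A sketch of that matching release, assuming the usual assoc/ep put pairing:

done:
	/* Release the refcnt taken in sctp_add_backlog() when the skb was
	 * queued.  Chunks that never made it onto the backlog (backlog
	 * full) are freed by their caller instead, so no put is owed for
	 * them -- hence the hold is skipped on failure above.
	 */
	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
		sctp_association_put(sctp_assoc(rcvr));
	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
		sctp_endpoint_put(sctp_ep(rcvr));
	else
		BUG();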