-rw-r--r--  include/linux/ip.h | 21
-rw-r--r--  include/linux/ipv6.h | 13
-rw-r--r--  include/linux/netlink.h | 24
-rw-r--r--  include/linux/rtnetlink.h | 176
-rw-r--r--  include/linux/slab.h | 1
-rw-r--r--  include/linux/tcp.h | 28
-rw-r--r--  include/linux/xfrm.h | 4
-rw-r--r--  include/net/neighbour.h | 7
-rw-r--r--  include/net/request_sock.h | 255
-rw-r--r--  include/net/sch_generic.h | 122
-rw-r--r--  include/net/sock.h | 4
-rw-r--r--  include/net/tcp.h | 160
-rw-r--r--  include/net/tcp_ecn.h | 13
-rw-r--r--  include/net/xfrm.h | 24
-rw-r--r--  mm/slab.c | 6
-rw-r--r--  net/core/Makefile | 3
-rw-r--r--  net/core/neighbour.c | 333
-rw-r--r--  net/core/request_sock.c | 64
-rw-r--r--  net/core/rtnetlink.c | 33
-rw-r--r--  net/core/sock.c | 35
-rw-r--r--  net/decnet/dn_dev.c | 9
-rw-r--r--  net/decnet/dn_neigh.c | 1
-rw-r--r--  net/decnet/dn_route.c | 11
-rw-r--r--  net/decnet/dn_rules.c | 7
-rw-r--r--  net/decnet/dn_table.c | 8
-rw-r--r--  net/ipv4/devinet.c | 9
-rw-r--r--  net/ipv4/fib_hash.c | 3
-rw-r--r--  net/ipv4/fib_lookup.h | 3
-rw-r--r--  net/ipv4/fib_rules.c | 7
-rw-r--r--  net/ipv4/fib_semantics.c | 10
-rw-r--r--  net/ipv4/ip_sockglue.c | 6
-rw-r--r--  net/ipv4/raw.c | 22
-rw-r--r--  net/ipv4/route.c | 11
-rw-r--r--  net/ipv4/syncookies.c | 49
-rw-r--r--  net/ipv4/tcp.c | 84
-rw-r--r--  net/ipv4/tcp_diag.c | 37
-rw-r--r--  net/ipv4/tcp_ipv4.c | 172
-rw-r--r--  net/ipv4/tcp_minisocks.c | 68
-rw-r--r--  net/ipv4/tcp_output.c | 27
-rw-r--r--  net/ipv4/tcp_timer.c | 18
-rw-r--r--  net/ipv6/addrconf.c | 57
-rw-r--r--  net/ipv6/datagram.c | 6
-rw-r--r--  net/ipv6/raw.c | 8
-rw-r--r--  net/ipv6/route.c | 11
-rw-r--r--  net/ipv6/tcp_ipv6.c | 148
-rw-r--r--  net/ipv6/udp.c | 4
-rw-r--r--  net/key/af_key.c | 369
-rw-r--r--  net/netlink/af_netlink.c | 8
-rw-r--r--  net/sched/act_api.c | 11
-rw-r--r--  net/sched/cls_api.c | 5
-rw-r--r--  net/sched/sch_api.c | 10
-rw-r--r--  net/sched/sch_dsmark.c | 357
-rw-r--r--  net/sched/sch_fifo.c | 152
-rw-r--r--  net/sched/sch_generic.c | 84
-rw-r--r--  net/sctp/socket.c | 8
-rw-r--r--  net/xfrm/xfrm_policy.c | 8
-rw-r--r--  net/xfrm/xfrm_state.c | 81
-rw-r--r--  net/xfrm/xfrm_user.c | 288
-rw-r--r--  security/selinux/nlmsgtab.c | 2
59 files changed, 2406 insertions, 1099 deletions
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 8438c68591f..31e7cedd9f8 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -81,6 +81,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/types.h>
+#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/igmp.h>
#include <net/flow.h>
@@ -107,6 +108,26 @@ struct ip_options {
#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
+struct inet_request_sock {
+ struct request_sock req;
+ u32 loc_addr;
+ u32 rmt_addr;
+ u16 rmt_port;
+ u16 snd_wscale : 4,
+ rcv_wscale : 4,
+ tstamp_ok : 1,
+ sack_ok : 1,
+ wscale_ok : 1,
+ ecn_ok : 1,
+ acked : 1;
+ struct ip_options *opt;
+};
+
+static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
+{
+ return (struct inet_request_sock *)sk;
+}
+
struct ipv6_pinfo;
struct inet_sock {
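
The downcast helper added here relies on struct layout: because struct request_sock is the first member of struct inet_request_sock, a pointer to either refers to the same address and inet_rsk() is just a cast. A minimal standalone sketch of the same pattern, using illustrative names rather than the patch's own:

#include <assert.h>
#include <stdlib.h>

struct base_req {			/* stands in for struct request_sock */
	struct base_req *dl_next;
	unsigned short mss;
};

struct derived_req {			/* stands in for struct inet_request_sock */
	struct base_req req;		/* must be the first member */
	unsigned int loc_addr;
};

static struct derived_req *derived_rsk(struct base_req *r)
{
	return (struct derived_req *)r;	/* same trick as inet_rsk() */
}

int main(void)
{
	struct derived_req *d = calloc(1, sizeof(*d));
	struct base_req *b = &d->req;	/* upcast is implicit */

	derived_rsk(b)->loc_addr = 0x7f000001;
	assert(derived_rsk(b) == d && d->loc_addr == 0x7f000001);
	free(d);
	return 0;
}
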
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ab0d0efbf24..6fcd6a0ade2 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -193,6 +193,19 @@ struct inet6_skb_parm {
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
+struct tcp6_request_sock {
+ struct tcp_request_sock req;
+ struct in6_addr loc_addr;
+ struct in6_addr rmt_addr;
+ struct sk_buff *pktopts;
+ int iif;
+};
+
+static inline struct tcp6_request_sock *tcp6_rsk(const struct request_sock *sk)
+{
+ return (struct tcp6_request_sock *)sk;
+}
+
/**
* struct ipv6_pinfo - ipv6 private area
*
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index b2738ac8bc9..e38407a23d0 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -156,7 +156,7 @@ struct netlink_notify
};
static __inline__ struct nlmsghdr *
-__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
+__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
{
struct nlmsghdr *nlh;
int size = NLMSG_LENGTH(len);
@@ -164,15 +164,31 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
- nlh->nlmsg_flags = 0;
+ nlh->nlmsg_flags = flags;
nlh->nlmsg_pid = pid;
nlh->nlmsg_seq = seq;
return nlh;
}
+#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
+({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) \
+ goto nlmsg_failure; \
+ __nlmsg_put(skb, pid, seq, type, len, flags); })
+
#define NLMSG_PUT(skb, pid, seq, type, len) \
-({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) goto nlmsg_failure; \
- __nlmsg_put(skb, pid, seq, type, len); })
+ NLMSG_NEW(skb, pid, seq, type, len, 0)
+
+#define NLMSG_NEW_ANSWER(skb, cb, type, len, flags) \
+ NLMSG_NEW(skb, NETLINK_CB((cb)->skb).pid, \
+ (cb)->nlh->nlmsg_seq, type, len, flags)
+
+#define NLMSG_END(skb, nlh) \
+({ (nlh)->nlmsg_len = (skb)->tail - (unsigned char *) (nlh); \
+ (skb)->len; })
+
+#define NLMSG_CANCEL(skb, nlh) \
+({ skb_trim(skb, (unsigned char *) (nlh) - (skb)->data); \
+ -1; })
extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct nlmsghdr *nlh,
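
The new macro family standardizes the fill-routine shape: NLMSG_NEW reserves the aligned header (branching to a local nlmsg_failure label when tailroom runs out), NLMSG_END patches the final length into nlmsg_len, and NLMSG_CANCEL trims a half-built message away. A condensed sketch of a fill routine in the style the rest of this patch converts callers to (the function name and payload are illustrative):

static int example_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			     int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgen;

	nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*rtgen), flags);
	rtgen = NLMSG_DATA(nlh);
	rtgen->rtgen_family = AF_UNSPEC;

	/* RTA_PUT() calls for attributes would go here */

	return NLMSG_END(skb, nlh);	/* fix up nlmsg_len, return skb->len */

nlmsg_failure:
	return -1;			/* NLMSG_NEW failed before writing anything */
}
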
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 91ac97c2077..e68dbf0bf57 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -89,6 +89,13 @@ enum {
RTM_GETANYCAST = 62,
#define RTM_GETANYCAST RTM_GETANYCAST
+ RTM_NEWNEIGHTBL = 64,
+#define RTM_NEWNEIGHTBL RTM_NEWNEIGHTBL
+ RTM_GETNEIGHTBL = 66,
+#define RTM_GETNEIGHTBL RTM_GETNEIGHTBL
+ RTM_SETNEIGHTBL,
+#define RTM_SETNEIGHTBL RTM_SETNEIGHTBL
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -493,6 +500,106 @@ struct nda_cacheinfo
__u32 ndm_refcnt;
};
+
+/*****************************************************************
+ * Neighbour tables specific messages.
+ *
+ * To retrieve the neighbour tables send RTM_GETNEIGHTBL with the
+ * NLM_F_DUMP flag set. Every neighbour table configuration is
+ * spread over multiple messages to avoid running into message
+ * size limits on systems with many interfaces. The first message
+ * in the sequence transports all not device specific data such as
+ * statistics, configuration, and the default parameter set.
+ * This message is followed by 0..n messages carrying device
+ * specific parameter sets.
+ * Although the ordering should be sufficient, NDTA_NAME can be
+ * used to identify sequences. The initial message can be identified
+ * by checking for NDTA_CONFIG. The device specific messages do
+ * not contain this TLV but have NDTPA_IFINDEX set to the
+ * corresponding interface index.
+ *
+ * To change neighbour table attributes, send RTM_SETNEIGHTBL
+ * with NDTA_NAME set. Changeable attributes include NDTA_THRESH[1-3],
+ * NDTA_GC_INTERVAL, and all TLVs in NDTA_PARMS unless marked
+ * otherwise. Device specific parameter sets can be changed by
+ * setting NDTPA_IFINDEX to the interface index of the corresponding
+ * device.
+ ****/
+
+struct ndt_stats
+{
+ __u64 ndts_allocs;
+ __u64 ndts_destroys;
+ __u64 ndts_hash_grows;
+ __u64 ndts_res_failed;
+ __u64 ndts_lookups;
+ __u64 ndts_hits;
+ __u64 ndts_rcv_probes_mcast;
+ __u64 ndts_rcv_probes_ucast;
+ __u64 ndts_periodic_gc_runs;
+ __u64 ndts_forced_gc_runs;
+};
+
+enum {
+ NDTPA_UNSPEC,
+ NDTPA_IFINDEX, /* u32, unchangeable */
+ NDTPA_REFCNT, /* u32, read-only */
+ NDTPA_REACHABLE_TIME, /* u64, read-only, msecs */
+ NDTPA_BASE_REACHABLE_TIME, /* u64, msecs */
+ NDTPA_RETRANS_TIME, /* u64, msecs */
+ NDTPA_GC_STALETIME, /* u64, msecs */
+ NDTPA_DELAY_PROBE_TIME, /* u64, msecs */
+ NDTPA_QUEUE_LEN, /* u32 */
+ NDTPA_APP_PROBES, /* u32 */
+ NDTPA_UCAST_PROBES, /* u32 */
+ NDTPA_MCAST_PROBES, /* u32 */
+ NDTPA_ANYCAST_DELAY, /* u64, msecs */
+ NDTPA_PROXY_DELAY, /* u64, msecs */
+ NDTPA_PROXY_QLEN, /* u32 */
+ NDTPA_LOCKTIME, /* u64, msecs */
+ __NDTPA_MAX
+};
+#define NDTPA_MAX (__NDTPA_MAX - 1)
+
+struct ndtmsg
+{
+ __u8 ndtm_family;
+ __u8 ndtm_pad1;
+ __u16 ndtm_pad2;
+};
+
+struct ndt_config
+{
+ __u16 ndtc_key_len;
+ __u16 ndtc_entry_size;
+ __u32 ndtc_entries;
+ __u32 ndtc_last_flush; /* delta to now in msecs */
+ __u32 ndtc_last_rand; /* delta to now in msecs */
+ __u32 ndtc_hash_rnd;
+ __u32 ndtc_hash_mask;
+ __u32 ndtc_hash_chain_gc;
+ __u32 ndtc_proxy_qlen;
+};
+
+enum {
+ NDTA_UNSPEC,
+ NDTA_NAME, /* char *, unchangeable */
+ NDTA_THRESH1, /* u32 */
+ NDTA_THRESH2, /* u32 */
+ NDTA_THRESH3, /* u32 */
+ NDTA_CONFIG, /* struct ndt_config, read-only */
+ NDTA_PARMS, /* nested TLV NDTPA_* */
+ NDTA_STATS, /* struct ndt_stats, read-only */
+ NDTA_GC_INTERVAL, /* u64, msecs */
+ __NDTA_MAX
+};
+#define NDTA_MAX (__NDTA_MAX - 1)
+
+#define NDTA_RTA(r) ((struct rtattr*)(((char*)(r)) + \
+ NLMSG_ALIGN(sizeof(struct ndtmsg))))
+#define NDTA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ndtmsg))
+
+
/****
* General form of address family dependent message.
****/
@@ -789,6 +896,75 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
goto rtattr_failure; \
memcpy(skb_put(skb, RTA_ALIGN(attrlen)), data, attrlen); })
+
+#define RTA_PUT_U8(skb, attrtype, value) \
+({ u8 _tmp = (value); \
+ RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); })
+
+#define RTA_PUT_U16(skb, attrtype, value) \
+({ u16 _tmp = (value); \
+ RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); })
+
+#define RTA_PUT_U32(skb, attrtype, value) \
+({ u32 _tmp = (value); \
+ RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); })
+
+#define RTA_PUT_U64(skb, attrtype, value) \
+({ u64 _tmp = (value); \
+ RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); })
+
+#define RTA_PUT_SECS(skb, attrtype, value) \
+ RTA_PUT_U64(skb, attrtype, (value) / HZ)
+
+#define RTA_PUT_MSECS(skb, attrtype, value) \
+ RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value))
+
+#define RTA_PUT_STRING(skb, attrtype, value) \
+ RTA_PUT(skb, attrtype, strlen(value) + 1, value)
+
+#define RTA_PUT_FLAG(skb, attrtype) \
+ RTA_PUT(skb, attrtype, 0, NULL)
+
+#define RTA_NEST(skb, type) \
+({ struct rtattr *__start = (struct rtattr *) (skb)->tail; \
+ RTA_PUT(skb, type, 0, NULL); \
+ __start; })
+
+#define RTA_NEST_END(skb, start) \
+({ (start)->rta_len = ((skb)->tail - (unsigned char *) (start)); \
+ (skb)->len; })
+
+#define RTA_NEST_CANCEL(skb, start) \
+({ if (start) \
+ skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
+ -1; })
+
+#define RTA_GET_U8(rta) \
+({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \
+ goto rtattr_failure; \
+ *(u8 *) RTA_DATA(rta); })
+
+#define RTA_GET_U16(rta) \
+({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \
+ goto rtattr_failure; \
+ *(u16 *) RTA_DATA(rta); })
+
+#define RTA_GET_U32(rta) \
+({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \
+ goto rtattr_failure; \
+ *(u32 *) RTA_DATA(rta); })
+
+#define RTA_GET_U64(rta) \
+({ u64 _tmp; \
+ if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \
+ goto rtattr_failure; \
+ memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \
+ _tmp; })
+
+#define RTA_GET_FLAG(rta) (!!(rta))
+
+#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ)
+#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta)))
static inline struct rtattr *
__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
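
The nesting macros keep the established goto-based control flow: RTA_NEST emits a zero-length attribute and remembers where it started, RTA_NEST_END patches the accumulated length in, and RTA_NEST_CANCEL trims everything back on failure. A condensed sketch of the intended usage, reusing the NDTA_/NDTPA_ attributes this patch defines (neightbl_fill_parms() later in the patch is the real in-tree user):

static int fill_parms_nest(struct sk_buff *skb)
{
	struct rtattr *nest = RTA_NEST(skb, NDTA_PARMS);

	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, 3);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, 1 * HZ);

	return RTA_NEST_END(skb, nest);		/* patch rta_len, return skb->len */

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);	/* trim back to the nest start */
}
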
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7d66385ae75..76cf7e60216 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -64,6 +64,7 @@ extern int kmem_cache_shrink(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);
+extern const char *kmem_cache_name(kmem_cache_t *);
extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);
/* Size description struct for general caches. */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 14a55e3e3a5..97a7c9e03df 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -230,6 +230,17 @@ struct tcp_options_received {
__u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
};
+struct tcp_request_sock {
+ struct inet_request_sock req;
+ __u32 rcv_isn;
+ __u32 snt_isn;
+};
+
+static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
+{
+ return (struct tcp_request_sock *)req;
+}
+
struct tcp_sock {
/* inet_sock has to be the first member of tcp_sock */
struct inet_sock inet;
@@ -368,22 +379,7 @@ struct tcp_sock {
__u32 total_retrans; /* Total retransmits for entire connection */
- /* The syn_wait_lock is necessary only to avoid proc interface having
- * to grab the main lock sock while browsing the listening hash
- * (otherwise it's deadlock prone).
- * This lock is acquired in read mode only from listening_get_next()
- * and it's acquired in write mode _only_ from code that is actively
- * changing the syn_wait_queue. All readers that are holding
- * the master sock lock don't need to grab this lock in read mode
- * too as the syn_wait_queue writes are always protected from
- * the main sock lock.
- */
- rwlock_t syn_wait_lock;
- struct tcp_listen_opt *listen_opt;
-
- /* FIFO of established children */
- struct open_request *accept_queue;
- struct open_request *accept_queue_tail;
+ struct request_sock_queue accept_queue; /* FIFO of established children */
unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index fd2ef742a9f..d68391a9b9f 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -174,6 +174,8 @@ enum xfrm_attr_type_t {
XFRMA_ALG_COMP, /* struct xfrm_algo */
XFRMA_ENCAP, /* struct xfrm_algo + struct xfrm_encap_tmpl */
XFRMA_TMPL, /* 1 or more struct xfrm_user_tmpl */
+ XFRMA_SA,
+ XFRMA_POLICY,
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -257,5 +259,7 @@ struct xfrm_usersa_flush {
#define XFRMGRP_ACQUIRE 1
#define XFRMGRP_EXPIRE 2
+#define XFRMGRP_SA 4
+#define XFRMGRP_POLICY 8
#endif /* _LINUX_XFRM_H */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 4f33bbc21e7..89809891e5a 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -65,11 +65,10 @@ struct neighbour;
struct neigh_parms
{
+ struct net_device *dev;
struct neigh_parms *next;
int (*neigh_setup)(struct neighbour *);
struct neigh_table *tbl;
- int entries;
- void *priv;
void *sysctl_table;
@@ -192,7 +191,6 @@ struct neigh_table
atomic_t entries;
rwlock_t lock;
unsigned long last_rand;
- struct neigh_parms *parms_list;
kmem_cache_t *kmem_cachep;
struct neigh_statistics *stats;
struct neighbour **hash_buckets;
@@ -252,6 +250,9 @@ extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern void neigh_app_ns(struct neighbour *n);
+extern int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb);
+extern int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+
extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
new file mode 100644
index 00000000000..72fd6f5e86b
--- /dev/null
+++ b/include/net/request_sock.h
@@ -0,0 +1,255 @@
+/*
+ * NET Generic infrastructure for Network protocols.
+ *
+ * Definitions for request_sock
+ *
+ * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * From code originally in include/net/tcp.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _REQUEST_SOCK_H
+#define _REQUEST_SOCK_H
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <net/sock.h>
+
+struct request_sock;
+struct sk_buff;
+struct dst_entry;
+struct proto;
+
+struct request_sock_ops {
+ int family;
+ kmem_cache_t *slab;
+ int obj_size;
+ int (*rtx_syn_ack)(struct sock *sk,
+ struct request_sock *req,
+ struct dst_entry *dst);
+ void (*send_ack)(struct sk_buff *skb,
+ struct request_sock *req);
+ void (*send_reset)(struct sk_buff *skb);
+ void (*destructor)(struct request_sock *req);
+};
+
+/* struct request_sock - mini sock to represent a connection request
+ */
+struct request_sock {
+ struct request_sock *dl_next; /* Must be first member! */
+ u16 mss;
+ u8 retrans;
+ u8 __pad;
+ /* The following two fields can be easily recomputed I think -AK */
+ u32 window_clamp; /* window clamp at creation time */
+ u32 rcv_wnd; /* rcv_wnd offered first time */
+ u32 ts_recent;
+ unsigned long expires;
+ struct request_sock_ops *rsk_ops;
+ struct sock *sk;
+};
+
+static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
+{
+ struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+
+ if (req != NULL)
+ req->rsk_ops = ops;
+
+ return req;
+}
+
+static inline void __reqsk_free(struct request_sock *req)
+{
+ kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+static inline void reqsk_free(struct request_sock *req)
+{
+ req->rsk_ops->destructor(req);
+ __reqsk_free(req);
+}
+
+extern int sysctl_max_syn_backlog;
+
+/** struct listen_sock - listen state
+ *
+ * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
+ */
+struct listen_sock {
+ u8 max_qlen_log;
+ /* 3 bytes hole, try to use */
+ int qlen;
+ int qlen_young;
+ int clock_hand;
+ u32 hash_rnd;
+ struct request_sock *syn_table[0];
+};
+
+/** struct request_sock_queue - queue of request_socks
+ *
+ * @rskq_accept_head - FIFO head of established children
+ * @rskq_accept_tail - FIFO tail of established children
+ * @syn_wait_lock - serializer
+ *
+ * %syn_wait_lock is necessary only to avoid the proc interface having to grab
+ * the main sock lock while browsing the listening hash (otherwise it's
+ * deadlock prone).
+ *
+ * This lock is acquired in read mode only from the listening_get_next()
+ * seq_file op and it's acquired in write mode _only_ from code that is
+ * actively changing rskq_accept_head. All readers holding the master sock
+ * lock don't need to grab this lock in read mode too, as rskq_accept_head
+ * writes are always protected by the main sock lock.
+ */
+struct request_sock_queue {
+ struct request_sock *rskq_accept_head;
+ struct request_sock *rskq_accept_tail;
+ rwlock_t syn_wait_lock;
+ struct listen_sock *listen_opt;
+};
+
+extern int reqsk_queue_alloc(struct request_sock_queue *queue,
+ const int nr_table_entries);
+
+static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
+{
+ struct listen_sock *lopt;
+
+ write_lock_bh(&queue->syn_wait_lock);
+ lopt = queue->listen_opt;
+ queue->listen_opt = NULL;
+ write_unlock_bh(&queue->syn_wait_lock);
+
+ return lopt;
+}
+
+static inline void reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+ kfree(reqsk_queue_yank_listen_sk(queue));
+}
+
+static inline struct request_sock *reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
+{
+ struct request_sock *req = queue->rskq_accept_head;
+
+ queue->rskq_accept_head = NULL;
+ return req;
+}
+
+static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+{
+ return queue->rskq_accept_head == NULL;
+}
+
+static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
+ struct request_sock *req,
+ struct request_sock **prev_req)
+{
+ write_lock(&queue->syn_wait_lock);
+ *prev_req = req->dl_next;
+ write_unlock(&queue->syn_wait_lock);
+}
+
+static inline void reqsk_queue_add(struct request_sock_queue *queue,
+ struct request_sock *req,
+ struct sock *parent,
+ struct sock *child)
+{
+ req->sk = child;
+ sk_acceptq_added(parent);
+
+ if (queue->rskq_accept_head == NULL)
+ queue->rskq_accept_head = req;
+ else
+ queue->rskq_accept_tail->dl_next = req;
+
+ queue->rskq_accept_tail = req;
+ req->dl_next = NULL;
+}
+
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+{
+ struct request_sock *req = queue->rskq_accept_head;
+
+ BUG_TRAP(req != NULL);
+
+ queue->rskq_accept_head = req->dl_next;
+ if (queue->rskq_accept_head == NULL)
+ queue->rskq_accept_tail = NULL;
+
+ return req;
+}
+
+static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
+ struct sock *parent)
+{
+ struct request_sock *req = reqsk_queue_remove(queue);
+ struct sock *child = req->sk;
+
+ BUG_TRAP(child != NULL);
+
+ sk_acceptq_removed(parent);
+ __reqsk_free(req);
+ return child;
+}
+
+static inline int reqsk_queue_removed(struct request_sock_queue *queue,
+ struct request_sock *req)
+{
+ struct listen_sock *lopt = queue->listen_opt;
+
+ if (req->retrans == 0)
+ --lopt->qlen_young;
+
+ return --lopt->qlen;
+}
+
+static inline int reqsk_queue_added(struct request_sock_queue *queue)
+{
+ struct listen_sock *lopt = queue->listen_opt;
+ const int prev_qlen = lopt->qlen;
+
+ lopt->qlen_young++;
+ lopt->qlen++;
+ return prev_qlen;
+}
+
+static inline int reqsk_queue_len(struct request_sock_queue *queue)
+{
+ return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+}
+
+static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+{
+ return queue->listen_opt->qlen_young;
+}
+
+static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+{
+ return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+}
+
+static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
+ u32 hash, struct request_sock *req,
+ unsigned timeout)
+{
+ struct listen_sock *lopt = queue->listen_opt;
+
+ req->expires = jiffies + timeout;
+ req->retrans = 0;
+ req->sk = NULL;
+ req->dl_next = lopt->syn_table[hash];
+
+ write_lock(&queue->syn_wait_lock);
+ lopt->syn_table[hash] = req;
+ write_unlock(&queue->syn_wait_lock);
+}
+
+#endif /* _REQUEST_SOCK_H */
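
Taken together, the queue API covers the whole three-stage life of a connection request: parked in the listener's SYN hash table, promoted to the accept FIFO once the handshake completes, then consumed by accept(). A rough sketch of that flow, assuming the listener has already called reqsk_queue_alloc() and with all protocol work and error handling elided:

/* SYN received: allocate a mini-socket and park it in the SYN table. */
static void on_syn(struct request_sock_queue *q, struct request_sock_ops *ops,
		   u32 hash, unsigned timeout)
{
	struct request_sock *req = reqsk_alloc(ops);

	if (req != NULL) {
		reqsk_queue_hash_req(q, hash, req, timeout);
		reqsk_queue_added(q);
	}
}

/* Final ACK received: unlink from the SYN table, queue the new child. */
static void on_established(struct request_sock_queue *q,
			   struct request_sock *req,
			   struct request_sock **prev,
			   struct sock *parent, struct sock *child)
{
	reqsk_queue_unlink(q, req, prev);
	reqsk_queue_removed(q, req);
	reqsk_queue_add(q, req, parent, child);
}

/* accept(): pop an established child; its request_sock is freed here. */
static struct sock *on_accept(struct request_sock_queue *q, struct sock *parent)
{
	return reqsk_queue_empty(q) ? NULL : reqsk_queue_get_child(q, parent);
}
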
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c57504b3b51..7b97405e2db 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -172,4 +172,126 @@ tcf_destroy(struct tcf_proto *tp)
kfree(tp);
}
+static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ __skb_queue_tail(list, skb);
+ sch->qstats.backlog += skb->len;
+ sch->bstats.bytes += skb->len;
+ sch->bstats.packets++;
+
+ return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+{
+ return __qdisc_enqueue_tail(skb, sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb = __skb_dequeue(list);
+
+ if (likely(skb != NULL))
+ sch->qstats.backlog -= skb->len;
+
+ return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+{
+ return __qdisc_dequeue_head(sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb = __skb_dequeue_tail(list);
+
+ if (likely(skb != NULL))
+ sch->qstats.backlog -= skb->len;
+
+ return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
+{
+ return __qdisc_dequeue_tail(sch, &sch->q);
+}
+
+static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ __skb_queue_head(list, skb);
+ sch->qstats.backlog += skb->len;
+ sch->qstats.requeues++;
+
+ return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ return __qdisc_requeue(skb, sch, &sch->q);
+}
+
+static inline void __qdisc_reset_queue(struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ /*
+ * We do not know the backlog in bytes of this list, it
+ * is up to the caller to correct it
+ */
+ skb_queue_purge(list);
+}
+
+static inline void qdisc_reset_queue(struct Qdisc *sch)
+{
+ __qdisc_reset_queue(sch, &sch->q);
+ sch->qstats.backlog = 0;
+}
+
+static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
+
+ if (likely(skb != NULL)) {
+ unsigned int len = skb->len;
+ kfree_skb(skb);
+ return len;
+ }
+
+ return 0;
+}
+
+static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
+{
+ return __qdisc_queue_drop(sch, &sch->q);
+}
+
+static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
+{
+ kfree_skb(skb);
+ sch->qstats.drops++;
+
+ return NET_XMIT_DROP;
+}
+
+static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
+{
+ sch->qstats.drops++;
+
+#ifdef CONFIG_NET_CLS_POLICE
+ if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
+ goto drop;
+
+ return NET_XMIT_SUCCESS;
+
+drop:
+#endif
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+
#endif
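
These helpers centralize the backlog and byte/packet bookkeeping every queueing discipline used to open-code; the sch_fifo.c rewrite in this patch (see the diffstat) reduces the FIFO qdiscs to little more than calls into them. A compressed sketch of a bfifo-style enqueue/dequeue pair under that assumption, with the byte limit kept in an illustrative private struct:

struct fifo_sched_data {
	unsigned int limit;	/* illustrative: byte budget for the queue */
};

static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (likely(sch->qstats.backlog + skb->len <= q->limit))
		return qdisc_enqueue_tail(skb, sch);	/* updates backlog/bstats */

	return qdisc_reshape_fail(skb, sch);	/* drop (or reshape) and count it */
}

static struct sk_buff *bfifo_dequeue(struct Qdisc *sch)
{
	return qdisc_dequeue_head(sch);		/* decrements backlog */
}
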
diff --git a/include/net/sock.h b/include/net/sock.h
index a9ef3a6a13f..e593af5b1ec 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -484,6 +484,8 @@ extern void sk_stream_kill_queues(struct sock *sk);
extern int sk_wait_data(struct sock *sk, long *timeo);
+struct request_sock_ops;
+
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
* transport -> network interface is defined by struct inet_proto
@@ -547,6 +549,8 @@ struct proto {
kmem_cache_t *slab;
unsigned int obj_size;
+ struct request_sock_ops *rsk_prot;
+
struct module *owner;
char name[32];
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e71f8ba3e10..f730935b824 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -31,6 +31,7 @@
#include <linux/cache.h>
#include <linux/percpu.h>
#include <net/checksum.h>
+#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
@@ -563,7 +564,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
#define TCP_NAGLE_PUSH 4 /* Cork is overriden for already queued data */
/* sysctl variables for tcp */
-extern int sysctl_max_syn_backlog;
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
@@ -613,74 +613,6 @@ extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
-struct open_request;
-
-struct or_calltable {
- int family;
- int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*);
- void (*send_ack) (struct sk_buff *skb, struct open_request *req);
- void (*destructor) (struct open_request *req);
- void (*send_reset) (struct sk_buff *skb);
-};
-
-struct tcp_v4_open_req {
- __u32 loc_addr;
- __u32 rmt_addr;
- struct ip_options *opt;
-};
-
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-struct tcp_v6_open_req {
- struct in6_addr loc_addr;
- struct in6_addr rmt_addr;
- struct sk_buff *pktopts;
- int iif;
-};
-#endif
-
-/* this structure is too big */
-struct open_request {
- struct open_request *dl_next; /* Must be first member! */
- __u32 rcv_isn;
- __u32 snt_isn;
- __u16 rmt_port;
- __u16 mss;
- __u8 retrans;
- __u8 __pad;
- __u16 snd_wscale : 4,
- rcv_wscale : 4,
- tstamp_ok : 1,
- sack_ok : 1,
- wscale_ok : 1,
- ecn_ok : 1,
- acked : 1;
- /* The following two fields can be easily recomputed I think -AK */
- __u32 window_clamp; /* window clamp at creation time */
- __u32 rcv_wnd; /* rcv_wnd offered first time */
- __u32 ts_recent;
- unsigned long expires;
- struct or_calltable *class;
- struct sock *sk;
- union {
- struct tcp_v4_open_req v4_req;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
- struct tcp_v6_open_req v6_req;
-#endif
- } af;
-};
-
-/* SLAB cache for open requests. */
-extern kmem_cache_t *tcp_openreq_cachep;
-
-#define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
-#define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
-
-static inline void tcp_openreq_free(struct open_request *req)
-{
- req->class->destructor(req);
- tcp_openreq_fastfree(req);
-}
-
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
#else
@@ -708,7 +640,7 @@ struct tcp_func {
struct sock * (*syn_recv_sock) (struct sock *sk,
struct sk_buff *skb,
- struct open_request *req,
+ struct request_sock *req,
struct dst_entry *dst);
int (*remember_stamp) (struct sock *sk);
@@ -852,8 +784,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
unsigned len);
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
- struct open_request *req,
- struct open_request **prev);
+ struct request_sock *req,
+ struct request_sock **prev);
extern int tcp_child_process(struct sock *parent,
struct sock *child,
struct sk_buff *skb);
@@ -903,12 +835,12 @@ extern int tcp_v4_conn_request(struct sock *sk,
struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
- struct open_request *req,
+ struct request_sock *req,
struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
struct sk_buff *skb,
- struct open_request *req,
+ struct request_sock *req,
struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk,
@@ -922,7 +854,7 @@ extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk,
struct dst_entry *dst,
- struct open_request *req);
+ struct request_sock *req);
extern int tcp_disconnect(struct sock *sk, int flags);
@@ -1750,99 +1682,71 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk->sk_rcvbuf);
}
-static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
+static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
struct sock *child)
{
- struct tcp_sock *tp = tcp_sk(sk);
-
- req->sk = child;
- sk_acceptq_added(sk);
-
- if (!tp->accept_queue_tail) {
- tp->accept_queue = req;
- } else {
- tp->accept_queue_tail->dl_next = req;
- }
- tp->accept_queue_tail = req;
- req->dl_next = NULL;
+ reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
}
-struct tcp_listen_opt
-{
- u8 max_qlen_log; /* log_2 of maximal queued SYNs */
- int qlen;
- int qlen_young;
- int clock_hand;
- u32 hash_rnd;
- struct open_request *syn_table[TCP_SYNQ_HSIZE];
-};
-
static inline void
-tcp_synq_removed(struct sock *sk, struct open_request *req)
+tcp_synq_removed(struct sock *sk, struct request_sock *req)
{
- struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
- if (--lopt->qlen == 0)
+ if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
tcp_delete_keepalive_timer(sk);
- if (req->retrans == 0)
- lopt->qlen_young--;
}
static inline void tcp_synq_added(struct sock *sk)
{
- struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
- if (lopt->qlen++ == 0)
+ if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
- lopt->qlen_young++;
}
static inline int tcp_synq_len(struct sock *sk)
{
- return tcp_sk(sk)->listen_opt->qlen;
+ return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
}
static inline int tcp_synq_young(struct sock *sk)
{
- return tcp_sk(sk)->listen_opt->qlen_young;
+ return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
}
static inline int tcp_synq_is_full(struct sock *sk)
{
- return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
+ return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
}
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
- struct open_request **prev)
+static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
+ struct request_sock **prev)
{
- write_lock(&tp->syn_wait_lock);
- *prev = req->dl_next;
- write_unlock(&tp->syn_wait_lock);
+ reqsk_queue_unlink(&tp->accept_queue, req, prev);
}
-static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
- struct open_request **prev)
+static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
+ struct request_sock **prev)
{
tcp_synq_unlink(tcp_sk(sk), req, prev);
tcp_synq_removed(sk, req);
- tcp_openreq_free(req);
+ reqsk_free(req);
}
-static __inline__ void tcp_openreq_init(struct open_request *req,
+static __inline__ void tcp_openreq_init(struct request_sock *req,
struct tcp_options_received *rx_opt,
struct sk_buff *skb)
{
+ struct inet_request_sock *ireq = inet_rsk(req);
+
req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
- req->rcv_isn = TCP_SKB_CB(skb)->seq;
+ tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
- req->tstamp_ok = rx_opt->tstamp_ok;
- req->sack_ok = rx_opt->sack_ok;
- req->snd_wscale = rx_opt->snd_wscale;
- req->wscale_ok = rx_opt->wscale_ok;
- req->acked = 0;
- req->ecn_ok = 0;
- req->rmt_port = skb->h.th->source;
+ ireq->tstamp_ok = rx_opt->tstamp_ok;
+ ireq->sack_ok = rx_opt->sack_ok;
+ ireq->snd_wscale = rx_opt->snd_wscale;
+ ireq->wscale_ok = rx_opt->wscale_ok;
+ ireq->acked = 0;
+ ireq->ecn_ok = 0;
+ ireq->rmt_port = skb->h.th->source;
}
extern void tcp_enter_memory_pressure(void);
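
The caller-side conversion is mechanical: generic fields stay on struct request_sock itself, TCP sequence numbers move behind tcp_rsk(), and addressing/option state moves behind inet_rsk() or tcp6_rsk(). The correspondence, as applied throughout net/ipv4 and net/ipv6 in this patch:

/* old open_request usage             new layered accessor          */
/* req->rcv_isn                  ->   tcp_rsk(req)->rcv_isn         */
/* req->rmt_port                 ->   inet_rsk(req)->rmt_port       */
/* req->af.v4_req.loc_addr       ->   inet_rsk(req)->loc_addr       */
/* req->af.v4_req.opt            ->   inet_rsk(req)->opt            */
/* req->af.v6_req.rmt_addr       ->   tcp6_rsk(req)->rmt_addr       */
/* tcp_openreq_free(req)         ->   reqsk_free(req)               */
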
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index dc1456389a9..64980ee8c92 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -2,6 +2,7 @@
#define _NET_TCP_ECN_H_ 1
#include <net/inet_ecn.h>
+#include <net/request_sock.h>
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
@@ -38,9 +39,9 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
}
static __inline__ void
-TCP_ECN_make_synack(struct open_request *req, struct tcphdr *th)
+TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
- if (req->ecn_ok)
+ if (inet_rsk(req)->ecn_ok)
th->ece = 1;
}
@@ -111,16 +112,16 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
}
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
- struct open_request *req)
+ struct request_sock *req)
{
- tp->ecn_flags = req->ecn_ok ? TCP_ECN_OK : 0;
+ tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
static __inline__ void
-TCP_ECN_create_request(struct open_request *req, struct tcphdr *th)
+TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
if (sysctl_tcp_ecn && th->ece && th->cwr)
- req->ecn_ok = 1;
+ inet_rsk(req)->ecn_ok = 1;
}
#endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d675836ba6c..0e65e02b7a1 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -158,6 +158,20 @@ enum {
XFRM_STATE_DEAD
};
+/* callback structure passed from either netlink or pfkey */
+struct km_event
+{
+ union {
+ u32 hard;
+ u32 proto;
+ u32 byid;
+ } data;
+
+ u32 seq;
+ u32 pid;
+ u32 event;
+};
+
struct xfrm_type;
struct xfrm_dst;
struct xfrm_policy_afinfo {
@@ -179,6 +193,8 @@ struct xfrm_policy_afinfo {
extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
+extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
#define XFRM_ACQ_EXPIRES 30
@@ -290,11 +306,11 @@ struct xfrm_mgr
{
struct list_head list;
char *id;
- int (*notify)(struct xfrm_state *x, int event);
+ int (*notify)(struct xfrm_state *x, struct km_event *c);
int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
struct xfrm_policy *(*compile_policy)(u16 family, int opt, u8 *data, int len, int *dir);
int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
- int (*notify_policy)(struct xfrm_policy *x, int dir, int event);
+ int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
};
extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -656,7 +672,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk)
return 0;
}
-extern void xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
static inline void xfrm_sk_free_policy(struct sock *sk)
{
@@ -817,7 +833,7 @@ extern int xfrm_state_add(struct xfrm_state *x);
extern int xfrm_state_update(struct xfrm_state *x);
extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family);
extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
-extern void xfrm_state_delete(struct xfrm_state *x);
+extern int xfrm_state_delete(struct xfrm_state *x);
extern void xfrm_state_flush(u8 proto);
extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
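
The km_event argument widens the old integer event into something both netlink and pfkey can interpret: a small data union plus the originating sequence number and pid. A hedged sketch of raising a state-expire notification with it (treating data.hard as the hard-vs-soft flag the union suggests, and XFRM_MSG_EXPIRE as the event id by assumption):

static void state_expired(struct xfrm_state *x, int hard)
{
	struct km_event c;

	c.data.hard = hard;		/* 1 = hard expire, 0 = soft warning */
	c.seq = 0;			/* not tied to a userspace request */
	c.pid = 0;			/* kernel-originated event */
	c.event = XFRM_MSG_EXPIRE;	/* assumed event id convention */

	km_state_notify(x, &c);
}
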
diff --git a/mm/slab.c b/mm/slab.c
index 84074264115..c78d343b3c5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2620,6 +2620,12 @@ unsigned int kmem_cache_size(kmem_cache_t *cachep)
}
EXPORT_SYMBOL(kmem_cache_size);
+const char *kmem_cache_name(kmem_cache_t *cachep)
+{
+ return cachep->name;
+}
+EXPORT_SYMBOL_GPL(kmem_cache_name);
+
struct ccupdate_struct {
kmem_cache_t *cachep;
struct array_cache *new[NR_CPUS];
diff --git a/net/core/Makefile b/net/core/Makefile
index 81f03243fe2..5e0c56b7f60 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -2,7 +2,8 @@
# Makefile for the Linux networking core.
#
-obj-y := sock.o skbuff.o iovec.o datagram.o stream.o scm.o gen_stats.o gen_estimator.o
+obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
+ gen_stats.o gen_estimator.o
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 43bdc521e20..f6bdcad47da 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1276,9 +1276,14 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
INIT_RCU_HEAD(&p->rcu_head);
p->reachable_time =
neigh_rand_reach_time(p->base_reachable_time);
- if (dev && dev->neigh_setup && dev->neigh_setup(dev, p)) {
- kfree(p);
- return NULL;
+ if (dev) {
+ if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
+ kfree(p);
+ return NULL;
+ }
+
+ dev_hold(dev);
+ p->dev = dev;
}
p->sysctl_table = NULL;
write_lock_bh(&tbl->lock);
@@ -1309,6 +1314,8 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
*p = parms->next;
parms->dead = 1;
write_unlock_bh(&tbl->lock);
+ if (parms->dev)
+ dev_put(parms->dev);
call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
return;
}
@@ -1546,20 +1553,323 @@ out:
return err;
}
+static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
+{
+ struct rtattr *nest = NULL;
+
+ nest = RTA_NEST(skb, NDTA_PARMS);
+
+ if (parms->dev)
+ RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
+
+ RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
+ RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
+ RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
+ RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
+ RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
+ RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
+ RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
+ RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
+ parms->base_reachable_time);
+ RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
+ RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
+ RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
+ RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
+ RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
+ RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
+
+ return RTA_NEST_END(skb, nest);
+
+rtattr_failure:
+ return RTA_NEST_CANCEL(skb, nest);
+}
+
+static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlmsghdr *nlh;
+ struct ndtmsg *ndtmsg;
+
+ nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
+ NLM_F_MULTI);
+
+ ndtmsg = NLMSG_DATA(nlh);
+
+ read_lock_bh(&tbl->lock);
+ ndtmsg->ndtm_family = tbl->family;
+
+ RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
+ RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
+ RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
+ RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
+ RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
+
+ {
+ unsigned long now = jiffies;
+ unsigned int flush_delta = now - tbl->last_flush;
+ unsigned int rand_delta = now - tbl->last_rand;
+
+ struct ndt_config ndc = {
+ .ndtc_key_len = tbl->key_len,
+ .ndtc_entry_size = tbl->entry_size,
+ .ndtc_entries = atomic_read(&tbl->entries),
+ .ndtc_last_flush = jiffies_to_msecs(flush_delta),
+ .ndtc_last_rand = jiffies_to_msecs(rand_delta),
+ .ndtc_hash_rnd = tbl->hash_rnd,
+ .ndtc_hash_mask = tbl->hash_mask,
+ .ndtc_hash_chain_gc = tbl->hash_chain_gc,
+ .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
+ };
+
+ RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
+ }
+
+ {
+ int cpu;
+ struct ndt_stats ndst;
+
+ memset(&ndst, 0, sizeof(ndst));
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ struct neigh_statistics *st;
+
+ if (!cpu_possible(cpu))
+ continue;
+
+ st = per_cpu_ptr(tbl->stats, cpu);
+ ndst.ndts_allocs += st->allocs;
+ ndst.ndts_destroys += st->destroys;
+ ndst.ndts_hash_grows += st->hash_grows;
+ ndst.ndts_res_failed += st->res_failed;
+ ndst.ndts_lookups += st->lookups;
+ ndst.ndts_hits += st->hits;
+ ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
+ ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
+ ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
+ ndst.ndts_forced_gc_runs += st->forced_gc_runs;
+ }
+
+ RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
+ }
+
+ BUG_ON(tbl->parms.dev);
+ if (neightbl_fill_parms(skb, &tbl->parms) < 0)
+ goto rtattr_failure;
+
+ read_unlock_bh(&tbl->lock);
+ return NLMSG_END(skb, nlh);
+
+rtattr_failure:
+ read_unlock_bh(&tbl->lock);
+ return NLMSG_CANCEL(skb, nlh);
+
+nlmsg_failure:
+ return -1;
+}
+
+static int neightbl_fill_param_info(struct neigh_table *tbl,
+ struct neigh_parms *parms,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct ndtmsg *ndtmsg;
+ struct nlmsghdr *nlh;
+
+ nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
+ NLM_F_MULTI);
+
+ ndtmsg = NLMSG_DATA(nlh);
+
+ read_lock_bh(&tbl->lock);
+ ndtmsg->ndtm_family = tbl->family;
+ RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
+
+ if (neightbl_fill_parms(skb, parms) < 0)
+ goto rtattr_failure;
+
+ read_unlock_bh(&tbl->lock);
+ return NLMSG_END(skb, nlh);
+
+rtattr_failure:
+ read_unlock_bh(&tbl->lock);
+ return NLMSG_CANCEL(skb, nlh);
+
+nlmsg_failure:
+ return -1;
+}
+
+static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
+ int ifindex)
+{
+ struct neigh_parms *p;
+
+ for (p = &tbl->parms; p; p = p->next)
+ if ((p->dev && p->dev->ifindex == ifindex) ||
+ (!p->dev && !ifindex))
+ return p;
+
+ return NULL;
+}
+
+int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct neigh_table *tbl;
+ struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
+ struct rtattr **tb = arg;
+ int err = -EINVAL;
+
+ if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
+ return -EINVAL;
+
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables; tbl; tbl = tbl->next) {
+ if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
+ continue;
+
+ if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
+ break;
+ }
+
+ if (tbl == NULL) {
+ err = -ENOENT;
+ goto errout;
+ }
+
+ /*
+ * We acquire tbl->lock to be nice to the periodic timers and
+ * make sure they always see a consistent set of values.
+ */
+ write_lock_bh(&tbl->lock);
+
+ if (tb[NDTA_THRESH1 - 1])
+ tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);
+
+ if (tb[NDTA_THRESH2 - 1])
+ tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);
+
+ if (tb[NDTA_THRESH3 - 1])
+ tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);
+
+ if (tb[NDTA_GC_INTERVAL - 1])
+ tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);
+
+ if (tb[NDTA_PARMS - 1]) {
+ struct rtattr *tbp[NDTPA_MAX];
+ struct neigh_parms *p;
+ u32 ifindex = 0;
+
+ if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
+ goto rtattr_failure;
+
+ if (tbp[NDTPA_IFINDEX - 1])
+ ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);
+
+ p = lookup_neigh_params(tbl, ifindex);
+ if (p == NULL) {
+ err = -ENOENT;
+ goto rtattr_failure;
+ }
+
+ if (tbp[NDTPA_QUEUE_LEN - 1])
+ p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);
+
+ if (tbp[NDTPA_PROXY_QLEN - 1])
+ p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);
+
+ if (tbp[NDTPA_APP_PROBES - 1])
+ p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);
+
+ if (tbp[NDTPA_UCAST_PROBES - 1])
+ p->ucast_probes =
+ RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);
+
+ if (tbp[NDTPA_MCAST_PROBES - 1])
+ p->mcast_probes =
+ RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);
+
+ if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
+ p->base_reachable_time =
+ RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);
+
+ if (tbp[NDTPA_GC_STALETIME - 1])
+ p->gc_staletime =
+ RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);
+
+ if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
+ p->delay_probe_time =
+ RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);
+
+ if (tbp[NDTPA_RETRANS_TIME - 1])
+ p->retrans_time =
+ RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);
+
+ if (tbp[NDTPA_ANYCAST_DELAY - 1])
+ p->anycast_delay =
+ RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);
+
+ if (tbp[NDTPA_PROXY_DELAY - 1])
+ p->proxy_delay =
+ RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);
+
+ if (tbp[NDTPA_LOCKTIME - 1])
+ p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
+ }
+
+ err = 0;
+
+rtattr_failure:
+ write_unlock_bh(&tbl->lock);
+errout:
+ read_unlock(&neigh_tbl_lock);
+ return err;
+}
+
+int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int idx, family;
+ int s_idx = cb->args[0];
+ struct neigh_table *tbl;
+
+ family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
+
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
+ struct neigh_parms *p;
+
+ if (idx < s_idx || (family && tbl->family != family))
+ continue;
+
+ if (neightbl_fill_info(tbl, skb, cb) <= 0)
+ break;
+
+ for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
+ if (idx < s_idx)
+ continue;
+
+ if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
+ goto out;
+ }
+
+ }
+out:
+ read_unlock(&neigh_tbl_lock);
+ cb->args[0] = idx;
+
+ return skb->len;
+}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
- u32 pid, u32 seq, int event)
+ u32 pid, u32 seq, int event, unsigned int flags)
{
unsigned long now = jiffies;
unsigned char *b = skb->tail;
struct nda_cacheinfo ci;
int locked = 0;
u32 probes;
- struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, event,
- sizeof(struct ndmsg));
+ struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
+ sizeof(struct ndmsg), flags);
struct ndmsg *ndm = NLMSG_DATA(nlh);
- nlh->nlmsg_flags = pid ? NLM_F_MULTI : 0;
ndm->ndm_family = n->ops->family;
ndm->ndm_flags = n->flags;
ndm->ndm_type = n->type;
@@ -1609,7 +1919,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
continue;
if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH) <= 0) {
+ RTM_NEWNEIGH,
+ NLM_F_MULTI) <= 0) {
read_unlock_bh(&tbl->lock);
rc = -1;
goto out;
@@ -2018,7 +2329,7 @@ void neigh_app_ns(struct neighbour *n)
if (!skb)
return;
- if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH) < 0) {
+ if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
kfree_skb(skb);
return;
}
@@ -2037,7 +2348,7 @@ static void neigh_app_notify(struct neighbour *n)
if (!skb)
return;
- if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH) < 0) {
+ if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
kfree_skb(skb);
return;
}
@@ -2352,6 +2663,8 @@ EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
+EXPORT_SYMBOL(neightbl_dump_info);
+EXPORT_SYMBOL(neightbl_set);
#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
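
On the userspace side, the new RTM_SETNEIGHTBL handler expects a struct ndtmsg followed by TLVs, with NDTA_NAME mandatory to select the table. A hedged sketch of building such a request to raise gc_thresh1 on the "arp_cache" table (the put_attr helper and buffer handling are illustrative; netlink socket plumbing is omitted):

#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static void put_attr(struct nlmsghdr *n, int type, const void *data, int len)
{
	struct rtattr *rta = (struct rtattr *)((char *)n + NLMSG_ALIGN(n->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

static void build_set_neightbl(char *buf)	/* zeroed buffer, e.g. 256 bytes */
{
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	__u32 thresh1 = 256;

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct ndtmsg));
	nlh->nlmsg_type = RTM_SETNEIGHTBL;
	nlh->nlmsg_flags = NLM_F_REQUEST;	/* ndtm_family stays 0 = any family */

	put_attr(nlh, NDTA_NAME, "arp_cache", strlen("arp_cache") + 1);
	put_attr(nlh, NDTA_THRESH1, &thresh1, sizeof(thresh1));
}
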
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
new file mode 100644
index 00000000000..bb55675f068
--- /dev/null
+++ b/net/core/request_sock.c
@@ -0,0 +1,64 @@
+/*
+ * NET Generic infrastructure for Network protocols.
+ *
+ * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * From code originally in include/net/tcp.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <net/request_sock.h>
+
+/*
+ * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
+ * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
+ * It would be better to replace it with a global counter for all sockets,
+ * but then some measure against one socket starving all other sockets
+ * would be needed.
+ *
+ * It was 128 by default. Experiments with real servers show that
+ * it is absolutely not enough even at 100 conn/sec. 256 cures most
+ * of the problems. This value is adjusted to 128 for very small machines
+ * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * Further increasing it requires changing the hash table size.
+ */
+int sysctl_max_syn_backlog = 256;
+EXPORT_SYMBOL(sysctl_max_syn_backlog);
+
+int reqsk_queue_alloc(struct request_sock_queue *queue,
+ const int nr_table_entries)
+{
+ const int lopt_size = sizeof(struct listen_sock) +
+ nr_table_entries * sizeof(struct request_sock *);
+ struct listen_sock *lopt = kmalloc(lopt_size, GFP_KERNEL);
+
+ if (lopt == NULL)
+ return -ENOMEM;
+
+ memset(lopt, 0, lopt_size);
+
+ for (lopt->max_qlen_log = 6;
+ (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog;
+ lopt->max_qlen_log++);
+
+ get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
+ rwlock_init(&queue->syn_wait_lock);
+ queue->rskq_accept_head = NULL;
+
+ write_lock_bh(&queue->syn_wait_lock);
+ queue->listen_opt = lopt;
+ write_unlock_bh(&queue->syn_wait_lock);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(reqsk_queue_alloc);
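
Worked example of the sizing loop above: with the default backlog of 256, max_qlen_log starts at 6 and climbs until 1 << 8 = 256 is no longer below the backlog, settling at 8; reqsk_queue_is_full() then reports full exactly when qlen >> 8 becomes nonzero, i.e. at 256 pending requests. A standalone check of that arithmetic:

#include <assert.h>

int main(void)
{
	int backlog = 256, max_qlen_log;

	for (max_qlen_log = 6; (1 << max_qlen_log) < backlog; max_qlen_log++)
		;

	assert(max_qlen_log == 8);
	assert((255 >> max_qlen_log) == 0);	/* not yet full */
	assert((256 >> max_qlen_log) == 1);	/* full */
	return 0;
}
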
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 00caf4b318b..e013d836a7a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -100,6 +100,7 @@ static const int rtm_min[RTM_NR_FAMILIES] =
[RTM_FAM(RTM_NEWPREFIX)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
[RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
[RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+ [RTM_FAM(RTM_NEWNEIGHTBL)] = NLMSG_LENGTH(sizeof(struct ndtmsg)),
};
static const int rta_max[RTM_NR_FAMILIES] =
@@ -113,6 +114,7 @@ static const int rta_max[RTM_NR_FAMILIES] =
[RTM_FAM(RTM_NEWTCLASS)] = TCA_MAX,
[RTM_FAM(RTM_NEWTFILTER)] = TCA_MAX,
[RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
+ [RTM_FAM(RTM_NEWNEIGHTBL)] = NDTA_MAX,
};
void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
@@ -176,14 +178,14 @@ rtattr_failure:
static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
- int type, u32 pid, u32 seq, u32 change)
+ int type, u32 pid, u32 seq, u32 change,
+ unsigned int flags)
{
struct ifinfomsg *r;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, type, sizeof(*r));
- if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
+ nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*r), flags);
r = NLMSG_DATA(nlh);
r->ifi_family = AF_UNSPEC;
r->ifi_type = dev->type;
@@ -273,7 +275,10 @@ static int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *c
for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
if (idx < s_idx)
continue;
- if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 0) <= 0)
+ if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, 0,
+ NLM_F_MULTI) <= 0)
break;
}
read_unlock(&dev_base_lock);
@@ -447,7 +452,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
if (!skb)
return;
- if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change) < 0) {
+ if (rtnetlink_fill_ifinfo(skb, dev, type, current->pid, 0, change, 0) < 0) {
kfree_skb(skb);
return;
}
@@ -649,14 +654,16 @@ static void rtnetlink_rcv(struct sock *sk, int len)
static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] =
{
- [RTM_GETLINK - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
- [RTM_SETLINK - RTM_BASE] = { .doit = do_setlink },
- [RTM_GETADDR - RTM_BASE] = { .dumpit = rtnetlink_dump_all },
- [RTM_GETROUTE - RTM_BASE] = { .dumpit = rtnetlink_dump_all },
- [RTM_NEWNEIGH - RTM_BASE] = { .doit = neigh_add },
- [RTM_DELNEIGH - RTM_BASE] = { .doit = neigh_delete },
- [RTM_GETNEIGH - RTM_BASE] = { .dumpit = neigh_dump_info },
- [RTM_GETRULE - RTM_BASE] = { .dumpit = rtnetlink_dump_all },
+ [RTM_GETLINK - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
+ [RTM_SETLINK - RTM_BASE] = { .doit = do_setlink },
+ [RTM_GETADDR - RTM_BASE] = { .dumpit = rtnetlink_dump_all },
+ [RTM_GETROUTE - RTM_BASE] = { .dumpit = rtnetlink_dump_all },
+ [RTM_NEWNEIGH - RTM_BASE] = { .doit = neigh_add },
+ [RTM_DELNEIGH - RTM_BASE] = { .doit = neigh_delete },
+ [RTM_GETNEIGH - RTM_BASE] = { .dumpit = neigh_dump_info },
+ [RTM_GETRULE - RTM_BASE] = { .dumpit = rtnetlink_dump_all },
+ [RTM_GETNEIGHTBL - RTM_BASE] = { .dumpit = neightbl_dump_info },
+ [RTM_SETNEIGHTBL - RTM_BASE] = { .doit = neightbl_set },
};
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
diff --git a/net/core/sock.c b/net/core/sock.c
index 96e00b08698..a6ec3ada7f9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -118,6 +118,7 @@
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
+#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
@@ -1363,6 +1364,7 @@ static LIST_HEAD(proto_list);
int proto_register(struct proto *prot, int alloc_slab)
{
+ char *request_sock_slab_name;
int rc = -ENOBUFS;
if (alloc_slab) {
@@ -1374,6 +1376,25 @@ int proto_register(struct proto *prot, int alloc_slab)
prot->name);
goto out;
}
+
+ if (prot->rsk_prot != NULL) {
+ static const char mask[] = "request_sock_%s";
+
+ request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+ if (request_sock_slab_name == NULL)
+ goto out_free_sock_slab;
+
+ sprintf(request_sock_slab_name, mask, prot->name);
+ prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
+ prot->rsk_prot->obj_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+ if (prot->rsk_prot->slab == NULL) {
+ printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
+ prot->name);
+ goto out_free_request_sock_slab_name;
+ }
+ }
}
write_lock(&proto_list_lock);
@@ -1382,6 +1403,12 @@ int proto_register(struct proto *prot, int alloc_slab)
rc = 0;
out:
return rc;
+out_free_request_sock_slab_name:
+ kfree(request_sock_slab_name);
+out_free_sock_slab:
+ kmem_cache_destroy(prot->slab);
+ prot->slab = NULL;
+ goto out;
}
EXPORT_SYMBOL(proto_register);
@@ -1395,6 +1422,14 @@ void proto_unregister(struct proto *prot)
prot->slab = NULL;
}
+ if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
+ const char *name = kmem_cache_name(prot->rsk_prot->slab);
+
+ kmem_cache_destroy(prot->rsk_prot->slab);
+ kfree(name);
+ prot->rsk_prot->slab = NULL;
+ }
+
list_del(&prot->node);
write_unlock(&proto_list_lock);
}
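
A protocol opts into the new shared slab management by pointing .rsk_prot at its request_sock_ops and sizing obj_size to its largest request variant, which is what the TCP conversion elsewhere in this patch does. A schematic sketch of that wiring (callback bodies and the many other struct proto fields omitted):

static struct request_sock_ops my_rsk_ops = {
	.family		= PF_INET,
	.obj_size	= sizeof(struct tcp_request_sock),
	/* .rtx_syn_ack, .send_ack, .send_reset, .destructor go here */
};

static struct proto my_proto = {
	.name		= "MYPROTO",
	.obj_size	= sizeof(struct sock),	/* illustrative */
	.rsk_prot	= &my_rsk_ops,
};

/* proto_register(&my_proto, 1) now creates both the "MYPROTO" slab and a
 * "request_sock_MYPROTO" slab; proto_unregister() tears both down. */
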
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ee7bf46eb78..00233ecbc9c 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -716,13 +716,13 @@ static int dn_dev_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *a
}
static int dn_dev_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
- u32 pid, u32 seq, int event)
+ u32 pid, u32 seq, int event, unsigned int flags)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ifm));
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*ifm), flags);
ifm = NLMSG_DATA(nlh);
ifm->ifa_family = AF_DECnet;
@@ -755,7 +755,7 @@ static void rtmsg_ifa(int event, struct dn_ifaddr *ifa)
netlink_set_err(rtnl, 0, RTMGRP_DECnet_IFADDR, ENOBUFS);
return;
}
- if (dn_dev_fill_ifaddr(skb, ifa, 0, 0, event) < 0) {
+ if (dn_dev_fill_ifaddr(skb, ifa, 0, 0, event, 0) < 0) {
kfree_skb(skb);
netlink_set_err(rtnl, 0, RTMGRP_DECnet_IFADDR, EINVAL);
return;
@@ -790,7 +790,8 @@ static int dn_dev_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
if (dn_dev_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
- RTM_NEWADDR) <= 0)
+ RTM_NEWADDR,
+ NLM_F_MULTI) <= 0)
goto done;
}
}
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index f6dfe96f45b..f32dba9e26f 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -101,7 +101,6 @@ struct neigh_table dn_neigh_table = {
.id = "dn_neigh_cache",
.parms ={
.tbl = &dn_neigh_table,
- .entries = 0,
.base_reachable_time = 30 * HZ,
.retrans_time = 1 * HZ,
.gc_staletime = 60 * HZ,
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 1e7b5c3ea21..2399fa8a3f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1465,7 +1465,8 @@ int dn_route_input(struct sk_buff *skb)
return dn_route_input_slow(skb);
}
-static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, int nowait)
+static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+ int event, int nowait, unsigned int flags)
{
struct dn_route *rt = (struct dn_route *)skb->dst;
struct rtmsg *r;
@@ -1473,9 +1474,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, int
unsigned char *b = skb->tail;
struct rta_cacheinfo ci;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*r));
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
r = NLMSG_DATA(nlh);
- nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
r->rtm_family = AF_DECnet;
r->rtm_dst_len = 16;
r->rtm_src_len = 0;
@@ -1596,7 +1596,7 @@ int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
- err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0);
+ err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
if (err == 0)
goto out_free;
@@ -1644,7 +1644,8 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
continue;
skb->dst = dst_clone(&rt->u.dst);
if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1) <= 0) {
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+ 1, NLM_F_MULTI) <= 0) {
dst_release(xchg(&skb->dst, NULL));
rcu_read_unlock_bh();
goto done;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 597587d170d..1060de70bc0 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -342,14 +342,15 @@ static struct notifier_block dn_fib_rules_notifier = {
.notifier_call = dn_fib_rules_event,
};
-static int dn_fib_fill_rule(struct sk_buff *skb, struct dn_fib_rule *r, struct netlink_callback *cb)
+static int dn_fib_fill_rule(struct sk_buff *skb, struct dn_fib_rule *r,
+ struct netlink_callback *cb, unsigned int flags)
{
struct rtmsg *rtm;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, NETLINK_CREDS(cb->skb)->pid, cb->nlh->nlmsg_seq, RTM_NEWRULE, sizeof(*rtm));
+ nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWRULE, sizeof(*rtm), flags);
rtm = NLMSG_DATA(nlh);
rtm->rtm_family = AF_DECnet;
rtm->rtm_dst_len = r->r_dst_len;
@@ -394,7 +395,7 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
for(r = dn_fib_rules, idx = 0; r; r = r->r_next, idx++) {
if (idx < s_idx)
continue;
- if (dn_fib_fill_rule(skb, r, cb) < 0)
+ if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
break;
}
read_unlock(&dn_fib_rules_lock);
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index dad5603912b..28ba5777a25 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -270,13 +270,13 @@ static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct dn_kern
static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
u8 tb_id, u8 type, u8 scope, void *dst, int dst_len,
- struct dn_fib_info *fi)
+ struct dn_fib_info *fi, unsigned int flags)
{
struct rtmsg *rtm;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*rtm));
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
rtm = NLMSG_DATA(nlh);
rtm->rtm_family = AF_DECnet;
rtm->rtm_dst_len = dst_len;
@@ -345,7 +345,7 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, int tb_id,
if (dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
f->fn_type, f->fn_scope, &f->fn_key, z,
- DN_FIB_INFO(f)) < 0) {
+ DN_FIB_INFO(f), 0) < 0) {
kfree_skb(skb);
return;
}
@@ -377,7 +377,7 @@ static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
tb->n,
(f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
f->fn_scope, &f->fn_key, dz->dz_order,
- f->fn_info) < 0) {
+ f->fn_info, NLM_F_MULTI) < 0) {
cb->args[3] = i;
return -1;
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 478a30179a5..650dcb12d9a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1030,14 +1030,13 @@ static struct notifier_block ip_netdev_notifier = {
};
static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
- u32 pid, u32 seq, int event)
+ u32 pid, u32 seq, int event, unsigned int flags)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ifm));
- if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*ifm), flags);
ifm = NLMSG_DATA(nlh);
ifm->ifa_family = AF_INET;
ifm->ifa_prefixlen = ifa->ifa_prefixlen;
@@ -1090,7 +1089,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
continue;
if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
- RTM_NEWADDR) <= 0) {
+ RTM_NEWADDR, NLM_F_MULTI) <= 0) {
rcu_read_unlock();
goto done;
}
@@ -1113,7 +1112,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
if (!skb)
netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, ENOBUFS);
- else if (inet_fill_ifaddr(skb, ifa, 0, 0, event) < 0) {
+ else if (inet_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) {
kfree_skb(skb);
netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, EINVAL);
} else {
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 6506dcc01b4..b10d6bb5ef3 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -703,7 +703,8 @@ fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
&f->fn_key,
fz->fz_order,
fa->fa_tos,
- fa->fa_info) < 0) {
+ fa->fa_info,
+ NLM_F_MULTI) < 0) {
cb->args[3] = i;
return -1;
}
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index ac4485f75e9..b729d97cfa9 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -30,7 +30,8 @@ extern int fib_nh_match(struct rtmsg *r, struct nlmsghdr *,
struct kern_rta *rta, struct fib_info *fi);
extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
u8 tb_id, u8 type, u8 scope, void *dst,
- int dst_len, u8 tos, struct fib_info *fi);
+ int dst_len, u8 tos, struct fib_info *fi,
+ unsigned int);
extern void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
int z, int tb_id,
struct nlmsghdr *n, struct netlink_skb_parms *req);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 39d0aadb9a2..0b298bbc151 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -367,13 +367,14 @@ static struct notifier_block fib_rules_notifier = {
static __inline__ int inet_fill_rule(struct sk_buff *skb,
struct fib_rule *r,
- struct netlink_callback *cb)
+ struct netlink_callback *cb,
+ unsigned int flags)
{
struct rtmsg *rtm;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, NETLINK_CREDS(cb->skb)->pid, cb->nlh->nlmsg_seq, RTM_NEWRULE, sizeof(*rtm));
+ nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWRULE, sizeof(*rtm), flags);
rtm = NLMSG_DATA(nlh);
rtm->rtm_family = AF_INET;
rtm->rtm_dst_len = r->r_dst_len;
@@ -422,7 +423,7 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
for (r=fib_rules, idx=0; r; r = r->r_next, idx++) {
if (idx < s_idx)
continue;
- if (inet_fill_rule(skb, r, cb) < 0)
+ if (inet_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
break;
}
read_unlock(&fib_rules_lock);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 029362d6613..c886b28ba9f 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -276,7 +276,7 @@ void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
struct nlmsghdr *n, struct netlink_skb_parms *req)
{
struct sk_buff *skb;
- u32 pid = req ? req->pid : 0;
+ u32 pid = req ? req->pid : n->nlmsg_pid;
int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);
skb = alloc_skb(size, GFP_KERNEL);
@@ -286,7 +286,7 @@ void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
if (fib_dump_info(skb, pid, n->nlmsg_seq, event, tb_id,
fa->fa_type, fa->fa_scope, &key, z,
fa->fa_tos,
- fa->fa_info) < 0) {
+ fa->fa_info, 0) < 0) {
kfree_skb(skb);
return;
}
@@ -932,13 +932,13 @@ u32 __fib_res_prefsrc(struct fib_result *res)
int
fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
u8 tb_id, u8 type, u8 scope, void *dst, int dst_len, u8 tos,
- struct fib_info *fi)
+ struct fib_info *fi, unsigned int flags)
{
struct rtmsg *rtm;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*rtm));
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
rtm = NLMSG_DATA(nlh);
rtm->rtm_family = AF_INET;
rtm->rtm_dst_len = dst_len;
@@ -1035,7 +1035,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
}
nl->nlmsg_flags = NLM_F_REQUEST;
- nl->nlmsg_pid = 0;
+ nl->nlmsg_pid = current->pid;
nl->nlmsg_seq = 0;
nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm));
if (cmd == SIOCDELRT) {
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 47012b93cad..f8b172f8981 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -360,14 +360,14 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
err = copied;
/* Reset and regenerate socket error */
- spin_lock_irq(&sk->sk_error_queue.lock);
+ spin_lock_bh(&sk->sk_error_queue.lock);
sk->sk_err = 0;
if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_irq(&sk->sk_error_queue.lock);
+ spin_unlock_bh(&sk->sk_error_queue.lock);
sk->sk_error_report(sk);
} else
- spin_unlock_irq(&sk->sk_error_queue.lock);
+ spin_unlock_bh(&sk->sk_error_queue.lock);
out_free_skb:
kfree_skb(skb);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 5b1ec586bae..d1835b1bc8c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -259,7 +259,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
-static int raw_send_hdrinc(struct sock *sk, void *from, int length,
+static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
struct rtable *rt,
unsigned int flags)
{
@@ -298,7 +298,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, int length,
goto error_fault;
/* We don't modify an invalid header */
- if (length >= sizeof(*iph) && iph->ihl * 4 <= length) {
+ if (length >= sizeof(*iph) && iph->ihl * 4U <= length) {
if (!iph->saddr)
iph->saddr = rt->rt_src;
iph->check = 0;
@@ -332,7 +332,7 @@ static void raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
u8 __user *type = NULL;
u8 __user *code = NULL;
int probed = 0;
- int i;
+ unsigned int i;
if (!msg->msg_iov)
return;
@@ -384,7 +384,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int err;
err = -EMSGSIZE;
- if (len < 0 || len > 0xFFFF)
+ if (len > 0xFFFF)
goto out;
/*
@@ -514,7 +514,10 @@ done:
kfree(ipc.opt);
ip_rt_put(rt);
-out: return err < 0 ? err : len;
+out:
+ if (err < 0)
+ return err;
+ return len;
do_confirm:
dst_confirm(&rt->u.dst);
@@ -610,7 +613,10 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
copied = skb->len;
done:
skb_free_datagram(sk, skb);
-out: return err ? err : copied;
+out:
+ if (err)
+ return err;
+ return copied;
}
static int raw_init(struct sock *sk)
@@ -691,11 +697,11 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
struct sk_buff *skb;
int amount = 0;
- spin_lock_irq(&sk->sk_receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->len;
- spin_unlock_irq(&sk->sk_receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
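
The _irq-to-_bh conversions in ip_sockglue.c and raw.c (and their ipv6 counterparts later in the patch) all rest on the same observation: sk_receive_queue and sk_error_queue are only touched from process context and softirq context, never from hard interrupt context, so disabling bottom halves gives sufficient exclusion and hard interrupts can stay enabled:

	spin_lock_bh(&sk->sk_receive_queue.lock);	/* BH-safe; IRQs stay on */
	skb = skb_peek(&sk->sk_receive_queue);
	if (skb != NULL)
		amount = skb->len;
	spin_unlock_bh(&sk->sk_receive_queue.lock);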
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a682d28e247..f4d53c91986 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2581,7 +2581,7 @@ int ip_route_output_key(struct rtable **rp, struct flowi *flp)
}
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
- int nowait)
+ int nowait, unsigned int flags)
{
struct rtable *rt = (struct rtable*)skb->dst;
struct rtmsg *r;
@@ -2591,9 +2591,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
#ifdef CONFIG_IP_MROUTE
struct rtattr *eptr;
#endif
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*r));
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
r = NLMSG_DATA(nlh);
- nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
@@ -2744,7 +2743,7 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
- RTM_NEWROUTE, 0);
+ RTM_NEWROUTE, 0, 0);
if (!err)
goto out_free;
if (err < 0) {
@@ -2781,8 +2780,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
continue;
skb->dst = dst_clone(&rt->u.dst);
if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- RTM_NEWROUTE, 1) <= 0) {
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+ 1, NLM_F_MULTI) <= 0) {
dst_release(xchg(&skb->dst, NULL));
rcu_read_unlock_bh();
goto done;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index e923d2f021a..72d01444218 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -169,10 +169,10 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
}
-extern struct or_calltable or_ipv4;
+extern struct request_sock_ops tcp_request_sock_ops;
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
- struct open_request *req,
+ struct request_sock *req,
struct dst_entry *dst)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -182,7 +182,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
if (child)
tcp_acceptq_queue(sk, req, child);
else
- tcp_openreq_free(req);
+ reqsk_free(req);
return child;
}
@@ -190,10 +190,12 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt)
{
+ struct inet_request_sock *ireq;
+ struct tcp_request_sock *treq;
struct tcp_sock *tp = tcp_sk(sk);
__u32 cookie = ntohl(skb->h.th->ack_seq) - 1;
struct sock *ret = sk;
- struct open_request *req;
+ struct request_sock *req;
int mss;
struct rtable *rt;
__u8 rcv_wscale;
@@ -209,19 +211,20 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
- req = tcp_openreq_alloc();
ret = NULL;
+ req = reqsk_alloc(&tcp_request_sock_ops); /* for safety */
if (!req)
goto out;
- req->rcv_isn = htonl(skb->h.th->seq) - 1;
- req->snt_isn = cookie;
+ ireq = inet_rsk(req);
+ treq = tcp_rsk(req);
+ treq->rcv_isn = htonl(skb->h.th->seq) - 1;
+ treq->snt_isn = cookie;
req->mss = mss;
- req->rmt_port = skb->h.th->source;
- req->af.v4_req.loc_addr = skb->nh.iph->daddr;
- req->af.v4_req.rmt_addr = skb->nh.iph->saddr;
- req->class = &or_ipv4; /* for savety */
- req->af.v4_req.opt = NULL;
+ ireq->rmt_port = skb->h.th->source;
+ ireq->loc_addr = skb->nh.iph->daddr;
+ ireq->rmt_addr = skb->nh.iph->saddr;
+ ireq->opt = NULL;
/* We threw the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -229,17 +232,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
if (opt && opt->optlen) {
int opt_size = sizeof(struct ip_options) + opt->optlen;
- req->af.v4_req.opt = kmalloc(opt_size, GFP_ATOMIC);
- if (req->af.v4_req.opt) {
- if (ip_options_echo(req->af.v4_req.opt, skb)) {
- kfree(req->af.v4_req.opt);
- req->af.v4_req.opt = NULL;
- }
+ ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
+ if (ireq->opt != NULL && ip_options_echo(ireq->opt, skb)) {
+ kfree(ireq->opt);
+ ireq->opt = NULL;
}
}
- req->snd_wscale = req->rcv_wscale = req->tstamp_ok = 0;
- req->wscale_ok = req->sack_ok = 0;
+ ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0;
+ ireq->wscale_ok = ireq->sack_ok = 0;
req->expires = 0UL;
req->retrans = 0;
@@ -253,15 +254,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = ((opt && opt->srr) ?
opt->faddr :
- req->af.v4_req.rmt_addr),
- .saddr = req->af.v4_req.loc_addr,
+ ireq->rmt_addr),
+ .saddr = ireq->loc_addr,
.tos = RT_CONN_FLAGS(sk) } },
.proto = IPPROTO_TCP,
.uli_u = { .ports =
{ .sport = skb->h.th->dest,
.dport = skb->h.th->source } } };
if (ip_route_output_key(&rt, &fl)) {
- tcp_openreq_free(req);
+ reqsk_free(req);
goto out;
}
}
@@ -272,7 +273,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
&req->rcv_wnd, &req->window_clamp,
0, &rcv_wscale);
/* BTW win scale with syncookies is 0 by definition */
- req->rcv_wscale = rcv_wscale;
+ ireq->rcv_wscale = rcv_wscale;
ret = get_cookie_sock(sk, skb, req, &rt->u.dst);
out: return ret;
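
The syncookies conversion shows the new minisock layering in miniature: reqsk_alloc() returns a generic struct request_sock sized by the ops table's obj_size, and protocols reach their private state through downcasts. A sketch of the access pattern implied by this patch (rport/daddr/isn stand in for real values):

	struct request_sock *req = reqsk_alloc(&tcp_request_sock_ops);

	if (req != NULL) {
		struct inet_request_sock *ireq = inet_rsk(req);	/* inet layer */
		struct tcp_request_sock *treq = tcp_rsk(req);	/* tcp layer  */

		/* fields that used to be req->af.v4_req.* and req->* */
		ireq->rmt_port = rport;
		ireq->loc_addr = daddr;
		treq->rcv_isn  = isn;
		/* ... and on error paths, give it back: */
		reqsk_free(req);	/* family destructor, then slab free */
	}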
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0d9a4fd5f1a..674bbd8cfd3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -271,7 +271,6 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
-kmem_cache_t *tcp_openreq_cachep;
kmem_cache_t *tcp_bucket_cachep;
kmem_cache_t *tcp_timewait_cachep;
@@ -317,7 +316,7 @@ EXPORT_SYMBOL(tcp_enter_memory_pressure);
static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
poll_table *wait)
{
- return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0;
+ return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0;
}
/*
@@ -463,28 +462,15 @@ int tcp_listen_start(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_listen_opt *lopt;
+ int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE);
+
+ if (rc != 0)
+ return rc;
sk->sk_max_ack_backlog = 0;
sk->sk_ack_backlog = 0;
- tp->accept_queue = tp->accept_queue_tail = NULL;
- rwlock_init(&tp->syn_wait_lock);
tcp_delack_init(tp);
- lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
- if (!lopt)
- return -ENOMEM;
-
- memset(lopt, 0, sizeof(struct tcp_listen_opt));
- for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
- if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
- break;
- get_random_bytes(&lopt->hash_rnd, 4);
-
- write_lock_bh(&tp->syn_wait_lock);
- tp->listen_opt = lopt;
- write_unlock_bh(&tp->syn_wait_lock);
-
/* There is a race window here: we announce ourselves listening,
* but this transition is still not validated by get_port().
* It is OK, because this socket enters to hash table only
@@ -501,10 +487,7 @@ int tcp_listen_start(struct sock *sk)
}
sk->sk_state = TCP_CLOSE;
- write_lock_bh(&tp->syn_wait_lock);
- tp->listen_opt = NULL;
- write_unlock_bh(&tp->syn_wait_lock);
- kfree(lopt);
+ reqsk_queue_destroy(&tp->accept_queue);
return -EADDRINUSE;
}
@@ -516,25 +499,23 @@ int tcp_listen_start(struct sock *sk)
static void tcp_listen_stop (struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_listen_opt *lopt = tp->listen_opt;
- struct open_request *acc_req = tp->accept_queue;
- struct open_request *req;
+ struct listen_sock *lopt;
+ struct request_sock *acc_req;
+ struct request_sock *req;
int i;
tcp_delete_keepalive_timer(sk);
/* make all the listen_opt local to us */
- write_lock_bh(&tp->syn_wait_lock);
- tp->listen_opt = NULL;
- write_unlock_bh(&tp->syn_wait_lock);
- tp->accept_queue = tp->accept_queue_tail = NULL;
+ lopt = reqsk_queue_yank_listen_sk(&tp->accept_queue);
+ acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);
if (lopt->qlen) {
for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
while ((req = lopt->syn_table[i]) != NULL) {
lopt->syn_table[i] = req->dl_next;
lopt->qlen--;
- tcp_openreq_free(req);
+ reqsk_free(req);
/* Following specs, it would be better either to send FIN
* (and enter FIN-WAIT-1, which is the normal close)
@@ -574,7 +555,7 @@ static void tcp_listen_stop (struct sock *sk)
sock_put(child);
sk_acceptq_removed(sk);
- tcp_openreq_fastfree(req);
+ __reqsk_free(req);
}
BUG_TRAP(!sk->sk_ack_backlog);
}
@@ -1345,7 +1326,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
cleanup_rbuf(sk, copied);
- if (tp->ucopy.task == user_recv) {
+ if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
/* Install new reader */
if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
user_recv = current;
@@ -1868,11 +1849,11 @@ static int wait_for_connect(struct sock *sk, long timeo)
prepare_to_wait_exclusive(sk->sk_sleep, &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
- if (!tp->accept_queue)
+ if (reqsk_queue_empty(&tp->accept_queue))
timeo = schedule_timeout(timeo);
lock_sock(sk);
err = 0;
- if (tp->accept_queue)
+ if (!reqsk_queue_empty(&tp->accept_queue))
break;
err = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
@@ -1895,7 +1876,6 @@ static int wait_for_connect(struct sock *sk, long timeo)
struct sock *tcp_accept(struct sock *sk, int flags, int *err)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct open_request *req;
struct sock *newsk;
int error;
@@ -1906,37 +1886,31 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err)
*/
error = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
- goto out;
+ goto out_err;
/* Find already established connection */
- if (!tp->accept_queue) {
+ if (reqsk_queue_empty(&tp->accept_queue)) {
long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
/* If this is a non blocking socket don't sleep */
error = -EAGAIN;
if (!timeo)
- goto out;
+ goto out_err;
error = wait_for_connect(sk, timeo);
if (error)
- goto out;
+ goto out_err;
}
- req = tp->accept_queue;
- if ((tp->accept_queue = req->dl_next) == NULL)
- tp->accept_queue_tail = NULL;
-
- newsk = req->sk;
- sk_acceptq_removed(sk);
- tcp_openreq_fastfree(req);
+ newsk = reqsk_queue_get_child(&tp->accept_queue, sk);
BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
- release_sock(sk);
- return newsk;
-
out:
release_sock(sk);
+ return newsk;
+out_err:
+ newsk = NULL;
*err = error;
- return NULL;
+ goto out;
}
/*
@@ -2271,13 +2245,6 @@ void __init tcp_init(void)
__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
sizeof(skb->cb));
- tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
- sizeof(struct open_request),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!tcp_openreq_cachep)
- panic("tcp_init: Cannot alloc open_request cache.");
-
tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
sizeof(struct tcp_bind_bucket),
0, SLAB_HWCACHE_ALIGN,
@@ -2374,7 +2341,6 @@ EXPORT_SYMBOL(tcp_destroy_sock);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
-EXPORT_SYMBOL(tcp_openreq_cachep);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
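
tcp_listen_start()/tcp_accept()/tcp_listen_stop() above now reduce to request_sock_queue primitives; the listener lifecycle under the new API reads roughly as follows (a sketch, with error handling elided):

	/* listen(): one call replaces the old tcp_listen_opt kmalloc and
	 * the four accept-queue fields in struct tcp_sock */
	if (reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE) != 0)
		return -ENOMEM;

	/* accept(): wait, then detach the first established child;
	 * reqsk_queue_get_child() also does the sk_acceptq_removed()
	 * bookkeeping and frees the request_sock */
	if (reqsk_queue_empty(&tp->accept_queue))
		error = wait_for_connect(sk, timeo);
	newsk = reqsk_queue_get_child(&tp->accept_queue, sk);

	/* unlisten/close: tear the queue back down */
	reqsk_queue_destroy(&tp->accept_queue);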
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 8faa8948f75..634befc0792 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -455,9 +455,10 @@ static int tcpdiag_dump_sock(struct sk_buff *skb, struct sock *sk,
}
static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk,
- struct open_request *req,
+ struct request_sock *req,
u32 pid, u32 seq)
{
+ const struct inet_request_sock *ireq = inet_rsk(req);
struct inet_sock *inet = inet_sk(sk);
unsigned char *b = skb->tail;
struct tcpdiagmsg *r;
@@ -482,9 +483,9 @@ static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk,
tmo = 0;
r->id.tcpdiag_sport = inet->sport;
- r->id.tcpdiag_dport = req->rmt_port;
- r->id.tcpdiag_src[0] = req->af.v4_req.loc_addr;
- r->id.tcpdiag_dst[0] = req->af.v4_req.rmt_addr;
+ r->id.tcpdiag_dport = ireq->rmt_port;
+ r->id.tcpdiag_src[0] = ireq->loc_addr;
+ r->id.tcpdiag_dst[0] = ireq->rmt_addr;
r->tcpdiag_expires = jiffies_to_msecs(tmo),
r->tcpdiag_rqueue = 0;
r->tcpdiag_wqueue = 0;
@@ -493,9 +494,9 @@ static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk,
#ifdef CONFIG_IP_TCPDIAG_IPV6
if (r->tcpdiag_family == AF_INET6) {
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
- &req->af.v6_req.loc_addr);
+ &tcp6_rsk(req)->loc_addr);
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
- &req->af.v6_req.rmt_addr);
+ &tcp6_rsk(req)->rmt_addr);
}
#endif
nlh->nlmsg_len = skb->tail - b;
@@ -513,7 +514,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
struct tcpdiag_entry entry;
struct tcpdiagreq *r = NLMSG_DATA(cb->nlh);
struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_listen_opt *lopt;
+ struct listen_sock *lopt;
struct rtattr *bc = NULL;
struct inet_sock *inet = inet_sk(sk);
int j, s_j;
@@ -528,9 +529,9 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
entry.family = sk->sk_family;
- read_lock_bh(&tp->syn_wait_lock);
+ read_lock_bh(&tp->accept_queue.syn_wait_lock);
- lopt = tp->listen_opt;
+ lopt = tp->accept_queue.listen_opt;
if (!lopt || !lopt->qlen)
goto out;
@@ -541,13 +542,15 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
}
for (j = s_j; j < TCP_SYNQ_HSIZE; j++) {
- struct open_request *req, *head = lopt->syn_table[j];
+ struct request_sock *req, *head = lopt->syn_table[j];
reqnum = 0;
for (req = head; req; reqnum++, req = req->dl_next) {
+ struct inet_request_sock *ireq = inet_rsk(req);
+
if (reqnum < s_reqnum)
continue;
- if (r->id.tcpdiag_dport != req->rmt_port &&
+ if (r->id.tcpdiag_dport != ireq->rmt_port &&
r->id.tcpdiag_dport)
continue;
@@ -555,16 +558,16 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
entry.saddr =
#ifdef CONFIG_IP_TCPDIAG_IPV6
(entry.family == AF_INET6) ?
- req->af.v6_req.loc_addr.s6_addr32 :
+ tcp6_rsk(req)->loc_addr.s6_addr32 :
#endif
- &req->af.v4_req.loc_addr;
+ &ireq->loc_addr;
entry.daddr =
#ifdef CONFIG_IP_TCPDIAG_IPV6
(entry.family == AF_INET6) ?
- req->af.v6_req.rmt_addr.s6_addr32 :
+ tcp6_rsk(req)->rmt_addr.s6_addr32 :
#endif
- &req->af.v4_req.rmt_addr;
- entry.dport = ntohs(req->rmt_port);
+ &ireq->rmt_addr;
+ entry.dport = ntohs(ireq->rmt_port);
if (!tcpdiag_bc_run(RTA_DATA(bc),
RTA_PAYLOAD(bc), &entry))
@@ -585,7 +588,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
}
out:
- read_unlock_bh(&tp->syn_wait_lock);
+ read_unlock_bh(&tp->accept_queue.syn_wait_lock);
return err;
}
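
With tp->listen_opt and tp->syn_wait_lock folded into tp->accept_queue, every reader of the SYN table — tcpdiag above, and the /proc and timer code later in the patch — follows the same shape; visit() is a placeholder:

	struct listen_sock *lopt;
	struct request_sock *req;
	int i;

	read_lock_bh(&tp->accept_queue.syn_wait_lock);
	lopt = tp->accept_queue.listen_opt;
	if (lopt != NULL && lopt->qlen != 0)
		for (i = 0; i < TCP_SYNQ_HSIZE; i++)
			for (req = lopt->syn_table[i]; req != NULL;
			     req = req->dl_next)
				visit(inet_rsk(req), tcp_rsk(req));
	read_unlock_bh(&tp->accept_queue.syn_wait_lock);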
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index dad98e4a504..2d41d5d6ad1 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -36,7 +36,7 @@
* ACK bit.
* Andi Kleen : Implemented fast path mtu discovery.
* Fixed many serious bugs in the
- * open_request handling and moved
+ * request_sock handling and moved
* most of it into the af independent code.
* Added tail drop and some other bugfixes.
* Added new listen semantics.
@@ -869,21 +869,23 @@ static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
}
-static struct open_request *tcp_v4_search_req(struct tcp_sock *tp,
- struct open_request ***prevp,
+static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
+ struct request_sock ***prevp,
__u16 rport,
__u32 raddr, __u32 laddr)
{
- struct tcp_listen_opt *lopt = tp->listen_opt;
- struct open_request *req, **prev;
+ struct listen_sock *lopt = tp->accept_queue.listen_opt;
+ struct request_sock *req, **prev;
for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
(req = *prev) != NULL;
prev = &req->dl_next) {
- if (req->rmt_port == rport &&
- req->af.v4_req.rmt_addr == raddr &&
- req->af.v4_req.loc_addr == laddr &&
- TCP_INET_FAMILY(req->class->family)) {
+ const struct inet_request_sock *ireq = inet_rsk(req);
+
+ if (ireq->rmt_port == rport &&
+ ireq->rmt_addr == raddr &&
+ ireq->loc_addr == laddr &&
+ TCP_INET_FAMILY(req->rsk_ops->family)) {
BUG_TRAP(!req->sk);
*prevp = prev;
break;
@@ -893,21 +895,13 @@ static struct open_request *tcp_v4_search_req(struct tcp_sock *tp,
return req;
}
-static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
+static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_listen_opt *lopt = tp->listen_opt;
- u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
-
- req->expires = jiffies + TCP_TIMEOUT_INIT;
- req->retrans = 0;
- req->sk = NULL;
- req->dl_next = lopt->syn_table[h];
-
- write_lock(&tp->syn_wait_lock);
- lopt->syn_table[h] = req;
- write_unlock(&tp->syn_wait_lock);
+ struct listen_sock *lopt = tp->accept_queue.listen_opt;
+ u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
+ reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
tcp_synq_added(sk);
}
@@ -1050,7 +1044,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
}
switch (sk->sk_state) {
- struct open_request *req, **prev;
+ struct request_sock *req, **prev;
case TCP_LISTEN:
if (sock_owned_by_user(sk))
goto out;
@@ -1065,7 +1059,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
*/
BUG_TRAP(!req->sk);
- if (seq != req->snt_isn) {
+ if (seq != tcp_rsk(req)->snt_isn) {
NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
@@ -1254,28 +1248,29 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_tw_put(tw);
}
-static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
+static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
- tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
+ tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
req->ts_recent);
}
static struct dst_entry* tcp_v4_route_req(struct sock *sk,
- struct open_request *req)
+ struct request_sock *req)
{
struct rtable *rt;
- struct ip_options *opt = req->af.v4_req.opt;
+ const struct inet_request_sock *ireq = inet_rsk(req);
+ struct ip_options *opt = inet_rsk(req)->opt;
struct flowi fl = { .oif = sk->sk_bound_dev_if,
.nl_u = { .ip4_u =
{ .daddr = ((opt && opt->srr) ?
opt->faddr :
- req->af.v4_req.rmt_addr),
- .saddr = req->af.v4_req.loc_addr,
+ ireq->rmt_addr),
+ .saddr = ireq->loc_addr,
.tos = RT_CONN_FLAGS(sk) } },
.proto = IPPROTO_TCP,
.uli_u = { .ports =
{ .sport = inet_sk(sk)->sport,
- .dport = req->rmt_port } } };
+ .dport = ireq->rmt_port } } };
if (ip_route_output_flow(&rt, &fl, sk, 0)) {
IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
@@ -1291,12 +1286,13 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk,
/*
* Send a SYN-ACK after having received an ACK.
- * This still operates on a open_request only, not on a big
+ * This still operates on a request_sock only, not on a big
* socket.
*/
-static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
+static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
struct dst_entry *dst)
{
+ const struct inet_request_sock *ireq = inet_rsk(req);
int err = -1;
struct sk_buff * skb;
@@ -1310,14 +1306,14 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
struct tcphdr *th = skb->h.th;
th->check = tcp_v4_check(th, skb->len,
- req->af.v4_req.loc_addr,
- req->af.v4_req.rmt_addr,
+ ireq->loc_addr,
+ ireq->rmt_addr,
csum_partial((char *)th, skb->len,
skb->csum));
- err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
- req->af.v4_req.rmt_addr,
- req->af.v4_req.opt);
+ err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
+ ireq->rmt_addr,
+ ireq->opt);
if (err == NET_XMIT_CN)
err = 0;
}
@@ -1328,12 +1324,12 @@ out:
}
/*
- * IPv4 open_request destructor.
+ * IPv4 request_sock destructor.
*/
-static void tcp_v4_or_free(struct open_request *req)
+static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
- if (req->af.v4_req.opt)
- kfree(req->af.v4_req.opt);
+ if (inet_rsk(req)->opt)
+ kfree(inet_rsk(req)->opt);
}
static inline void syn_flood_warning(struct sk_buff *skb)
@@ -1349,7 +1345,7 @@ static inline void syn_flood_warning(struct sk_buff *skb)
}
/*
- * Save and compile IPv4 options into the open_request if needed.
+ * Save and compile IPv4 options into the request_sock if needed.
*/
static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
struct sk_buff *skb)
@@ -1370,33 +1366,20 @@ static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
return dopt;
}
-/*
- * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
- * One SYN_RECV socket costs about 80bytes on a 32bit machine.
- * It would be better to replace it with a global counter for all sockets
- * but then some measure against one socket starving all other sockets
- * would be needed.
- *
- * It was 128 by default. Experiments with real servers show, that
- * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
- * Further increasing requires to change hash table size.
- */
-int sysctl_max_syn_backlog = 256;
-
-struct or_calltable or_ipv4 = {
+struct request_sock_ops tcp_request_sock_ops = {
.family = PF_INET,
+ .obj_size = sizeof(struct tcp_request_sock),
.rtx_syn_ack = tcp_v4_send_synack,
- .send_ack = tcp_v4_or_send_ack,
- .destructor = tcp_v4_or_free,
+ .send_ack = tcp_v4_reqsk_send_ack,
+ .destructor = tcp_v4_reqsk_destructor,
.send_reset = tcp_v4_send_reset,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
+ struct inet_request_sock *ireq;
struct tcp_options_received tmp_opt;
- struct open_request *req;
+ struct request_sock *req;
__u32 saddr = skb->nh.iph->saddr;
__u32 daddr = skb->nh.iph->daddr;
__u32 isn = TCP_SKB_CB(skb)->when;
@@ -1433,7 +1416,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
goto drop;
- req = tcp_openreq_alloc();
+ req = reqsk_alloc(&tcp_request_sock_ops);
if (!req)
goto drop;
@@ -1461,10 +1444,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_openreq_init(req, &tmp_opt, skb);
- req->af.v4_req.loc_addr = daddr;
- req->af.v4_req.rmt_addr = saddr;
- req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
- req->class = &or_ipv4;
+ ireq = inet_rsk(req);
+ ireq->loc_addr = daddr;
+ ireq->rmt_addr = saddr;
+ ireq->opt = tcp_v4_save_options(sk, skb);
if (!want_cookie)
TCP_ECN_create_request(req, skb->h.th);
@@ -1523,20 +1506,20 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
isn = tcp_v4_init_sequence(sk, skb);
}
- req->snt_isn = isn;
+ tcp_rsk(req)->snt_isn = isn;
if (tcp_v4_send_synack(sk, req, dst))
goto drop_and_free;
if (want_cookie) {
- tcp_openreq_free(req);
+ reqsk_free(req);
} else {
tcp_v4_synq_add(sk, req);
}
return 0;
drop_and_free:
- tcp_openreq_free(req);
+ reqsk_free(req);
drop:
TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
return 0;
@@ -1548,9 +1531,10 @@ drop:
* now create the new socket.
*/
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
- struct open_request *req,
+ struct request_sock *req,
struct dst_entry *dst)
{
+ struct inet_request_sock *ireq;
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
@@ -1570,11 +1554,12 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
- newinet->daddr = req->af.v4_req.rmt_addr;
- newinet->rcv_saddr = req->af.v4_req.loc_addr;
- newinet->saddr = req->af.v4_req.loc_addr;
- newinet->opt = req->af.v4_req.opt;
- req->af.v4_req.opt = NULL;
+ ireq = inet_rsk(req);
+ newinet->daddr = ireq->rmt_addr;
+ newinet->rcv_saddr = ireq->loc_addr;
+ newinet->saddr = ireq->loc_addr;
+ newinet->opt = ireq->opt;
+ ireq->opt = NULL;
newinet->mc_index = tcp_v4_iif(skb);
newinet->mc_ttl = skb->nh.iph->ttl;
newtp->ext_header_len = 0;
@@ -1605,9 +1590,9 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
struct iphdr *iph = skb->nh.iph;
struct tcp_sock *tp = tcp_sk(sk);
struct sock *nsk;
- struct open_request **prev;
+ struct request_sock **prev;
/* Find possible connection requests. */
- struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
+ struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source,
iph->saddr, iph->daddr);
if (req)
return tcp_check_req(sk, skb, req, prev);
@@ -2144,13 +2129,13 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
++st->num;
if (st->state == TCP_SEQ_STATE_OPENREQ) {
- struct open_request *req = cur;
+ struct request_sock *req = cur;
tp = tcp_sk(st->syn_wait_sk);
req = req->dl_next;
while (1) {
while (req) {
- if (req->class->family == st->family) {
+ if (req->rsk_ops->family == st->family) {
cur = req;
goto out;
}
@@ -2159,17 +2144,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
if (++st->sbucket >= TCP_SYNQ_HSIZE)
break;
get_req:
- req = tp->listen_opt->syn_table[st->sbucket];
+ req = tp->accept_queue.listen_opt->syn_table[st->sbucket];
}
sk = sk_next(st->syn_wait_sk);
st->state = TCP_SEQ_STATE_LISTENING;
- read_unlock_bh(&tp->syn_wait_lock);
+ read_unlock_bh(&tp->accept_queue.syn_wait_lock);
} else {
tp = tcp_sk(sk);
- read_lock_bh(&tp->syn_wait_lock);
- if (tp->listen_opt && tp->listen_opt->qlen)
+ read_lock_bh(&tp->accept_queue.syn_wait_lock);
+ if (reqsk_queue_len(&tp->accept_queue))
goto start_req;
- read_unlock_bh(&tp->syn_wait_lock);
+ read_unlock_bh(&tp->accept_queue.syn_wait_lock);
sk = sk_next(sk);
}
get_sk:
@@ -2179,8 +2164,8 @@ get_sk:
goto out;
}
tp = tcp_sk(sk);
- read_lock_bh(&tp->syn_wait_lock);
- if (tp->listen_opt && tp->listen_opt->qlen) {
+ read_lock_bh(&tp->accept_queue.syn_wait_lock);
+ if (reqsk_queue_len(&tp->accept_queue)) {
start_req:
st->uid = sock_i_uid(sk);
st->syn_wait_sk = sk;
@@ -2188,7 +2173,7 @@ start_req:
st->sbucket = 0;
goto get_req;
}
- read_unlock_bh(&tp->syn_wait_lock);
+ read_unlock_bh(&tp->accept_queue.syn_wait_lock);
}
if (++st->bucket < TCP_LHTABLE_SIZE) {
sk = sk_head(&tcp_listening_hash[st->bucket]);
@@ -2375,7 +2360,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
case TCP_SEQ_STATE_OPENREQ:
if (v) {
struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
- read_unlock_bh(&tp->syn_wait_lock);
+ read_unlock_bh(&tp->accept_queue.syn_wait_lock);
}
case TCP_SEQ_STATE_LISTENING:
if (v != SEQ_START_TOKEN)
@@ -2451,18 +2436,19 @@ void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
-static void get_openreq4(struct sock *sk, struct open_request *req,
+static void get_openreq4(struct sock *sk, struct request_sock *req,
char *tmpbuf, int i, int uid)
{
+ const struct inet_request_sock *ireq = inet_rsk(req);
int ttd = req->expires - jiffies;
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
i,
- req->af.v4_req.loc_addr,
+ ireq->loc_addr,
ntohs(inet_sk(sk)->sport),
- req->af.v4_req.rmt_addr,
- ntohs(req->rmt_port),
+ ireq->rmt_addr,
+ ntohs(ireq->rmt_port),
TCP_SYN_RECV,
0, 0, /* could print option size, but that is af dependent. */
1, /* timers active (only the expire timer) */
@@ -2618,6 +2604,7 @@ struct proto tcp_prot = {
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp_sock),
+ .rsk_prot = &tcp_request_sock_ops,
};
@@ -2660,7 +2647,6 @@ EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_local_port_range);
-EXPORT_SYMBOL(sysctl_max_syn_backlog);
EXPORT_SYMBOL(sysctl_tcp_low_latency);
EXPORT_SYMBOL(sysctl_tcp_tw_reuse);
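
The diffstat shows net/ipv6/tcp_ipv6.c getting the parallel conversion, and tcp_diag.c already dereferences tcp6_rsk(req), so the IPv6 side presumably ends up with an ops table mirroring tcp_request_sock_ops above; the method names in this sketch are assumptions, not taken from the patch:

	struct request_sock_ops tcp6_request_sock_ops = {
		.family      = PF_INET6,
		.obj_size    = sizeof(struct tcp6_request_sock),
		.rtx_syn_ack = tcp_v6_send_synack,	/* assumed names, */
		.send_ack    = tcp_v6_reqsk_send_ack,	/* mirroring the  */
		.destructor  = tcp_v6_reqsk_destructor,	/* IPv4 table     */
		.send_reset  = tcp_v6_send_reset,
	};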
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index eea1a17a9ac..b3943e7562f 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -684,7 +684,7 @@ out:
* Actually, we could avoid lots of memory writes here. tp of the listening
* socket contains all necessary default parameters.
*/
-struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
+struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
/* allocate the newsk from the same slab as the master sock,
* if not, at sk_free time we'll try to free it from the wrong
@@ -692,6 +692,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
if(newsk != NULL) {
+ struct inet_request_sock *ireq = inet_rsk(req);
+ struct tcp_request_sock *treq = tcp_rsk(req);
struct tcp_sock *newtp;
struct sk_filter *filter;
@@ -703,7 +705,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
tcp_sk(newsk)->bind_hash = NULL;
/* Clone the TCP header template */
- inet_sk(newsk)->dport = req->rmt_port;
+ inet_sk(newsk)->dport = ireq->rmt_port;
sock_lock_init(newsk);
bh_lock_sock(newsk);
@@ -739,14 +741,14 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
/* Now setup tcp_sock */
newtp = tcp_sk(newsk);
newtp->pred_flags = 0;
- newtp->rcv_nxt = req->rcv_isn + 1;
- newtp->snd_nxt = req->snt_isn + 1;
- newtp->snd_una = req->snt_isn + 1;
- newtp->snd_sml = req->snt_isn + 1;
+ newtp->rcv_nxt = treq->rcv_isn + 1;
+ newtp->snd_nxt = treq->snt_isn + 1;
+ newtp->snd_una = treq->snt_isn + 1;
+ newtp->snd_sml = treq->snt_isn + 1;
tcp_prequeue_init(newtp);
- tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);
+ tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
newtp->retransmits = 0;
newtp->backoff = 0;
@@ -775,10 +777,10 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
tcp_set_ca_state(newtp, TCP_CA_Open);
tcp_init_xmit_timers(newsk);
skb_queue_head_init(&newtp->out_of_order_queue);
- newtp->rcv_wup = req->rcv_isn + 1;
- newtp->write_seq = req->snt_isn + 1;
+ newtp->rcv_wup = treq->rcv_isn + 1;
+ newtp->write_seq = treq->snt_isn + 1;
newtp->pushed_seq = newtp->write_seq;
- newtp->copied_seq = req->rcv_isn + 1;
+ newtp->copied_seq = treq->rcv_isn + 1;
newtp->rx_opt.saw_tstamp = 0;
@@ -788,10 +790,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newtp->probes_out = 0;
newtp->rx_opt.num_sacks = 0;
newtp->urg_data = 0;
- newtp->listen_opt = NULL;
- newtp->accept_queue = newtp->accept_queue_tail = NULL;
- /* Deinitialize syn_wait_lock to trap illegal accesses. */
- memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
+ /* Deinitialize accept_queue to trap illegal accesses. */
+ memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
/* Back to base struct sock members. */
newsk->sk_err = 0;
@@ -808,18 +808,18 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newsk->sk_socket = NULL;
newsk->sk_sleep = NULL;
- newtp->rx_opt.tstamp_ok = req->tstamp_ok;
- if((newtp->rx_opt.sack_ok = req->sack_ok) != 0) {
+ newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
+ if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
if (sysctl_tcp_fack)
newtp->rx_opt.sack_ok |= 2;
}
newtp->window_clamp = req->window_clamp;
newtp->rcv_ssthresh = req->rcv_wnd;
newtp->rcv_wnd = req->rcv_wnd;
- newtp->rx_opt.wscale_ok = req->wscale_ok;
+ newtp->rx_opt.wscale_ok = ireq->wscale_ok;
if (newtp->rx_opt.wscale_ok) {
- newtp->rx_opt.snd_wscale = req->snd_wscale;
- newtp->rx_opt.rcv_wscale = req->rcv_wscale;
+ newtp->rx_opt.snd_wscale = ireq->snd_wscale;
+ newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
} else {
newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
newtp->window_clamp = min(newtp->window_clamp, 65535U);
@@ -851,12 +851,12 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
/*
* Process an incoming packet for SYN_RECV sockets represented
- * as an open_request.
+ * as a request_sock.
*/
struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
- struct open_request *req,
- struct open_request **prev)
+ struct request_sock *req,
+ struct request_sock **prev)
{
struct tcphdr *th = skb->h.th;
struct tcp_sock *tp = tcp_sk(sk);
@@ -881,7 +881,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
}
/* Check for pure retransmitted SYN. */
- if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
+ if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
flg == TCP_FLAG_SYN &&
!paws_reject) {
/*
@@ -901,7 +901,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
* Enforce "SYN-ACK" according to figure 8, figure 6
* of RFC793, fixed by RFC1122.
*/
- req->class->rtx_syn_ack(sk, req, NULL);
+ req->rsk_ops->rtx_syn_ack(sk, req, NULL);
return NULL;
}
@@ -959,7 +959,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
* Invalid ACK: reset will be sent by listening socket
*/
if ((flg & TCP_FLAG_ACK) &&
- (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
+ (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
return sk;
/* Also, it would not be a bad idea to check rcv_tsecr, which
@@ -970,10 +970,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
/* RFC793: "first check sequence number". */
if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
+ tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
/* Out of window: send ACK and drop. */
if (!(flg & TCP_FLAG_RST))
- req->class->send_ack(skb, req);
+ req->rsk_ops->send_ack(skb, req);
if (paws_reject)
NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
return NULL;
@@ -981,12 +981,12 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
/* In sequence, PAWS is OK. */
- if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
+ if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
req->ts_recent = tmp_opt.rcv_tsval;
- if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
+ if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
/* Truncate SYN, it is out of window starting
- at req->rcv_isn+1. */
+ at tcp_rsk(req)->rcv_isn + 1. */
flg &= ~TCP_FLAG_SYN;
}
@@ -1003,8 +1003,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
return NULL;
/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
- if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
- req->acked = 1;
+ if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+ inet_rsk(req)->acked = 1;
return NULL;
}
@@ -1026,14 +1026,14 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
listen_overflow:
if (!sysctl_tcp_abort_on_overflow) {
- req->acked = 1;
+ inet_rsk(req)->acked = 1;
return NULL;
}
embryonic_reset:
NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
if (!(flg & TCP_FLAG_RST))
- req->class->send_reset(skb);
+ req->rsk_ops->send_reset(skb);
tcp_synq_drop(sk, req, prev);
return NULL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fa24e7ae1f4..f17c6577e33 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1356,8 +1356,9 @@ int tcp_send_synack(struct sock *sk)
* Prepare a SYN-ACK.
*/
struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
- struct open_request *req)
+ struct request_sock *req)
{
+ struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_sock *tp = tcp_sk(sk);
struct tcphdr *th;
int tcp_header_size;
@@ -1373,47 +1374,47 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
skb->dst = dst_clone(dst);
tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
- (req->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
- (req->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
+ (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
+ (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
/* SACK_PERM is in the place of NOP NOP of TS */
- ((req->sack_ok && !req->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
+ ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
memset(th, 0, sizeof(struct tcphdr));
th->syn = 1;
th->ack = 1;
if (dst->dev->features&NETIF_F_TSO)
- req->ecn_ok = 0;
+ ireq->ecn_ok = 0;
TCP_ECN_make_synack(req, th);
th->source = inet_sk(sk)->sport;
- th->dest = req->rmt_port;
- TCP_SKB_CB(skb)->seq = req->snt_isn;
+ th->dest = ireq->rmt_port;
+ TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
TCP_SKB_CB(skb)->sacked = 0;
skb_shinfo(skb)->tso_segs = 1;
skb_shinfo(skb)->tso_size = 0;
th->seq = htonl(TCP_SKB_CB(skb)->seq);
- th->ack_seq = htonl(req->rcv_isn + 1);
+ th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
__u8 rcv_wscale;
/* Set this up on the first call only */
req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
/* tcp_full_space because it is guaranteed to be the first packet */
tcp_select_initial_window(tcp_full_space(sk),
- dst_metric(dst, RTAX_ADVMSS) - (req->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+ dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
&req->rcv_wnd,
&req->window_clamp,
- req->wscale_ok,
+ ireq->wscale_ok,
&rcv_wscale);
- req->rcv_wscale = rcv_wscale;
+ ireq->rcv_wscale = rcv_wscale;
}
/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
th->window = htons(req->rcv_wnd);
TCP_SKB_CB(skb)->when = tcp_time_stamp;
- tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), req->tstamp_ok,
- req->sack_ok, req->wscale_ok, req->rcv_wscale,
+ tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
+ ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
TCP_SKB_CB(skb)->when,
req->ts_recent);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 799ebe061e2..b127b449856 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -464,11 +464,11 @@ out_unlock:
static void tcp_synack_timer(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_listen_opt *lopt = tp->listen_opt;
+ struct listen_sock *lopt = tp->accept_queue.listen_opt;
int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
int thresh = max_retries;
unsigned long now = jiffies;
- struct open_request **reqp, *req;
+ struct request_sock **reqp, *req;
int i, budget;
if (lopt == NULL || lopt->qlen == 0)
@@ -513,8 +513,8 @@ static void tcp_synack_timer(struct sock *sk)
while ((req = *reqp) != NULL) {
if (time_after_eq(now, req->expires)) {
if ((req->retrans < thresh ||
- (req->acked && req->retrans < max_retries))
- && !req->class->rtx_syn_ack(sk, req, NULL)) {
+ (inet_rsk(req)->acked && req->retrans < max_retries))
+ && !req->rsk_ops->rtx_syn_ack(sk, req, NULL)) {
unsigned long timeo;
if (req->retrans++ == 0)
@@ -527,13 +527,9 @@ static void tcp_synack_timer(struct sock *sk)
}
/* Drop this request */
- write_lock(&tp->syn_wait_lock);
- *reqp = req->dl_next;
- write_unlock(&tp->syn_wait_lock);
- lopt->qlen--;
- if (req->retrans == 0)
- lopt->qlen_young--;
- tcp_openreq_free(req);
+ tcp_synq_unlink(tp, req, reqp);
+ reqsk_queue_removed(&tp->accept_queue, req);
+ reqsk_free(req);
continue;
}
reqp = &req->dl_next;
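
The timer's drop path above is the canonical teardown order for a queued request under the new API:

	/* 1) unhash from lopt->syn_table (the syn_wait_lock write side that
	 *    was open-coded here moves into the helper),
	 * 2) let the queue fix up the qlen/qlen_young accounting,
	 * 3) run the family destructor and return req to its slab. */
	tcp_synq_unlink(tp, req, reqp);
	reqsk_queue_removed(&tp->accept_queue, req);
	reqsk_free(req);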
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2720899d516..47a30c3188e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -131,7 +131,7 @@ static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
static int addrconf_ifdown(struct net_device *dev, int how);
-static void addrconf_dad_start(struct inet6_ifaddr *ifp, int flags);
+static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
static void addrconf_dad_timer(unsigned long data);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
static void addrconf_rs_timer(unsigned long data);
@@ -492,7 +492,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
static struct inet6_ifaddr *
ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
- int scope, unsigned flags)
+ int scope, u32 flags)
{
struct inet6_ifaddr *ifa = NULL;
struct rt6_info *rt;
@@ -1320,7 +1320,7 @@ static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpad
static void
addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
- unsigned long expires, unsigned flags)
+ unsigned long expires, u32 flags)
{
struct in6_rtmsg rtmsg;
@@ -2229,7 +2229,7 @@ out:
/*
* Duplicate Address Detection
*/
-static void addrconf_dad_start(struct inet6_ifaddr *ifp, int flags)
+static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
{
struct inet6_dev *idev = ifp->idev;
struct net_device *dev = idev->dev;
@@ -2622,15 +2622,14 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
}
static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
- u32 pid, u32 seq, int event)
+ u32 pid, u32 seq, int event, unsigned int flags)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
struct ifa_cacheinfo ci;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ifm));
- if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*ifm), flags);
ifm = NLMSG_DATA(nlh);
ifm->ifa_family = AF_INET6;
ifm->ifa_prefixlen = ifa->prefix_len;
@@ -2672,15 +2671,14 @@ rtattr_failure:
}
static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
- u32 pid, u32 seq, int event)
+ u32 pid, u32 seq, int event, u16 flags)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
struct ifa_cacheinfo ci;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ifm));
- if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*ifm), flags);
ifm = NLMSG_DATA(nlh);
ifm->ifa_family = AF_INET6;
ifm->ifa_prefixlen = 128;
@@ -2709,15 +2707,14 @@ rtattr_failure:
}
static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
- u32 pid, u32 seq, int event)
+ u32 pid, u32 seq, int event, unsigned int flags)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
struct ifa_cacheinfo ci;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ifm));
- if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*ifm), flags);
ifm = NLMSG_DATA(nlh);
ifm->ifa_family = AF_INET6;
ifm->ifa_prefixlen = 128;
@@ -2786,7 +2783,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
continue;
if ((err = inet6_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWADDR)) <= 0)
+ cb->nlh->nlmsg_seq, RTM_NEWADDR,
+ NLM_F_MULTI)) <= 0)
goto done;
}
/* temp addr */
@@ -2797,7 +2795,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
continue;
if ((err = inet6_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWADDR)) <= 0)
+ cb->nlh->nlmsg_seq, RTM_NEWADDR,
+ NLM_F_MULTI)) <= 0)
goto done;
}
#endif
@@ -2810,7 +2809,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
continue;
if ((err = inet6_fill_ifmcaddr(skb, ifmca,
NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_GETMULTICAST)) <= 0)
+ cb->nlh->nlmsg_seq, RTM_GETMULTICAST,
+ NLM_F_MULTI)) <= 0)
goto done;
}
break;
@@ -2822,7 +2822,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
continue;
if ((err = inet6_fill_ifacaddr(skb, ifaca,
NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_GETANYCAST)) <= 0)
+ cb->nlh->nlmsg_seq, RTM_GETANYCAST,
+ NLM_F_MULTI)) <= 0)
goto done;
}
break;
@@ -2872,7 +2873,7 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
netlink_set_err(rtnl, 0, RTMGRP_IPV6_IFADDR, ENOBUFS);
return;
}
- if (inet6_fill_ifaddr(skb, ifa, 0, 0, event) < 0) {
+ if (inet6_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) {
kfree_skb(skb);
netlink_set_err(rtnl, 0, RTMGRP_IPV6_IFADDR, EINVAL);
return;
@@ -2907,7 +2908,7 @@ static void inline ipv6_store_devconf(struct ipv6_devconf *cnf,
}
static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
- u32 pid, u32 seq, int event)
+ u32 pid, u32 seq, int event, unsigned int flags)
{
struct net_device *dev = idev->dev;
__s32 *array = NULL;
@@ -2918,8 +2919,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
__u32 mtu = dev->mtu;
struct ifla_cacheinfo ci;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*r));
- if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
r = NLMSG_DATA(nlh);
r->ifi_family = AF_INET6;
r->ifi_type = dev->type;
@@ -2986,7 +2986,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
if ((idev = in6_dev_get(dev)) == NULL)
continue;
err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWLINK);
+ cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI);
in6_dev_put(idev);
if (err <= 0)
break;
@@ -3008,7 +3008,7 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
netlink_set_err(rtnl, 0, RTMGRP_IPV6_IFINFO, ENOBUFS);
return;
}
- if (inet6_fill_ifinfo(skb, idev, 0, 0, event) < 0) {
+ if (inet6_fill_ifinfo(skb, idev, current->pid, 0, event, 0) < 0) {
kfree_skb(skb);
netlink_set_err(rtnl, 0, RTMGRP_IPV6_IFINFO, EINVAL);
return;
@@ -3018,18 +3018,15 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
}
static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
- struct prefix_info *pinfo, u32 pid, u32 seq, int event)
+ struct prefix_info *pinfo, u32 pid, u32 seq,
+ int event, unsigned int flags)
{
struct prefixmsg *pmsg;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
struct prefix_cacheinfo ci;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*pmsg));
-
- if (pid)
- nlh->nlmsg_flags |= NLM_F_MULTI;
-
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*pmsg), flags);
pmsg = NLMSG_DATA(nlh);
pmsg->prefix_family = AF_INET6;
pmsg->prefix_ifindex = idev->dev->ifindex;
@@ -3068,7 +3065,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
netlink_set_err(rtnl, 0, RTMGRP_IPV6_PREFIX, ENOBUFS);
return;
}
- if (inet6_fill_prefix(skb, idev, pinfo, 0, 0, event) < 0) {
+ if (inet6_fill_prefix(skb, idev, pinfo, current->pid, 0, event, 0) < 0) {
kfree_skb(skb);
netlink_set_err(rtnl, 0, RTMGRP_IPV6_PREFIX, EINVAL);
return;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 65b9375df57..5229365cd8b 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -353,14 +353,14 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
err = copied;
/* Reset and regenerate socket error */
- spin_lock_irq(&sk->sk_error_queue.lock);
+ spin_lock_bh(&sk->sk_error_queue.lock);
sk->sk_err = 0;
if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_irq(&sk->sk_error_queue.lock);
+ spin_unlock_bh(&sk->sk_error_queue.lock);
sk->sk_error_report(sk);
} else {
- spin_unlock_irq(&sk->sk_error_queue.lock);
+ spin_unlock_bh(&sk->sk_error_queue.lock);
}
out_free_skb:
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 617645bc5ed..e2b848ec985 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -434,12 +434,12 @@ csum_copy_err:
/* Clear queue. */
if (flags&MSG_PEEK) {
int clear = 0;
- spin_lock_irq(&sk->sk_receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
if (skb == skb_peek(&sk->sk_receive_queue)) {
__skb_unlink(skb, &sk->sk_receive_queue);
clear = 1;
}
- spin_unlock_irq(&sk->sk_receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
if (clear)
kfree_skb(skb);
}
@@ -971,11 +971,11 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
struct sk_buff *skb;
int amount = 0;
- spin_lock_irq(&sk->sk_receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->tail - skb->h.raw;
- spin_unlock_irq(&sk->sk_receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3bf8a0254f8..1f5b226c357 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1570,7 +1570,8 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
struct in6_addr *src,
int iif,
int type, u32 pid, u32 seq,
- struct nlmsghdr *in_nlh, int prefix)
+ struct nlmsghdr *in_nlh, int prefix,
+ unsigned int flags)
{
struct rtmsg *rtm;
struct nlmsghdr *nlh;
@@ -1588,7 +1589,7 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
pid = in_nlh->nlmsg_pid;
}
- nlh = NLMSG_PUT(skb, pid, seq, type, sizeof(*rtm));
+ nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*rtm), flags);
rtm = NLMSG_DATA(nlh);
rtm->rtm_family = AF_INET6;
rtm->rtm_dst_len = rt->rt6i_dst.plen;
@@ -1674,7 +1675,7 @@ static int rt6_dump_route(struct rt6_info *rt, void *p_arg)
return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
- NULL, prefix);
+ NULL, prefix, NLM_F_MULTI);
}
static int fib6_dump_node(struct fib6_walker_t *w)
@@ -1822,7 +1823,7 @@ int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
&fl.fl6_dst, &fl.fl6_src,
iif,
RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
- nlh->nlmsg_seq, nlh, 0);
+ nlh->nlmsg_seq, nlh, 0, 0);
if (err < 0) {
err = -EMSGSIZE;
goto out_free;
@@ -1848,7 +1849,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nlmsghdr *nlh)
netlink_set_err(rtnl, 0, RTMGRP_IPV6_ROUTE, ENOBUFS);
return;
}
- if (rt6_fill_node(skb, rt, NULL, NULL, 0, event, 0, 0, nlh, 0) < 0) {
+ if (rt6_fill_node(skb, rt, NULL, NULL, 0, event, 0, 0, nlh, 0, 0) < 0) {
kfree_skb(skb);
netlink_set_err(rtnl, 0, RTMGRP_IPV6_ROUTE, EINVAL);
return;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0f69e800a0a..2414937f2a8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -65,7 +65,7 @@
#include <linux/seq_file.h>
static void tcp_v6_send_reset(struct sk_buff *skb);
-static void tcp_v6_or_send_ack(struct sk_buff *skb, struct open_request *req);
+static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb);
@@ -394,24 +394,26 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
return c & (TCP_SYNQ_HSIZE - 1);
}
-static struct open_request *tcp_v6_search_req(struct tcp_sock *tp,
- struct open_request ***prevp,
+static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp,
+ struct request_sock ***prevp,
__u16 rport,
struct in6_addr *raddr,
struct in6_addr *laddr,
int iif)
{
- struct tcp_listen_opt *lopt = tp->listen_opt;
- struct open_request *req, **prev;
+ struct listen_sock *lopt = tp->accept_queue.listen_opt;
+ struct request_sock *req, **prev;
for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
(req = *prev) != NULL;
prev = &req->dl_next) {
- if (req->rmt_port == rport &&
- req->class->family == AF_INET6 &&
- ipv6_addr_equal(&req->af.v6_req.rmt_addr, raddr) &&
- ipv6_addr_equal(&req->af.v6_req.loc_addr, laddr) &&
- (!req->af.v6_req.iif || req->af.v6_req.iif == iif)) {
+ const struct tcp6_request_sock *treq = tcp6_rsk(req);
+
+ if (inet_rsk(req)->rmt_port == rport &&
+ req->rsk_ops->family == AF_INET6 &&
+ ipv6_addr_equal(&treq->rmt_addr, raddr) &&
+ ipv6_addr_equal(&treq->loc_addr, laddr) &&
+ (!treq->iif || treq->iif == iif)) {
BUG_TRAP(req->sk == NULL);
*prevp = prev;
return req;
@@ -906,9 +908,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
icmpv6_err_convert(type, code, &err);
- /* Might be for an open_request */
+ /* Might be for a request_sock */
switch (sk->sk_state) {
- struct open_request *req, **prev;
+ struct request_sock *req, **prev;
case TCP_LISTEN:
if (sock_owned_by_user(sk))
goto out;
@@ -923,7 +925,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
*/
BUG_TRAP(req->sk == NULL);
- if (seq != req->snt_isn) {
+ if (seq != tcp_rsk(req)->snt_isn) {
NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
@@ -957,9 +959,10 @@ out:
}
-static int tcp_v6_send_synack(struct sock *sk, struct open_request *req,
+static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
struct dst_entry *dst)
{
+ struct tcp6_request_sock *treq = tcp6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff * skb;
struct ipv6_txoptions *opt = NULL;
@@ -969,19 +972,19 @@ static int tcp_v6_send_synack(struct sock *sk, struct open_request *req,
memset(&fl, 0, sizeof(fl));
fl.proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
- ipv6_addr_copy(&fl.fl6_src, &req->af.v6_req.loc_addr);
+ ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
+ ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
fl.fl6_flowlabel = 0;
- fl.oif = req->af.v6_req.iif;
- fl.fl_ip_dport = req->rmt_port;
+ fl.oif = treq->iif;
+ fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fl.fl_ip_sport = inet_sk(sk)->sport;
if (dst == NULL) {
opt = np->opt;
if (opt == NULL &&
np->rxopt.bits.srcrt == 2 &&
- req->af.v6_req.pktopts) {
- struct sk_buff *pktopts = req->af.v6_req.pktopts;
+ treq->pktopts) {
+ struct sk_buff *pktopts = treq->pktopts;
struct inet6_skb_parm *rxopt = IP6CB(pktopts);
if (rxopt->srcrt)
opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
@@ -1008,10 +1011,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct open_request *req,
struct tcphdr *th = skb->h.th;
th->check = tcp_v6_check(th, skb->len,
- &req->af.v6_req.loc_addr, &req->af.v6_req.rmt_addr,
+ &treq->loc_addr, &treq->rmt_addr,
csum_partial((char *)th, skb->len, skb->csum));
- ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
+ ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
err = ip6_xmit(sk, skb, &fl, opt, 0);
if (err == NET_XMIT_CN)
err = 0;
@@ -1024,17 +1027,18 @@ done:
return err;
}
-static void tcp_v6_or_free(struct open_request *req)
+static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
- if (req->af.v6_req.pktopts)
- kfree_skb(req->af.v6_req.pktopts);
+ if (tcp6_rsk(req)->pktopts)
+ kfree_skb(tcp6_rsk(req)->pktopts);
}
-static struct or_calltable or_ipv6 = {
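+/* per-family request_sock operations; .obj_size sizes the slab cache
+ * reqsk_alloc() draws from, so IPv6 gets tcp6_request_sock-sized objects */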
+static struct request_sock_ops tcp6_request_sock_ops = {
.family = AF_INET6,
+ .obj_size = sizeof(struct tcp6_request_sock),
.rtx_syn_ack = tcp_v6_send_synack,
- .send_ack = tcp_v6_or_send_ack,
- .destructor = tcp_v6_or_free,
+ .send_ack = tcp_v6_reqsk_send_ack,
+ .destructor = tcp_v6_reqsk_destructor,
.send_reset = tcp_v6_send_reset
};
@@ -1219,15 +1223,15 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_tw_put(tw);
}
-static void tcp_v6_or_send_ack(struct sk_buff *skb, struct open_request *req)
+static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
- tcp_v6_send_ack(skb, req->snt_isn+1, req->rcv_isn+1, req->rcv_wnd, req->ts_recent);
+ tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
+ tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
+ req->ts_recent);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
- struct open_request *req, **prev;
+ struct request_sock *req, **prev;
struct tcphdr *th = skb->h.th;
struct tcp_sock *tp = tcp_sk(sk);
struct sock *nsk;
@@ -1260,21 +1264,13 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
return sk;
}
-static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
+static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_listen_opt *lopt = tp->listen_opt;
- u32 h = tcp_v6_synq_hash(&req->af.v6_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
-
- req->sk = NULL;
- req->expires = jiffies + TCP_TIMEOUT_INIT;
- req->retrans = 0;
- req->dl_next = lopt->syn_table[h];
-
- write_lock(&tp->syn_wait_lock);
- lopt->syn_table[h] = req;
- write_unlock(&tp->syn_wait_lock);
+ struct listen_sock *lopt = tp->accept_queue.listen_opt;
+ u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
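+ /* reqsk_queue_hash_req() takes over the bookkeeping that used to be
+ * open-coded here: expiry, retrans count and syn_table chaining */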
+ reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
tcp_synq_added(sk);
}
@@ -1284,10 +1280,11 @@ static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
*/
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
+ struct tcp6_request_sock *treq;
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_options_received tmp_opt;
struct tcp_sock *tp = tcp_sk(sk);
- struct open_request *req = NULL;
+ struct request_sock *req = NULL;
__u32 isn = TCP_SKB_CB(skb)->when;
if (skb->protocol == htons(ETH_P_IP))
@@ -1308,7 +1305,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
goto drop;
- req = tcp_openreq_alloc();
+ req = reqsk_alloc(&tcp6_request_sock_ops);
if (req == NULL)
goto drop;
@@ -1321,28 +1318,28 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
- req->class = &or_ipv6;
- ipv6_addr_copy(&req->af.v6_req.rmt_addr, &skb->nh.ipv6h->saddr);
- ipv6_addr_copy(&req->af.v6_req.loc_addr, &skb->nh.ipv6h->daddr);
+ treq = tcp6_rsk(req);
+ ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
+ ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
TCP_ECN_create_request(req, skb->h.th);
- req->af.v6_req.pktopts = NULL;
+ treq->pktopts = NULL;
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo ||
np->rxopt.bits.rxhlim) {
atomic_inc(&skb->users);
- req->af.v6_req.pktopts = skb;
+ treq->pktopts = skb;
}
- req->af.v6_req.iif = sk->sk_bound_dev_if;
+ treq->iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
- ipv6_addr_type(&req->af.v6_req.rmt_addr) & IPV6_ADDR_LINKLOCAL)
- req->af.v6_req.iif = tcp_v6_iif(skb);
+ ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
+ treq->iif = tcp_v6_iif(skb);
if (isn == 0)
isn = tcp_v6_init_sequence(sk,skb);
- req->snt_isn = isn;
+ tcp_rsk(req)->snt_isn = isn;
if (tcp_v6_send_synack(sk, req, NULL))
goto drop;
@@ -1353,16 +1350,17 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
drop:
if (req)
- tcp_openreq_free(req);
+ reqsk_free(req);
TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
return 0; /* don't send reset */
}
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
- struct open_request *req,
+ struct request_sock *req,
struct dst_entry *dst)
{
+ struct tcp6_request_sock *treq = tcp6_rsk(req);
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct tcp6_sock *newtcp6sk;
struct inet_sock *newinet;
@@ -1426,10 +1424,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
goto out_overflow;
if (np->rxopt.bits.srcrt == 2 &&
- opt == NULL && req->af.v6_req.pktopts) {
- struct inet6_skb_parm *rxopt = IP6CB(req->af.v6_req.pktopts);
+ opt == NULL && treq->pktopts) {
+ struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
if (rxopt->srcrt)
- opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(req->af.v6_req.pktopts->nh.raw+rxopt->srcrt));
+ opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
}
if (dst == NULL) {
@@ -1438,16 +1436,16 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
memset(&fl, 0, sizeof(fl));
fl.proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
+ ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
if (opt && opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
ipv6_addr_copy(&final, &fl.fl6_dst);
ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
final_p = &final;
}
- ipv6_addr_copy(&fl.fl6_src, &req->af.v6_req.loc_addr);
+ ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
fl.oif = sk->sk_bound_dev_if;
- fl.fl_ip_dport = req->rmt_port;
+ fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fl.fl_ip_sport = inet_sk(sk)->sport;
if (ip6_dst_lookup(sk, &dst, &fl))
@@ -1482,10 +1480,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_copy(&newnp->daddr, &req->af.v6_req.rmt_addr);
- ipv6_addr_copy(&newnp->saddr, &req->af.v6_req.loc_addr);
- ipv6_addr_copy(&newnp->rcv_saddr, &req->af.v6_req.loc_addr);
- newsk->sk_bound_dev_if = req->af.v6_req.iif;
+ ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
+ ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
+ ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
+ newsk->sk_bound_dev_if = treq->iif;
/* Now IPv6 options...
@@ -1498,11 +1496,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
- if (req->af.v6_req.pktopts) {
- newnp->pktoptions = skb_clone(req->af.v6_req.pktopts,
- GFP_ATOMIC);
- kfree_skb(req->af.v6_req.pktopts);
- req->af.v6_req.pktopts = NULL;
+ if (treq->pktopts != NULL) {
+ newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
+ kfree_skb(treq->pktopts);
+ treq->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
@@ -2050,7 +2047,7 @@ static int tcp_v6_destroy_sock(struct sock *sk)
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
- struct sock *sk, struct open_request *req, int i, int uid)
+ struct sock *sk, struct request_sock *req, int i, int uid)
{
struct in6_addr *dest, *src;
int ttd = req->expires - jiffies;
@@ -2058,8 +2055,8 @@ static void get_openreq6(struct seq_file *seq,
if (ttd < 0)
ttd = 0;
- src = &req->af.v6_req.loc_addr;
- dest = &req->af.v6_req.rmt_addr;
+ src = &tcp6_rsk(req)->loc_addr;
+ dest = &tcp6_rsk(req)->rmt_addr;
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
@@ -2069,7 +2066,7 @@ static void get_openreq6(struct seq_file *seq,
ntohs(inet_sk(sk)->sport),
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3],
- ntohs(req->rmt_port),
+ ntohs(inet_rsk(req)->rmt_port),
TCP_SYN_RECV,
0,0, /* could print option size, but that is af dependent. */
1, /* timers active (only the expire timer) */
@@ -2239,6 +2236,7 @@ struct proto tcpv6_prot = {
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp6_sock),
+ .rsk_prot = &tcp6_request_sock_ops,
};
static struct inet6_protocol tcpv6_protocol = {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e251d0ba4f3..eff050ac704 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -300,12 +300,12 @@ csum_copy_err:
/* Clear queue. */
if (flags&MSG_PEEK) {
int clear = 0;
- spin_lock_irq(&sk->sk_receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
if (skb == skb_peek(&sk->sk_receive_queue)) {
__skb_unlink(skb, &sk->sk_receive_queue);
clear = 1;
}
- spin_unlock_irq(&sk->sk_receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
if (clear)
kfree_skb(skb);
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index ce980aa94ed..98b72f2024f 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -656,13 +656,18 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
sa->sadb_sa_exttype = SADB_EXT_SA;
sa->sadb_sa_spi = x->id.spi;
sa->sadb_sa_replay = x->props.replay_window;
- sa->sadb_sa_state = SADB_SASTATE_DYING;
- if (x->km.state == XFRM_STATE_VALID && !x->km.dying)
- sa->sadb_sa_state = SADB_SASTATE_MATURE;
- else if (x->km.state == XFRM_STATE_ACQ)
+ switch (x->km.state) {
+ case XFRM_STATE_VALID:
+ sa->sadb_sa_state = x->km.dying ?
+ SADB_SASTATE_DYING : SADB_SASTATE_MATURE;
+ break;
+ case XFRM_STATE_ACQ:
sa->sadb_sa_state = SADB_SASTATE_LARVAL;
- else if (x->km.state == XFRM_STATE_EXPIRED)
+ break;
+ default:
sa->sadb_sa_state = SADB_SASTATE_DEAD;
+ break;
+ }
sa->sadb_sa_auth = 0;
if (x->aalg) {
struct xfrm_algo_desc *a = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
@@ -1240,13 +1245,78 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
return 0;
}
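+/* map an XFRM_MSG_* policy event onto its PF_KEYv2 SADB_X_SPD* message type */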
+static inline int event2poltype(int event)
+{
+ switch (event) {
+ case XFRM_MSG_DELPOLICY:
+ return SADB_X_SPDDELETE;
+ case XFRM_MSG_NEWPOLICY:
+ return SADB_X_SPDADD;
+ case XFRM_MSG_UPDPOLICY:
+ return SADB_X_SPDUPDATE;
+ case XFRM_MSG_POLEXPIRE:
+ /* return SADB_X_SPDEXPIRE; */
+ default:
+ printk("pfkey: Unknown policy event %d\n", event);
+ break;
+ }
+
+ return 0;
+}
+
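+/* map an XFRM_MSG_* SA event onto its PF_KEYv2 SADB_* message type */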
+static inline int event2keytype(int event)
+{
+ switch (event) {
+ case XFRM_MSG_DELSA:
+ return SADB_DELETE;
+ case XFRM_MSG_NEWSA:
+ return SADB_ADD;
+ case XFRM_MSG_UPDSA:
+ return SADB_UPDATE;
+ case XFRM_MSG_EXPIRE:
+ return SADB_EXPIRE;
+ default:
+ printk("pfkey: Unknown SA event %d\n", event);
+ break;
+ }
+
+ return 0;
+}
+
+/* ADD/UPD/DEL */
+static int key_notify_sa(struct xfrm_state *x, struct km_event *c)
+{
+ struct sk_buff *skb;
+ struct sadb_msg *hdr;
+ int hsc = 3;
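+ /* hsc masks which lifetime extensions pfkey_xfrm_state2msg() emits
+ * (bit 0 = soft, bit 1 = hard); delete notifications carry none */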
+
+ if (c->event == XFRM_MSG_DELSA)
+ hsc = 0;
+
+ skb = pfkey_xfrm_state2msg(x, 0, hsc);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ hdr = (struct sadb_msg *) skb->data;
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_type = event2keytype(c->event);
+ hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
+ hdr->sadb_msg_errno = 0;
+ hdr->sadb_msg_reserved = 0;
+ hdr->sadb_msg_seq = c->seq;
+ hdr->sadb_msg_pid = c->pid;
+
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
+
+ return 0;
+}
static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
{
- struct sk_buff *out_skb;
- struct sadb_msg *out_hdr;
struct xfrm_state *x;
int err;
+ struct km_event c;
xfrm_probe_algs();
@@ -1254,6 +1324,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
if (IS_ERR(x))
return PTR_ERR(x);
+ xfrm_state_hold(x);
if (hdr->sadb_msg_type == SADB_ADD)
err = xfrm_state_add(x);
else
@@ -1262,30 +1333,26 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
if (err < 0) {
x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
- return err;
+ goto out;
}
- out_skb = pfkey_xfrm_state2msg(x, 0, 3);
- if (IS_ERR(out_skb))
- return PTR_ERR(out_skb); /* XXX Should we return 0 here ? */
-
- out_hdr = (struct sadb_msg *) out_skb->data;
- out_hdr->sadb_msg_version = hdr->sadb_msg_version;
- out_hdr->sadb_msg_type = hdr->sadb_msg_type;
- out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
- out_hdr->sadb_msg_errno = 0;
- out_hdr->sadb_msg_reserved = 0;
- out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
- out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, sk);
-
- return 0;
+ if (hdr->sadb_msg_type == SADB_ADD)
+ c.event = XFRM_MSG_NEWSA;
+ else
+ c.event = XFRM_MSG_UPDSA;
+ c.seq = hdr->sadb_msg_seq;
+ c.pid = hdr->sadb_msg_pid;
+ km_state_notify(x, &c);
+out:
+ xfrm_state_put(x);
+ return err;
}
static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
{
struct xfrm_state *x;
+ struct km_event c;
+ int err;
if (!ext_hdrs[SADB_EXT_SA-1] ||
!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
@@ -1301,13 +1368,19 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
return -EPERM;
}
- xfrm_state_delete(x);
- xfrm_state_put(x);
+ err = xfrm_state_delete(x);
+ if (err < 0) {
+ xfrm_state_put(x);
+ return err;
+ }
- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
- BROADCAST_ALL, sk);
+ c.seq = hdr->sadb_msg_seq;
+ c.pid = hdr->sadb_msg_pid;
+ c.event = XFRM_MSG_DELSA;
+ km_state_notify(x, &c);
+ xfrm_state_put(x);
- return 0;
+ return err;
}
static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
@@ -1445,28 +1518,42 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg
return 0;
}
+static int key_notify_sa_flush(struct km_event *c)
+{
+ struct sk_buff *skb;
+ struct sadb_msg *hdr;
+
+ skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
+ if (!skb)
+ return -ENOBUFS;
+ hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
+ hdr->sadb_msg_type = SADB_FLUSH;
+ hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
+ hdr->sadb_msg_seq = c->seq;
+ hdr->sadb_msg_pid = c->pid;
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
+
+ return 0;
+}
+
static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
{
unsigned proto;
- struct sk_buff *skb_out;
- struct sadb_msg *hdr_out;
+ struct km_event c;
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
if (proto == 0)
return -EINVAL;
- skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_KERNEL);
- if (!skb_out)
- return -ENOBUFS;
-
xfrm_state_flush(proto);
-
- hdr_out = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
- pfkey_hdr_dup(hdr_out, hdr);
- hdr_out->sadb_msg_errno = (uint8_t) 0;
- hdr_out->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
-
- pfkey_broadcast(skb_out, GFP_KERNEL, BROADCAST_ALL, NULL);
+ c.data.proto = proto;
+ c.seq = hdr->sadb_msg_seq;
+ c.pid = hdr->sadb_msg_pid;
+ c.event = XFRM_MSG_FLUSHSA;
+ km_state_notify(NULL, &c);
return 0;
}
@@ -1859,6 +1946,35 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
hdr->sadb_msg_reserved = atomic_read(&xp->refcnt);
}
+static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
+{
+ struct sk_buff *out_skb;
+ struct sadb_msg *out_hdr;
+ int err = 0;
+
+ out_skb = pfkey_xfrm_policy2msg_prep(xp);
+ if (IS_ERR(out_skb)) {
+ err = PTR_ERR(out_skb);
+ goto out;
+ }
+ pfkey_xfrm_policy2msg(out_skb, xp, dir);
+
+ out_hdr = (struct sadb_msg *) out_skb->data;
+ out_hdr->sadb_msg_version = PF_KEY_V2;
+
+ if (c->data.byid && c->event == XFRM_MSG_DELPOLICY)
+ out_hdr->sadb_msg_type = SADB_X_SPDDELETE2;
+ else
+ out_hdr->sadb_msg_type = event2poltype(c->event);
+ out_hdr->sadb_msg_errno = 0;
+ out_hdr->sadb_msg_seq = c->seq;
+ out_hdr->sadb_msg_pid = c->pid;
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
+out:
+ return err;
+}
+
static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
{
int err;
@@ -1866,8 +1982,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
struct sadb_address *sa;
struct sadb_x_policy *pol;
struct xfrm_policy *xp;
- struct sk_buff *out_skb;
- struct sadb_msg *out_hdr;
+ struct km_event c;
if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
ext_hdrs[SADB_EXT_ADDRESS_DST-1]) ||
@@ -1935,31 +2050,23 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
(err = parse_ipsecrequests(xp, pol)) < 0)
goto out;
- out_skb = pfkey_xfrm_policy2msg_prep(xp);
- if (IS_ERR(out_skb)) {
- err = PTR_ERR(out_skb);
- goto out;
- }
-
err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp,
hdr->sadb_msg_type != SADB_X_SPDUPDATE);
if (err) {
- kfree_skb(out_skb);
- goto out;
+ kfree(xp);
+ return err;
}
- pfkey_xfrm_policy2msg(out_skb, xp, pol->sadb_x_policy_dir-1);
+ if (hdr->sadb_msg_type == SADB_X_SPDUPDATE)
+ c.event = XFRM_MSG_UPDPOLICY;
+ else
+ c.event = XFRM_MSG_NEWPOLICY;
- xfrm_pol_put(xp);
+ c.seq = hdr->sadb_msg_seq;
+ c.pid = hdr->sadb_msg_pid;
- out_hdr = (struct sadb_msg *) out_skb->data;
- out_hdr->sadb_msg_version = hdr->sadb_msg_version;
- out_hdr->sadb_msg_type = hdr->sadb_msg_type;
- out_hdr->sadb_msg_satype = 0;
- out_hdr->sadb_msg_errno = 0;
- out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
- out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, sk);
+ km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
+ xfrm_pol_put(xp);
return 0;
out:
@@ -1973,9 +2080,8 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
struct sadb_address *sa;
struct sadb_x_policy *pol;
struct xfrm_policy *xp;
- struct sk_buff *out_skb;
- struct sadb_msg *out_hdr;
struct xfrm_selector sel;
+ struct km_event c;
if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
ext_hdrs[SADB_EXT_ADDRESS_DST-1]) ||
@@ -2010,25 +2116,40 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
err = 0;
+ c.seq = hdr->sadb_msg_seq;
+ c.pid = hdr->sadb_msg_pid;
+ c.event = XFRM_MSG_DELPOLICY;
+ km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
+
+ xfrm_pol_put(xp);
+ return err;
+}
+
+static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, struct sadb_msg *hdr, int dir)
+{
+ int err;
+ struct sk_buff *out_skb;
+ struct sadb_msg *out_hdr;
+ err = 0;
+
out_skb = pfkey_xfrm_policy2msg_prep(xp);
if (IS_ERR(out_skb)) {
err = PTR_ERR(out_skb);
goto out;
}
- pfkey_xfrm_policy2msg(out_skb, xp, pol->sadb_x_policy_dir-1);
+ pfkey_xfrm_policy2msg(out_skb, xp, dir);
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version;
- out_hdr->sadb_msg_type = SADB_X_SPDDELETE;
+ out_hdr->sadb_msg_type = hdr->sadb_msg_type;
out_hdr->sadb_msg_satype = 0;
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, sk);
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk);
err = 0;
out:
- xfrm_pol_put(xp);
return err;
}
@@ -2037,8 +2158,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
int err;
struct sadb_x_policy *pol;
struct xfrm_policy *xp;
- struct sk_buff *out_skb;
- struct sadb_msg *out_hdr;
+ struct km_event c;
if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL)
return -EINVAL;
@@ -2050,24 +2170,16 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
err = 0;
- out_skb = pfkey_xfrm_policy2msg_prep(xp);
- if (IS_ERR(out_skb)) {
- err = PTR_ERR(out_skb);
- goto out;
+ c.seq = hdr->sadb_msg_seq;
+ c.pid = hdr->sadb_msg_pid;
+ if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) {
+ c.data.byid = 1;
+ c.event = XFRM_MSG_DELPOLICY;
+ km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
+ } else {
+ err = key_pol_get_resp(sk, xp, hdr, pol->sadb_x_policy_dir-1);
}
- pfkey_xfrm_policy2msg(out_skb, xp, pol->sadb_x_policy_dir-1);
-
- out_hdr = (struct sadb_msg *) out_skb->data;
- out_hdr->sadb_msg_version = hdr->sadb_msg_version;
- out_hdr->sadb_msg_type = hdr->sadb_msg_type;
- out_hdr->sadb_msg_satype = 0;
- out_hdr->sadb_msg_errno = 0;
- out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
- out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, sk);
- err = 0;
-out:
xfrm_pol_put(xp);
return err;
}
@@ -2102,22 +2214,34 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
return xfrm_policy_walk(dump_sp, &data);
}
-static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int key_notify_policy_flush(struct km_event *c)
{
struct sk_buff *skb_out;
- struct sadb_msg *hdr_out;
+ struct sadb_msg *hdr;
- skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_KERNEL);
+ skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb_out)
return -ENOBUFS;
+ hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
+ hdr->sadb_msg_type = SADB_X_SPDFLUSH;
+ hdr->sadb_msg_seq = c->seq;
+ hdr->sadb_msg_pid = c->pid;
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL);
+ return 0;
- xfrm_policy_flush();
+}
+
+static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+{
+ struct km_event c;
- hdr_out = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
- pfkey_hdr_dup(hdr_out, hdr);
- hdr_out->sadb_msg_errno = (uint8_t) 0;
- hdr_out->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
- pfkey_broadcast(skb_out, GFP_KERNEL, BROADCAST_ALL, NULL);
+ xfrm_policy_flush();
+ c.event = XFRM_MSG_FLUSHPOLICY;
+ c.pid = hdr->sadb_msg_pid;
+ c.seq = hdr->sadb_msg_seq;
+ km_policy_notify(NULL, 0, &c);
return 0;
}
@@ -2317,11 +2441,23 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
}
}
-static int pfkey_send_notify(struct xfrm_state *x, int hard)
+static int key_notify_policy_expire(struct xfrm_policy *xp, struct km_event *c)
+{
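+ /* expire notifications for policies are not yet rendered over PF_KEY */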
+ return 0;
+}
+
+static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c)
{
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
- int hsc = (hard ? 2 : 1);
+ int hard;
+ int hsc;
+
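+ /* report the hard lifetime on a hard expire, the soft one otherwise */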
+ hard = c->data.hard;
+ if (hard)
+ hsc = 2;
+ else
+ hsc = 1;
out_skb = pfkey_xfrm_state2msg(x, 0, hsc);
if (IS_ERR(out_skb))
@@ -2340,6 +2476,44 @@ static int pfkey_send_notify(struct xfrm_state *x, int hard)
return 0;
}
+static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c)
+{
+ switch (c->event) {
+ case XFRM_MSG_EXPIRE:
+ return key_notify_sa_expire(x, c);
+ case XFRM_MSG_DELSA:
+ case XFRM_MSG_NEWSA:
+ case XFRM_MSG_UPDSA:
+ return key_notify_sa(x, c);
+ case XFRM_MSG_FLUSHSA:
+ return key_notify_sa_flush(c);
+ default:
+ printk("pfkey: Unknown SA event %d\n", c->event);
+ break;
+ }
+
+ return 0;
+}
+
+static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+{
+ switch (c->event) {
+ case XFRM_MSG_POLEXPIRE:
+ return key_notify_policy_expire(xp, c);
+ case XFRM_MSG_DELPOLICY:
+ case XFRM_MSG_NEWPOLICY:
+ case XFRM_MSG_UPDPOLICY:
+ return key_notify_policy(xp, dir, c);
+ case XFRM_MSG_FLUSHPOLICY:
+ return key_notify_policy_flush(c);
+ default:
+ printk("pfkey: Unknown policy event %d\n", c->event);
+ break;
+ }
+
+ return 0;
+}
+
static u32 get_acqseq(void)
{
u32 res;
@@ -2856,6 +3030,7 @@ static struct xfrm_mgr pfkeyv2_mgr =
.acquire = pfkey_send_acquire,
.compile_policy = pfkey_compile_policy,
.new_mapping = pfkey_send_new_mapping,
+ .notify_policy = pfkey_send_policy_notify,
};
static void __exit ipsec_pfkey_exit(void)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index e41ce458c2a..70bcd4744d9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1095,8 +1095,7 @@ static int netlink_dump(struct sock *sk)
return 0;
}
- nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLMSG_DONE, sizeof(int));
- nlh->nlmsg_flags |= NLM_F_MULTI;
+ nlh = NLMSG_NEW_ANSWER(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
@@ -1107,6 +1106,9 @@ static int netlink_dump(struct sock *sk)
netlink_destroy_callback(cb);
return 0;
+
+nlmsg_failure:
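+ /* NLMSG_NEW_ANSWER() branches here when the skb cannot fit the message */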
+ return -ENOBUFS;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
@@ -1178,7 +1180,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
}
rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
- NLMSG_ERROR, sizeof(struct nlmsgerr));
+ NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
errmsg = NLMSG_DATA(rep);
errmsg->error = err;
memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 914c85ff8fe..9594206e603 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -428,15 +428,15 @@ errout:
static int
tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
- unsigned flags, int event, int bind, int ref)
+ u16 flags, int event, int bind, int ref)
{
struct tcamsg *t;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
struct rtattr *x;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*t));
- nlh->nlmsg_flags = flags;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
+
t = NLMSG_DATA(nlh);
t->tca_family = AF_UNSPEC;
@@ -669,7 +669,7 @@ err:
}
static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
- unsigned flags)
+ u16 flags)
{
struct tcamsg *t;
struct nlmsghdr *nlh;
@@ -684,8 +684,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
b = (unsigned char *)skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*t));
- nlh->nlmsg_flags = flags;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
t = NLMSG_DATA(nlh);
t->tca_family = AF_UNSPEC;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 56e66c3fe0f..1616bf5c962 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -322,14 +322,13 @@ errout:
static int
tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
- u32 pid, u32 seq, unsigned flags, int event)
+ u32 pid, u32 seq, u16 flags, int event)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*tcm));
- nlh->nlmsg_flags = flags;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm_ifindex = tp->q->dev->ifindex;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 07977f8f267..97c1c75d5c7 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -760,15 +760,14 @@ graft:
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
- u32 pid, u32 seq, unsigned flags, int event)
+ u32 pid, u32 seq, u16 flags, int event)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
struct gnet_dump d;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*tcm));
- nlh->nlmsg_flags = flags;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm_ifindex = q->dev->ifindex;
@@ -997,7 +996,7 @@ out:
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
unsigned long cl,
- u32 pid, u32 seq, unsigned flags, int event)
+ u32 pid, u32 seq, u16 flags, int event)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
@@ -1005,8 +1004,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
struct gnet_dump d;
struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
- nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*tcm));
- nlh->nlmsg_flags = flags;
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm_ifindex = q->dev->ifindex;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index d8bd2a569c7..13e0e7b3856 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -31,7 +31,7 @@
#endif
-#define PRIV(sch) qdisc_priv(sch)
+#define PRIV(sch) ((struct dsmark_qdisc_data *) qdisc_priv(sch))
/*
@@ -55,24 +55,38 @@
struct dsmark_qdisc_data {
struct Qdisc *q;
struct tcf_proto *filter_list;
- __u8 *mask; /* "owns" the array */
- __u8 *value;
- __u16 indices;
- __u32 default_index; /* index range is 0...0xffff */
+ u8 *mask; /* "owns" the array */
+ u8 *value;
+ u16 indices;
+ u32 default_index; /* index range is 0...0xffff */
int set_tc_index;
};
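+/* indices must be a nonzero power of two: dsmark_dequeue() masks
+ * skb->tc_index with (indices - 1) to pick a mask/value slot */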
+static inline int dsmark_valid_indices(u16 indices)
+{
+ while (indices != 1) {
+ if (indices & 1)
+ return 0;
+ indices >>= 1;
+ }
+
+ return 1;
+}
-/* ------------------------- Class/flow operations ------------------------- */
+static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
+{
+ return (index <= p->indices && index > 0);
+}
+/* ------------------------- Class/flow operations ------------------------- */
-static int dsmark_graft(struct Qdisc *sch,unsigned long arg,
- struct Qdisc *new,struct Qdisc **old)
+static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
+ struct Qdisc *new, struct Qdisc **old)
{
struct dsmark_qdisc_data *p = PRIV(sch);
- DPRINTK("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",sch,p,new,
- old);
+ DPRINTK("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",
+ sch, p, new, old);
if (new == NULL) {
new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
@@ -81,91 +95,95 @@ static int dsmark_graft(struct Qdisc *sch,unsigned long arg,
}
sch_tree_lock(sch);
- *old = xchg(&p->q,new);
- if (*old)
- qdisc_reset(*old);
+ *old = xchg(&p->q, new);
+ qdisc_reset(*old);
sch->q.qlen = 0;
- sch_tree_unlock(sch); /* @@@ move up ? */
+ sch_tree_unlock(sch);
+
return 0;
}
-
static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct dsmark_qdisc_data *p = PRIV(sch);
-
- return p->q;
+ return PRIV(sch)->q;
}
-
-static unsigned long dsmark_get(struct Qdisc *sch,u32 classid)
+static unsigned long dsmark_get(struct Qdisc *sch, u32 classid)
{
- struct dsmark_qdisc_data *p __attribute__((unused)) = PRIV(sch);
+ DPRINTK("dsmark_get(sch %p,[qdisc %p],classid %x)\n",
+ sch, PRIV(sch), classid);
- DPRINTK("dsmark_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid);
- return TC_H_MIN(classid)+1;
+ return TC_H_MIN(classid) + 1;
}
-
static unsigned long dsmark_bind_filter(struct Qdisc *sch,
- unsigned long parent, u32 classid)
+ unsigned long parent, u32 classid)
{
- return dsmark_get(sch,classid);
+ return dsmark_get(sch, classid);
}
-
static void dsmark_put(struct Qdisc *sch, unsigned long cl)
{
}
-
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct rtattr **tca, unsigned long *arg)
+ struct rtattr **tca, unsigned long *arg)
{
struct dsmark_qdisc_data *p = PRIV(sch);
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct rtattr *tb[TCA_DSMARK_MAX];
+ int err = -EINVAL;
+ u8 mask = 0;
DPRINTK("dsmark_change(sch %p,[qdisc %p],classid %x,parent %x),"
- "arg 0x%lx\n",sch,p,classid,parent,*arg);
- if (*arg > p->indices)
- return -ENOENT;
- if (!opt || rtattr_parse_nested(tb, TCA_DSMARK_MAX, opt))
- return -EINVAL;
- if (tb[TCA_DSMARK_MASK-1]) {
- if (!RTA_PAYLOAD(tb[TCA_DSMARK_MASK-1]))
- return -EINVAL;
- p->mask[*arg-1] = *(__u8 *) RTA_DATA(tb[TCA_DSMARK_MASK-1]);
- }
- if (tb[TCA_DSMARK_VALUE-1]) {
- if (!RTA_PAYLOAD(tb[TCA_DSMARK_VALUE-1]))
- return -EINVAL;
- p->value[*arg-1] = *(__u8 *) RTA_DATA(tb[TCA_DSMARK_VALUE-1]);
+ "arg 0x%lx\n", sch, p, classid, parent, *arg);
+
+ if (!dsmark_valid_index(p, *arg)) {
+ err = -ENOENT;
+ goto rtattr_failure;
}
- return 0;
-}
+ if (!opt || rtattr_parse_nested(tb, TCA_DSMARK_MAX, opt))
+ goto rtattr_failure;
+
+ if (tb[TCA_DSMARK_MASK-1])
+ mask = RTA_GET_U8(tb[TCA_DSMARK_MASK-1]);
+
+ if (tb[TCA_DSMARK_VALUE-1])
+ p->value[*arg-1] = RTA_GET_U8(tb[TCA_DSMARK_VALUE-1]);
+
+ if (tb[TCA_DSMARK_MASK-1])
+ p->mask[*arg-1] = mask;
+
+ err = 0;
-static int dsmark_delete(struct Qdisc *sch,unsigned long arg)
+rtattr_failure:
+ return err;
+}
+
+static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
struct dsmark_qdisc_data *p = PRIV(sch);
- if (!arg || arg > p->indices)
+ if (!dsmark_valid_index(p, arg))
return -EINVAL;
+
p->mask[arg-1] = 0xff;
p->value[arg-1] = 0;
+
return 0;
}
-
static void dsmark_walk(struct Qdisc *sch,struct qdisc_walker *walker)
{
struct dsmark_qdisc_data *p = PRIV(sch);
int i;
- DPRINTK("dsmark_walk(sch %p,[qdisc %p],walker %p)\n",sch,p,walker);
+ DPRINTK("dsmark_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
+
if (walker->stop)
return;
+
for (i = 0; i < p->indices; i++) {
if (p->mask[i] == 0xff && !p->value[i])
goto ignore;
@@ -180,26 +198,20 @@ ignore:
}
}
-
static struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,unsigned long cl)
{
- struct dsmark_qdisc_data *p = PRIV(sch);
-
- return &p->filter_list;
+ return &PRIV(sch)->filter_list;
}
-
/* --------------------------- Qdisc operations ---------------------------- */
-
static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
{
struct dsmark_qdisc_data *p = PRIV(sch);
- struct tcf_result res;
- int result;
- int ret = NET_XMIT_POLICED;
+ int err;
+
+ D2PRINTK("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
- D2PRINTK("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
if (p->set_tc_index) {
/* FIXME: Safe with non-linear skbs? --RR */
switch (skb->protocol) {
@@ -216,17 +228,21 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
break;
};
}
- result = TC_POLICE_OK; /* be nice to gcc */
- if (TC_H_MAJ(skb->priority) == sch->handle) {
+
+ if (TC_H_MAJ(skb->priority) == sch->handle)
skb->tc_index = TC_H_MIN(skb->priority);
- } else {
- result = tc_classify(skb,p->filter_list,&res);
- D2PRINTK("result %d class 0x%04x\n",result,res.classid);
+ else {
+ struct tcf_result res;
+ int result = tc_classify(skb, p->filter_list, &res);
+
+ D2PRINTK("result %d class 0x%04x\n", result, res.classid);
+
switch (result) {
#ifdef CONFIG_NET_CLS_POLICE
case TC_POLICE_SHOT:
kfree_skb(skb);
- break;
+ sch->qstats.drops++;
+ return NET_XMIT_POLICED;
#if 0
case TC_POLICE_RECLASSIFY:
/* FIXME: what to do here ??? */
@@ -243,43 +259,45 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
break;
};
}
- if (
-#ifdef CONFIG_NET_CLS_POLICE
- result == TC_POLICE_SHOT ||
-#endif
- ((ret = p->q->enqueue(skb,p->q)) != 0)) {
+ err = p->q->enqueue(skb, p->q);
+ if (err != NET_XMIT_SUCCESS) {
sch->qstats.drops++;
- return ret;
+ return err;
}
+
sch->bstats.bytes += skb->len;
sch->bstats.packets++;
sch->q.qlen++;
- return ret;
-}
+ return NET_XMIT_SUCCESS;
+}
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
struct dsmark_qdisc_data *p = PRIV(sch);
struct sk_buff *skb;
- int index;
+ u32 index;
+
+ D2PRINTK("dsmark_dequeue(sch %p,[qdisc %p])\n", sch, p);
- D2PRINTK("dsmark_dequeue(sch %p,[qdisc %p])\n",sch,p);
skb = p->q->ops->dequeue(p->q);
- if (!skb)
+ if (skb == NULL)
return NULL;
+
sch->q.qlen--;
- index = skb->tc_index & (p->indices-1);
- D2PRINTK("index %d->%d\n",skb->tc_index,index);
+
+ index = skb->tc_index & (p->indices - 1);
+ D2PRINTK("index %d->%d\n", skb->tc_index, index);
+
switch (skb->protocol) {
case __constant_htons(ETH_P_IP):
- ipv4_change_dsfield(skb->nh.iph,
- p->mask[index],p->value[index]);
+ ipv4_change_dsfield(skb->nh.iph, p->mask[index],
+ p->value[index]);
break;
case __constant_htons(ETH_P_IPV6):
- ipv6_change_dsfield(skb->nh.ipv6h,
- p->mask[index],p->value[index]);
+ ipv6_change_dsfield(skb->nh.ipv6h, p->mask[index],
+ p->value[index]);
break;
default:
/*
@@ -293,152 +311,162 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
htons(skb->protocol));
break;
};
+
return skb;
}
-
static int dsmark_requeue(struct sk_buff *skb,struct Qdisc *sch)
{
- int ret;
struct dsmark_qdisc_data *p = PRIV(sch);
+ int err;
- D2PRINTK("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
- if ((ret = p->q->ops->requeue(skb, p->q)) == 0) {
- sch->q.qlen++;
- sch->qstats.requeues++;
- return 0;
+ D2PRINTK("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
+
+ err = p->q->ops->requeue(skb, p->q);
+ if (err != NET_XMIT_SUCCESS) {
+ sch->qstats.drops++;
+ return err;
}
- sch->qstats.drops++;
- return ret;
-}
+ sch->q.qlen++;
+ sch->qstats.requeues++;
+
+ return NET_XMIT_SUCCESS;
+}
static unsigned int dsmark_drop(struct Qdisc *sch)
{
struct dsmark_qdisc_data *p = PRIV(sch);
unsigned int len;
- DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n",sch,p);
- if (!p->q->ops->drop)
- return 0;
- if (!(len = p->q->ops->drop(p->q)))
+ DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);
+
+ if (p->q->ops->drop == NULL)
return 0;
- sch->q.qlen--;
+
+ len = p->q->ops->drop(p->q);
+ if (len)
+ sch->q.qlen--;
+
return len;
}
-
-static int dsmark_init(struct Qdisc *sch,struct rtattr *opt)
+static int dsmark_init(struct Qdisc *sch, struct rtattr *opt)
{
struct dsmark_qdisc_data *p = PRIV(sch);
struct rtattr *tb[TCA_DSMARK_MAX];
- __u16 tmp;
-
- DPRINTK("dsmark_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
- if (!opt ||
- rtattr_parse(tb,TCA_DSMARK_MAX,RTA_DATA(opt),RTA_PAYLOAD(opt)) < 0 ||
- !tb[TCA_DSMARK_INDICES-1] ||
- RTA_PAYLOAD(tb[TCA_DSMARK_INDICES-1]) < sizeof(__u16))
- return -EINVAL;
- p->indices = *(__u16 *) RTA_DATA(tb[TCA_DSMARK_INDICES-1]);
- if (!p->indices)
- return -EINVAL;
- for (tmp = p->indices; tmp != 1; tmp >>= 1) {
- if (tmp & 1)
- return -EINVAL;
- }
- p->default_index = NO_DEFAULT_INDEX;
- if (tb[TCA_DSMARK_DEFAULT_INDEX-1]) {
- if (RTA_PAYLOAD(tb[TCA_DSMARK_DEFAULT_INDEX-1]) < sizeof(__u16))
- return -EINVAL;
- p->default_index =
- *(__u16 *) RTA_DATA(tb[TCA_DSMARK_DEFAULT_INDEX-1]);
+ int err = -EINVAL;
+ u32 default_index = NO_DEFAULT_INDEX;
+ u16 indices;
+ u8 *mask;
+
+ DPRINTK("dsmark_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
+
+ if (!opt || rtattr_parse_nested(tb, TCA_DSMARK_MAX, opt) < 0)
+ goto errout;
+
+ indices = RTA_GET_U16(tb[TCA_DSMARK_INDICES-1]);
+ if (!indices || !dsmark_valid_indices(indices))
+ goto errout;
+
+ if (tb[TCA_DSMARK_DEFAULT_INDEX-1])
+ default_index = RTA_GET_U16(tb[TCA_DSMARK_DEFAULT_INDEX-1]);
+
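+ /* one allocation backs both tables: mask occupies the first
+ * 'indices' bytes, value the second (p->value = p->mask + indices) */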
+ mask = kmalloc(indices * 2, GFP_KERNEL);
+ if (mask == NULL) {
+ err = -ENOMEM;
+ goto errout;
}
- p->set_tc_index = !!tb[TCA_DSMARK_SET_TC_INDEX-1];
- p->mask = kmalloc(p->indices*2,GFP_KERNEL);
- if (!p->mask)
- return -ENOMEM;
- p->value = p->mask+p->indices;
- memset(p->mask,0xff,p->indices);
- memset(p->value,0,p->indices);
- if (!(p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
+
+ p->mask = mask;
+ memset(p->mask, 0xff, indices);
+
+ p->value = p->mask + indices;
+ memset(p->value, 0, indices);
+
+ p->indices = indices;
+ p->default_index = default_index;
+ p->set_tc_index = RTA_GET_FLAG(tb[TCA_DSMARK_SET_TC_INDEX-1]);
+
+ p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+ if (p->q == NULL)
p->q = &noop_qdisc;
- DPRINTK("dsmark_init: qdisc %p\n",&p->q);
- return 0;
-}
+ DPRINTK("dsmark_init: qdisc %p\n", p->q);
+
+ err = 0;
+errout:
+rtattr_failure:
+ return err;
+}
static void dsmark_reset(struct Qdisc *sch)
{
struct dsmark_qdisc_data *p = PRIV(sch);
- DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n",sch,p);
+ DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);
qdisc_reset(p->q);
sch->q.qlen = 0;
}
-
static void dsmark_destroy(struct Qdisc *sch)
{
struct dsmark_qdisc_data *p = PRIV(sch);
struct tcf_proto *tp;
- DPRINTK("dsmark_destroy(sch %p,[qdisc %p])\n",sch,p);
+ DPRINTK("dsmark_destroy(sch %p,[qdisc %p])\n", sch, p);
+
while (p->filter_list) {
tp = p->filter_list;
p->filter_list = tp->next;
tcf_destroy(tp);
}
+
qdisc_destroy(p->q);
kfree(p->mask);
}
-
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
- struct sk_buff *skb, struct tcmsg *tcm)
+ struct sk_buff *skb, struct tcmsg *tcm)
{
struct dsmark_qdisc_data *p = PRIV(sch);
- unsigned char *b = skb->tail;
- struct rtattr *rta;
+ struct rtattr *opts = NULL;
+
+ DPRINTK("dsmark_dump_class(sch %p,[qdisc %p],class %ld\n", sch, p, cl);
- DPRINTK("dsmark_dump_class(sch %p,[qdisc %p],class %ld\n",sch,p,cl);
- if (!cl || cl > p->indices)
+ if (!dsmark_valid_index(p, cl))
return -EINVAL;
- tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle),cl-1);
- rta = (struct rtattr *) b;
- RTA_PUT(skb,TCA_OPTIONS,0,NULL);
- RTA_PUT(skb,TCA_DSMARK_MASK,1,&p->mask[cl-1]);
- RTA_PUT(skb,TCA_DSMARK_VALUE,1,&p->value[cl-1]);
- rta->rta_len = skb->tail-b;
- return skb->len;
+
+ tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1);
+
+ opts = RTA_NEST(skb, TCA_OPTIONS);
+ RTA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]);
+ RTA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]);
+
+ return RTA_NEST_END(skb, opts);
rtattr_failure:
- skb_trim(skb,b-skb->data);
- return -1;
+ return RTA_NEST_CANCEL(skb, opts);
}
static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct dsmark_qdisc_data *p = PRIV(sch);
- unsigned char *b = skb->tail;
- struct rtattr *rta;
+ struct rtattr *opts = NULL;
- rta = (struct rtattr *) b;
- RTA_PUT(skb,TCA_OPTIONS,0,NULL);
- RTA_PUT(skb,TCA_DSMARK_INDICES,sizeof(__u16),&p->indices);
- if (p->default_index != NO_DEFAULT_INDEX) {
- __u16 tmp = p->default_index;
+ opts = RTA_NEST(skb, TCA_OPTIONS);
+ RTA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices);
+
+ if (p->default_index != NO_DEFAULT_INDEX)
+ RTA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index);
- RTA_PUT(skb,TCA_DSMARK_DEFAULT_INDEX, sizeof(__u16), &tmp);
- }
if (p->set_tc_index)
- RTA_PUT(skb, TCA_DSMARK_SET_TC_INDEX, 0, NULL);
- rta->rta_len = skb->tail-b;
- return skb->len;
+ RTA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX);
+
+ return RTA_NEST_END(skb, opts);
rtattr_failure:
- skb_trim(skb,b-skb->data);
- return -1;
+ return RTA_NEST_CANCEL(skb, opts);
}
static struct Qdisc_class_ops dsmark_class_ops = {
@@ -476,10 +504,13 @@ static int __init dsmark_module_init(void)
{
return register_qdisc(&dsmark_qdisc_ops);
}
+
static void __exit dsmark_module_exit(void)
{
unregister_qdisc(&dsmark_qdisc_ops);
}
+
module_init(dsmark_module_init)
module_exit(dsmark_module_exit)
+
MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 4888305c96d..033083bf0e7 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -11,131 +11,38 @@
#include <linux/config.h>
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
/* 1 band FIFO pseudo-"scheduler" */
struct fifo_sched_data
{
- unsigned limit;
+ u32 limit;
};
-static int
-bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
- if (sch->qstats.backlog + skb->len <= q->limit) {
- __skb_queue_tail(&sch->q, skb);
- sch->qstats.backlog += skb->len;
- sch->bstats.bytes += skb->len;
- sch->bstats.packets++;
- return 0;
- }
- sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
- if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
-#endif
- kfree_skb(skb);
- return NET_XMIT_DROP;
-}
-
-static int
-bfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
- __skb_queue_head(&sch->q, skb);
- sch->qstats.backlog += skb->len;
- sch->qstats.requeues++;
- return 0;
-}
-
-static struct sk_buff *
-bfifo_dequeue(struct Qdisc* sch)
-{
- struct sk_buff *skb;
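+ /* bfifo's limit is accounted in bytes of queue backlog */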
+ if (likely(sch->qstats.backlog + skb->len <= q->limit))
+ return qdisc_enqueue_tail(skb, sch);
- skb = __skb_dequeue(&sch->q);
- if (skb)
- sch->qstats.backlog -= skb->len;
- return skb;
+ return qdisc_reshape_fail(skb, sch);
}
-static unsigned int
-fifo_drop(struct Qdisc* sch)
-{
- struct sk_buff *skb;
-
- skb = __skb_dequeue_tail(&sch->q);
- if (skb) {
- unsigned int len = skb->len;
- sch->qstats.backlog -= len;
- kfree_skb(skb);
- return len;
- }
- return 0;
-}
-
-static void
-fifo_reset(struct Qdisc* sch)
-{
- skb_queue_purge(&sch->q);
- sch->qstats.backlog = 0;
-}
-
-static int
-pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
- if (sch->q.qlen < q->limit) {
- __skb_queue_tail(&sch->q, skb);
- sch->bstats.bytes += skb->len;
- sch->bstats.packets++;
- return 0;
- }
- sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
- if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
-#endif
- kfree_skb(skb);
- return NET_XMIT_DROP;
-}
-
-static int
-pfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
- __skb_queue_head(&sch->q, skb);
- sch->qstats.requeues++;
- return 0;
-}
-
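+ /* pfifo's limit is accounted in queued packets */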
+ if (likely(skb_queue_len(&sch->q) < q->limit))
+ return qdisc_enqueue_tail(skb, sch);
-static struct sk_buff *
-pfifo_dequeue(struct Qdisc* sch)
-{
- return __skb_dequeue(&sch->q);
+ return qdisc_reshape_fail(skb, sch);
}
static int fifo_init(struct Qdisc *sch, struct rtattr *opt)
@@ -143,66 +50,59 @@ static int fifo_init(struct Qdisc *sch, struct rtattr *opt)
struct fifo_sched_data *q = qdisc_priv(sch);
if (opt == NULL) {
- unsigned int limit = sch->dev->tx_queue_len ? : 1;
+ u32 limit = sch->dev->tx_queue_len ? : 1;
if (sch->ops == &bfifo_qdisc_ops)
- q->limit = limit*sch->dev->mtu;
- else
- q->limit = limit;
+ limit *= sch->dev->mtu;
+
+ q->limit = limit;
} else {
struct tc_fifo_qopt *ctl = RTA_DATA(opt);
- if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
+
+ if (RTA_PAYLOAD(opt) < sizeof(*ctl))
return -EINVAL;
+
q->limit = ctl->limit;
}
+
return 0;
}
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct fifo_sched_data *q = qdisc_priv(sch);
- unsigned char *b = skb->tail;
- struct tc_fifo_qopt opt;
+ struct tc_fifo_qopt opt = { .limit = q->limit };
- opt.limit = q->limit;
RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
-
return skb->len;
rtattr_failure:
- skb_trim(skb, b - skb->data);
return -1;
}
struct Qdisc_ops pfifo_qdisc_ops = {
- .next = NULL,
- .cl_ops = NULL,
.id = "pfifo",
.priv_size = sizeof(struct fifo_sched_data),
.enqueue = pfifo_enqueue,
- .dequeue = pfifo_dequeue,
- .requeue = pfifo_requeue,
- .drop = fifo_drop,
+ .dequeue = qdisc_dequeue_head,
+ .requeue = qdisc_requeue,
+ .drop = qdisc_queue_drop,
.init = fifo_init,
- .reset = fifo_reset,
- .destroy = NULL,
+ .reset = qdisc_reset_queue,
.change = fifo_init,
.dump = fifo_dump,
.owner = THIS_MODULE,
};
struct Qdisc_ops bfifo_qdisc_ops = {
- .next = NULL,
- .cl_ops = NULL,
.id = "bfifo",
.priv_size = sizeof(struct fifo_sched_data),
.enqueue = bfifo_enqueue,
- .dequeue = bfifo_dequeue,
- .requeue = bfifo_requeue,
- .drop = fifo_drop,
+ .dequeue = qdisc_dequeue_head,
+ .requeue = qdisc_requeue,
+ .drop = qdisc_queue_drop,
.init = fifo_init,
- .reset = fifo_reset,
- .destroy = NULL,
+ .reset = qdisc_reset_queue,
.change = fifo_init,
.dump = fifo_dump,
.owner = THIS_MODULE,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 87e48a4e105..7683b34dc6a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -243,31 +243,27 @@ static void dev_watchdog_down(struct net_device *dev)
cheaper.
*/
-static int
-noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
+static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
{
kfree_skb(skb);
return NET_XMIT_CN;
}
-static struct sk_buff *
-noop_dequeue(struct Qdisc * qdisc)
+static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
{
return NULL;
}
-static int
-noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
if (net_ratelimit())
- printk(KERN_DEBUG "%s deferred output. It is buggy.\n", skb->dev->name);
+ printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
+ skb->dev->name);
kfree_skb(skb);
return NET_XMIT_CN;
}
struct Qdisc_ops noop_qdisc_ops = {
- .next = NULL,
- .cl_ops = NULL,
.id = "noop",
.priv_size = 0,
.enqueue = noop_enqueue,
@@ -285,8 +281,6 @@ struct Qdisc noop_qdisc = {
};
static struct Qdisc_ops noqueue_qdisc_ops = {
- .next = NULL,
- .cl_ops = NULL,
.id = "noqueue",
.priv_size = 0,
.enqueue = noop_enqueue,
@@ -311,97 +305,87 @@ static const u8 prio2band[TC_PRIO_MAX+1] =
generic prio+fifo combination.
*/
-static int
-pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+#define PFIFO_FAST_BANDS 3
+
+static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
+ struct Qdisc *qdisc)
{
struct sk_buff_head *list = qdisc_priv(qdisc);
+ return list + prio2band[skb->priority & TC_PRIO_MAX];
+}
- list += prio2band[skb->priority&TC_PRIO_MAX];
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+{
+ struct sk_buff_head *list = prio2list(skb, qdisc);
- if (list->qlen < qdisc->dev->tx_queue_len) {
- __skb_queue_tail(list, skb);
+ if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
qdisc->q.qlen++;
- qdisc->bstats.bytes += skb->len;
- qdisc->bstats.packets++;
- return 0;
+ return __qdisc_enqueue_tail(skb, qdisc, list);
}
- qdisc->qstats.drops++;
- kfree_skb(skb);
- return NET_XMIT_DROP;
+
+ return qdisc_drop(skb, qdisc);
}
-static struct sk_buff *
-pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
int prio;
struct sk_buff_head *list = qdisc_priv(qdisc);
- struct sk_buff *skb;
- for (prio = 0; prio < 3; prio++, list++) {
- skb = __skb_dequeue(list);
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++, list++) {
+ struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
if (skb) {
qdisc->q.qlen--;
return skb;
}
}
+
return NULL;
}
-static int
-pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list = qdisc_priv(qdisc);
-
- list += prio2band[skb->priority&TC_PRIO_MAX];
-
- __skb_queue_head(list, skb);
qdisc->q.qlen++;
- qdisc->qstats.requeues++;
- return 0;
+ return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}
-static void
-pfifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc* qdisc)
{
int prio;
struct sk_buff_head *list = qdisc_priv(qdisc);
- for (prio=0; prio < 3; prio++)
- skb_queue_purge(list+prio);
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+ __qdisc_reset_queue(qdisc, list + prio);
+
+ qdisc->qstats.backlog = 0;
qdisc->q.qlen = 0;
}
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
- unsigned char *b = skb->tail;
- struct tc_prio_qopt opt;
+ struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
- opt.bands = 3;
memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
return skb->len;
rtattr_failure:
- skb_trim(skb, b - skb->data);
return -1;
}
static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
- int i;
+ int prio;
struct sk_buff_head *list = qdisc_priv(qdisc);
- for (i=0; i<3; i++)
- skb_queue_head_init(list+i);
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+ skb_queue_head_init(list + prio);
return 0;
}
static struct Qdisc_ops pfifo_fast_ops = {
- .next = NULL,
- .cl_ops = NULL,
.id = "pfifo_fast",
- .priv_size = 3 * sizeof(struct sk_buff_head),
+ .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
.enqueue = pfifo_fast_enqueue,
.dequeue = pfifo_fast_dequeue,
.requeue = pfifo_fast_requeue,
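
pfifo_fast's private area is now explicitly an array of PFIFO_FAST_BANDS queue heads, and prio2list() picks the band from the low bits of skb->priority. The prio2band table values are outside this hunk; assuming the stock mapping, the snippet below shows how two common priorities separate (band 0 is scanned first on dequeue, so it always wins):

    /* Illustrative only: assumes the stock prio2band table, which is
     * not shown in this hunk.
     */
    struct sk_buff_head *band = qdisc_priv(qdisc);	/* band[0..2] */
    struct sk_buff_head *bulk, *interactive;

    bulk        = band + prio2band[TC_PRIO_BESTEFFORT & TC_PRIO_MAX];  /* band 1 */
    interactive = band + prio2band[TC_PRIO_INTERACTIVE & TC_PRIO_MAX]; /* band 0 */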
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 2a3c0e08a09..e6926cb1942 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4368,15 +4368,11 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
* However, this function was correct in any case. 8)
*/
if (flags & MSG_PEEK) {
- unsigned long cpu_flags;
-
- sctp_spin_lock_irqsave(&sk->sk_receive_queue.lock,
- cpu_flags);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
atomic_inc(&skb->users);
- sctp_spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
- cpu_flags);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
} else {
skb = skb_dequeue(&sk->sk_receive_queue);
}
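
SCTP's receive queue is only ever filled from softirq context, never from a hard interrupt, so disabling bottom halves is sufficient and the irqsave/irqrestore variants were stronger than needed. The converted MSG_PEEK pattern in isolation, with the reference counting spelled out:

    /* Peek without dequeueing: lock out softirq delivery, then pin the
     * skb so it outlives the unlock; the caller drops the reference
     * with kfree_skb() once the data has been copied out.
     */
    spin_lock_bh(&sk->sk_receive_queue.lock);
    skb = skb_peek(&sk->sk_receive_queue);
    if (skb)
    	atomic_inc(&skb->users);
    spin_unlock_bh(&sk->sk_receive_queue.lock);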
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d07f5ce3182..0a4260719a1 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -216,8 +216,8 @@ out:
expired:
read_unlock(&xp->lock);
- km_policy_expired(xp, dir, 1);
- xfrm_policy_delete(xp, dir);
+ if (!xfrm_policy_delete(xp, dir))
+ km_policy_expired(xp, dir, 1);
xfrm_pol_put(xp);
}
@@ -555,7 +555,7 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
return NULL;
}
-void xfrm_policy_delete(struct xfrm_policy *pol, int dir)
+int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
write_lock_bh(&xfrm_policy_lock);
pol = __xfrm_policy_unlink(pol, dir);
@@ -564,7 +564,9 @@ void xfrm_policy_delete(struct xfrm_policy *pol, int dir)
if (dir < XFRM_POLICY_MAX)
atomic_inc(&flow_cache_genid);
xfrm_policy_kill(pol);
+ return 0;
}
+ return -ENOENT;
}
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
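
Returning an error from xfrm_policy_delete() closes a notification race: timer expiry and an explicit delete can both try to unlink the same policy, and only the caller whose __xfrm_policy_unlink() actually finds it sees 0. A condensed restatement of the timer path above (the wrapper name here is hypothetical):

    /* Hypothetical wrapper showing the intended call pattern: the loser
     * of the unlink race gets -ENOENT and must not notify, so userspace
     * sees at most one hard-expire event per policy.
     */
    static void policy_hard_expired(struct xfrm_policy *xp, int dir)
    {
    	if (!xfrm_policy_delete(xp, dir))
    		km_policy_expired(xp, dir, 1);
    	xfrm_pol_put(xp);
    }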
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d11747c2a76..2537f26f097 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -50,7 +50,7 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
static int xfrm_state_gc_flush_bundles;
-static void __xfrm_state_delete(struct xfrm_state *x);
+static int __xfrm_state_delete(struct xfrm_state *x);
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
@@ -154,6 +154,7 @@ static void xfrm_timer_handler(unsigned long data)
next = tmo;
}
+ x->km.dying = warn;
if (warn)
km_state_expired(x, 0);
resched:
@@ -169,9 +170,8 @@ expired:
next = 2;
goto resched;
}
- if (x->id.spi != 0)
+ if (!__xfrm_state_delete(x) && x->id.spi)
km_state_expired(x, 1);
- __xfrm_state_delete(x);
out:
spin_unlock(&x->lock);
@@ -215,8 +215,10 @@ void __xfrm_state_destroy(struct xfrm_state *x)
}
EXPORT_SYMBOL(__xfrm_state_destroy);
-static void __xfrm_state_delete(struct xfrm_state *x)
+static int __xfrm_state_delete(struct xfrm_state *x)
{
+ int err = -ESRCH;
+
if (x->km.state != XFRM_STATE_DEAD) {
x->km.state = XFRM_STATE_DEAD;
spin_lock(&xfrm_state_lock);
@@ -245,14 +247,21 @@ static void __xfrm_state_delete(struct xfrm_state *x)
* is what we are dropping here.
*/
atomic_dec(&x->refcnt);
+ err = 0;
}
+
+ return err;
}
-void xfrm_state_delete(struct xfrm_state *x)
+int xfrm_state_delete(struct xfrm_state *x)
{
+ int err;
+
spin_lock_bh(&x->lock);
- __xfrm_state_delete(x);
+ err = __xfrm_state_delete(x);
spin_unlock_bh(&x->lock);
+
+ return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
@@ -557,16 +566,18 @@ int xfrm_state_check_expire(struct xfrm_state *x)
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
- km_state_expired(x, 1);
- if (!mod_timer(&x->timer, jiffies + XFRM_ACQ_EXPIRES*HZ))
+ x->km.state = XFRM_STATE_EXPIRED;
+ if (!mod_timer(&x->timer, jiffies))
xfrm_state_hold(x);
return -EINVAL;
}
if (!x->km.dying &&
(x->curlft.bytes >= x->lft.soft_byte_limit ||
- x->curlft.packets >= x->lft.soft_packet_limit))
+ x->curlft.packets >= x->lft.soft_packet_limit)) {
+ x->km.dying = 1;
km_state_expired(x, 0);
+ }
return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
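
Hard-limit hits no longer notify inline. Marking the state expired and firing the timer at once routes deletion and the hard-expire event through the timer handler changed earlier, where km_state_expired(x, 1) runs only if __xfrm_state_delete() succeeded. A distilled sketch of the resulting check, with the reasoning as comments (not the committed function verbatim):

    static int lifetime_check(struct xfrm_state *x)
    {
    	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
    	    x->curlft.packets >= x->lft.hard_packet_limit) {
    		x->km.state = XFRM_STATE_EXPIRED;
    		/* mod_timer() returns 0 if the timer was idle: take the
    		 * reference the handler will drop, then let it delete
    		 * and notify in exactly one place.
    		 */
    		if (!mod_timer(&x->timer, jiffies))
    			xfrm_state_hold(x);
    		return -EINVAL;			/* stop using this SA */
    	}

    	if (!x->km.dying &&
    	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
    	     x->curlft.packets >= x->lft.soft_packet_limit)) {
    		x->km.dying = 1;		/* warn only once */
    		km_state_expired(x, 0);		/* soft expire */
    	}
    	return 0;
    }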
@@ -796,34 +807,56 @@ EXPORT_SYMBOL(xfrm_replay_advance);
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
-static void km_state_expired(struct xfrm_state *x, int hard)
+void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
struct xfrm_mgr *km;
- if (hard)
- x->km.state = XFRM_STATE_EXPIRED;
- else
- x->km.dying = 1;
+ read_lock(&xfrm_km_lock);
+ list_for_each_entry(km, &xfrm_km_list, list)
+ if (km->notify_policy)
+ km->notify_policy(xp, dir, c);
+ read_unlock(&xfrm_km_lock);
+}
+void km_state_notify(struct xfrm_state *x, struct km_event *c)
+{
+ struct xfrm_mgr *km;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list)
- km->notify(x, hard);
+ if (km->notify)
+ km->notify(x, c);
read_unlock(&xfrm_km_lock);
+}
+
+EXPORT_SYMBOL(km_policy_notify);
+EXPORT_SYMBOL(km_state_notify);
+
+static void km_state_expired(struct xfrm_state *x, int hard)
+{
+ struct km_event c;
+
+ c.data.hard = hard;
+ c.event = XFRM_MSG_EXPIRE;
+ km_state_notify(x, &c);
if (hard)
wake_up(&km_waitq);
}
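
These notifiers thread a struct km_event through to every registered key manager. Its declaration lives in include/net/xfrm.h and is not part of this section; the shape below is inferred from the uses in this patch, and the field types are assumptions:

    /* Assumed shape of struct km_event, inferred from its uses here;
     * the real declaration is in include/net/xfrm.h (not shown).
     */
    struct km_event {
    	union {
    		u32 hard;	/* EXPIRE/POLEXPIRE: hard vs. soft     */
    		u32 proto;	/* FLUSHSA: protocol that was flushed  */
    		u32 byid;	/* DELPOLICY: deleted by index, not    */
    	} data;			/*            by selector              */
    	u32 seq;		/* netlink sequence number to echo */
    	u32 pid;		/* netlink pid of the requester    */
    	u32 event;		/* XFRM_MSG_* message type         */
    };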
+/*
+ * We send to all registered managers regardless of failure;
+ * we are happy with one success.
+ */
static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
- int err = -EINVAL;
+ int err = -EINVAL, acqret;
struct xfrm_mgr *km;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list) {
- err = km->acquire(x, t, pol, XFRM_POLICY_OUT);
- if (!err)
- break;
+ acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
+ if (!acqret)
+ err = acqret;
}
read_unlock(&xfrm_km_lock);
return err;
@@ -848,13 +881,11 @@ EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
{
- struct xfrm_mgr *km;
+ struct km_event c;
- read_lock(&xfrm_km_lock);
- list_for_each_entry(km, &xfrm_km_list, list)
- if (km->notify_policy)
- km->notify_policy(pol, dir, hard);
- read_unlock(&xfrm_km_lock);
+ c.data.hard = hard;
+ c.event = XFRM_MSG_POLEXPIRE;
+ km_policy_notify(pol, dir, &c);
if (hard)
wake_up(&km_waitq);
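
Both in-tree key managers (af_key and xfrm_user) now register against the same event-driven callbacks. A minimal manager under the new signatures might look like the sketch below; note from the loops above that notify and notify_policy may be NULL, while km_query() invokes acquire unconditionally, so a registered manager should at least stub it:

    /* Minimal key manager; a sketch, not an in-tree example. */
    static int my_acquire(struct xfrm_state *x, struct xfrm_tmpl *t,
    		      struct xfrm_policy *xp, int dir)
    {
    	return -EINVAL;		/* decline; another manager may accept */
    }

    static int my_state_notify(struct xfrm_state *x, struct km_event *c)
    {
    	/* x is NULL for XFRM_MSG_FLUSHSA events */
    	return 0;
    }

    static int my_policy_notify(struct xfrm_policy *xp, int dir,
    			    struct km_event *c)
    {
    	/* xp is NULL for XFRM_MSG_FLUSHPOLICY events */
    	return 0;
    }

    static struct xfrm_mgr my_mgr = {
    	.id		= "my_km",
    	.acquire	= my_acquire,
    	.notify		= my_state_notify,
    	.notify_policy	= my_policy_notify,
    };

    /* registered with xfrm_register_km(&my_mgr) at init time */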
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 97509011c27..5ce8558eac9 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -277,6 +277,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
struct xfrm_usersa_info *p = NLMSG_DATA(nlh);
struct xfrm_state *x;
int err;
+ struct km_event c;
err = verify_newsa_info(p, (struct rtattr **) xfrma);
if (err)
@@ -286,6 +287,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
if (!x)
return err;
+ xfrm_state_hold(x);
if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
err = xfrm_state_add(x);
else
@@ -294,14 +296,24 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
if (err < 0) {
x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
+ goto out;
}
+ c.seq = nlh->nlmsg_seq;
+ c.pid = nlh->nlmsg_pid;
+ c.event = nlh->nlmsg_type;
+
+ km_state_notify(x, &c);
+out:
+ xfrm_state_put(x);
return err;
}
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
{
struct xfrm_state *x;
+ int err;
+ struct km_event c;
struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
@@ -313,10 +325,19 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
return -EPERM;
}
- xfrm_state_delete(x);
+ err = xfrm_state_delete(x);
+ if (err < 0) {
+ xfrm_state_put(x);
+ return err;
+ }
+
+ c.seq = nlh->nlmsg_seq;
+ c.pid = nlh->nlmsg_pid;
+ c.event = nlh->nlmsg_type;
+ km_state_notify(x, &c);
xfrm_state_put(x);
- return 0;
+ return err;
}
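
Every configuration handler in this file now fills the event from the triggering nlmsghdr the same way before broadcasting, so listeners can correlate a notification with the request that caused it. A hypothetical helper, not part of this patch, that names the pattern:

    /* Hypothetical helper: xfrm_add_sa, xfrm_del_sa, xfrm_add_policy,
     * xfrm_get_policy and both flush handlers all fill the event from
     * the triggering header this way, echoing the requester's
     * type/seq/pid in the broadcast.
     */
    static void km_event_from_nlh(struct km_event *c, const struct nlmsghdr *nlh)
    {
    	c->event = nlh->nlmsg_type;	/* e.g. XFRM_MSG_DELSA */
    	c->seq   = nlh->nlmsg_seq;
    	c->pid   = nlh->nlmsg_pid;
    }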
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
@@ -681,6 +702,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
{
struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh);
struct xfrm_policy *xp;
+ struct km_event c;
int err;
int excl;
@@ -692,6 +714,10 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
if (!xp)
return err;
+	/* Shouldn't excl be based on nlh flags?
+	 * Aha! This is anti-netlink really, i.e. more pfkey-derived;
+	 * in netlink excl is a flag and you wouldn't need
+	 * a type XFRM_MSG_UPDPOLICY. - JHS */
excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
err = xfrm_policy_insert(p->dir, xp, excl);
if (err) {
@@ -699,6 +725,11 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
return err;
}
+ c.event = nlh->nlmsg_type;
+ c.seq = nlh->nlmsg_seq;
+ c.pid = nlh->nlmsg_pid;
+ km_policy_notify(xp, p->dir, &c);
+
xfrm_pol_put(xp);
return 0;
@@ -816,6 +847,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
struct xfrm_policy *xp;
struct xfrm_userpolicy_id *p;
int err;
+ struct km_event c;
int delete;
p = NLMSG_DATA(nlh);
@@ -843,6 +875,12 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
NETLINK_CB(skb).pid,
MSG_DONTWAIT);
}
+ } else {
+ c.data.byid = p->index;
+ c.event = nlh->nlmsg_type;
+ c.seq = nlh->nlmsg_seq;
+ c.pid = nlh->nlmsg_pid;
+ km_policy_notify(xp, p->dir, &c);
}
xfrm_pol_put(xp);
@@ -852,15 +890,28 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
{
+ struct km_event c;
struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
xfrm_state_flush(p->proto);
+ c.data.proto = p->proto;
+ c.event = nlh->nlmsg_type;
+ c.seq = nlh->nlmsg_seq;
+ c.pid = nlh->nlmsg_pid;
+ km_state_notify(NULL, &c);
+
return 0;
}
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
{
+ struct km_event c;
+
xfrm_policy_flush();
+ c.event = nlh->nlmsg_type;
+ c.seq = nlh->nlmsg_seq;
+ c.pid = nlh->nlmsg_pid;
+ km_policy_notify(NULL, 0, &c);
return 0;
}
@@ -1069,15 +1120,16 @@ nlmsg_failure:
return -1;
}
-static int xfrm_send_state_notify(struct xfrm_state *x, int hard)
+static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
{
struct sk_buff *skb;
+ int len = NLMSG_LENGTH(sizeof(struct xfrm_user_expire));
- skb = alloc_skb(sizeof(struct xfrm_user_expire) + 16, GFP_ATOMIC);
+ skb = alloc_skb(len, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
- if (build_expire(skb, x, hard) < 0)
+ if (build_expire(skb, x, c->data.hard) < 0)
BUG();
NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;
@@ -1085,6 +1137,131 @@ static int xfrm_send_state_notify(struct xfrm_state *x, int hard)
return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
}
+static int xfrm_notify_sa_flush(struct km_event *c)
+{
+ struct xfrm_usersa_flush *p;
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ unsigned char *b;
+ int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+ b = skb->tail;
+
+ nlh = NLMSG_PUT(skb, c->pid, c->seq,
+ XFRM_MSG_FLUSHSA, sizeof(*p));
+ nlh->nlmsg_flags = 0;
+
+ p = NLMSG_DATA(nlh);
+ p->proto = c->data.proto;
+
+ nlh->nlmsg_len = skb->tail - b;
+
+ return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_SA, GFP_ATOMIC);
+
+nlmsg_failure:
+ kfree_skb(skb);
+ return -1;
+}
+
+static inline int xfrm_sa_len(struct xfrm_state *x)
+{
+ int l = 0;
+ if (x->aalg)
+ l += RTA_SPACE(sizeof(*x->aalg) + (x->aalg->alg_key_len+7)/8);
+ if (x->ealg)
+ l += RTA_SPACE(sizeof(*x->ealg) + (x->ealg->alg_key_len+7)/8);
+ if (x->calg)
+ l += RTA_SPACE(sizeof(*x->calg));
+ if (x->encap)
+ l += RTA_SPACE(sizeof(*x->encap));
+
+ return l;
+}
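
Key lengths are stored in bits, hence the (alg_key_len + 7) / 8 round-up to bytes when sizing the optional attributes. A worked example with illustrative algorithms (sizes for illustration only):

    /* Illustrative only: an SA carrying HMAC-SHA1 auth (160-bit key)
     * and AES-128 crypto (128-bit key) yields
     *
     *   l = RTA_SPACE(sizeof(struct xfrm_algo) + (160 + 7) / 8)   // +20 bytes
     *     + RTA_SPACE(sizeof(struct xfrm_algo) + (128 + 7) / 8)   // +16 bytes
     *
     * xfrm_notify_sa() below then adds NLMSG_SPACE(headlen), and for
     * XFRM_MSG_DELSA also RTA_SPACE(sizeof(struct xfrm_usersa_info)),
     * since the full SA rides behind the small xfrm_usersa_id as an
     * XFRMA_SA attribute.
     */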
+
+static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
+{
+ struct xfrm_usersa_info *p;
+ struct xfrm_usersa_id *id;
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ unsigned char *b;
+ int len = xfrm_sa_len(x);
+ int headlen;
+
+ headlen = sizeof(*p);
+ if (c->event == XFRM_MSG_DELSA) {
+ len += RTA_SPACE(headlen);
+ headlen = sizeof(*id);
+ }
+ len += NLMSG_SPACE(headlen);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+ b = skb->tail;
+
+ nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);
+ nlh->nlmsg_flags = 0;
+
+ p = NLMSG_DATA(nlh);
+ if (c->event == XFRM_MSG_DELSA) {
+ id = NLMSG_DATA(nlh);
+ memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
+ id->spi = x->id.spi;
+ id->family = x->props.family;
+ id->proto = x->id.proto;
+
+ p = RTA_DATA(__RTA_PUT(skb, XFRMA_SA, sizeof(*p)));
+ }
+
+ copy_to_user_state(x, p);
+
+ if (x->aalg)
+ RTA_PUT(skb, XFRMA_ALG_AUTH,
+ sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
+ if (x->ealg)
+ RTA_PUT(skb, XFRMA_ALG_CRYPT,
+ sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
+ if (x->calg)
+ RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+
+ if (x->encap)
+ RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+
+ nlh->nlmsg_len = skb->tail - b;
+
+ return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_SA, GFP_ATOMIC);
+
+nlmsg_failure:
+rtattr_failure:
+ kfree_skb(skb);
+ return -1;
+}
+
+static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
+{
+ switch (c->event) {
+ case XFRM_MSG_EXPIRE:
+ return xfrm_exp_state_notify(x, c);
+ case XFRM_MSG_DELSA:
+ case XFRM_MSG_UPDSA:
+ case XFRM_MSG_NEWSA:
+ return xfrm_notify_sa(x, c);
+ case XFRM_MSG_FLUSHSA:
+ return xfrm_notify_sa_flush(c);
+ default:
+		printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n", c->event);
+ break;
+ }
+
+ return 0;
+}
+
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_tmpl *xt, struct xfrm_policy *xp,
int dir)
@@ -1218,7 +1395,7 @@ nlmsg_failure:
return -1;
}
-static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, int hard)
+static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
struct sk_buff *skb;
size_t len;
@@ -1229,7 +1406,7 @@ static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, int hard)
if (skb == NULL)
return -ENOMEM;
- if (build_polexpire(skb, xp, dir, hard) < 0)
+ if (build_polexpire(skb, xp, dir, c->data.hard) < 0)
BUG();
NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;
@@ -1237,6 +1414,103 @@ static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, int hard)
return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
}
+static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
+{
+ struct xfrm_userpolicy_info *p;
+ struct xfrm_userpolicy_id *id;
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ unsigned char *b;
+ int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
+ int headlen;
+
+ headlen = sizeof(*p);
+ if (c->event == XFRM_MSG_DELPOLICY) {
+ len += RTA_SPACE(headlen);
+ headlen = sizeof(*id);
+ }
+ len += NLMSG_SPACE(headlen);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+ b = skb->tail;
+
+ nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);
+
+ p = NLMSG_DATA(nlh);
+ if (c->event == XFRM_MSG_DELPOLICY) {
+ id = NLMSG_DATA(nlh);
+ memset(id, 0, sizeof(*id));
+ id->dir = dir;
+ if (c->data.byid)
+ id->index = xp->index;
+ else
+ memcpy(&id->sel, &xp->selector, sizeof(id->sel));
+
+ p = RTA_DATA(__RTA_PUT(skb, XFRMA_POLICY, sizeof(*p)));
+ }
+
+ nlh->nlmsg_flags = 0;
+
+ copy_to_user_policy(xp, p, dir);
+ if (copy_to_user_tmpl(xp, skb) < 0)
+ goto nlmsg_failure;
+
+ nlh->nlmsg_len = skb->tail - b;
+
+ return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_POLICY, GFP_ATOMIC);
+
+nlmsg_failure:
+rtattr_failure:
+ kfree_skb(skb);
+ return -1;
+}
+
+static int xfrm_notify_policy_flush(struct km_event *c)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ unsigned char *b;
+ int len = NLMSG_LENGTH(0);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+ b = skb->tail;
+
+ nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0);
+
+ nlh->nlmsg_len = skb->tail - b;
+
+ return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_POLICY, GFP_ATOMIC);
+
+nlmsg_failure:
+ kfree_skb(skb);
+ return -1;
+}
+
+static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+{
+ switch (c->event) {
+ case XFRM_MSG_NEWPOLICY:
+ case XFRM_MSG_UPDPOLICY:
+ case XFRM_MSG_DELPOLICY:
+ return xfrm_notify_policy(xp, dir, c);
+ case XFRM_MSG_FLUSHPOLICY:
+ return xfrm_notify_policy_flush(c);
+ case XFRM_MSG_POLEXPIRE:
+ return xfrm_exp_policy_notify(xp, dir, c);
+ default:
+		printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", c->event);
+ }
+
+ return 0;
+}
+
static struct xfrm_mgr netlink_mgr = {
.id = "netlink",
.notify = xfrm_send_state_notify,
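
netlink_mgr is the xfrm_user side of the manager interface; its broadcasts land in the XFRMGRP_SA, XFRMGRP_POLICY and XFRMGRP_EXPIRE multicast groups. A minimal userspace listener, as a sketch with error handling elided (NETLINK_XFRM and the XFRMGRP_* group bits come from <linux/netlink.h> and <linux/xfrm.h>):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/xfrm.h>

    int main(void)
    {
    	struct sockaddr_nl snl;
    	char buf[4096];
    	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);

    	/* subscribe to SA, policy and expire broadcasts */
    	memset(&snl, 0, sizeof(snl));
    	snl.nl_family = AF_NETLINK;
    	snl.nl_groups = XFRMGRP_SA | XFRMGRP_POLICY | XFRMGRP_EXPIRE;
    	bind(fd, (struct sockaddr *)&snl, sizeof(snl));

    	for (;;) {
    		int len = recv(fd, buf, sizeof(buf), 0);
    		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

    		/* one datagram may carry several messages */
    		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len))
    			printf("xfrm event %u (seq %u, pid %u)\n",
    			       nlh->nlmsg_type, nlh->nlmsg_seq,
    			       nlh->nlmsg_pid);
    	}
    }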
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index f0fb6d76f7c..92b057becb4 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -63,6 +63,8 @@ static struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_GETPREFIX, NETLINK_ROUTE_SOCKET__NLMSG_READ },
{ RTM_GETMULTICAST, NETLINK_ROUTE_SOCKET__NLMSG_READ },
{ RTM_GETANYCAST, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_GETNEIGHTBL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_SETNEIGHTBL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
};
static struct nlmsg_perm nlmsg_firewall_perms[] =