author    Eric Dumazet <dada1@cosmosbay.com>           2007-03-04 16:05:44 -0800
committer David S. Miller <davem@sunset.davemloft.net> 2007-04-25 22:23:27 -0700
commit    fa438ccfdfd3f6db02c13b61b21454eb81cd6a13 (patch)
tree      a1759259d7543586185e2fb9db21461147944f18
parent    e317f6f69cb95527799d308a9421b7dc1252989a (diff)
[NET]: Keep sk_backlog near sk_lock
sk_backlog is a critical field of struct sock. (known famous words)

It is (ab)used in hot paths, in particular in release_sock(), tcp_recvmsg(), tcp_v4_rcv() and sk_receive_skb().

It really makes sense to place it next to sk_lock, because sk_backlog is only used after sk_lock is locked (so the memory cache line is already hot in the L1 cache). This should reduce cache misses and sk_lock acquisition time.

(In theory we could move only the head pointer near sk_lock and leave tail far away, because 'tail' is normally not so hot, but keep it simple :))

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
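Illustration (not part of the patch): a minimal, self-contained C sketch of the layout idea, keeping the backlog head/tail right next to the lock that guards them. The toy_sock and pkt types, field names other than the lock/backlog pair, and the offsetof check are invented for this sketch, not kernel code.

/* Stripped-down stand-in for struct sock showing why sk_backlog is
 * placed immediately after sk_lock: both are touched together in the
 * hot paths (take the lock, then inspect/append the backlog), so
 * keeping them adjacent lets one cache line fetch serve both. */
#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

struct pkt {                        /* stand-in for struct sk_buff */
	struct pkt *next;
	int len;
};

struct toy_sock {
	int             rcvbuf;
	pthread_mutex_t lock;       /* stand-in for socket_lock_t sk_lock  */
	struct {                    /* backlog, used only with 'lock' held */
		struct pkt *head;
		struct pkt *tail;
	} backlog;
	/* colder fields follow, far from the lock */
	unsigned long flags;
	unsigned long lingertime;
};

int main(void)
{
	/* The adjacency is the whole point: lock and backlog land within
	 * a few bytes of each other, typically on the same cache line. */
	printf("offsetof lock    = %zu\n", offsetof(struct toy_sock, lock));
	printf("offsetof backlog = %zu\n", offsetof(struct toy_sock, backlog));
	return 0;
}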
-rw-r--r--  include/net/sock.h | 18
-rw-r--r--  net/core/sock.c    |  2
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 2c7d60ca354..a3366c3c837 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -202,6 +202,15 @@ struct sock {
unsigned short sk_type;
int sk_rcvbuf;
socket_lock_t sk_lock;
+ /*
+ * The backlog queue is special, it is always used with
+ * the per-socket spinlock held and requires low latency
+ * access. Therefore we special case it's implementation.
+ */
+ struct {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ } sk_backlog;
wait_queue_head_t *sk_sleep;
struct dst_entry *sk_dst_cache;
struct xfrm_policy *sk_policy[2];
@@ -221,15 +230,6 @@ struct sock {
int sk_rcvlowat;
unsigned long sk_flags;
unsigned long sk_lingertime;
- /*
- * The backlog queue is special, it is always used with
- * the per-socket spinlock held and requires low latency
- * access. Therefore we special case it's implementation.
- */
- struct {
- struct sk_buff *head;
- struct sk_buff *tail;
- } sk_backlog;
struct sk_buff_head sk_error_queue;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
diff --git a/net/core/sock.c b/net/core/sock.c
index 27c4f62382b..6d35d5775ba 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -904,6 +904,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
sk_node_init(&newsk->sk_node);
sock_lock_init(newsk);
bh_lock_sock(newsk);
+ newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
atomic_set(&newsk->sk_rmem_alloc, 0);
atomic_set(&newsk->sk_wmem_alloc, 0);
@@ -923,7 +924,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
newsk->sk_send_head = NULL;
- newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
sock_reset_flag(newsk, SOCK_DONE);
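For context, a hedged user-space sketch of the access pattern the comment in the hunk refers to: packets are chained onto the backlog only while the socket lock is held, and the release path drains that chain, so every touch of head/tail sits right next to a lock operation. The names backlog_enqueue/backlog_drain and the toy_sock/pkt types are invented for this illustration; they are not the kernel's APIs (compile with -pthread).

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct pkt { struct pkt *next; int len; };

struct toy_sock {
	pthread_mutex_t lock;                 /* plays the role of sk_lock */
	struct { struct pkt *head, *tail; } backlog;
};

/* Called with sk->lock held, in the spirit of sk_add_backlog(). */
static void backlog_enqueue(struct toy_sock *sk, struct pkt *p)
{
	p->next = NULL;
	if (!sk->backlog.tail)
		sk->backlog.head = p;
	else
		sk->backlog.tail->next = p;
	sk->backlog.tail = p;
}

/* Mirrors the release_sock() pattern: take the lock, detach the whole
 * backlog, then walk it.  Lock and backlog head/tail are accessed
 * back-to-back, which is why adjacency in memory pays off. */
static void backlog_drain(struct toy_sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	struct pkt *p = sk->backlog.head;
	sk->backlog.head = sk->backlog.tail = NULL;
	pthread_mutex_unlock(&sk->lock);

	while (p) {
		struct pkt *next = p->next;
		printf("processing packet of len %d\n", p->len);
		free(p);
		p = next;
	}
}

int main(void)
{
	struct toy_sock sk = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&sk.lock);
	for (int i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));
		p->len = 100 + i;
		backlog_enqueue(&sk, p);  /* enqueue while socket is "owned" */
	}
	pthread_mutex_unlock(&sk.lock);

	backlog_drain(&sk);               /* drain, as release_sock() does */
	return 0;
}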