author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-06-17 10:24:53 +1000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-06-17 10:24:53 +1000
commit     492b057c426e4aa747484958e18e9da29003985d (patch)
tree       34e08c24618688d8bcc190523028b5f94cce0c0b /net/packet
parent     313485175da221c388f6a8ecf4c30062ba9bea17 (diff)
parent     300df7dc89cc276377fc020704e34875d5c473b6 (diff)
Merge commit 'origin/master' into next
Diffstat (limited to 'net/packet')
-rw-r--r--  net/packet/af_packet.c  604
1 file changed, 487 insertions(+), 117 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f546e81acc4..4f76e5552d8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -39,6 +39,7 @@
* will simply extend the hardware address
* byte arrays at the end of sockaddr_ll
* and packet_mreq.
+ * Johann Baudy : Added TX RING.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -157,7 +158,25 @@ struct packet_mreq_max
};
#ifdef CONFIG_PACKET_MMAP
-static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
+static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
+ int closing, int tx_ring);
+
+struct packet_ring_buffer {
+ char * *pg_vec;
+ unsigned int head;
+ unsigned int frames_per_block;
+ unsigned int frame_size;
+ unsigned int frame_max;
+
+ unsigned int pg_vec_order;
+ unsigned int pg_vec_pages;
+ unsigned int pg_vec_len;
+
+ atomic_t pending;
+};
+
+struct packet_sock;
+static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
#endif
static void packet_flush_mclist(struct sock *sk);
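
The new struct packet_ring_buffer carries everything one ring needs, so the RX and TX paths can each own an instance. A ring is pg_vec_len blocks, each block holding frames_per_block frames of frame_size bytes, and a linear frame position is resolved with a divide and a modulo — the same arithmetic packet_lookup_frame() uses below. A minimal userspace sketch of that indexing (the struct and helper names here are ours, for illustration only):

#include <stddef.h>

struct ring_view {
	char **pg_vec;                  /* one pointer per block */
	unsigned int frames_per_block;
	unsigned int frame_size;
};

static void *ring_frame(const struct ring_view *rb, unsigned int position)
{
	unsigned int block  = position / rb->frames_per_block;
	unsigned int offset = position % rb->frames_per_block;

	return rb->pg_vec[block] + (size_t)offset * rb->frame_size;
}
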
@@ -167,11 +186,8 @@ struct packet_sock {
struct sock sk;
struct tpacket_stats stats;
#ifdef CONFIG_PACKET_MMAP
- char * *pg_vec;
- unsigned int head;
- unsigned int frames_per_block;
- unsigned int frame_size;
- unsigned int frame_max;
+ struct packet_ring_buffer rx_ring;
+ struct packet_ring_buffer tx_ring;
int copy_thresh;
#endif
struct packet_type prot_hook;
@@ -185,12 +201,10 @@ struct packet_sock {
struct packet_mclist *mclist;
#ifdef CONFIG_PACKET_MMAP
atomic_t mapped;
- unsigned int pg_vec_order;
- unsigned int pg_vec_pages;
- unsigned int pg_vec_len;
enum tpacket_versions tp_version;
unsigned int tp_hdrlen;
unsigned int tp_reserve;
+ unsigned int tp_loss:1;
#endif
};
@@ -206,36 +220,33 @@ struct packet_skb_cb {
#ifdef CONFIG_PACKET_MMAP
-static void *packet_lookup_frame(struct packet_sock *po, unsigned int position,
- int status)
+static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
- unsigned int pg_vec_pos, frame_offset;
union {
struct tpacket_hdr *h1;
struct tpacket2_hdr *h2;
void *raw;
} h;
- pg_vec_pos = position / po->frames_per_block;
- frame_offset = position % po->frames_per_block;
-
- h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
+ h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
- if (status != (h.h1->tp_status ? TP_STATUS_USER :
- TP_STATUS_KERNEL))
- return NULL;
+ h.h1->tp_status = status;
+ flush_dcache_page(virt_to_page(&h.h1->tp_status));
break;
case TPACKET_V2:
- if (status != (h.h2->tp_status ? TP_STATUS_USER :
- TP_STATUS_KERNEL))
- return NULL;
+ h.h2->tp_status = status;
+ flush_dcache_page(virt_to_page(&h.h2->tp_status));
break;
+ default:
+ printk(KERN_ERR "TPACKET version not supported\n");
+ BUG();
}
- return h.raw;
+
+ smp_wmb();
}
-static void __packet_set_status(struct packet_sock *po, void *frame, int status)
+static int __packet_get_status(struct packet_sock *po, void *frame)
{
union {
struct tpacket_hdr *h1;
@@ -243,16 +254,66 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
void *raw;
} h;
+ smp_rmb();
+
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
- h.h1->tp_status = status;
- break;
+ flush_dcache_page(virt_to_page(&h.h1->tp_status));
+ return h.h1->tp_status;
case TPACKET_V2:
- h.h2->tp_status = status;
- break;
+ flush_dcache_page(virt_to_page(&h.h2->tp_status));
+ return h.h2->tp_status;
+ default:
+ printk(KERN_ERR "TPACKET version not supported\n");
+ BUG();
+ return 0;
}
}
+
+static void *packet_lookup_frame(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ unsigned int position,
+ int status)
+{
+ unsigned int pg_vec_pos, frame_offset;
+ union {
+ struct tpacket_hdr *h1;
+ struct tpacket2_hdr *h2;
+ void *raw;
+ } h;
+
+ pg_vec_pos = position / rb->frames_per_block;
+ frame_offset = position % rb->frames_per_block;
+
+ h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);
+
+ if (status != __packet_get_status(po, h.raw))
+ return NULL;
+
+ return h.raw;
+}
+
+static inline void *packet_current_frame(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ int status)
+{
+ return packet_lookup_frame(po, rb, rb->head, status);
+}
+
+static inline void *packet_previous_frame(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ int status)
+{
+ unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
+ return packet_lookup_frame(po, rb, previous, status);
+}
+
+static inline void packet_increment_head(struct packet_ring_buffer *buff)
+{
+ buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
+}
+
#endif
static inline struct packet_sock *pkt_sk(struct sock *sk)
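
__packet_set_status() publishes a frame with smp_wmb() after writing it, and __packet_get_status() issues smp_rmb() before trusting tp_status, so payload writes cannot be reordered past the status flip, nor payload reads hoisted above the status check. Viewed from userspace, the RX side of the same handshake looks roughly like the sketch below (TPACKET_V2 header assumed; the helper names and the use of __sync_synchronize() are our illustration, not a prescribed API):

#include <linux/if_packet.h>

static int frame_ready(volatile struct tpacket2_hdr *hdr)
{
	if (!(hdr->tp_status & TP_STATUS_USER))
		return 0;
	__sync_synchronize();   /* read barrier: pairs with kernel smp_wmb() */
	return 1;
}

static void frame_release(volatile struct tpacket2_hdr *hdr)
{
	__sync_synchronize();   /* finish our reads before handing back */
	hdr->tp_status = TP_STATUS_KERNEL;
}
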
@@ -311,8 +372,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
goto oom;
/* drop any routing info */
- dst_release(skb->dst);
- skb->dst = NULL;
+ skb_dst_drop(skb);
/* drop conntrack reference */
nf_reset(skb);
@@ -560,8 +620,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
skb_set_owner_r(skb, sk);
skb->dev = NULL;
- dst_release(skb->dst);
- skb->dst = NULL;
+ skb_dst_drop(skb);
/* drop conntrack reference */
nf_reset(skb);
@@ -648,7 +707,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
macoff = netoff - maclen;
}
- if (macoff + snaplen > po->frame_size) {
+ if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
(unsigned)sk->sk_rcvbuf) {
@@ -661,16 +720,16 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
if (copy_skb)
skb_set_owner_r(copy_skb, sk);
}
- snaplen = po->frame_size - macoff;
+ snaplen = po->rx_ring.frame_size - macoff;
if ((int)snaplen < 0)
snaplen = 0;
}
spin_lock(&sk->sk_receive_queue.lock);
- h.raw = packet_lookup_frame(po, po->head, TP_STATUS_KERNEL);
+ h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
if (!h.raw)
goto ring_is_full;
- po->head = po->head != po->frame_max ? po->head+1 : 0;
+ packet_increment_head(&po->rx_ring);
po->stats.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
@@ -727,7 +786,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
__packet_set_status(po, h.raw, status);
smp_mb();
-
{
struct page *p_start, *p_end;
u8 *h_end = h.raw + macoff + snaplen - 1;
@@ -760,10 +818,249 @@ ring_is_full:
goto drop_n_restore;
}
-#endif
+static void tpacket_destruct_skb(struct sk_buff *skb)
+{
+ struct packet_sock *po = pkt_sk(skb->sk);
+ void * ph;
+ BUG_ON(skb == NULL);
-static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
+ if (likely(po->tx_ring.pg_vec)) {
+ ph = skb_shinfo(skb)->destructor_arg;
+ BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
+ BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
+ atomic_dec(&po->tx_ring.pending);
+ __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
+ }
+
+ sock_wfree(skb);
+}
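
tpacket_destruct_skb() is what eventually flips a TX slot from TP_STATUS_SENDING back to TP_STATUS_AVAILABLE once the driver has consumed the skb, so userspace detects completion purely by re-reading tp_status. A hedged sketch of that wait (TPACKET_V2 header assumed; busy-polling only for illustration — real code would use poll(), as shown further down):

#include <sched.h>
#include <linux/if_packet.h>

static void wait_tx_slot(volatile struct tpacket2_hdr *hdr)
{
	while (hdr->tp_status != TP_STATUS_AVAILABLE)
		sched_yield();          /* give the destructor a chance to run */
	__sync_synchronize();           /* pairs with the kernel's smp_wmb() */
}
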
+
+static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb,
+ void * frame, struct net_device *dev, int size_max,
+ __be16 proto, unsigned char * addr)
+{
+ union {
+ struct tpacket_hdr *h1;
+ struct tpacket2_hdr *h2;
+ void *raw;
+ } ph;
+ int to_write, offset, len, tp_len, nr_frags, len_max;
+ struct socket *sock = po->sk.sk_socket;
+ struct page *page;
+ void *data;
+ int err;
+
+ ph.raw = frame;
+
+ skb->protocol = proto;
+ skb->dev = dev;
+ skb->priority = po->sk.sk_priority;
+ skb_shinfo(skb)->destructor_arg = ph.raw;
+
+ switch (po->tp_version) {
+ case TPACKET_V2:
+ tp_len = ph.h2->tp_len;
+ break;
+ default:
+ tp_len = ph.h1->tp_len;
+ break;
+ }
+ if (unlikely(tp_len > size_max)) {
+ printk(KERN_ERR "packet size is too long (%d > %d)\n",
+ tp_len, size_max);
+ return -EMSGSIZE;
+ }
+
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reset_network_header(skb);
+
+ data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
+ to_write = tp_len;
+
+ if (sock->type == SOCK_DGRAM) {
+ err = dev_hard_header(skb, dev, ntohs(proto), addr,
+ NULL, tp_len);
+ if (unlikely(err < 0))
+ return -EINVAL;
+ } else if (dev->hard_header_len) {
+ /* net device doesn't like empty head */
+ if (unlikely(tp_len <= dev->hard_header_len)) {
+ printk(KERN_ERR "packet size is too short "
+ "(%d < %d)\n", tp_len,
+ dev->hard_header_len);
+ return -EINVAL;
+ }
+
+ skb_push(skb, dev->hard_header_len);
+ err = skb_store_bits(skb, 0, data,
+ dev->hard_header_len);
+ if (unlikely(err))
+ return err;
+
+ data += dev->hard_header_len;
+ to_write -= dev->hard_header_len;
+ }
+
+ err = -EFAULT;
+ page = virt_to_page(data);
+ offset = offset_in_page(data);
+ len_max = PAGE_SIZE - offset;
+ len = ((to_write > len_max) ? len_max : to_write);
+
+ skb->data_len = to_write;
+ skb->len += to_write;
+ skb->truesize += to_write;
+ atomic_add(to_write, &po->sk.sk_wmem_alloc);
+
+ while (likely(to_write)) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
+ printk(KERN_ERR "Packet exceed the number "
+ "of skb frags(%lu)\n",
+ MAX_SKB_FRAGS);
+ return -EFAULT;
+ }
+
+ flush_dcache_page(page);
+ get_page(page);
+ skb_fill_page_desc(skb,
+ nr_frags,
+ page++, offset, len);
+ to_write -= len;
+ offset = 0;
+ len_max = PAGE_SIZE;
+ len = ((to_write > len_max) ? len_max : to_write);
+ }
+
+ return tp_len;
+}
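
tpacket_fill_skb() attaches the frame's payload to the skb zero-copy, page by page: the first chunk runs from the payload start to the end of its page, and every later chunk starts page-aligned. A standalone sketch of that splitting arithmetic (PAGE_SZ and count_frags are ours, for illustration):

#include <stdio.h>

#define PAGE_SZ 4096u

static unsigned int count_frags(unsigned long data, unsigned int to_write)
{
	unsigned int offset = data & (PAGE_SZ - 1);   /* offset_in_page() */
	unsigned int len = PAGE_SZ - offset;          /* first, partial page */
	unsigned int frags = 0;

	while (to_write) {
		if (len > to_write)
			len = to_write;
		to_write -= len;
		frags++;
		len = PAGE_SZ;        /* subsequent chunks are page-aligned */
	}
	return frags;
}

int main(void)
{
	/* 9000 bytes starting 100 bytes into a page -> 3 frags */
	printf("%u\n", count_frags(100, 9000));
	return 0;
}
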
+
+static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+{
+ struct socket *sock;
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+ int ifindex, err, reserve = 0;
+ void * ph;
+ struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
+ int tp_len, size_max;
+ unsigned char *addr;
+ int len_sum = 0;
+ int status = 0;
+
+ sock = po->sk.sk_socket;
+
+ mutex_lock(&po->pg_vec_lock);
+
+ err = -EBUSY;
+ if (saddr == NULL) {
+ ifindex = po->ifindex;
+ proto = po->num;
+ addr = NULL;
+ } else {
+ err = -EINVAL;
+ if (msg->msg_namelen < sizeof(struct sockaddr_ll))
+ goto out;
+ if (msg->msg_namelen < (saddr->sll_halen
+ + offsetof(struct sockaddr_ll,
+ sll_addr)))
+ goto out;
+ ifindex = saddr->sll_ifindex;
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ }
+
+ dev = dev_get_by_index(sock_net(&po->sk), ifindex);
+ err = -ENXIO;
+ if (unlikely(dev == NULL))
+ goto out;
+
+ reserve = dev->hard_header_len;
+
+ err = -ENETDOWN;
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_put;
+
+ size_max = po->tx_ring.frame_size
+ - sizeof(struct skb_shared_info)
+ - po->tp_hdrlen
+ - LL_ALLOCATED_SPACE(dev)
+ - sizeof(struct sockaddr_ll);
+
+ if (size_max > dev->mtu + reserve)
+ size_max = dev->mtu + reserve;
+
+ do {
+ ph = packet_current_frame(po, &po->tx_ring,
+ TP_STATUS_SEND_REQUEST);
+
+ if (unlikely(ph == NULL)) {
+ schedule();
+ continue;
+ }
+
+ status = TP_STATUS_SEND_REQUEST;
+ skb = sock_alloc_send_skb(&po->sk,
+ LL_ALLOCATED_SPACE(dev)
+ + sizeof(struct sockaddr_ll),
+ 0, &err);
+
+ if (unlikely(skb == NULL))
+ goto out_status;
+
+ tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
+ addr);
+
+ if (unlikely(tp_len < 0)) {
+ if (po->tp_loss) {
+ __packet_set_status(po, ph,
+ TP_STATUS_AVAILABLE);
+ packet_increment_head(&po->tx_ring);
+ kfree_skb(skb);
+ continue;
+ } else {
+ status = TP_STATUS_WRONG_FORMAT;
+ err = tp_len;
+ goto out_status;
+ }
+ }
+
+ skb->destructor = tpacket_destruct_skb;
+ __packet_set_status(po, ph, TP_STATUS_SENDING);
+ atomic_inc(&po->tx_ring.pending);
+
+ status = TP_STATUS_SEND_REQUEST;
+ err = dev_queue_xmit(skb);
+ if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
+ goto out_xmit;
+ packet_increment_head(&po->tx_ring);
+ len_sum += tp_len;
+ }
+ while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
+ && (atomic_read(&po->tx_ring.pending))))
+ );
+
+ err = len_sum;
+ goto out_put;
+
+out_xmit:
+ skb->destructor = sock_wfree;
+ atomic_dec(&po->tx_ring.pending);
+out_status:
+ __packet_set_status(po, ph, status);
+ kfree_skb(skb);
+out_put:
+ dev_put(dev);
+out:
+ mutex_unlock(&po->pg_vec_lock);
+ return err;
+}
+#endif
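
Putting the TX path together, a minimal userspace sender against this interface would look roughly as follows. This is a hedged sketch: the ring geometry, TPACKET_V1 header layout, and payload offset follow Documentation/networking/packet_mmap.txt, error handling is omitted, and ifindex/pkt are placeholders.

#include <string.h>
#include <arpa/inet.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static int tx_one(int ifindex, const void *pkt, unsigned int len)
{
	struct tpacket_req req = {
		.tp_block_size = 4096, .tp_block_nr = 64,
		.tp_frame_size = 2048, .tp_frame_nr  = 128, /* 2 per block */
	};
	struct sockaddr_ll ll = {
		.sll_family   = AF_PACKET,
		.sll_protocol = htons(ETH_P_ALL),
		.sll_ifindex  = ifindex,
	};
	int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct tpacket_hdr *hdr;        /* TPACKET_V1 frame header */
	void *ring;

	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
	bind(fd, (struct sockaddr *)&ll, sizeof(ll));
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	hdr = ring;                     /* frame 0 of the TX ring */
	memcpy((char *)hdr + TPACKET_HDRLEN - sizeof(struct sockaddr_ll),
	       pkt, len);               /* payload where tpacket_fill_skb looks */
	hdr->tp_len = len;
	__sync_synchronize();
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	return send(fd, NULL, 0, 0);    /* kick tpacket_snd() */
}
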
+
+static int packet_snd(struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
@@ -854,6 +1151,19 @@ out:
return err;
}
+static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+#ifdef CONFIG_PACKET_MMAP
+ struct sock *sk = sock->sk;
+ struct packet_sock *po = pkt_sk(sk);
+ if (po->tx_ring.pg_vec)
+ return tpacket_snd(po, msg);
+ else
+#endif
+ return packet_snd(sock, msg, len);
+}
+
/*
* Close a PACKET socket. This is fairly simple. We immediately go
* to 'closed' state and remove our protocol entry in the device list.
@@ -864,6 +1174,9 @@ static int packet_release(struct socket *sock)
struct sock *sk = sock->sk;
struct packet_sock *po;
struct net *net;
+#ifdef CONFIG_PACKET_MMAP
+ struct tpacket_req req;
+#endif
if (!sk)
return 0;
@@ -893,11 +1206,13 @@ static int packet_release(struct socket *sock)
packet_flush_mclist(sk);
#ifdef CONFIG_PACKET_MMAP
- if (po->pg_vec) {
- struct tpacket_req req;
- memset(&req, 0, sizeof(req));
- packet_set_ring(sk, &req, 1);
- }
+ memset(&req, 0, sizeof(req));
+
+ if (po->rx_ring.pg_vec)
+ packet_set_ring(sk, &req, 1, 0);
+
+ if (po->tx_ring.pg_vec)
+ packet_set_ring(sk, &req, 1, 1);
#endif
/*
@@ -1253,9 +1568,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
switch (i->type) {
case PACKET_MR_MULTICAST:
if (what > 0)
- dev_mc_add(dev, i->addr, i->alen, 0);
+ return dev_mc_add(dev, i->addr, i->alen, 0);
else
- dev_mc_delete(dev, i->addr, i->alen, 0);
+ return dev_mc_delete(dev, i->addr, i->alen, 0);
break;
case PACKET_MR_PROMISC:
return dev_set_promiscuity(dev, what);
@@ -1263,6 +1578,12 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
case PACKET_MR_ALLMULTI:
return dev_set_allmulti(dev, what);
break;
+ case PACKET_MR_UNICAST:
+ if (what > 0)
+ return dev_unicast_add(dev, i->addr);
+ else
+ return dev_unicast_delete(dev, i->addr);
+ break;
default:;
}
return 0;
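
The new PACKET_MR_UNICAST type extends packet_dev_mc() to secondary unicast addresses via dev_unicast_add()/dev_unicast_delete(). Illustrative usage from userspace (ifindex and MAC are placeholders):

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int add_unicast(int fd, int ifindex, const unsigned char mac[6])
{
	struct packet_mreq mr;

	memset(&mr, 0, sizeof(mr));
	mr.mr_ifindex = ifindex;
	mr.mr_type    = PACKET_MR_UNICAST;
	mr.mr_alen    = 6;
	memcpy(mr.mr_address, mac, 6);

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mr, sizeof(mr));
}
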
@@ -1391,7 +1712,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
if (level != SOL_PACKET)
return -ENOPROTOOPT;
- switch(optname) {
+ switch (optname) {
case PACKET_ADD_MEMBERSHIP:
case PACKET_DROP_MEMBERSHIP:
{
@@ -1415,6 +1736,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
#ifdef CONFIG_PACKET_MMAP
case PACKET_RX_RING:
+ case PACKET_TX_RING:
{
struct tpacket_req req;
@@ -1422,7 +1744,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
return -EINVAL;
if (copy_from_user(&req,optval,sizeof(req)))
return -EFAULT;
- return packet_set_ring(sk, &req, 0);
+ return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
}
case PACKET_COPY_THRESH:
{
@@ -1442,7 +1764,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
if (optlen != sizeof(val))
return -EINVAL;
- if (po->pg_vec)
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
@@ -1461,13 +1783,26 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
if (optlen != sizeof(val))
return -EINVAL;
- if (po->pg_vec)
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
po->tp_reserve = val;
return 0;
}
+ case PACKET_LOSS:
+ {
+ unsigned int val;
+
+ if (optlen != sizeof(val))
+ return -EINVAL;
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+ return -EBUSY;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+ po->tp_loss = !!val;
+ return 0;
+ }
#endif
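
PACKET_LOSS selects between the two tp_loss branches seen in tpacket_snd(): when set, a frame rejected by tpacket_fill_skb() is discarded and its slot returned to TP_STATUS_AVAILABLE; when clear, the slot is left as TP_STATUS_WRONG_FORMAT and the send fails. Note it must be configured before either ring exists, since the option returns -EBUSY once a pg_vec is in place. A trivial illustrative wrapper:

#include <sys/socket.h>
#include <linux/if_packet.h>

static int set_tx_loss(int fd, unsigned int discard_on_error)
{
	/* 1 = drop malformed frames silently, 0 = abort the send */
	return setsockopt(fd, SOL_PACKET, PACKET_LOSS,
			  &discard_on_error, sizeof(discard_on_error));
}
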
case PACKET_AUXDATA:
{
@@ -1517,7 +1852,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
if (len < 0)
return -EINVAL;
- switch(optname) {
+ switch (optname) {
case PACKET_STATISTICS:
if (len > sizeof(struct tpacket_stats))
len = sizeof(struct tpacket_stats);
@@ -1573,6 +1908,12 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
val = po->tp_reserve;
data = &val;
break;
+ case PACKET_LOSS:
+ if (len > sizeof(unsigned int))
+ len = sizeof(unsigned int);
+ val = po->tp_loss;
+ data = &val;
+ break;
#endif
default:
return -ENOPROTOOPT;
@@ -1643,7 +1984,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
- switch(cmd) {
+ switch (cmd) {
case SIOCOUTQ:
{
int amount = atomic_read(&sk->sk_wmem_alloc);
@@ -1705,13 +2046,17 @@ static unsigned int packet_poll(struct file * file, struct socket *sock, poll_table *wait)
unsigned int mask = datagram_poll(file, sock, wait);
spin_lock_bh(&sk->sk_receive_queue.lock);
- if (po->pg_vec) {
- unsigned last = po->head ? po->head-1 : po->frame_max;
-
- if (packet_lookup_frame(po, last, TP_STATUS_USER))
+ if (po->rx_ring.pg_vec) {
+ if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
+ spin_lock_bh(&sk->sk_write_queue.lock);
+ if (po->tx_ring.pg_vec) {
+ if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ spin_unlock_bh(&sk->sk_write_queue.lock);
return mask;
}
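
With both rings wired into packet_poll(), one poll() call can wait for RX frames and free TX slots at once: POLLIN reports that the previous RX slot has left TP_STATUS_KERNEL, POLLOUT that the current TX slot is TP_STATUS_AVAILABLE. A hedged sketch:

#include <poll.h>

static short wait_rings(int fd)
{
	struct pollfd pfd = {
		.fd     = fd,
		.events = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM,
	};

	if (poll(&pfd, 1, -1) < 0)
		return 0;
	return pfd.revents;     /* check POLLIN / POLLOUT as needed */
}
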
@@ -1788,21 +2133,33 @@ out_free_pgvec:
goto out;
}
-static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
+static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
+ int closing, int tx_ring)
{
char **pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
int was_running, order = 0;
+ struct packet_ring_buffer *rb;
+ struct sk_buff_head *rb_queue;
__be16 num;
- int err = 0;
+ int err;
- if (req->tp_block_nr) {
- int i;
+ rb = tx_ring ? &po->tx_ring : &po->rx_ring;
+ rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
- /* Sanity tests and some calculations */
+ err = -EBUSY;
+ if (!closing) {
+ if (atomic_read(&po->mapped))
+ goto out;
+ if (atomic_read(&rb->pending))
+ goto out;
+ }
- if (unlikely(po->pg_vec))
- return -EBUSY;
+ if (req->tp_block_nr) {
+ /* Sanity tests and some calculations */
+ err = -EBUSY;
+ if (unlikely(rb->pg_vec))
+ goto out;
switch (po->tp_version) {
case TPACKET_V1:
@@ -1813,42 +2170,35 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
break;
}
+ err = -EINVAL;
if (unlikely((int)req->tp_block_size <= 0))
- return -EINVAL;
+ goto out;
if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
- return -EINVAL;
+ goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
- po->tp_reserve))
- return -EINVAL;
+ po->tp_reserve))
+ goto out;
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
- return -EINVAL;
+ goto out;
- po->frames_per_block = req->tp_block_size/req->tp_frame_size;
- if (unlikely(po->frames_per_block <= 0))
- return -EINVAL;
- if (unlikely((po->frames_per_block * req->tp_block_nr) !=
- req->tp_frame_nr))
- return -EINVAL;
+ rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
+ if (unlikely(rb->frames_per_block <= 0))
+ goto out;
+ if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
+ req->tp_frame_nr))
+ goto out;
err = -ENOMEM;
order = get_order(req->tp_block_size);
pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
-
- for (i = 0; i < req->tp_block_nr; i++) {
- void *ptr = pg_vec[i];
- int k;
-
- for (k = 0; k < po->frames_per_block; k++) {
- __packet_set_status(po, ptr, TP_STATUS_KERNEL);
- ptr += req->tp_frame_size;
- }
- }
- /* Done */
- } else {
+ }
+ /* Done */
+ else {
+ err = -EINVAL;
if (unlikely(req->tp_frame_nr))
- return -EINVAL;
+ goto out;
}
lock_sock(sk);
@@ -1872,23 +2222,24 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
-
- spin_lock_bh(&sk->sk_receive_queue.lock);
- pg_vec = XC(po->pg_vec, pg_vec);
- po->frame_max = (req->tp_frame_nr - 1);
- po->head = 0;
- po->frame_size = req->tp_frame_size;
- spin_unlock_bh(&sk->sk_receive_queue.lock);
-
- order = XC(po->pg_vec_order, order);
- req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);
-
- po->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
- po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv;
- skb_queue_purge(&sk->sk_receive_queue);
+ spin_lock_bh(&rb_queue->lock);
+ pg_vec = XC(rb->pg_vec, pg_vec);
+ rb->frame_max = (req->tp_frame_nr - 1);
+ rb->head = 0;
+ rb->frame_size = req->tp_frame_size;
+ spin_unlock_bh(&rb_queue->lock);
+
+ order = XC(rb->pg_vec_order, order);
+ req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);
+
+ rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
+ po->prot_hook.func = (po->rx_ring.pg_vec) ?
+ tpacket_rcv : packet_rcv;
+ skb_queue_purge(rb_queue);
#undef XC
if (atomic_read(&po->mapped))
- printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
+ printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n",
+ atomic_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
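
The sanity checks above pin down what a valid tpacket_req looks like: tp_block_size a positive multiple of PAGE_SIZE, tp_frame_size TPACKET_ALIGNMENT-aligned and at least tp_hdrlen + tp_reserve, and tp_frame_nr exactly frames-per-block times tp_block_nr. A sketch that constructs such a request (example geometry; enforcing the hdrlen/reserve floor is left to the caller):

#include <unistd.h>
#include <linux/if_packet.h>

static struct tpacket_req make_ring_req(unsigned int frame_size,
					unsigned int blocks)
{
	struct tpacket_req req;
	unsigned int page = (unsigned int)sysconf(_SC_PAGESIZE);

	req.tp_frame_size = TPACKET_ALIGN(frame_size);
	req.tp_block_size = page;             /* must stay page-aligned */
	while (req.tp_block_size < req.tp_frame_size)
		req.tp_block_size += page;
	req.tp_block_nr = blocks;
	req.tp_frame_nr = (req.tp_block_size / req.tp_frame_size) * blocks;
	return req;
}
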
@@ -1909,11 +2260,13 @@ out:
return err;
}
-static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
+static int packet_mmap(struct file *file, struct socket *sock,
+ struct vm_area_struct *vma)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- unsigned long size;
+ unsigned long size, expected_size;
+ struct packet_ring_buffer *rb;
unsigned long start;
int err = -EINVAL;
int i;
@@ -1921,26 +2274,43 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
if (vma->vm_pgoff)
return -EINVAL;
- size = vma->vm_end - vma->vm_start;
-
mutex_lock(&po->pg_vec_lock);
- if (po->pg_vec == NULL)
+
+ expected_size = 0;
+ for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
+ if (rb->pg_vec) {
+ expected_size += rb->pg_vec_len
+ * rb->pg_vec_pages
+ * PAGE_SIZE;
+ }
+ }
+
+ if (expected_size == 0)
goto out;
- if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
+
+ size = vma->vm_end - vma->vm_start;
+ if (size != expected_size)
goto out;
start = vma->vm_start;
- for (i = 0; i < po->pg_vec_len; i++) {
- struct page *page = virt_to_page(po->pg_vec[i]);
- int pg_num;
-
- for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
- err = vm_insert_page(vma, start, page);
- if (unlikely(err))
- goto out;
- start += PAGE_SIZE;
+ for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
+ if (rb->pg_vec == NULL)
+ continue;
+
+ for (i = 0; i < rb->pg_vec_len; i++) {
+ struct page *page = virt_to_page(rb->pg_vec[i]);
+ int pg_num;
+
+ for (pg_num = 0; pg_num < rb->pg_vec_pages;
+ pg_num++, page++) {
+ err = vm_insert_page(vma, start, page);
+ if (unlikely(err))
+ goto out;
+ start += PAGE_SIZE;
+ }
}
}
+
atomic_inc(&po->mapped);
vma->vm_ops = &packet_mmap_ops;
err = 0;
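
After this change packet_mmap() walks both rings, so userspace maps them with a single mmap() whose length is the sum of the two ring sizes; the RX ring comes first and the TX ring starts directly behind it. A hedged sketch (rx_req/tx_req are the tpacket_req structs previously handed to setsockopt()):

#include <sys/mman.h>
#include <linux/if_packet.h>

static void *map_rings(int fd, const struct tpacket_req *rx_req,
		       const struct tpacket_req *tx_req, void **tx_start)
{
	size_t rx = (size_t)rx_req->tp_block_size * rx_req->tp_block_nr;
	size_t tx = (size_t)tx_req->tp_block_size * tx_req->tp_block_nr;
	void *base = mmap(NULL, rx + tx, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);

	if (base != MAP_FAILED)
		*tx_start = (char *)base + rx;  /* TX ring follows RX ring */
	return base;
}
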