author    H. Peter Anvin <hpa@zytor.com>    2008-09-04 08:08:42 -0700
committer H. Peter Anvin <hpa@zytor.com>    2008-09-04 08:08:42 -0700
commit 7203781c98ad9147564d327de6f6513ad8fc0f4e (patch)
tree   5c29a2a04a626bf08a0d56fd8a0068b3c92ad284 /net/core
parent 671eef85a3e885dff4ce210d8774ad50a91d5967 (diff)
parent af2e1f276ff08f17192411ea3b71c13a758dfe12 (diff)
Merge branch 'x86/cpu' into x86/core
Conflicts:
	arch/x86/kernel/cpu/feature_names.c
	include/asm-x86/cpufeature.h
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c       87
-rw-r--r--  net/core/dev.c            49
-rw-r--r--  net/core/gen_estimator.c   9
-rw-r--r--  net/core/skbuff.c         12
4 files changed, 123 insertions, 34 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index dd61dcad601..52f577a0f54 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -339,6 +339,93 @@ fault:
return -EFAULT;
}
+/**
+ * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ * @skb: buffer to copy to
+ * @offset: offset in the buffer to start copying to
+ * @from: the io vector to copy from
+ * @len: amount of data to copy to buffer from iovec
+ *
+ * Returns 0 or -EFAULT.
+ * Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+ struct iovec *from, int len)
+{
+ int start = skb_headlen(skb);
+ int i, copy = start - offset;
+
+ /* Copy header. */
+ if (copy > 0) {
+ if (copy > len)
+ copy = len;
+ if (memcpy_fromiovec(skb->data + offset, from, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ }
+
+ /* Copy paged appendix. Hmm... why does this look so complicated? */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
+ if ((copy = end - offset) > 0) {
+ int err;
+ u8 *vaddr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = frag->page;
+
+ if (copy > len)
+ copy = len;
+ vaddr = kmap(page);
+ err = memcpy_fromiovec(vaddr + frag->page_offset +
+ offset - start, from, copy);
+ kunmap(page);
+ if (err)
+ goto fault;
+
+ if (!(len -= copy))
+ return 0;
+ offset += copy;
+ }
+ start = end;
+ }
+
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+ for (; list; list = list->next) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + list->len;
+ if ((copy = end - offset) > 0) {
+ if (copy > len)
+ copy = len;
+ if (skb_copy_datagram_from_iovec(list,
+ offset - start,
+ from, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ }
+ start = end;
+ }
+ }
+ if (!len)
+ return 0;
+
+fault:
+ return -EFAULT;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
u8 __user *to, int len,
__wsum *csump)
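For context, a caller typically hands the new helper a user iovec taken from
sendmsg() and lets it fan the data out across the skb's linear area, page
frags, and frag list. A minimal, hypothetical caller sketch (the function
name and setup below are illustrative, not part of this commit):

	/* Copy len bytes of user data described by msg->msg_iov into an
	 * skb that already has enough space reserved.  Note the iovec is
	 * advanced as a side effect of the copy, so it cannot be replayed. */
	static int example_fill_skb(struct sk_buff *skb, struct msghdr *msg,
				    int len)
	{
		/* Copy into the skb starting at offset 0; returns 0 or -EFAULT. */
		return skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, len);
	}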
diff --git a/net/core/dev.c b/net/core/dev.c
index 600bb23c4c2..60c51f76588 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
}
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
{
- if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
- struct softnet_data *sd;
- unsigned long flags;
+ struct softnet_data *sd;
+ unsigned long flags;
- local_irq_save(flags);
- sd = &__get_cpu_var(softnet_data);
- q->next_sched = sd->output_queue;
- sd->output_queue = q;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
- }
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ q->next_sched = sd->output_queue;
+ sd->output_queue = q;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+ if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+ __netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
@@ -1800,9 +1804,13 @@ gso:
spin_lock(root_lock);
- rc = qdisc_enqueue_root(skb, q);
- qdisc_run(q);
-
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ kfree_skb(skb);
+ rc = NET_XMIT_DROP;
+ } else {
+ rc = qdisc_enqueue_root(skb, q);
+ qdisc_run(q);
+ }
spin_unlock(root_lock);
goto out;
@@ -1974,15 +1982,17 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
- smp_mb__before_clear_bit();
- clear_bit(__QDISC_STATE_SCHED, &q->state);
-
root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
qdisc_run(q);
spin_unlock(root_lock);
} else {
- __netif_schedule(q);
+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state))
+ __netif_reschedule(q);
}
}
}
@@ -2084,7 +2094,8 @@ static int ing_filter(struct sk_buff *skb)
q = rxq->qdisc;
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
- result = qdisc_enqueue_root(skb, q);
+ if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+ result = qdisc_enqueue_root(skb, q);
spin_unlock(qdisc_lock(q));
}
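The three dev.c hunks above close a race against qdisc teardown:
dev_deactivate() can mark a qdisc __QDISC_STATE_DEACTIVATED while packets are
still in flight toward it, so each enqueue or reschedule path now re-checks
that bit under the appropriate qdisc lock and drops rather than touching a
dying queue. __netif_reschedule() is split out so net_tx_action(), which
already owns the __QDISC_STATE_SCHED bit it just cleared, can requeue without
repeating the test_and_set_bit() in __netif_schedule(). A condensed sketch of
the recurring pattern (illustrative, not verbatim kernel code):

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);			/* qdisc is going away; drop */
		rc = NET_XMIT_DROP;
	} else {
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}
	spin_unlock(root_lock);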
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index a89f32fa94f..57abe8266be 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -99,7 +99,7 @@ struct gen_estimator_head
static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
-/* Protects against NULL dereference and RCU write-side */
+/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);
static void est_timer(unsigned long arg)
@@ -185,7 +185,6 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
- write_lock_bh(&est_lock);
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
setup_timer(&elist[idx].timer, est_timer, idx);
@@ -195,7 +194,6 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
list_add_rcu(&est->list, &elist[idx].list);
- write_unlock_bh(&est_lock);
return 0;
}
@@ -214,6 +212,7 @@ static void __gen_kill_estimator(struct rcu_head *head)
* Removes the rate estimator specified by &bstats and &rate_est
* and deletes the timer.
*
+ * NOTE: Called under rtnl_mutex
*/
void gen_kill_estimator(struct gnet_stats_basic *bstats,
struct gnet_stats_rate_est *rate_est)
@@ -227,17 +226,17 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
if (!elist[idx].timer.function)
continue;
- write_lock_bh(&est_lock);
list_for_each_entry_safe(e, n, &elist[idx].list, list) {
if (e->rate_est != rate_est || e->bstats != bstats)
continue;
+ write_lock_bh(&est_lock);
e->bstats = NULL;
+ write_unlock_bh(&est_lock);
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
}
- write_unlock_bh(&est_lock);
}
}
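With this change est_lock no longer covers the RCU write side; list insertion
and removal are instead serialized by rtnl_mutex, as the added NOTE on
gen_kill_estimator() records. The lock's remaining job is to guard the
e->bstats pointer, which gen_kill_estimator() NULLs while est_timer() may be
sampling it. A simplified sketch of the est_timer() read side that the
narrowed lock still protects (condensed and paraphrased from
net/core/gen_estimator.c of this kernel, not verbatim):

	read_lock(&est_lock);
	if (e->bstats == NULL) {	/* estimator is being killed */
		read_unlock(&est_lock);
		continue;
	}
	/* ... sample e->bstats->bytes and e->bstats->packets ... */
	read_unlock(&est_lock);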
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 84640172d65..ca1ccdf1ef7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2256,14 +2256,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
segs = nskb;
tail = nskb;
- nskb->dev = skb->dev;
- skb_copy_queue_mapping(nskb, skb);
- nskb->priority = skb->priority;
- nskb->protocol = skb->protocol;
- nskb->vlan_tci = skb->vlan_tci;
- nskb->dst = dst_clone(skb->dst);
- memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
- nskb->pkt_type = skb->pkt_type;
+ __copy_skb_header(nskb, skb);
nskb->mac_len = skb->mac_len;
skb_reserve(nskb, headroom);
@@ -2274,6 +2267,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
doffset);
if (!sg) {
+ nskb->ip_summed = CHECKSUM_NONE;
nskb->csum = skb_copy_and_csum_bits(skb, offset,
skb_put(nskb, len),
len, 0);
@@ -2283,8 +2277,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
frag = skb_shinfo(nskb)->frags;
k = 0;
- nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum = skb->csum;
skb_copy_from_linear_data_offset(skb, offset,
skb_put(nskb, hsize), hsize);
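The skbuff.c hunks replace skb_segment()'s open-coded header field copies
with __copy_skb_header(), which also transfers ip_summed and csum, making the
explicit CHECKSUM_PARTIAL assignment on the scatter-gather path redundant.
The non-SG path, which materializes each segment's full payload through
skb_copy_and_csum_bits(), now correctly marks segments CHECKSUM_NONE, since
their checksum has already been computed into nskb->csum. A condensed sketch
of what the helper consolidates (field list abbreviated; see
net/core/skbuff.c in this kernel for the full copy):

	static void __copy_skb_header(struct sk_buff *new,
				      const struct sk_buff *old)
	{
		new->dev	= old->dev;
		new->priority	= old->priority;
		new->protocol	= old->protocol;
		new->dst	= dst_clone(old->dst);
		memcpy(new->cb, old->cb, sizeof(old->cb));
		new->pkt_type	= old->pkt_type;
		new->ip_summed	= old->ip_summed;
		new->csum	= old->csum;
		new->vlan_tci	= old->vlan_tci;
		/* ... plus timestamps, header offsets, mark, etc. ... */
	}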