path: root/net/sched/sch_generic.c
author    Simon Horman <horms@verge.net.au>    2008-08-19 17:36:22 +1000
committer Simon Horman <horms@verge.net.au>    2008-08-19 17:36:22 +1000
commit    3f087668c4e7c97289f0a67f9278ae6e0a765a80 (patch)
tree      6b278344bf96d31a328bf76e445b189bff5f0ce9 /net/sched/sch_generic.c
parent    51df1901394a714d1a17202da02ae4957260eab5 (diff)
parent    e5befbd9525d92bb074b70192eb2c69aae65fc60 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--  net/sched/sch_generic.c | 68
1 file changed, 19 insertions(+), 49 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 468574682ca..c3ed4d44fc1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -518,14 +518,17 @@ void qdisc_reset(struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_reset);
-/* this is the rcu callback function to clean up a qdisc when there
- * are no further references to it */
-
-static void __qdisc_destroy(struct rcu_head *head)
+void qdisc_destroy(struct Qdisc *qdisc)
{
- struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
const struct Qdisc_ops *ops = qdisc->ops;
+ if (qdisc->flags & TCQ_F_BUILTIN ||
+ !atomic_dec_and_test(&qdisc->refcnt))
+ return;
+
+ if (qdisc->parent)
+ list_del(&qdisc->list);
+
#ifdef CONFIG_NET_SCHED
qdisc_put_stab(qdisc->stab);
#endif
@@ -542,20 +545,6 @@ static void __qdisc_destroy(struct rcu_head *head)
kfree((char *) qdisc - qdisc->padded);
}
-
-/* Under qdisc_lock(qdisc) and BH! */
-
-void qdisc_destroy(struct Qdisc *qdisc)
-{
- if (qdisc->flags & TCQ_F_BUILTIN ||
- !atomic_dec_and_test(&qdisc->refcnt))
- return;
-
- if (qdisc->parent)
- list_del(&qdisc->list);
-
- call_rcu(&qdisc->q_rcu, __qdisc_destroy);
-}
EXPORT_SYMBOL(qdisc_destroy);
static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
@@ -597,6 +586,9 @@ static void transition_one_qdisc(struct net_device *dev,
struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
int *need_watchdog_p = _need_watchdog;
+ if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+ clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
+
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
*need_watchdog_p = 1;
@@ -640,6 +632,9 @@ static void dev_deactivate_queue(struct net_device *dev,
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
+ if (!(qdisc->flags & TCQ_F_BUILTIN))
+ set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+
dev_queue->qdisc = qdisc_default;
qdisc_reset(qdisc);
@@ -647,7 +642,7 @@ static void dev_deactivate_queue(struct net_device *dev,
}
}
-static bool some_qdisc_is_busy(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
@@ -661,14 +656,12 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock)
q = dev_queue->qdisc_sleeping;
root_lock = qdisc_lock(q);
- if (lock)
- spin_lock_bh(root_lock);
+ spin_lock_bh(root_lock);
val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
test_bit(__QDISC_STATE_SCHED, &q->state));
- if (lock)
- spin_unlock_bh(root_lock);
+ spin_unlock_bh(root_lock);
if (val)
return true;
@@ -678,8 +671,6 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock)
void dev_deactivate(struct net_device *dev)
{
- bool running;
-
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
@@ -689,25 +680,8 @@ void dev_deactivate(struct net_device *dev)
synchronize_rcu();
/* Wait for outstanding qdisc_run calls. */
- do {
- while (some_qdisc_is_busy(dev, 0))
- yield();
-
- /*
- * Double-check inside queue lock to ensure that all effects
- * of the queue run are visible when we return.
- */
- running = some_qdisc_is_busy(dev, 1);
-
- /*
- * The running flag should never be set at this point because
- * we've already set dev->qdisc to noop_qdisc *inside* the same
- * pair of spin locks. That is, if any qdisc_run starts after
- * our initial test it should see the noop_qdisc and then
- * clear the RUNNING bit before dropping the queue lock. So
- * if it is set here then we've found a bug.
- */
- } while (WARN_ON_ONCE(running));
+ while (some_qdisc_is_busy(dev))
+ yield();
}
static void dev_init_scheduler_queue(struct net_device *dev,
@@ -736,14 +710,10 @@ static void shutdown_scheduler_queue(struct net_device *dev,
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
- spinlock_t *root_lock = qdisc_lock(qdisc);
-
dev_queue->qdisc = qdisc_default;
dev_queue->qdisc_sleeping = qdisc_default;
- spin_lock_bh(root_lock);
qdisc_destroy(qdisc);
- spin_unlock_bh(root_lock);
}
}
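
Taken together, the hunks above make qdisc_destroy() free the qdisc immediately once TCQ_F_BUILTIN is clear and the reference count drops to zero, instead of deferring teardown to an RCU callback, and dev_deactivate() now simply yields until some_qdisc_is_busy() reports no qdisc still running or scheduled. The userspace sketch below (hypothetical names, C11 atomics standing in for the kernel's atomic_t and atomic_dec_and_test()) only illustrates that refcount-gated synchronous-destroy pattern; it is not kernel code and makes no claim about the actual qdisc API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct Qdisc: a refcounted object whose
 * last holder frees it synchronously, mirroring the pattern this
 * change switches to (no call_rcu() deferral). */
struct object {
    atomic_int refcnt;
    int builtin;            /* like TCQ_F_BUILTIN: never freed */
};

static struct object *object_get(struct object *obj)
{
    atomic_fetch_add(&obj->refcnt, 1);
    return obj;
}

static void object_destroy(struct object *obj)
{
    /* Built-in objects, and objects that still have other holders,
     * are left alone; only the drop that reaches zero frees. */
    if (obj->builtin || atomic_fetch_sub(&obj->refcnt, 1) != 1)
        return;

    printf("last reference dropped, freeing synchronously\n");
    free(obj);
}

int main(void)
{
    struct object *obj = calloc(1, sizeof(*obj));

    atomic_init(&obj->refcnt, 1);

    object_get(obj);        /* a second holder, e.g. a parent */
    object_destroy(obj);    /* refcnt 2 -> 1: nothing happens  */
    object_destroy(obj);    /* refcnt 1 -> 0: freed right here */
    return 0;
}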