author | Dmitry Torokhov <dtor@insightbb.com> | 2006-12-08 01:07:56 -0500
committer | Dmitry Torokhov <dtor@insightbb.com> | 2006-12-08 01:07:56 -0500
commit | bef986502fa398b1785a3979b1aa17cd902d3527
tree | b59c1afe7b1dfcc001b86e54863f550d7ddc8c34 /net/sched
parent | 4bdbd2807deeccc0793d57fb5120d7a53f2c0b3c
parent | c99767974ebd2a719d849fdeaaa1674456f5283f
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/usb/input/hid.h
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/Kconfig | 6
-rw-r--r-- | net/sched/Makefile | 3
-rw-r--r-- | net/sched/act_gact.c | 4
-rw-r--r-- | net/sched/act_ipt.c | 6
-rw-r--r-- | net/sched/act_police.c | 26
-rw-r--r-- | net/sched/act_simple.c | 3
-rw-r--r-- | net/sched/cls_api.c | 3
-rw-r--r-- | net/sched/cls_fw.c | 7
-rw-r--r-- | net/sched/cls_rsvp.h | 16
-rw-r--r-- | net/sched/cls_u32.c | 2
-rw-r--r-- | net/sched/em_meta.c | 13
-rw-r--r-- | net/sched/em_nbyte.c | 4
-rw-r--r-- | net/sched/ematch.c | 3
-rw-r--r-- | net/sched/sch_api.c | 41
-rw-r--r-- | net/sched/sch_atm.c | 5
-rw-r--r-- | net/sched/sch_cbq.c | 10
-rw-r--r-- | net/sched/sch_dsmark.c | 9
-rw-r--r-- | net/sched/sch_generic.c | 10
-rw-r--r-- | net/sched/sch_hfsc.c | 27
-rw-r--r-- | net/sched/sch_htb.c | 42
-rw-r--r-- | net/sched/sch_netem.c | 10
-rw-r--r-- | net/sched/sch_prio.c | 14
-rw-r--r-- | net/sched/sch_red.c | 14
-rw-r--r-- | net/sched/sch_sfq.c | 3
-rw-r--r-- | net/sched/sch_tbf.c | 16
25 files changed, 187 insertions, 110 deletions
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 8298ea9ffe1..f4544dd8647 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -6,6 +6,7 @@ menu "QoS and/or fair queueing"
 
 config NET_SCHED
 	bool "QoS and/or fair queueing"
+	select NET_SCH_FIFO
 	---help---
 	  When the kernel has several packets to send out over a network
 	  device, it has to decide which ones to send first, which ones to
@@ -40,6 +41,9 @@ config NET_SCHED
 	  The available schedulers are listed in the following questions; you
 	  can say Y to as many as you like. If unsure, say N now.
 
+config NET_SCH_FIFO
+	bool
+
 if NET_SCHED
 
 choice
@@ -320,7 +324,7 @@ config CLS_U32_PERF
 
 config CLS_U32_MARK
 	bool "Netfilter marks support"
-	depends on NET_CLS_U32 && NETFILTER
+	depends on NET_CLS_U32
 	---help---
 	  Say Y here to be able to use netfilter marks as u32 key.
 
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 0f06aec6609..ff2d6e5e282 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -4,7 +4,7 @@
 
 obj-y	:= sch_generic.o
 
-obj-$(CONFIG_NET_SCHED)		+= sch_api.o sch_fifo.o sch_blackhole.o
+obj-$(CONFIG_NET_SCHED)		+= sch_api.o sch_blackhole.o
 obj-$(CONFIG_NET_CLS)		+= cls_api.o
 obj-$(CONFIG_NET_CLS_ACT)	+= act_api.o
 obj-$(CONFIG_NET_ACT_POLICE)	+= act_police.o
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ACT_MIRRED)	+= act_mirred.o
 obj-$(CONFIG_NET_ACT_IPT)	+= act_ipt.o
 obj-$(CONFIG_NET_ACT_PEDIT)	+= act_pedit.o
 obj-$(CONFIG_NET_ACT_SIMP)	+= act_simple.o
+obj-$(CONFIG_NET_SCH_FIFO)	+= sch_fifo.o
 obj-$(CONFIG_NET_SCH_CBQ)	+= sch_cbq.o
 obj-$(CONFIG_NET_SCH_HTB)	+= sch_htb.o
 obj-$(CONFIG_NET_SCH_HPFQ)	+= sch_hpfq.o
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 6cff56696a8..85de7efd5fe 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -48,14 +48,14 @@ static struct tcf_hashinfo gact_hash_info = {
 #ifdef CONFIG_GACT_PROB
 static int gact_net_rand(struct tcf_gact *gact)
 {
-	if (net_random() % gact->tcfg_pval)
+	if (!gact->tcfg_pval || net_random() % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
 
 static int gact_determ(struct tcf_gact *gact)
 {
-	if (gact->tcf_bstats.packets % gact->tcfg_pval)
+	if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d8c9310da6e..a9608064a4c 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -156,10 +156,9 @@ static int tcf_ipt_init(struct rtattr *rta, struct rtattr *est,
 	    rtattr_strlcpy(tname, tb[TCA_IPT_TABLE-1], IFNAMSIZ) >= IFNAMSIZ)
 		strcpy(tname, "mangle");
 
-	t = kmalloc(td->u.target_size, GFP_KERNEL);
+	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
 	if (unlikely(!t))
 		goto err2;
-	memcpy(t, td, td->u.target_size);
 
 	if ((err = ipt_init_target(t, tname, hook)) < 0)
 		goto err3;
@@ -256,13 +255,12 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
 	** for foolproof you need to not assume this
 	*/
 
-	t = kmalloc(ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
+	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
 	if (unlikely(!t))
 		goto rtattr_failure;
 	c.bindcnt = ipt->tcf_bindcnt - bind;
 	c.refcnt = ipt->tcf_refcnt - ref;
-	memcpy(t, ipt->tcfi_t, ipt->tcfi_t->u.user.target_size);
 	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
 
 	RTA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index fed47b65883..af68e1e8325 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -46,6 +46,18 @@ static struct tcf_hashinfo police_hash_info = {
 	.lock	=	&police_lock,
 };
 
+/* old policer structure from before tc actions */
+struct tc_police_compat
+{
+	u32			index;
+	int			action;
+	u32			limit;
+	u32			burst;
+	u32			mtu;
+	struct tc_ratespec	rate;
+	struct tc_ratespec	peakrate;
+};
+
 /* Each policer is serialized by its individual spinlock */
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -131,12 +143,15 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
 	struct tc_police *parm;
 	struct tcf_police *police;
 	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
+	int size;
 
 	if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
 		return -EINVAL;
 
-	if (tb[TCA_POLICE_TBF-1] == NULL ||
-	    RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
+	if (tb[TCA_POLICE_TBF-1] == NULL)
+		return -EINVAL;
+	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
+	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
 		return -EINVAL;
 	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
 
@@ -415,12 +430,15 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
 	struct tcf_police *police;
 	struct rtattr *tb[TCA_POLICE_MAX];
 	struct tc_police *parm;
+	int size;
 
 	if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
 		return NULL;
 
-	if (tb[TCA_POLICE_TBF-1] == NULL ||
-	    RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
+	if (tb[TCA_POLICE_TBF-1] == NULL)
+		return NULL;
+	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
+	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
 		return NULL;
 	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
 
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 901571a6770..5fe80854ca9 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -71,11 +71,10 @@ static int tcf_simp_release(struct tcf_defact *d, int bind)
 
 static int alloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata)
 {
-	d->tcfd_defdata = kmalloc(datalen, GFP_KERNEL);
+	d->tcfd_defdata = kmemdup(defdata, datalen, GFP_KERNEL);
 	if (unlikely(!d->tcfd_defdata))
 		return -ENOMEM;
 	d->tcfd_datalen = datalen;
-	memcpy(d->tcfd_defdata, defdata, datalen);
 
 	return 0;
 }
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 37a18402164..edb8fc97ae1 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -217,7 +217,7 @@ replay:
 
 		/* Create new proto tcf */
 		err = -ENOBUFS;
-		if ((tp = kmalloc(sizeof(*tp), GFP_KERNEL)) == NULL)
+		if ((tp = kzalloc(sizeof(*tp), GFP_KERNEL)) == NULL)
 			goto errout;
 		err = -EINVAL;
 		tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]);
@@ -247,7 +247,6 @@ replay:
 			kfree(tp);
 			goto errout;
 		}
-		memset(tp, 0, sizeof(*tp));
 		tp->ops = tp_ops;
 		tp->protocol = protocol;
 		tp->prio = nprio ? : tcf_auto_prio(*back);
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index e54acc6bccc..c797d6ada7d 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -101,13 +101,10 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	struct fw_head *head = (struct fw_head*)tp->root;
 	struct fw_filter *f;
 	int r;
-#ifdef CONFIG_NETFILTER
-	u32 id = skb->nfmark & head->mask;
-#else
-	u32 id = 0;
-#endif
+	u32 id = skb->mark;
 
 	if (head != NULL) {
+		id &= head->mask;
 		for (f=head->ht[fw_hash(id)]; f; f=f->next) {
 			if (f->id == id) {
 				*res = f->res;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 6e230ecfba0..587b9adab38 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -77,7 +77,7 @@ struct rsvp_head
 struct rsvp_session
 {
 	struct rsvp_session	*next;
-	u32			dst[RSVP_DST_LEN];
+	__be32			dst[RSVP_DST_LEN];
 	struct tc_rsvp_gpi 	dpi;
 	u8			protocol;
 	u8			tunnelid;
@@ -89,7 +89,7 @@ struct rsvp_session
 struct rsvp_filter
 {
 	struct rsvp_filter	*next;
-	u32			src[RSVP_DST_LEN];
+	__be32			src[RSVP_DST_LEN];
 
 	struct tc_rsvp_gpi	spi;
 	u8			tunnelhdr;
@@ -100,17 +100,17 @@ struct rsvp_filter
 	struct rsvp_session	*sess;
 };
 
-static __inline__ unsigned hash_dst(u32 *dst, u8 protocol, u8 tunnelid)
+static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
 {
-	unsigned h = dst[RSVP_DST_LEN-1];
+	unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
 	h ^= h>>16;
 	h ^= h>>8;
 	return (h ^ protocol ^ tunnelid) & 0xFF;
 }
 
-static __inline__ unsigned hash_src(u32 *src)
+static __inline__ unsigned hash_src(__be32 *src)
 {
-	unsigned h = src[RSVP_DST_LEN-1];
+	unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
 	h ^= h>>16;
 	h ^= h>>8;
 	h ^= h>>4;
@@ -138,7 +138,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	struct rsvp_session *s;
 	struct rsvp_filter *f;
 	unsigned h1, h2;
-	u32 *dst, *src;
+	__be32 *dst, *src;
 	u8 protocol;
 	u8 tunnelid = 0;
 	u8 *xprt;
@@ -410,7 +410,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	struct rtattr *tb[TCA_RSVP_MAX];
 	struct tcf_exts e;
 	unsigned h1, h2;
-	u32 *dst;
+	__be32 *dst;
 	int err;
 
 	if (opt == NULL)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 0a6cfa0005b..8b519480199 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -143,7 +143,7 @@ next_knode:
 #endif
 
 #ifdef CONFIG_CLS_U32_MARK
-		if ((skb->nfmark & n->mark.mask) != n->mark.val) {
+		if ((skb->mark & n->mark.mask) != n->mark.val) {
 			n = n->next;
 			goto next_knode;
 		} else {
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 61e3b740ab1..45d47d37155 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -208,13 +208,9 @@ META_COLLECTOR(int_maclen)
  * Netfilter
  **************************************************************************/
 
-META_COLLECTOR(int_nfmark)
+META_COLLECTOR(int_mark)
 {
-#ifdef CONFIG_NETFILTER
-	dst->value = skb->nfmark;
-#else
-	dst->value = 0;
-#endif
+	dst->value = skb->mark;
 }
 
 /**************************************************************************
@@ -490,7 +486,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
 	[META_ID(PKTLEN)]		= META_FUNC(int_pktlen),
 	[META_ID(DATALEN)]		= META_FUNC(int_datalen),
 	[META_ID(MACLEN)]		= META_FUNC(int_maclen),
-	[META_ID(NFMARK)]		= META_FUNC(int_nfmark),
+	[META_ID(NFMARK)]		= META_FUNC(int_mark),
 	[META_ID(TCINDEX)]		= META_FUNC(int_tcindex),
 	[META_ID(RTCLASSID)]		= META_FUNC(int_rtclassid),
 	[META_ID(RTIIF)]		= META_FUNC(int_rtiif),
@@ -550,10 +546,9 @@ static int meta_var_change(struct meta_value *dst, struct rtattr *rta)
 {
 	int len = RTA_PAYLOAD(rta);
 
-	dst->val = (unsigned long) kmalloc(len, GFP_KERNEL);
+	dst->val = (unsigned long)kmemdup(RTA_DATA(rta), len, GFP_KERNEL);
 	if (dst->val == 0UL)
 		return -ENOMEM;
-	memcpy((void *) dst->val, RTA_DATA(rta), len);
 	dst->len = len;
 	return 0;
 }
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index cc80babfd79..005db409be6 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -34,12 +34,10 @@ static int em_nbyte_change(struct tcf_proto *tp, void *data, int data_len,
 		return -EINVAL;
 
 	em->datalen = sizeof(*nbyte) + nbyte->len;
-	em->data = (unsigned long) kmalloc(em->datalen, GFP_KERNEL);
+	em->data = (unsigned long)kmemdup(data, em->datalen, GFP_KERNEL);
 	if (em->data == 0UL)
 		return -ENOBUFS;
 
-	memcpy((void *) em->data, data, em->datalen);
-
 	return 0;
 }
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 0fd0768a17c..8f8a16da72a 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -251,12 +251,11 @@ static int tcf_em_validate(struct tcf_proto *tp,
 					goto errout;
 				em->data = *(u32 *) data;
 			} else {
-				void *v = kmalloc(data_len, GFP_KERNEL);
+				void *v = kmemdup(data, data_len, GFP_KERNEL);
 				if (v == NULL) {
 					err = -ENOBUFS;
 					goto errout;
 				}
-				memcpy(v, data, data_len);
 				em->data = (unsigned long) v;
 			}
 		}
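The act_ipt, act_simple, em_meta, em_nbyte and ematch hunks above all collapse an open-coded kmalloc()+memcpy() pair into a single kmemdup() call. A minimal sketch of the pattern, assuming ordinary kernel context (the dup_blob() helper is illustrative, only kmemdup() itself is kernel API):

#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative helper: duplicate an attribute payload for later use. */
static void *dup_blob(const void *src, size_t len)
{
	/* kmemdup() allocates len bytes and copies src into them, replacing
	 * the separate kmalloc() + memcpy() sequence used by the old code. */
	return kmemdup(src, len, GFP_KERNEL);
}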
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0b648929114..65825f4409d 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -191,21 +191,27 @@ int unregister_qdisc(struct Qdisc_ops *qops)
    (root qdisc, all its children, children of children etc.)
  */
 
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	struct Qdisc *q;
 
-	read_lock(&qdisc_tree_lock);
 	list_for_each_entry(q, &dev->qdisc_list, list) {
-		if (q->handle == handle) {
-			read_unlock(&qdisc_tree_lock);
+		if (q->handle == handle)
 			return q;
-		}
 	}
-	read_unlock(&qdisc_tree_lock);
 	return NULL;
 }
 
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+{
+	struct Qdisc *q;
+
+	read_lock(&qdisc_tree_lock);
+	q = __qdisc_lookup(dev, handle);
+	read_unlock(&qdisc_tree_lock);
+	return q;
+}
+
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
 	unsigned long cl;
@@ -348,6 +354,26 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 	return oqdisc;
 }
 
+void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+{
+	struct Qdisc_class_ops *cops;
+	unsigned long cl;
+	u32 parentid;
+
+	if (n == 0)
+		return;
+	while ((parentid = sch->parent)) {
+		sch = __qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+		cops = sch->ops->cl_ops;
+		if (cops->qlen_notify) {
+			cl = cops->get(sch, parentid);
+			cops->qlen_notify(sch, cl);
+			cops->put(sch, cl);
+		}
+		sch->q.qlen -= n;
+	}
+}
+EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 
 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
    to device "dev".
@@ -1112,7 +1138,7 @@ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	struct tcf_result *res)
 {
 	int err = 0;
-	u32 protocol = skb->protocol;
+	__be16 protocol = skb->protocol;
 #ifdef CONFIG_NET_CLS_ACT
 	struct tcf_proto *otp = tp;
 reclassify:
@@ -1277,7 +1303,6 @@ static int __init pktsched_init(void)
 
 subsys_initcall(pktsched_init);
 
-EXPORT_SYMBOL(qdisc_lookup);
 EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
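The sch_api.c hunk above introduces qdisc_tree_decrease_qlen(): it walks up through sch->parent, calls the parent class's new qlen_notify() hook, and subtracts n from each ancestor's q.qlen. The scheduler hunks that follow call it wherever packets silently leave an inner queue. A sketch of the recurring graft pattern, assuming the usual qdisc headers (the helper name and exact includes are illustrative):

#include <net/pkt_sched.h>
#include <net/sch_generic.h>

/* Illustrative: replace a child qdisc and propagate the backlog that
 * disappears with the old child, instead of poking sch->q.qlen directly. */
static void example_replace_child(struct Qdisc *sch, struct Qdisc **slot,
				  struct Qdisc *new, struct Qdisc **old)
{
	sch_tree_lock(sch);
	*old = xchg(slot, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
}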
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index dbf44da0912..edc7bb0b9c8 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -316,7 +316,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 	}
 	memset(flow,0,sizeof(*flow));
 	flow->filter_list = NULL;
-	if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops)))
+	if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops,classid)))
 		flow->q = &noop_qdisc;
 	DPRINTK("atm_tc_change: qdisc %p\n",flow->q);
 	flow->sock = sock;
@@ -576,7 +576,8 @@ static int atm_tc_init(struct Qdisc *sch,struct rtattr *opt)
 
 	DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
 	p->flows = &p->link;
-	if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops)))
+	if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops,
+					   sch->handle)))
 		p->link.q = &noop_qdisc;
 	DPRINTK("atm_tc_init: link (%p) qdisc %p\n",&p->link,p->link.q);
 	p->link.filter_list = NULL;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index bac881bfe36..ba82dfab604 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1429,7 +1429,8 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
 	q->link.sibling = &q->link;
 	q->link.classid = sch->handle;
 	q->link.qdisc = sch;
-	if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
+	if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+					    sch->handle)))
 		q->link.q = &noop_qdisc;
 
 	q->link.priority = TC_CBQ_MAXPRIO-1;
@@ -1674,7 +1675,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
 	if (cl) {
 		if (new == NULL) {
-			if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)) == NULL)
+			if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+						     cl->classid)) == NULL)
 				return -ENOBUFS;
 		} else {
 #ifdef CONFIG_NET_CLS_POLICE
@@ -1685,7 +1687,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		sch_tree_lock(sch);
 		*old = cl->q;
 		cl->q = new;
-		sch->q.qlen -= (*old)->q.qlen;
+		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 		qdisc_reset(*old);
 		sch_tree_unlock(sch);
 
@@ -1932,7 +1934,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 	cl->R_tab = rtab;
 	rtab = NULL;
 	cl->refcnt = 1;
-	if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
+	if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
 		cl->q = &noop_qdisc;
 	cl->classid = classid;
 	cl->tparent = parent;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 11c8a2119b9..d5421816f00 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -88,15 +88,16 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
 		sch, p, new, old);
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+					sch->handle);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
 
 	sch_tree_lock(sch);
 	*old = xchg(&p->q, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
-	sch->q.qlen = 0;
 	sch_tree_unlock(sch);
 
 	return 0;
@@ -307,7 +308,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
 			if (p->mask[index] != 0xff || p->value[index])
 				printk(KERN_WARNING "dsmark_dequeue: "
 				       "unsupported protocol %d\n",
-				       htons(skb->protocol));
+				       ntohs(skb->protocol));
 			break;
 	};
 
@@ -387,7 +388,7 @@ static int dsmark_init(struct Qdisc *sch, struct rtattr *opt)
 	p->default_index = default_index;
 	p->set_tc_index = RTA_GET_FLAG(tb[TCA_DSMARK_SET_TC_INDEX-1]);
 
-	p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+	p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle);
 	if (p->q == NULL)
 		p->q = &noop_qdisc;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 88c6a99ce53..bc116bd6937 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -450,13 +450,15 @@ errout:
 	return ERR_PTR(-err);
 }
 
-struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
+struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
+				 unsigned int parentid)
 {
 	struct Qdisc *sch;
 
 	sch = qdisc_alloc(dev, ops);
 	if (IS_ERR(sch))
 		goto errout;
+	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
 		return sch;
@@ -520,7 +522,8 @@ void dev_activate(struct net_device *dev)
 	if (dev->qdisc_sleeping == &noop_qdisc) {
 		struct Qdisc *qdisc;
 		if (dev->tx_queue_len) {
-			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
+			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
+						  TC_H_ROOT);
 			if (qdisc == NULL) {
 				printk(KERN_INFO "%s: activation failed\n", dev->name);
 				return;
@@ -606,13 +609,10 @@ void dev_shutdown(struct net_device *dev)
 	qdisc_unlock_tree(dev);
 }
 
-EXPORT_SYMBOL(__netdev_watchdog_up);
 EXPORT_SYMBOL(netif_carrier_on);
 EXPORT_SYMBOL(netif_carrier_off);
 EXPORT_SYMBOL(noop_qdisc);
-EXPORT_SYMBOL(noop_qdisc_ops);
 EXPORT_SYMBOL(qdisc_create_dflt);
-EXPORT_SYMBOL(qdisc_alloc);
 EXPORT_SYMBOL(qdisc_destroy);
 EXPORT_SYMBOL(qdisc_reset);
 EXPORT_SYMBOL(qdisc_lock_tree);
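The sch_generic.c hunk above gives qdisc_create_dflt() a third argument, the parent classid, which is stored in sch->parent so that qdisc_tree_decrease_qlen() can find the ancestor chain. The callers in this merge pass their class's classid, sch->handle for a root-level child, or TC_H_MAKE(sch->handle, n) for an anonymous inner fifo. A sketch of the common call site, assuming pfifo as the default child (the helper name is illustrative):

#include <net/pkt_sched.h>

/* Illustrative: create a default pfifo leaf for a class, recording the
 * class id as the new qdisc's parent so backlog changes can propagate up. */
static struct Qdisc *example_make_leaf(struct Qdisc *sch, u32 classid)
{
	struct Qdisc *leaf;

	leaf = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
	if (leaf == NULL)
		leaf = &noop_qdisc;	/* fall back as the schedulers above do */
	return leaf;
}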
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6a6735a2ed3..6eefa699577 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -946,6 +946,7 @@ qdisc_peek_len(struct Qdisc *sch)
 	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
 		if (net_ratelimit())
 			printk("qdisc_peek_len: failed to requeue\n");
+		qdisc_tree_decrease_qlen(sch, 1);
 		return 0;
 	}
 	return len;
@@ -957,11 +958,7 @@ hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
 	unsigned int len = cl->qdisc->q.qlen;
 
 	qdisc_reset(cl->qdisc);
-	if (len > 0) {
-		update_vf(cl, 0, 0);
-		set_passive(cl);
-		sch->q.qlen -= len;
-	}
+	qdisc_tree_decrease_qlen(cl->qdisc, len);
 }
 
 static void
@@ -1138,7 +1135,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->classid   = classid;
 	cl->sched     = q;
 	cl->cl_parent = parent;
-	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
 	cl->stats_lock = &sch->dev->queue_lock;
@@ -1271,7 +1268,8 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 	if (cl->level > 0)
 		return -EINVAL;
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+					cl->classid);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -1294,6 +1292,17 @@ hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
 	return NULL;
 }
 
+static void
+hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
+{
+	struct hfsc_class *cl = (struct hfsc_class *)arg;
+
+	if (cl->qdisc->q.qlen == 0) {
+		update_vf(cl, 0, 0);
+		set_passive(cl);
+	}
+}
+
 static unsigned long
 hfsc_get_class(struct Qdisc *sch, u32 classid)
 {
@@ -1514,7 +1523,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
 	q->root.refcnt  = 1;
 	q->root.classid = sch->handle;
 	q->root.sched   = q;
-	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+					  sch->handle);
 	if (q->root.qdisc == NULL)
 		q->root.qdisc = &noop_qdisc;
 	q->root.stats_lock = &sch->dev->queue_lock;
@@ -1777,6 +1787,7 @@ static struct Qdisc_class_ops hfsc_class_ops = {
 	.delete		= hfsc_delete_class,
 	.graft		= hfsc_graft_class,
 	.leaf		= hfsc_class_leaf,
+	.qlen_notify	= hfsc_qlen_notify,
 	.get		= hfsc_get_class,
 	.put		= hfsc_put_class,
 	.bind_tcf	= hfsc_bind_tcf,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9b9c555c713..215e68c2b61 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1223,17 +1223,14 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 	struct htb_class *cl = (struct htb_class *)arg;
 
 	if (cl && !cl->level) {
-		if (new == NULL && (new = qdisc_create_dflt(sch->dev,
-							    &pfifo_qdisc_ops))
+		if (new == NULL &&
+		    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+					     cl->classid))
 		    == NULL)
 			return -ENOBUFS;
 		sch_tree_lock(sch);
 		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
-			if (cl->prio_activity)
-				htb_deactivate(qdisc_priv(sch), cl);
-
-			/* TODO: is it correct ? Why CBQ doesn't do it ? */
-			sch->q.qlen -= (*old)->q.qlen;
+			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 			qdisc_reset(*old);
 		}
 		sch_tree_unlock(sch);
@@ -1248,6 +1245,14 @@ static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
 	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
 }
 
+static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
+{
+	struct htb_class *cl = (struct htb_class *)arg;
+
+	if (cl->un.leaf.q->q.qlen == 0)
+		htb_deactivate(qdisc_priv(sch), cl);
+}
+
 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
 {
 	struct htb_class *cl = htb_find(classid, sch);
@@ -1269,9 +1274,9 @@ static void htb_destroy_filters(struct tcf_proto **fl)
 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
 	struct htb_sched *q = qdisc_priv(sch);
+
 	if (!cl->level) {
 		BUG_TRAP(cl->un.leaf.q);
-		sch->q.qlen -= cl->un.leaf.q->q.qlen;
 		qdisc_destroy(cl->un.leaf.q);
 	}
 	qdisc_put_rtab(cl->rate);
@@ -1284,8 +1289,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 					  struct htb_class, sibling));
 
 	/* note: this delete may happen twice (see htb_delete) */
-	if (!hlist_unhashed(&cl->hlist))
-		hlist_del(&cl->hlist);
+	hlist_del_init(&cl->hlist);
 	list_del(&cl->sibling);
 
 	if (cl->prio_activity)
@@ -1323,6 +1327,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)arg;
+	unsigned int qlen;
 
 	// TODO: why don't allow to delete subtree ? references ? does
 	// tc subsys quarantee us that in htb_destroy it holds no class
@@ -1333,8 +1338,13 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 	sch_tree_lock(sch);
 
 	/* delete from hash and active; remainder in destroy_class */
-	if (!hlist_unhashed(&cl->hlist))
-		hlist_del(&cl->hlist);
+	hlist_del_init(&cl->hlist);
+
+	if (!cl->level) {
+		qlen = cl->un.leaf.q->q.qlen;
+		qdisc_reset(cl->un.leaf.q);
+		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+	}
 
 	if (cl->prio_activity)
 		htb_deactivate(q, cl);
@@ -1412,11 +1422,14 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
 		   so that can't be used inside of sch_tree_lock
 		   -- thanks to Karlis Peisenieks */
-		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
+			unsigned int qlen = parent->un.leaf.q->q.qlen;
+
 			/* turn parent into inner node */
-			sch->q.qlen -= parent->un.leaf.q->q.qlen;
+			qdisc_reset(parent->un.leaf.q);
+			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
 			qdisc_destroy(parent->un.leaf.q);
 			if (parent->prio_activity)
 				htb_deactivate(q, parent);
@@ -1564,6 +1577,7 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 static struct Qdisc_class_ops htb_class_ops = {
 	.graft = htb_graft,
 	.leaf = htb_leaf,
+	.qlen_notify = htb_qlen_notify,
 	.get = htb_get,
 	.put = htb_put,
 	.change = htb_change_class,
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 0441876aa1e..79542af9dab 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -287,13 +287,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 		psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);
 
 		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
+			qdisc_tree_decrease_qlen(q->qdisc, 1);
 			sch->qstats.drops++;
-
-			/* After this qlen is confused */
 			printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
 			       q->qdisc->ops->id);
-
-			sch->q.qlen--;
 		}
 
 		mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
@@ -574,7 +571,8 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 	q->timer.function = netem_watchdog;
 	q->timer.data = (unsigned long) sch;
 
-	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops);
+	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
+				     TC_H_MAKE(sch->handle, 1));
 	if (!q->qdisc) {
 		pr_debug("netem: qdisc create failed\n");
 		return -ENOMEM;
@@ -661,8 +659,8 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
 	sch_tree_lock(sch);
 	*old = xchg(&q->qdisc, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
-	sch->q.qlen = 0;
 	sch_tree_unlock(sch);
 
 	return 0;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index a5fa03c0c19..2567b4c96c1 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -222,21 +222,27 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
 
 	for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
 		struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
-		if (child != &noop_qdisc)
+		if (child != &noop_qdisc) {
+			qdisc_tree_decrease_qlen(child, child->q.qlen);
 			qdisc_destroy(child);
+		}
 	}
 	sch_tree_unlock(sch);
 
 	for (i=0; i<q->bands; i++) {
 		if (q->queues[i] == &noop_qdisc) {
 			struct Qdisc *child;
-			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+						  TC_H_MAKE(sch->handle, i + 1));
 			if (child) {
 				sch_tree_lock(sch);
 				child = xchg(&q->queues[i], child);
 
-				if (child != &noop_qdisc)
+				if (child != &noop_qdisc) {
+					qdisc_tree_decrease_qlen(child,
+								 child->q.qlen);
 					qdisc_destroy(child);
+				}
 				sch_tree_unlock(sch);
 			}
 		}
@@ -294,7 +300,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 	sch_tree_lock(sch);
 	*old = q->queues[band];
 	q->queues[band] = new;
-	sch->q.qlen -= (*old)->q.qlen;
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
 	sch_tree_unlock(sch);
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index d65cadddea6..acddad08850 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -175,12 +175,14 @@ static void red_destroy(struct Qdisc *sch)
 	qdisc_destroy(q->qdisc);
 }
 
-static struct Qdisc *red_create_dflt(struct net_device *dev, u32 limit)
+static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit)
 {
-	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
+	struct Qdisc *q;
 	struct rtattr *rta;
 	int ret;
 
+	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
+			      TC_H_MAKE(sch->handle, 1));
 	if (q) {
 		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)),
 			      GFP_KERNEL);
@@ -219,7 +221,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);
 
 	if (ctl->limit > 0) {
-		child = red_create_dflt(sch->dev, ctl->limit);
+		child = red_create_dflt(sch, ctl->limit);
 		if (child == NULL)
 			return -ENOMEM;
 	}
@@ -227,8 +229,10 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 	sch_tree_lock(sch);
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
-	if (child)
+	if (child) {
+		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
 		qdisc_destroy(xchg(&q->qdisc, child));
+	}
 
 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
 				 ctl->Plog, ctl->Scell_log,
@@ -306,8 +310,8 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
 	sch_tree_lock(sch);
 	*old = xchg(&q->qdisc, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
-	sch->q.qlen = 0;
 	sch_tree_unlock(sch);
 	return 0;
 }
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d0d6e595a78..459cda258a5 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -393,6 +393,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	struct tc_sfq_qopt *ctl = RTA_DATA(opt);
+	unsigned int qlen;
 
 	if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
 		return -EINVAL;
@@ -403,8 +404,10 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
 	if (ctl->limit)
 		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH);
 
+	qlen = sch->q.qlen;
 	while (sch->q.qlen >= q->limit-1)
 		sfq_drop(sch);
+	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
 
 	del_timer(&q->perturb_timer);
 	if (q->perturb_period) {
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index d9a5d298d75..ed9b6d93854 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -250,7 +250,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 
 		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
 			/* When requeue fails skb is dropped */
-			sch->q.qlen--;
+			qdisc_tree_decrease_qlen(q->qdisc, 1);
 			sch->qstats.drops++;
 		}
 
@@ -273,12 +273,14 @@ static void tbf_reset(struct Qdisc* sch)
 	del_timer(&q->wd_timer);
 }
 
-static struct Qdisc *tbf_create_dflt_qdisc(struct net_device *dev, u32 limit)
+static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
 {
-	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
+	struct Qdisc *q;
 	struct rtattr *rta;
 	int ret;
 
+	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
+			      TC_H_MAKE(sch->handle, 1));
 	if (q) {
 		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
 		if (rta) {
@@ -341,13 +343,15 @@ static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
 		goto done;
 
 	if (qopt->limit > 0) {
-		if ((child = tbf_create_dflt_qdisc(sch->dev, qopt->limit)) == NULL)
+		if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL)
 			goto done;
 	}
 
 	sch_tree_lock(sch);
-	if (child)
+	if (child) {
+		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
 		qdisc_destroy(xchg(&q->qdisc, child));
+	}
 	q->limit = qopt->limit;
 	q->mtu = qopt->mtu;
 	q->max_size = max_size;
@@ -449,8 +453,8 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
 	sch_tree_lock(sch);
 	*old = xchg(&q->qdisc, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
-	sch->q.qlen = 0;
 	sch_tree_unlock(sch);
 	return 0;
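The cls_fw.c, cls_u32.c and em_meta.c hunks in this merge also drop the CONFIG_NETFILTER-only skb->nfmark in favour of the always-present skb->mark field. A sketch of the resulting classifier-side check (the helper name is illustrative, not part of this merge):

#include <linux/skbuff.h>

/* Illustrative: derive a filter key from the packet mark, as the updated
 * fw classifier does, with no CONFIG_NETFILTER conditional required. */
static u32 example_mark_key(const struct sk_buff *skb, u32 mask)
{
	return skb->mark & mask;
}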