author     Jeff Garzik <jgarzik@pobox.com>   2005-08-29 16:12:36 -0400
committer  Jeff Garzik <jgarzik@pobox.com>   2005-08-29 16:12:36 -0400
commit     2fca877b68b2b4fc5b94277858a1bedd46017cde (patch)
tree       fd02725406299ba2f26354463b3c261721e9eb6b /net/sched
parent     ff40c6d3d1437ecdf295b8e39adcb06c3d6021ef (diff)
parent     02b3e4e2d71b6058ec11cc01c72ac651eb3ded2b (diff)

/spare/repo/libata-dev branch 'v2.6.13'
Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/Kconfig           50
-rw-r--r--   net/sched/Makefile           3
-rw-r--r--   net/sched/act_api.c         10
-rw-r--r--   net/sched/cls_api.c          2
-rw-r--r--   net/sched/cls_rsvp.h         1
-rw-r--r--   net/sched/em_meta.c         68
-rw-r--r--   net/sched/em_text.c        154
-rw-r--r--   net/sched/sch_api.c         65
-rw-r--r--   net/sched/sch_blackhole.c   54
-rw-r--r--   net/sched/sch_cbq.c          3
-rw-r--r--   net/sched/sch_generic.c     43
-rw-r--r--   net/sched/sch_red.c          2
12 files changed, 343 insertions, 112 deletions
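
Several hunks below zero explicit padding fields (t->tca__pad1/tca__pad2, tcm->tcm__pad1/tcm__pad2, pinfo.pad, opt.pad, opt.__res1/__res2) right before the structures are copied into netlink attributes, presumably so that no uninitialized kernel stack bytes reach userspace. A minimal sketch of that pattern, using a hypothetical option structure and dump helper (demo_opt and demo_fill are illustrative names, not part of this patch):

/*
 * Hypothetical illustration of the padding-initialization pattern used in
 * the act_api.c/cls_api.c/cls_rsvp.h/sch_api.c/sch_cbq.c hunks below:
 * every byte of a structure copied out via RTA_PUT(), padding included,
 * is given a defined value before the copy.
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

struct demo_opt {
	__u32	value;
	__u16	flags;
	__u16	pad;		/* explicit padding, copied to userspace */
};

static int demo_fill(struct sk_buff *skb, __u32 value, __u16 flags)
{
	struct demo_opt opt;

	opt.value = value;
	opt.flags = flags;
	opt.pad = 0;		/* same idea as t->tca__pad1 = 0 below */

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

A memset() of the whole structure would achieve the same thing; the patch opts for explicit per-field assignments in the existing dump paths.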
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index b22c9beb604..59d3e71f8b8 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -1,6 +1,43 @@
 #
 # Traffic control configuration.
 #
+
+menuconfig NET_SCHED
+	bool "QoS and/or fair queueing"
+	---help---
+	  When the kernel has several packets to send out over a network
+	  device, it has to decide which ones to send first, which ones to
+	  delay, and which ones to drop. This is the job of the packet
+	  scheduler, and several different algorithms for how to do this
+	  "fairly" have been proposed.
+
+	  If you say N here, you will get the standard packet scheduler, which
+	  is a FIFO (first come, first served). If you say Y here, you will be
+	  able to choose from among several alternative algorithms which can
+	  then be attached to different network devices. This is useful for
+	  example if some of your network devices are real time devices that
+	  need a certain minimum data flow rate, or if you need to limit the
+	  maximum data flow rate for traffic which matches specified criteria.
+	  This code is considered to be experimental.
+
+	  To administer these schedulers, you'll need the user-level utilities
+	  from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
+	  That package also contains some documentation; for more, check out
+	  <http://snafu.freedom.org/linux2.2/iproute-notes.html>.
+
+	  This Quality of Service (QoS) support will enable you to use
+	  Differentiated Services (diffserv) and Resource Reservation Protocol
+	  (RSVP) on your Linux router if you also say Y to "QoS support",
+	  "Packet classifier API" and to some classifiers below. Documentation
+	  and software is at <http://diffserv.sourceforge.net/>.
+
+	  If you say Y here and to "/proc file system" below, you will be able
+	  to read status information about packet schedulers from the file
+	  /proc/net/psched.
+
+	  The available schedulers are listed in the following questions; you
+	  can say Y to as many as you like. If unsure, say N now.
+
 choice
 	prompt "Packet scheduler clock source"
 	depends on NET_SCHED
@@ -449,6 +486,19 @@ config NET_EMATCH_META
 	  To compile this code as a module, choose M here: the
 	  module will be called em_meta.
 
+config NET_EMATCH_TEXT
+	tristate "Textsearch"
+	depends on NET_EMATCH
+	select TEXTSEARCH
+	select TEXTSEARCH_KMP
+	select TEXTSEARCH_FSM
+	---help---
+	  Say Y here if you want to be able to classify packets based on
+	  textsearch comparisons.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called em_text.
+
 config NET_CLS_ACT
 	bool "Packet ACTION"
 	depends on EXPERIMENTAL && NET_CLS && NET_QOS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index eb3fe583eba..e48d0d456b3 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -4,7 +4,7 @@
 
 obj-y	:= sch_generic.o
 
-obj-$(CONFIG_NET_SCHED)		+= sch_api.o sch_fifo.o
+obj-$(CONFIG_NET_SCHED)		+= sch_api.o sch_fifo.o sch_blackhole.o
 obj-$(CONFIG_NET_CLS)		+= cls_api.o
 obj-$(CONFIG_NET_CLS_ACT)	+= act_api.o
 obj-$(CONFIG_NET_ACT_POLICE)	+= police.o
@@ -40,3 +40,4 @@ obj-$(CONFIG_NET_EMATCH_CMP)	+= em_cmp.o
 obj-$(CONFIG_NET_EMATCH_NBYTE)	+= em_nbyte.o
 obj-$(CONFIG_NET_EMATCH_U32)	+= em_u32.o
 obj-$(CONFIG_NET_EMATCH_META)	+= em_meta.o
+obj-$(CONFIG_NET_EMATCH_TEXT)	+= em_text.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9594206e603..249c61936ea 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -439,6 +439,8 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
 
 	t = NLMSG_DATA(nlh);
 	t->tca_family = AF_UNSPEC;
+	t->tca__pad1 = 0;
+	t->tca__pad2 = 0;
 
 	x = (struct rtattr*) skb->tail;
 	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
@@ -580,6 +582,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
 	nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
 	t = NLMSG_DATA(nlh);
 	t->tca_family = AF_UNSPEC;
+	t->tca__pad1 = 0;
+	t->tca__pad2 = 0;
 
 	x = (struct rtattr *) skb->tail;
 	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
@@ -687,7 +691,9 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
 	t = NLMSG_DATA(nlh);
 	t->tca_family = AF_UNSPEC;
-	
+	t->tca__pad1 = 0;
+	t->tca__pad2 = 0;
+
 	x = (struct rtattr*) skb->tail;
 	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
 
@@ -842,6 +848,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
 			cb->nlh->nlmsg_type, sizeof(*t));
 	t = NLMSG_DATA(nlh);
 	t->tca_family = AF_UNSPEC;
+	t->tca__pad1 = 0;
+	t->tca__pad2 = 0;
 
 	x = (struct rtattr *) skb->tail;
 	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 1616bf5c962..3b5714ef4d1 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -331,6 +331,8 @@ tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
 	tcm = NLMSG_DATA(nlh);
 	tcm->tcm_family = AF_UNSPEC;
+	tcm->tcm__pad1 = 0;
+	tcm->tcm__pad2 = 0;
 	tcm->tcm_ifindex = tp->q->dev->ifindex;
 	tcm->tcm_parent = tp->classid;
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 232fb919681..006168d6937 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -618,6 +618,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
 		pinfo.protocol = s->protocol;
 		pinfo.tunnelid = s->tunnelid;
 		pinfo.tunnelhdr = f->tunnelhdr;
+		pinfo.pad = 0;
 		RTA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
 		if (f->res.classid)
 			RTA_PUT(skb, TCA_RSVP_CLASSID, 4, &f->res.classid);
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 48bb23c2a35..00eae5f9a01 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -27,17 +27,17 @@
  *	          lvalue                      rvalue
  *	       +-----------+                 +-----------+
  *	       | type: INT |                 | type: INT |
- *	  def  | id: INDEV |                 | id: VALUE |
+ *	  def  | id: DEV   |                 | id: VALUE |
  *	       | data:     |                 | data: 3   |
  *	       +-----------+                 +-----------+
  *	             |                             |
- *	             ---> meta_ops[INT][INDEV](...) |
+ *	             ---> meta_ops[INT][DEV](...)   |
  *	                       |                   |
  *	             -----------                   |
  *	                       V                   V
  *	       +-----------+                 +-----------+
  *	       | type: INT |                 | type: INT |
- *	  obj  | id: INDEV |                 | id: VALUE |
+ *	  obj  | id: DEV   |                 | id: VALUE |
  *	       | data: 2   |<--data got filled out | data: 3   |
  *	       +-----------+                 +-----------+
  *	             |                             |
@@ -170,26 +170,6 @@ META_COLLECTOR(var_dev)
 	*err = var_dev(skb->dev, dst);
 }
 
-META_COLLECTOR(int_indev)
-{
-	*err = int_dev(skb->input_dev, dst);
-}
-
-META_COLLECTOR(var_indev)
-{
-	*err = var_dev(skb->input_dev, dst);
-}
-
-META_COLLECTOR(int_realdev)
-{
-	*err = int_dev(skb->real_dev, dst);
-}
-
-META_COLLECTOR(var_realdev)
-{
-	*err = var_dev(skb->real_dev, dst);
-}
-
 /**************************************************************************
  * skb attributes
  **************************************************************************/
@@ -205,11 +185,6 @@ META_COLLECTOR(int_protocol)
 	dst->value = skb->protocol;
 }
 
-META_COLLECTOR(int_security)
-{
-	dst->value = skb->security;
-}
-
 META_COLLECTOR(int_pkttype)
 {
 	dst->value = skb->pkt_type;
@@ -234,12 +209,14 @@ META_COLLECTOR(int_maclen)
  * Netfilter
  **************************************************************************/
 
-#ifdef CONFIG_NETFILTER
 META_COLLECTOR(int_nfmark)
 {
+#ifdef CONFIG_NETFILTER
 	dst->value = skb->nfmark;
-}
+#else
+	dst->value = 0;
 #endif
+}
 
 /**************************************************************************
  * Traffic Control
  **************************************************************************/
@@ -250,31 +227,21 @@ META_COLLECTOR(int_tcindex)
 	dst->value = skb->tc_index;
 }
 
-#ifdef CONFIG_NET_CLS_ACT
-META_COLLECTOR(int_tcverd)
-{
-	dst->value = skb->tc_verd;
-}
-
-META_COLLECTOR(int_tcclassid)
-{
-	dst->value = skb->tc_classid;
-}
-#endif
-
 /**************************************************************************
  * Routing
  **************************************************************************/
 
-#ifdef CONFIG_NET_CLS_ROUTE
 META_COLLECTOR(int_rtclassid)
 {
 	if (unlikely(skb->dst == NULL))
 		*err = -1;
 	else
+#ifdef CONFIG_NET_CLS_ROUTE
 		dst->value = skb->dst->tclassid;
-}
+#else
+		dst->value = 0;
 #endif
+}
 
 META_COLLECTOR(int_rtiif)
 {
@@ -510,8 +477,6 @@ struct meta_ops
 static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
 	[TCF_META_TYPE_VAR] = {
 		[META_ID(DEV)]			= META_FUNC(var_dev),
-		[META_ID(INDEV)]		= META_FUNC(var_indev),
-		[META_ID(REALDEV)]		= META_FUNC(var_realdev),
 		[META_ID(SK_BOUND_IF)]		= META_FUNC(var_sk_bound_if),
 	},
 	[TCF_META_TYPE_INT] = {
@@ -520,26 +485,15 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
 		[META_ID(LOADAVG_1)]		= META_FUNC(int_loadavg_1),
 		[META_ID(LOADAVG_2)]		= META_FUNC(int_loadavg_2),
 		[META_ID(DEV)]			= META_FUNC(int_dev),
-		[META_ID(INDEV)]		= META_FUNC(int_indev),
-		[META_ID(REALDEV)]		= META_FUNC(int_realdev),
 		[META_ID(PRIORITY)]		= META_FUNC(int_priority),
 		[META_ID(PROTOCOL)]		= META_FUNC(int_protocol),
-		[META_ID(SECURITY)]		= META_FUNC(int_security),
 		[META_ID(PKTTYPE)]		= META_FUNC(int_pkttype),
 		[META_ID(PKTLEN)]		= META_FUNC(int_pktlen),
 		[META_ID(DATALEN)]		= META_FUNC(int_datalen),
 		[META_ID(MACLEN)]		= META_FUNC(int_maclen),
-#ifdef CONFIG_NETFILTER
 		[META_ID(NFMARK)]		= META_FUNC(int_nfmark),
-#endif
 		[META_ID(TCINDEX)]		= META_FUNC(int_tcindex),
-#ifdef CONFIG_NET_CLS_ACT
-		[META_ID(TCVERDICT)]		= META_FUNC(int_tcverd),
-		[META_ID(TCCLASSID)]		= META_FUNC(int_tcclassid),
-#endif
-#ifdef CONFIG_NET_CLS_ROUTE
 		[META_ID(RTCLASSID)]		= META_FUNC(int_rtclassid),
-#endif
 		[META_ID(RTIIF)]		= META_FUNC(int_rtiif),
 		[META_ID(SK_FAMILY)]		= META_FUNC(int_sk_family),
 		[META_ID(SK_STATE)]		= META_FUNC(int_sk_state),
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
new file mode 100644
index 00000000000..77beabc91fa
--- /dev/null
+++ b/net/sched/em_text.c
@@ -0,0 +1,154 @@
+/*
+ * net/sched/em_text.c	Textsearch ematch
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ * Authors:	Thomas Graf <tgraf@suug.ch>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/textsearch.h>
+#include <linux/tc_ematch/tc_em_text.h>
+#include <net/pkt_cls.h>
+
+struct text_match
+{
+	u16			from_offset;
+	u16			to_offset;
+	u8			from_layer;
+	u8			to_layer;
+	struct ts_config	*config;
+};
+
+#define EM_TEXT_PRIV(m) ((struct text_match *) (m)->data)
+
+static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
+			 struct tcf_pkt_info *info)
+{
+	struct text_match *tm = EM_TEXT_PRIV(m);
+	int from, to;
+	struct ts_state state;
+
+	from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
+	from += tm->from_offset;
+
+	to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
+	to += tm->to_offset;
+
+	return skb_find_text(skb, from, to, tm->config, &state) != UINT_MAX;
+}
+
+static int em_text_change(struct tcf_proto *tp, void *data, int len,
+			  struct tcf_ematch *m)
+{
+	struct text_match *tm;
+	struct tcf_em_text *conf = data;
+	struct ts_config *ts_conf;
+	int flags = 0;
+
+	if (len < sizeof(*conf) || len < (sizeof(*conf) + conf->pattern_len))
+		return -EINVAL;
+
+	if (conf->from_layer > conf->to_layer)
+		return -EINVAL;
+
+	if (conf->from_layer == conf->to_layer &&
+	    conf->from_offset > conf->to_offset)
+		return -EINVAL;
+
+retry:
+	ts_conf = textsearch_prepare(conf->algo, (u8 *) conf + sizeof(*conf),
+				     conf->pattern_len, GFP_KERNEL, flags);
+
+	if (flags & TS_AUTOLOAD)
+		rtnl_lock();
+
+	if (IS_ERR(ts_conf)) {
+		if (PTR_ERR(ts_conf) == -ENOENT && !(flags & TS_AUTOLOAD)) {
+			rtnl_unlock();
+			flags |= TS_AUTOLOAD;
+			goto retry;
+		} else
+			return PTR_ERR(ts_conf);
+	} else if (flags & TS_AUTOLOAD) {
+		textsearch_destroy(ts_conf);
+		return -EAGAIN;
+	}
+
+	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
+	if (tm == NULL) {
+		textsearch_destroy(ts_conf);
+		return -ENOBUFS;
+	}
+
+	tm->from_offset = conf->from_offset;
+	tm->to_offset = conf->to_offset;
+	tm->from_layer = conf->from_layer;
+	tm->to_layer = conf->to_layer;
+	tm->config = ts_conf;
+
+	m->datalen = sizeof(*tm);
+	m->data = (unsigned long) tm;
+
+	return 0;
+}
+
+static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
+{
+	textsearch_destroy(EM_TEXT_PRIV(m)->config);
+}
+
+static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
+{
+	struct text_match *tm = EM_TEXT_PRIV(m);
+	struct tcf_em_text conf;
+
+	strncpy(conf.algo, tm->config->ops->name, sizeof(conf.algo) - 1);
+	conf.from_offset = tm->from_offset;
+	conf.to_offset = tm->to_offset;
+	conf.from_layer = tm->from_layer;
+	conf.to_layer = tm->to_layer;
+	conf.pattern_len = textsearch_get_pattern_len(tm->config);
+	conf.pad = 0;
+
+	RTA_PUT_NOHDR(skb, sizeof(conf), &conf);
+	RTA_APPEND(skb, conf.pattern_len, textsearch_get_pattern(tm->config));
+	return 0;
+
+rtattr_failure:
+	return -1;
+}
+
+static struct tcf_ematch_ops em_text_ops = {
+	.kind	  = TCF_EM_TEXT,
+	.change	  = em_text_change,
+	.match	  = em_text_match,
+	.destroy  = em_text_destroy,
+	.dump	  = em_text_dump,
+	.owner	  = THIS_MODULE,
+	.link	  = LIST_HEAD_INIT(em_text_ops.link)
+};
+
+static int __init init_em_text(void)
+{
+	return tcf_em_register(&em_text_ops);
+}
+
+static void __exit exit_em_text(void)
+{
+	tcf_em_unregister(&em_text_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_em_text);
+module_exit(exit_em_text);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 97c1c75d5c7..b9a069af4a0 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -399,10 +399,8 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 {
 	int err;
 	struct rtattr *kind = tca[TCA_KIND-1];
-	void *p = NULL;
 	struct Qdisc *sch;
 	struct Qdisc_ops *ops;
-	int size;
 
 	ops = qdisc_lookup_ops(kind);
 #ifdef CONFIG_KMOD
@@ -437,64 +435,55 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 	if (ops == NULL)
 		goto err_out;
 
-	/* ensure that the Qdisc and the private data are 32-byte aligned */
-	size = ((sizeof(*sch) + QDISC_ALIGN_CONST) & ~QDISC_ALIGN_CONST);
-	size += ops->priv_size + QDISC_ALIGN_CONST;
-
-	p = kmalloc(size, GFP_KERNEL);
-	err = -ENOBUFS;
-	if (!p)
+	sch = qdisc_alloc(dev, ops);
+	if (IS_ERR(sch)) {
+		err = PTR_ERR(sch);
 		goto err_out2;
-	memset(p, 0, size);
-	sch = (struct Qdisc *)(((unsigned long)p + QDISC_ALIGN_CONST)
-	                       & ~QDISC_ALIGN_CONST);
-	sch->padded = (char *)sch - (char *)p;
-
-	INIT_LIST_HEAD(&sch->list);
-	skb_queue_head_init(&sch->q);
+	}
 
-	if (handle == TC_H_INGRESS)
+	if (handle == TC_H_INGRESS) {
 		sch->flags |= TCQ_F_INGRESS;
-
-	sch->ops = ops;
-	sch->enqueue = ops->enqueue;
-	sch->dequeue = ops->dequeue;
-	sch->dev = dev;
-	dev_hold(dev);
-	atomic_set(&sch->refcnt, 1);
-	sch->stats_lock = &dev->queue_lock;
-	if (handle == 0) {
+		handle = TC_H_MAKE(TC_H_INGRESS, 0);
+	} else if (handle == 0) {
 		handle = qdisc_alloc_handle(dev);
 		err = -ENOMEM;
 		if (handle == 0)
 			goto err_out3;
 	}
 
-	if (handle == TC_H_INGRESS)
-		sch->handle =TC_H_MAKE(TC_H_INGRESS, 0);
-	else
-		sch->handle = handle;
+	sch->handle = handle;
 
 	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
+#ifdef CONFIG_NET_ESTIMATOR
+		if (tca[TCA_RATE-1]) {
+			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
+						sch->stats_lock,
+						tca[TCA_RATE-1]);
+			if (err) {
+				/*
+				 * Any broken qdiscs that would require
+				 * a ops->reset() here? The qdisc was never
+				 * in action so it shouldn't be necessary.
+				 */
+				if (ops->destroy)
+					ops->destroy(sch);
+				goto err_out3;
+			}
+		}
+#endif
 		qdisc_lock_tree(dev);
 		list_add_tail(&sch->list, &dev->qdisc_list);
 		qdisc_unlock_tree(dev);
 
-#ifdef CONFIG_NET_ESTIMATOR
-		if (tca[TCA_RATE-1])
-			gen_new_estimator(&sch->bstats, &sch->rate_est,
-				sch->stats_lock, tca[TCA_RATE-1]);
-#endif
 		return sch;
 	}
 err_out3:
 	dev_put(dev);
+	kfree((char *) sch - sch->padded);
 err_out2:
 	module_put(ops->owner);
 err_out:
 	*errp = err;
-	if (p)
-		kfree(p);
 	return NULL;
 }
 
@@ -770,6 +759,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
 	tcm = NLMSG_DATA(nlh);
 	tcm->tcm_family = AF_UNSPEC;
+	tcm->tcm__pad1 = 0;
+	tcm->tcm__pad2 = 0;
 	tcm->tcm_ifindex = q->dev->ifindex;
 	tcm->tcm_parent = clid;
 	tcm->tcm_handle = q->handle;
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
new file mode 100644
index 00000000000..81f0b8346d1
--- /dev/null
+++ b/net/sched/sch_blackhole.c
@@ -0,0 +1,54 @@
+/*
+ * net/sched/sch_blackhole.c	Black hole queue
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ * Authors:	Thomas Graf <tgraf@suug.ch>
+ *
+ * Note: Quantum tunneling is not supported.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+
+static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	qdisc_drop(skb, sch);
+	return NET_XMIT_SUCCESS;
+}
+
+static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
+{
+	return NULL;
+}
+
+static struct Qdisc_ops blackhole_qdisc_ops = {
+	.id		= "blackhole",
+	.priv_size	= 0,
+	.enqueue	= blackhole_enqueue,
+	.dequeue	= blackhole_dequeue,
+	.owner		= THIS_MODULE,
+};
+
+static int __init blackhole_module_init(void)
+{
+	return register_qdisc(&blackhole_qdisc_ops);
+}
+
+static void __exit blackhole_module_exit(void)
+{
+	unregister_qdisc(&blackhole_qdisc_ops);
+}
+
+module_init(blackhole_module_init)
+module_exit(blackhole_module_exit)
+
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d43e3b8cbf6..09453f997d8 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1528,6 +1528,7 @@ static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 
 	opt.strategy = cl->ovl_strategy;
 	opt.priority2 = cl->priority2+1;
+	opt.pad = 0;
 	opt.penalty = (cl->penalty*1000)/HZ;
 	RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
 	return skb->len;
@@ -1563,6 +1564,8 @@ static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 
 	if (cl->police) {
 		opt.police = cl->police;
+		opt.__res1 = 0;
+		opt.__res2 = 0;
 		RTA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
 	}
 	return skb->len;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7683b34dc6a..0d066c96534 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -331,11 +331,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 	int prio;
 	struct sk_buff_head *list = qdisc_priv(qdisc);
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++, list++) {
-		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
-		if (skb) {
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		if (!skb_queue_empty(list + prio)) {
 			qdisc->q.qlen--;
-			return skb;
+			return __qdisc_dequeue_head(qdisc, list + prio);
 		}
 	}
 
@@ -395,24 +394,23 @@ static struct Qdisc_ops pfifo_fast_ops = {
 	.owner		=	THIS_MODULE,
 };
 
-struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
+struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
 {
 	void *p;
 	struct Qdisc *sch;
-	int size;
+	unsigned int size;
+	int err = -ENOBUFS;
 
 	/* ensure that the Qdisc and the private data are 32-byte aligned */
-	size = ((sizeof(*sch) + QDISC_ALIGN_CONST) & ~QDISC_ALIGN_CONST);
-	size += ops->priv_size + QDISC_ALIGN_CONST;
+	size = QDISC_ALIGN(sizeof(*sch));
+	size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
 	p = kmalloc(size, GFP_KERNEL);
 	if (!p)
-		return NULL;
+		goto errout;
 	memset(p, 0, size);
-
-	sch = (struct Qdisc *)(((unsigned long)p + QDISC_ALIGN_CONST)
-	                       & ~QDISC_ALIGN_CONST);
-	sch->padded = (char *)sch - (char *)p;
+	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
+	sch->padded = (char *) sch - (char *) p;
 
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);
@@ -423,11 +421,25 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
 	dev_hold(dev);
 	sch->stats_lock = &dev->queue_lock;
 	atomic_set(&sch->refcnt, 1);
+
+	return sch;
+errout:
+	return ERR_PTR(-err);
+}
+
+struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
+{
+	struct Qdisc *sch;
+
+	sch = qdisc_alloc(dev, ops);
+	if (IS_ERR(sch))
+		goto errout;
+
 	if (!ops->init || ops->init(sch, NULL) == 0)
 		return sch;
 
-	dev_put(dev);
-	kfree(p);
+	qdisc_destroy(sch);
+errout:
 	return NULL;
 }
 
@@ -591,6 +603,7 @@ EXPORT_SYMBOL(__netdev_watchdog_up);
 EXPORT_SYMBOL(noop_qdisc);
 EXPORT_SYMBOL(noop_qdisc_ops);
 EXPORT_SYMBOL(qdisc_create_dflt);
+EXPORT_SYMBOL(qdisc_alloc);
 EXPORT_SYMBOL(qdisc_destroy);
 EXPORT_SYMBOL(qdisc_reset);
 EXPORT_SYMBOL(qdisc_restart);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 664d0e47374..7845d045eec 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -385,7 +385,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 	memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256);
 
 	q->qcount = -1;
-	if (skb_queue_len(&sch->q) == 0)
+	if (skb_queue_empty(&sch->q))
 		PSCHED_SET_PASTPERFECT(q->qidlestart);
 	sch_tree_unlock(sch);
 	return 0;
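
For context on the new em_text classifier added above: its match callback maps the configured layers to packet offsets and hands the skb to the textsearch core. The sketch below restates that pattern in isolation; the demo_* names are hypothetical, while textsearch_prepare(), skb_find_text(), textsearch_destroy() and TS_AUTOLOAD are the interfaces the patch itself uses (skb_find_text() takes a ts_state scratch argument in this kernel generation).

/*
 * Hypothetical, reduced restatement of the textsearch usage in em_text.c:
 * compile the pattern once at configuration time, scan skbs against it at
 * match time, release it on teardown.
 */
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/textsearch.h>

struct demo_text_match {
	struct ts_config *ts;		/* compiled search pattern */
};

/* Configuration time: may sleep, so GFP_KERNEL; TS_AUTOLOAD lets the
 * textsearch core load the requested algorithm module on demand. */
static int demo_text_init(struct demo_text_match *dm, const char *algo,
			  const void *pattern, unsigned int len)
{
	dm->ts = textsearch_prepare(algo, pattern, len, GFP_KERNEL,
				    TS_AUTOLOAD);
	return IS_ERR(dm->ts) ? PTR_ERR(dm->ts) : 0;
}

/* Match time: scan skb data in [from, to); skb_find_text() returns
 * UINT_MAX when the pattern is not found. */
static int demo_text_match_skb(struct demo_text_match *dm,
			       struct sk_buff *skb,
			       unsigned int from, unsigned int to)
{
	struct ts_state state;

	return skb_find_text(skb, from, to, dm->ts, &state) != UINT_MAX;
}

/* Teardown: release the compiled configuration. */
static void demo_text_fini(struct demo_text_match *dm)
{
	textsearch_destroy(dm->ts);
}

em_text_change() above is more careful than this sketch: it first calls textsearch_prepare() without TS_AUTOLOAD, and only retries with autoloading after dropping the RTNL lock, returning -EAGAIN so the configuration request can be resubmitted once the algorithm module is loaded.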